Git fork

Merge branch 'sk/reftable-clarify-tests'

The reftable unit tests are now ported to the "clar" unit testing
framework.

* sk/reftable-clarify-tests:
t/unit-tests: finalize migration of reftable-related tests
t/unit-tests: convert reftable stack test to use clar
t/unit-tests: convert reftable record test to use clar
t/unit-tests: convert reftable readwrite test to use clar
t/unit-tests: convert reftable table test to use clar
t/unit-tests: convert reftable pq test to use clar
t/unit-tests: convert reftable merged test to use clar
t/unit-tests: convert reftable block test to use clar
t/unit-tests: convert reftable basics test to use clar test framework
t/unit-tests: implement clar specific reftable test helper functions

+2118 -2332
+9 -11
Makefile
··· 1362 CLAR_TEST_SUITES += u-oidmap 1363 CLAR_TEST_SUITES += u-oidtree 1364 CLAR_TEST_SUITES += u-prio-queue 1365 CLAR_TEST_SUITES += u-reftable-tree 1366 CLAR_TEST_SUITES += u-strbuf 1367 CLAR_TEST_SUITES += u-strcmp-offset ··· 1372 CLAR_TEST_PROG = $(UNIT_TEST_BIN)/unit-tests$(X) 1373 CLAR_TEST_OBJS = $(patsubst %,$(UNIT_TEST_DIR)/%.o,$(CLAR_TEST_SUITES)) 1374 CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/clar/clar.o 1375 CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/unit-test.o 1376 - CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/lib-oid.o 1377 1378 - UNIT_TEST_PROGRAMS += t-reftable-basics 1379 - UNIT_TEST_PROGRAMS += t-reftable-block 1380 - UNIT_TEST_PROGRAMS += t-reftable-merged 1381 - UNIT_TEST_PROGRAMS += t-reftable-pq 1382 - UNIT_TEST_PROGRAMS += t-reftable-readwrite 1383 - UNIT_TEST_PROGRAMS += t-reftable-record 1384 - UNIT_TEST_PROGRAMS += t-reftable-stack 1385 - UNIT_TEST_PROGRAMS += t-reftable-table 1386 - UNIT_TEST_PROGS = $(patsubst %,$(UNIT_TEST_BIN)/%$X,$(UNIT_TEST_PROGRAMS)) 1387 UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/test-lib.o 1388 - UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/lib-reftable.o 1389 1390 # xdiff and reftable libs may in turn depend on what is in libgit.a 1391 GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(LIB_FILE)
··· 1362 CLAR_TEST_SUITES += u-oidmap 1363 CLAR_TEST_SUITES += u-oidtree 1364 CLAR_TEST_SUITES += u-prio-queue 1365 + CLAR_TEST_SUITES += u-reftable-basics 1366 + CLAR_TEST_SUITES += u-reftable-block 1367 + CLAR_TEST_SUITES += u-reftable-merged 1368 + CLAR_TEST_SUITES += u-reftable-pq 1369 + CLAR_TEST_SUITES += u-reftable-readwrite 1370 + CLAR_TEST_SUITES += u-reftable-stack 1371 + CLAR_TEST_SUITES += u-reftable-table 1372 CLAR_TEST_SUITES += u-reftable-tree 1373 CLAR_TEST_SUITES += u-strbuf 1374 CLAR_TEST_SUITES += u-strcmp-offset ··· 1379 CLAR_TEST_PROG = $(UNIT_TEST_BIN)/unit-tests$(X) 1380 CLAR_TEST_OBJS = $(patsubst %,$(UNIT_TEST_DIR)/%.o,$(CLAR_TEST_SUITES)) 1381 CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/clar/clar.o 1382 + CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/lib-oid.o 1383 + CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/lib-reftable.o 1384 CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/unit-test.o 1385 1386 UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/test-lib.o 1387 1388 # xdiff and reftable libs may in turn depend on what is in libgit.a 1389 GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(LIB_FILE)
-1
t/Makefile
··· 125 @mkdir -p mesontmp && \ 126 printf "%s\n" \ 127 "integration_tests t[0-9][0-9][0-9][0-9]-*.sh" \ 128 - "unit_test_programs unit-tests/t-*.c" \ 129 "clar_test_suites unit-tests/u-*.c" | \ 130 while read -r variable pattern; do \ 131 awk "/^$$variable = \[\$$/ {flag=1 ; next } /^]$$/ { flag=0 } flag { gsub(/^ \047/, \"\"); gsub(/\047,\$$/, \"\"); print }" meson.build >mesontmp/meson.txt && \
··· 125 @mkdir -p mesontmp && \ 126 printf "%s\n" \ 127 "integration_tests t[0-9][0-9][0-9][0-9]-*.sh" \ 128 "clar_test_suites unit-tests/u-*.c" | \ 129 while read -r variable pattern; do \ 130 awk "/^$$variable = \[\$$/ {flag=1 ; next } /^]$$/ { flag=0 } flag { gsub(/^ \047/, \"\"); gsub(/\047,\$$/, \"\"); print }" meson.build >mesontmp/meson.txt && \
+11 -31
t/meson.build
··· 8 'unit-tests/u-oidmap.c', 9 'unit-tests/u-oidtree.c', 10 'unit-tests/u-prio-queue.c', 11 'unit-tests/u-reftable-tree.c', 12 'unit-tests/u-strbuf.c', 13 'unit-tests/u-strcmp-offset.c', ··· 20 clar_sources = [ 21 'unit-tests/clar/clar.c', 22 'unit-tests/unit-test.c', 23 - 'unit-tests/lib-oid.c' 24 ] 25 26 clar_decls_h = custom_target( ··· 53 dependencies: [libgit_commonmain], 54 ) 55 test('unit-tests', clar_unit_tests, kwargs: test_kwargs) 56 - 57 - unit_test_programs = [ 58 - 'unit-tests/t-reftable-basics.c', 59 - 'unit-tests/t-reftable-block.c', 60 - 'unit-tests/t-reftable-merged.c', 61 - 'unit-tests/t-reftable-pq.c', 62 - 'unit-tests/t-reftable-readwrite.c', 63 - 'unit-tests/t-reftable-record.c', 64 - 'unit-tests/t-reftable-stack.c', 65 - 'unit-tests/t-reftable-table.c', 66 - ] 67 - 68 - foreach unit_test_program : unit_test_programs 69 - unit_test_name = fs.stem(unit_test_program) 70 - unit_test = executable(unit_test_name, 71 - sources: [ 72 - 'unit-tests/test-lib.c', 73 - 'unit-tests/lib-reftable.c', 74 - unit_test_program, 75 - ], 76 - dependencies: [libgit_commonmain], 77 - ) 78 - test(unit_test_name, unit_test, 79 - workdir: meson.current_source_dir(), 80 - kwargs: test_kwargs, 81 - ) 82 - endforeach 83 84 subdir('helper') 85 ··· 1163 # sufficient to catch missing test suites in our CI though. 1164 foreach glob, tests : { 1165 't[0-9][0-9][0-9][0-9]-*.sh': integration_tests, 1166 - 'perf/p[0-9][0-9][0-9][0-9]-*.sh': benchmarks, 1167 - 'unit-tests/t-*.c': unit_test_programs, 1168 'unit-tests/u-*.c': clar_test_suites, 1169 } 1170 actual_tests = run_command(shell, '-c', 'ls ' + glob, ··· 1231 timeout: 0, 1232 ) 1233 endforeach 1234 - endif
··· 8 'unit-tests/u-oidmap.c', 9 'unit-tests/u-oidtree.c', 10 'unit-tests/u-prio-queue.c', 11 + 'unit-tests/u-reftable-basics.c', 12 + 'unit-tests/u-reftable-block.c', 13 + 'unit-tests/u-reftable-merged.c', 14 + 'unit-tests/u-reftable-pq.c', 15 + 'unit-tests/u-reftable-readwrite.c', 16 + 'unit-tests/u-reftable-record.c', 17 + 'unit-tests/u-reftable-stack.c', 18 + 'unit-tests/u-reftable-table.c', 19 'unit-tests/u-reftable-tree.c', 20 'unit-tests/u-strbuf.c', 21 'unit-tests/u-strcmp-offset.c', ··· 28 clar_sources = [ 29 'unit-tests/clar/clar.c', 30 'unit-tests/unit-test.c', 31 + 'unit-tests/lib-oid.c', 32 + 'unit-tests/lib-reftable.c' 33 ] 34 35 clar_decls_h = custom_target( ··· 62 dependencies: [libgit_commonmain], 63 ) 64 test('unit-tests', clar_unit_tests, kwargs: test_kwargs) 65 66 subdir('helper') 67 ··· 1145 # sufficient to catch missing test suites in our CI though. 1146 foreach glob, tests : { 1147 't[0-9][0-9][0-9][0-9]-*.sh': integration_tests, 1148 'unit-tests/u-*.c': clar_test_suites, 1149 } 1150 actual_tests = run_command(shell, '-c', 'ls ' + glob, ··· 1211 timeout: 0, 1212 ) 1213 endforeach 1214 + endif
+19 -16
t/unit-tests/lib-reftable.c
··· 1 - #define DISABLE_SIGN_COMPARE_WARNINGS 2 - 3 #include "lib-reftable.h" 4 - #include "test-lib.h" 5 #include "reftable/constants.h" 6 #include "reftable/writer.h" 7 #include "strbuf.h" 8 9 - void t_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id) 10 { 11 memset(p, (uint8_t)i, hash_size(id)); 12 } ··· 22 return 0; 23 } 24 25 - struct reftable_writer *t_reftable_strbuf_writer(struct reftable_buf *buf, 26 struct reftable_write_options *opts) 27 { 28 struct reftable_writer *writer; 29 int ret = reftable_writer_new(&writer, &strbuf_writer_write, &strbuf_writer_flush, 30 buf, opts); 31 - check(!ret); 32 return writer; 33 } 34 35 - void t_reftable_write_to_buf(struct reftable_buf *buf, 36 struct reftable_ref_record *refs, 37 size_t nrefs, 38 struct reftable_log_record *logs, ··· 64 min = ui; 65 } 66 67 - writer = t_reftable_strbuf_writer(buf, &opts); 68 - reftable_writer_set_limits(writer, min, max); 69 70 if (nrefs) { 71 ret = reftable_writer_add_refs(writer, refs, nrefs); 72 - check_int(ret, ==, 0); 73 } 74 75 if (nlogs) { 76 ret = reftable_writer_add_logs(writer, logs, nlogs); 77 - check_int(ret, ==, 0); 78 } 79 80 ret = reftable_writer_close(writer); 81 - check_int(ret, ==, 0); 82 83 stats = reftable_writer_stats(writer); 84 - for (size_t i = 0; i < stats->ref_stats.blocks; i++) { 85 size_t off = i * (opts.block_size ? opts.block_size 86 : DEFAULT_BLOCK_SIZE); 87 if (!off) 88 off = header_size(opts.hash_id == REFTABLE_HASH_SHA256 ? 2 : 1); 89 - check_char(buf->buf[off], ==, 'r'); 90 } 91 92 if (nrefs) 93 - check_int(stats->ref_stats.blocks, >, 0); 94 if (nlogs) 95 - check_int(stats->log_stats.blocks, >, 0); 96 97 reftable_writer_free(writer); 98 }
··· 1 + #include "unit-test.h" 2 #include "lib-reftable.h" 3 + #include "hex.h" 4 + #include "parse-options.h" 5 #include "reftable/constants.h" 6 #include "reftable/writer.h" 7 #include "strbuf.h" 8 + #include "string-list.h" 9 + #include "strvec.h" 10 11 + void cl_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id) 12 { 13 memset(p, (uint8_t)i, hash_size(id)); 14 } ··· 24 return 0; 25 } 26 27 + struct reftable_writer *cl_reftable_strbuf_writer(struct reftable_buf *buf, 28 struct reftable_write_options *opts) 29 { 30 struct reftable_writer *writer; 31 int ret = reftable_writer_new(&writer, &strbuf_writer_write, &strbuf_writer_flush, 32 buf, opts); 33 + cl_assert(!ret); 34 return writer; 35 } 36 37 + void cl_reftable_write_to_buf(struct reftable_buf *buf, 38 struct reftable_ref_record *refs, 39 size_t nrefs, 40 struct reftable_log_record *logs, ··· 66 min = ui; 67 } 68 69 + writer = cl_reftable_strbuf_writer(buf, &opts); 70 + ret = reftable_writer_set_limits(writer, min, max); 71 + cl_assert(!ret); 72 73 if (nrefs) { 74 ret = reftable_writer_add_refs(writer, refs, nrefs); 75 + cl_assert_equal_i(ret, 0); 76 } 77 78 if (nlogs) { 79 ret = reftable_writer_add_logs(writer, logs, nlogs); 80 + cl_assert_equal_i(ret, 0); 81 } 82 83 ret = reftable_writer_close(writer); 84 + cl_assert_equal_i(ret, 0); 85 86 stats = reftable_writer_stats(writer); 87 + for (size_t i = 0; i < (size_t)stats->ref_stats.blocks; i++) { 88 size_t off = i * (opts.block_size ? opts.block_size 89 : DEFAULT_BLOCK_SIZE); 90 if (!off) 91 off = header_size(opts.hash_id == REFTABLE_HASH_SHA256 ? 2 : 1); 92 + cl_assert(buf->buf[off] == 'r'); 93 } 94 95 if (nrefs) 96 + cl_assert(stats->ref_stats.blocks > 0); 97 if (nlogs) 98 + cl_assert(stats->log_stats.blocks > 0); 99 100 reftable_writer_free(writer); 101 }
+7 -8
t/unit-tests/lib-reftable.h
··· 1 - #ifndef LIB_REFTABLE_H 2 - #define LIB_REFTABLE_H 3 - 4 #include "git-compat-util.h" 5 #include "reftable/reftable-writer.h" 6 7 struct reftable_buf; 8 9 - void t_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id); 10 11 - struct reftable_writer *t_reftable_strbuf_writer(struct reftable_buf *buf, 12 struct reftable_write_options *opts); 13 14 - void t_reftable_write_to_buf(struct reftable_buf *buf, 15 struct reftable_ref_record *refs, 16 size_t nrecords, 17 struct reftable_log_record *logs, 18 size_t nlogs, 19 struct reftable_write_options *opts); 20 - 21 - #endif
··· 1 + #include "git-compat-util.h" 2 + #include "clar/clar.h" 3 + #include "clar-decls.h" 4 #include "git-compat-util.h" 5 #include "reftable/reftable-writer.h" 6 + #include "strbuf.h" 7 8 struct reftable_buf; 9 10 + void cl_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id); 11 12 + struct reftable_writer *cl_reftable_strbuf_writer(struct reftable_buf *buf, 13 struct reftable_write_options *opts); 14 15 + void cl_reftable_write_to_buf(struct reftable_buf *buf, 16 struct reftable_ref_record *refs, 17 size_t nrecords, 18 struct reftable_log_record *logs, 19 size_t nlogs, 20 struct reftable_write_options *opts);
-219
t/unit-tests/t-reftable-basics.c
··· 1 - /* 2 - Copyright 2020 Google LLC 3 - 4 - Use of this source code is governed by a BSD-style 5 - license that can be found in the LICENSE file or at 6 - https://developers.google.com/open-source/licenses/bsd 7 - */ 8 - 9 - #include "test-lib.h" 10 - #include "reftable/basics.h" 11 - 12 - struct integer_needle_lesseq_args { 13 - int needle; 14 - int *haystack; 15 - }; 16 - 17 - static int integer_needle_lesseq(size_t i, void *_args) 18 - { 19 - struct integer_needle_lesseq_args *args = _args; 20 - return args->needle <= args->haystack[i]; 21 - } 22 - 23 - static void *realloc_stub(void *p UNUSED, size_t size UNUSED) 24 - { 25 - return NULL; 26 - } 27 - 28 - int cmd_main(int argc UNUSED, const char *argv[] UNUSED) 29 - { 30 - if_test ("binary search with binsearch works") { 31 - int haystack[] = { 2, 4, 6, 8, 10 }; 32 - struct { 33 - int needle; 34 - size_t expected_idx; 35 - } testcases[] = { 36 - {-9000, 0}, 37 - {-1, 0}, 38 - {0, 0}, 39 - {2, 0}, 40 - {3, 1}, 41 - {4, 1}, 42 - {7, 3}, 43 - {9, 4}, 44 - {10, 4}, 45 - {11, 5}, 46 - {9000, 5}, 47 - }; 48 - 49 - for (size_t i = 0; i < ARRAY_SIZE(testcases); i++) { 50 - struct integer_needle_lesseq_args args = { 51 - .haystack = haystack, 52 - .needle = testcases[i].needle, 53 - }; 54 - size_t idx; 55 - 56 - idx = binsearch(ARRAY_SIZE(haystack), 57 - &integer_needle_lesseq, &args); 58 - check_int(idx, ==, testcases[i].expected_idx); 59 - } 60 - } 61 - 62 - if_test ("names_length returns size of a NULL-terminated string array") { 63 - const char *a[] = { "a", "b", NULL }; 64 - check_int(names_length(a), ==, 2); 65 - } 66 - 67 - if_test ("names_equal compares NULL-terminated string arrays") { 68 - const char *a[] = { "a", "b", "c", NULL }; 69 - const char *b[] = { "a", "b", "d", NULL }; 70 - const char *c[] = { "a", "b", NULL }; 71 - 72 - check(names_equal(a, a)); 73 - check(!names_equal(a, b)); 74 - check(!names_equal(a, c)); 75 - } 76 - 77 - if_test ("parse_names works for basic input") { 78 - char in1[] = 
"line\n"; 79 - char in2[] = "a\nb\nc"; 80 - char **out = parse_names(in1, strlen(in1)); 81 - check(out != NULL); 82 - check_str(out[0], "line"); 83 - check(!out[1]); 84 - free_names(out); 85 - 86 - out = parse_names(in2, strlen(in2)); 87 - check(out != NULL); 88 - check_str(out[0], "a"); 89 - check_str(out[1], "b"); 90 - check_str(out[2], "c"); 91 - check(!out[3]); 92 - free_names(out); 93 - } 94 - 95 - if_test ("parse_names drops empty string") { 96 - char in[] = "a\n\nb\n"; 97 - char **out = parse_names(in, strlen(in)); 98 - check(out != NULL); 99 - check_str(out[0], "a"); 100 - /* simply '\n' should be dropped as empty string */ 101 - check_str(out[1], "b"); 102 - check(!out[2]); 103 - free_names(out); 104 - } 105 - 106 - if_test ("common_prefix_size works") { 107 - struct reftable_buf a = REFTABLE_BUF_INIT; 108 - struct reftable_buf b = REFTABLE_BUF_INIT; 109 - struct { 110 - const char *a, *b; 111 - int want; 112 - } cases[] = { 113 - {"abcdef", "abc", 3}, 114 - { "abc", "ab", 2 }, 115 - { "", "abc", 0 }, 116 - { "abc", "abd", 2 }, 117 - { "abc", "pqr", 0 }, 118 - }; 119 - 120 - for (size_t i = 0; i < ARRAY_SIZE(cases); i++) { 121 - check(!reftable_buf_addstr(&a, cases[i].a)); 122 - check(!reftable_buf_addstr(&b, cases[i].b)); 123 - check_uint(common_prefix_size(&a, &b), ==, cases[i].want); 124 - reftable_buf_reset(&a); 125 - reftable_buf_reset(&b); 126 - } 127 - reftable_buf_release(&a); 128 - reftable_buf_release(&b); 129 - } 130 - 131 - if_test ("reftable_put_be64 and reftable_get_be64 work") { 132 - uint64_t in = 0x1122334455667788; 133 - uint8_t dest[8]; 134 - uint64_t out; 135 - reftable_put_be64(dest, in); 136 - out = reftable_get_be64(dest); 137 - check_int(in, ==, out); 138 - } 139 - 140 - if_test ("reftable_put_be32 and reftable_get_be32 work") { 141 - uint32_t in = 0x11223344; 142 - uint8_t dest[4]; 143 - uint32_t out; 144 - reftable_put_be32(dest, in); 145 - out = reftable_get_be32(dest); 146 - check_int(in, ==, out); 147 - } 148 - 149 - if_test 
("reftable_put_be24 and reftable_get_be24 work") { 150 - uint32_t in = 0x112233; 151 - uint8_t dest[3]; 152 - uint32_t out; 153 - reftable_put_be24(dest, in); 154 - out = reftable_get_be24(dest); 155 - check_int(in, ==, out); 156 - } 157 - 158 - if_test ("put_be16 and get_be16 work") { 159 - uint32_t in = 0xfef1; 160 - uint8_t dest[3]; 161 - uint32_t out; 162 - reftable_put_be16(dest, in); 163 - out = reftable_get_be16(dest); 164 - check_int(in, ==, out); 165 - } 166 - 167 - if_test ("REFTABLE_ALLOC_GROW works") { 168 - int *arr = NULL, *old_arr; 169 - size_t alloc = 0, old_alloc; 170 - 171 - check(!REFTABLE_ALLOC_GROW(arr, 1, alloc)); 172 - check(arr != NULL); 173 - check_uint(alloc, >=, 1); 174 - arr[0] = 42; 175 - 176 - old_alloc = alloc; 177 - old_arr = arr; 178 - reftable_set_alloc(NULL, realloc_stub, NULL); 179 - check(REFTABLE_ALLOC_GROW(arr, old_alloc + 1, alloc)); 180 - check(arr == old_arr); 181 - check_uint(alloc, ==, old_alloc); 182 - 183 - old_alloc = alloc; 184 - reftable_set_alloc(NULL, NULL, NULL); 185 - check(!REFTABLE_ALLOC_GROW(arr, old_alloc + 1, alloc)); 186 - check(arr != NULL); 187 - check_uint(alloc, >, old_alloc); 188 - arr[alloc - 1] = 42; 189 - 190 - reftable_free(arr); 191 - } 192 - 193 - if_test ("REFTABLE_ALLOC_GROW_OR_NULL works") { 194 - int *arr = NULL; 195 - size_t alloc = 0, old_alloc; 196 - 197 - REFTABLE_ALLOC_GROW_OR_NULL(arr, 1, alloc); 198 - check(arr != NULL); 199 - check_uint(alloc, >=, 1); 200 - arr[0] = 42; 201 - 202 - old_alloc = alloc; 203 - REFTABLE_ALLOC_GROW_OR_NULL(arr, old_alloc + 1, alloc); 204 - check(arr != NULL); 205 - check_uint(alloc, >, old_alloc); 206 - arr[alloc - 1] = 42; 207 - 208 - old_alloc = alloc; 209 - reftable_set_alloc(NULL, realloc_stub, NULL); 210 - REFTABLE_ALLOC_GROW_OR_NULL(arr, old_alloc + 1, alloc); 211 - check(arr == NULL); 212 - check_uint(alloc, ==, 0); 213 - reftable_set_alloc(NULL, NULL, NULL); 214 - 215 - reftable_free(arr); 216 - } 217 - 218 - return test_done(); 219 - }
···
+78 -86
t/unit-tests/t-reftable-block.c t/unit-tests/u-reftable-block.c
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 - #include "test-lib.h" 10 #include "reftable/block.h" 11 #include "reftable/blocksource.h" 12 #include "reftable/constants.h" 13 #include "reftable/reftable-error.h" 14 #include "strbuf.h" 15 16 - static void t_ref_block_read_write(void) 17 { 18 const int header_off = 21; /* random */ 19 struct reftable_record recs[30]; ··· 34 struct reftable_buf block_data = REFTABLE_BUF_INIT; 35 36 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 37 - check(block_data.buf != NULL); 38 block_data.len = block_size; 39 40 - ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_REF, (uint8_t *) block_data.buf, block_size, 41 header_off, hash_size(REFTABLE_HASH_SHA1)); 42 - check(!ret); 43 44 rec.u.ref.refname = (char *) ""; 45 rec.u.ref.value_type = REFTABLE_REF_DELETION; 46 ret = block_writer_add(&bw, &rec); 47 - check_int(ret, ==, REFTABLE_API_ERROR); 48 49 for (i = 0; i < N; i++) { 50 rec.u.ref.refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); ··· 55 ret = block_writer_add(&bw, &rec); 56 rec.u.ref.refname = NULL; 57 rec.u.ref.value_type = REFTABLE_REF_DELETION; 58 - check_int(ret, ==, 0); 59 } 60 61 ret = block_writer_finish(&bw); 62 - check_int(ret, >, 0); 63 64 block_writer_release(&bw); 65 ··· 71 72 for (i = 0; ; i++) { 73 ret = block_iter_next(&it, &rec); 74 - check_int(ret, >=, 0); 75 if (ret > 0) { 76 - check_int(i, ==, N); 77 break; 78 } 79 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 80 } 81 82 for (i = 0; i < N; i++) { 83 reftable_record_key(&recs[i], &want); 84 85 ret = block_iter_seek_key(&it, &want); 86 - check_int(ret, ==, 0); 87 88 ret = block_iter_next(&it, &rec); 89 - check_int(ret, ==, 0); 90 91 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 92 93 want.len--; 94 ret = block_iter_seek_key(&it, &want); 95 - check_int(ret, ==, 0); 96 97 ret = block_iter_next(&it, &rec); 98 - check_int(ret, ==, 0); 99 - check(reftable_record_equal(&recs[10 * (i 
/ 10)], &rec, REFTABLE_HASH_SIZE_SHA1)); 100 } 101 102 reftable_block_release(&block); ··· 108 reftable_record_release(&recs[i]); 109 } 110 111 - static void t_log_block_read_write(void) 112 { 113 const int header_off = 21; 114 struct reftable_record recs[30]; ··· 129 struct reftable_buf block_data = REFTABLE_BUF_INIT; 130 131 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 132 - check(block_data.buf != NULL); 133 block_data.len = block_size; 134 135 ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_LOG, (uint8_t *) block_data.buf, block_size, 136 header_off, hash_size(REFTABLE_HASH_SHA1)); 137 - check(!ret); 138 139 for (i = 0; i < N; i++) { 140 rec.u.log.refname = xstrfmt("branch%02"PRIuMAX , (uintmax_t)i); ··· 145 ret = block_writer_add(&bw, &rec); 146 rec.u.log.refname = NULL; 147 rec.u.log.value_type = REFTABLE_LOG_DELETION; 148 - check_int(ret, ==, 0); 149 } 150 151 ret = block_writer_finish(&bw); 152 - check_int(ret, >, 0); 153 154 block_writer_release(&bw); 155 ··· 161 162 for (i = 0; ; i++) { 163 ret = block_iter_next(&it, &rec); 164 - check_int(ret, >=, 0); 165 if (ret > 0) { 166 - check_int(i, ==, N); 167 break; 168 } 169 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 170 } 171 172 for (i = 0; i < N; i++) { 173 reftable_buf_reset(&want); 174 - check(!reftable_buf_addstr(&want, recs[i].u.log.refname)); 175 176 ret = block_iter_seek_key(&it, &want); 177 - check_int(ret, ==, 0); 178 179 ret = block_iter_next(&it, &rec); 180 - check_int(ret, ==, 0); 181 182 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 183 184 want.len--; 185 ret = block_iter_seek_key(&it, &want); 186 - check_int(ret, ==, 0); 187 188 ret = block_iter_next(&it, &rec); 189 - check_int(ret, ==, 0); 190 - check(reftable_record_equal(&recs[10 * (i / 10)], &rec, REFTABLE_HASH_SIZE_SHA1)); 191 } 192 193 reftable_block_release(&block); ··· 199 reftable_record_release(&recs[i]); 200 } 201 202 - static void t_obj_block_read_write(void) 203 { 204 
const int header_off = 21; 205 struct reftable_record recs[30]; ··· 220 struct reftable_buf block_data = REFTABLE_BUF_INIT; 221 222 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 223 - check(block_data.buf != NULL); 224 block_data.len = block_size; 225 226 ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_OBJ, (uint8_t *) block_data.buf, block_size, 227 header_off, hash_size(REFTABLE_HASH_SHA1)); 228 - check(!ret); 229 230 for (i = 0; i < N; i++) { 231 uint8_t bytes[] = { i, i + 1, i + 2, i + 3, i + 5 }, *allocated; ··· 238 ret = block_writer_add(&bw, &rec); 239 rec.u.obj.hash_prefix = NULL; 240 rec.u.obj.hash_prefix_len = 0; 241 - check_int(ret, ==, 0); 242 } 243 244 ret = block_writer_finish(&bw); 245 - check_int(ret, >, 0); 246 247 block_writer_release(&bw); 248 ··· 254 255 for (i = 0; ; i++) { 256 ret = block_iter_next(&it, &rec); 257 - check_int(ret, >=, 0); 258 if (ret > 0) { 259 - check_int(i, ==, N); 260 break; 261 } 262 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 263 } 264 265 for (i = 0; i < N; i++) { 266 reftable_record_key(&recs[i], &want); 267 268 ret = block_iter_seek_key(&it, &want); 269 - check_int(ret, ==, 0); 270 271 ret = block_iter_next(&it, &rec); 272 - check_int(ret, ==, 0); 273 274 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 275 } 276 277 reftable_block_release(&block); ··· 283 reftable_record_release(&recs[i]); 284 } 285 286 - static void t_index_block_read_write(void) 287 { 288 const int header_off = 21; 289 struct reftable_record recs[30]; ··· 305 struct reftable_buf block_data = REFTABLE_BUF_INIT; 306 307 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 308 - check(block_data.buf != NULL); 309 block_data.len = block_size; 310 311 ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_INDEX, (uint8_t *) block_data.buf, block_size, 312 header_off, hash_size(REFTABLE_HASH_SHA1)); 313 - check(!ret); 314 315 for (i = 0; i < N; i++) { 316 char buf[128]; ··· 319 320 
reftable_buf_init(&recs[i].u.idx.last_key); 321 recs[i].type = REFTABLE_BLOCK_TYPE_INDEX; 322 - check(!reftable_buf_addstr(&recs[i].u.idx.last_key, buf)); 323 recs[i].u.idx.offset = i; 324 325 ret = block_writer_add(&bw, &recs[i]); 326 - check_int(ret, ==, 0); 327 } 328 329 ret = block_writer_finish(&bw); 330 - check_int(ret, >, 0); 331 332 block_writer_release(&bw); 333 ··· 339 340 for (i = 0; ; i++) { 341 ret = block_iter_next(&it, &rec); 342 - check_int(ret, >=, 0); 343 if (ret > 0) { 344 - check_int(i, ==, N); 345 break; 346 } 347 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 348 } 349 350 for (i = 0; i < N; i++) { 351 reftable_record_key(&recs[i], &want); 352 353 ret = block_iter_seek_key(&it, &want); 354 - check_int(ret, ==, 0); 355 356 ret = block_iter_next(&it, &rec); 357 - check_int(ret, ==, 0); 358 359 - check(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1)); 360 361 want.len--; 362 ret = block_iter_seek_key(&it, &want); 363 - check_int(ret, ==, 0); 364 365 ret = block_iter_next(&it, &rec); 366 - check_int(ret, ==, 0); 367 - check(reftable_record_equal(&recs[10 * (i / 10)], &rec, REFTABLE_HASH_SIZE_SHA1)); 368 } 369 370 reftable_block_release(&block); ··· 376 reftable_record_release(&recs[i]); 377 } 378 379 - static void t_block_iterator(void) 380 { 381 struct reftable_block_source source = { 0 }; 382 struct block_writer writer = { ··· 391 392 data.len = 1024; 393 REFTABLE_CALLOC_ARRAY(data.buf, data.len); 394 - check(data.buf != NULL); 395 396 - err = block_writer_init(&writer, REFTABLE_BLOCK_TYPE_REF, (uint8_t *) data.buf, data.len, 397 0, hash_size(REFTABLE_HASH_SHA1)); 398 - check(!err); 399 400 for (size_t i = 0; i < ARRAY_SIZE(expected_refs); i++) { 401 expected_refs[i] = (struct reftable_record) { ··· 408 memset(expected_refs[i].u.ref.value.val1, i, REFTABLE_HASH_SIZE_SHA1); 409 410 err = block_writer_add(&writer, &expected_refs[i]); 411 - check_int(err, ==, 0); 412 } 413 414 err = 
block_writer_finish(&writer); 415 - check_int(err, >, 0); 416 417 block_source_from_buf(&source, &data); 418 reftable_block_init(&block, &source, 0, 0, data.len, 419 REFTABLE_HASH_SIZE_SHA1, REFTABLE_BLOCK_TYPE_REF); 420 421 err = reftable_block_init_iterator(&block, &it); 422 - check_int(err, ==, 0); 423 424 for (size_t i = 0; ; i++) { 425 err = reftable_iterator_next_ref(&it, &ref); 426 if (err > 0) { 427 - check_int(i, ==, ARRAY_SIZE(expected_refs)); 428 break; 429 } 430 - check_int(err, ==, 0); 431 432 - check(reftable_ref_record_equal(&ref, &expected_refs[i].u.ref, 433 - REFTABLE_HASH_SIZE_SHA1)); 434 } 435 436 err = reftable_iterator_seek_ref(&it, "refs/heads/does-not-exist"); 437 - check_int(err, ==, 0); 438 err = reftable_iterator_next_ref(&it, &ref); 439 - check_int(err, ==, 1); 440 441 err = reftable_iterator_seek_ref(&it, "refs/heads/branch-13"); 442 - check_int(err, ==, 0); 443 err = reftable_iterator_next_ref(&it, &ref); 444 - check_int(err, ==, 0); 445 - check(reftable_ref_record_equal(&ref, &expected_refs[13].u.ref, 446 - REFTABLE_HASH_SIZE_SHA1)); 447 448 for (size_t i = 0; i < ARRAY_SIZE(expected_refs); i++) 449 reftable_free(expected_refs[i].u.ref.refname); ··· 453 block_writer_release(&writer); 454 reftable_buf_release(&data); 455 } 456 - 457 - int cmd_main(int argc UNUSED, const char *argv[] UNUSED) 458 - { 459 - TEST(t_index_block_read_write(), "read-write operations on index blocks work"); 460 - TEST(t_log_block_read_write(), "read-write operations on log blocks work"); 461 - TEST(t_obj_block_read_write(), "read-write operations on obj blocks work"); 462 - TEST(t_ref_block_read_write(), "read-write operations on ref blocks work"); 463 - TEST(t_block_iterator(), "block iterator works"); 464 - 465 - return test_done(); 466 - }
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 + #include "unit-test.h" 10 + #include "lib-reftable.h" 11 #include "reftable/block.h" 12 #include "reftable/blocksource.h" 13 #include "reftable/constants.h" 14 #include "reftable/reftable-error.h" 15 #include "strbuf.h" 16 17 + void test_reftable_block__read_write(void) 18 { 19 const int header_off = 21; /* random */ 20 struct reftable_record recs[30]; ··· 35 struct reftable_buf block_data = REFTABLE_BUF_INIT; 36 37 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 38 + cl_assert(block_data.buf != NULL); 39 block_data.len = block_size; 40 41 + ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_REF, 42 + (uint8_t *) block_data.buf, block_size, 43 header_off, hash_size(REFTABLE_HASH_SHA1)); 44 + cl_assert(!ret); 45 46 rec.u.ref.refname = (char *) ""; 47 rec.u.ref.value_type = REFTABLE_REF_DELETION; 48 ret = block_writer_add(&bw, &rec); 49 + cl_assert_equal_i(ret, REFTABLE_API_ERROR); 50 51 for (i = 0; i < N; i++) { 52 rec.u.ref.refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); ··· 57 ret = block_writer_add(&bw, &rec); 58 rec.u.ref.refname = NULL; 59 rec.u.ref.value_type = REFTABLE_REF_DELETION; 60 + cl_assert_equal_i(ret, 0); 61 } 62 63 ret = block_writer_finish(&bw); 64 + cl_assert(ret > 0); 65 66 block_writer_release(&bw); 67 ··· 73 74 for (i = 0; ; i++) { 75 ret = block_iter_next(&it, &rec); 76 + cl_assert(ret >= 0); 77 if (ret > 0) { 78 + cl_assert_equal_i(i, N); 79 break; 80 } 81 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 82 } 83 84 for (i = 0; i < N; i++) { 85 reftable_record_key(&recs[i], &want); 86 87 ret = block_iter_seek_key(&it, &want); 88 + cl_assert_equal_i(ret, 0); 89 90 ret = block_iter_next(&it, &rec); 91 + cl_assert_equal_i(ret, 0); 92 93 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 94 95 want.len--; 96 ret = block_iter_seek_key(&it, &want); 97 + cl_assert_equal_i(ret, 0); 98 99 ret = 
block_iter_next(&it, &rec); 100 + cl_assert_equal_i(ret, 0); 101 + cl_assert_equal_i(reftable_record_equal(&recs[10 * (i / 10)], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 102 } 103 104 reftable_block_release(&block); ··· 110 reftable_record_release(&recs[i]); 111 } 112 113 + void test_reftable_block__log_read_write(void) 114 { 115 const int header_off = 21; 116 struct reftable_record recs[30]; ··· 131 struct reftable_buf block_data = REFTABLE_BUF_INIT; 132 133 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 134 + cl_assert(block_data.buf != NULL); 135 block_data.len = block_size; 136 137 ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_LOG, (uint8_t *) block_data.buf, block_size, 138 header_off, hash_size(REFTABLE_HASH_SHA1)); 139 + cl_assert(!ret); 140 141 for (i = 0; i < N; i++) { 142 rec.u.log.refname = xstrfmt("branch%02"PRIuMAX , (uintmax_t)i); ··· 147 ret = block_writer_add(&bw, &rec); 148 rec.u.log.refname = NULL; 149 rec.u.log.value_type = REFTABLE_LOG_DELETION; 150 + cl_assert_equal_i(ret, 0); 151 } 152 153 ret = block_writer_finish(&bw); 154 + cl_assert(ret > 0); 155 156 block_writer_release(&bw); 157 ··· 163 164 for (i = 0; ; i++) { 165 ret = block_iter_next(&it, &rec); 166 + cl_assert(ret >= 0); 167 if (ret > 0) { 168 + cl_assert_equal_i(i, N); 169 break; 170 } 171 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 172 } 173 174 for (i = 0; i < N; i++) { 175 reftable_buf_reset(&want); 176 + cl_assert(reftable_buf_addstr(&want, recs[i].u.log.refname) == 0); 177 178 ret = block_iter_seek_key(&it, &want); 179 + cl_assert_equal_i(ret, 0); 180 181 ret = block_iter_next(&it, &rec); 182 + cl_assert_equal_i(ret, 0); 183 184 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 185 186 want.len--; 187 ret = block_iter_seek_key(&it, &want); 188 + cl_assert_equal_i(ret, 0); 189 190 ret = block_iter_next(&it, &rec); 191 + cl_assert_equal_i(ret, 0); 192 + 
cl_assert_equal_i(reftable_record_equal(&recs[10 * (i / 10)], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 193 } 194 195 reftable_block_release(&block); ··· 201 reftable_record_release(&recs[i]); 202 } 203 204 + void test_reftable_block__obj_read_write(void) 205 { 206 const int header_off = 21; 207 struct reftable_record recs[30]; ··· 222 struct reftable_buf block_data = REFTABLE_BUF_INIT; 223 224 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 225 + cl_assert(block_data.buf != NULL); 226 block_data.len = block_size; 227 228 ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_OBJ, (uint8_t *) block_data.buf, block_size, 229 header_off, hash_size(REFTABLE_HASH_SHA1)); 230 + cl_assert(!ret); 231 232 for (i = 0; i < N; i++) { 233 uint8_t bytes[] = { i, i + 1, i + 2, i + 3, i + 5 }, *allocated; ··· 240 ret = block_writer_add(&bw, &rec); 241 rec.u.obj.hash_prefix = NULL; 242 rec.u.obj.hash_prefix_len = 0; 243 + cl_assert_equal_i(ret, 0); 244 } 245 246 ret = block_writer_finish(&bw); 247 + cl_assert(ret > 0); 248 249 block_writer_release(&bw); 250 ··· 256 257 for (i = 0; ; i++) { 258 ret = block_iter_next(&it, &rec); 259 + cl_assert(ret >= 0); 260 if (ret > 0) { 261 + cl_assert_equal_i(i, N); 262 break; 263 } 264 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 265 } 266 267 for (i = 0; i < N; i++) { 268 reftable_record_key(&recs[i], &want); 269 270 ret = block_iter_seek_key(&it, &want); 271 + cl_assert_equal_i(ret, 0); 272 273 ret = block_iter_next(&it, &rec); 274 + cl_assert_equal_i(ret, 0); 275 276 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 277 } 278 279 reftable_block_release(&block); ··· 285 reftable_record_release(&recs[i]); 286 } 287 288 + void test_reftable_block__ref_read_write(void) 289 { 290 const int header_off = 21; 291 struct reftable_record recs[30]; ··· 307 struct reftable_buf block_data = REFTABLE_BUF_INIT; 308 309 REFTABLE_CALLOC_ARRAY(block_data.buf, block_size); 310 + 
cl_assert(block_data.buf != NULL); 311 block_data.len = block_size; 312 313 ret = block_writer_init(&bw, REFTABLE_BLOCK_TYPE_INDEX, (uint8_t *) block_data.buf, block_size, 314 header_off, hash_size(REFTABLE_HASH_SHA1)); 315 + cl_assert(!ret); 316 317 for (i = 0; i < N; i++) { 318 char buf[128]; ··· 321 322 reftable_buf_init(&recs[i].u.idx.last_key); 323 recs[i].type = REFTABLE_BLOCK_TYPE_INDEX; 324 + cl_assert(!reftable_buf_addstr(&recs[i].u.idx.last_key, buf)); 325 recs[i].u.idx.offset = i; 326 327 ret = block_writer_add(&bw, &recs[i]); 328 + cl_assert_equal_i(ret, 0); 329 } 330 331 ret = block_writer_finish(&bw); 332 + cl_assert(ret > 0); 333 334 block_writer_release(&bw); 335 ··· 341 342 for (i = 0; ; i++) { 343 ret = block_iter_next(&it, &rec); 344 + cl_assert(ret >= 0); 345 if (ret > 0) { 346 + cl_assert_equal_i(i, N); 347 break; 348 } 349 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 350 } 351 352 for (i = 0; i < N; i++) { 353 reftable_record_key(&recs[i], &want); 354 355 ret = block_iter_seek_key(&it, &want); 356 + cl_assert_equal_i(ret, 0); 357 358 ret = block_iter_next(&it, &rec); 359 + cl_assert_equal_i(ret, 0); 360 361 + cl_assert_equal_i(reftable_record_equal(&recs[i], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 362 363 want.len--; 364 ret = block_iter_seek_key(&it, &want); 365 + cl_assert_equal_i(ret, 0); 366 367 ret = block_iter_next(&it, &rec); 368 + cl_assert_equal_i(ret, 0); 369 + cl_assert_equal_i(reftable_record_equal(&recs[10 * (i / 10)], &rec, REFTABLE_HASH_SIZE_SHA1), 1); 370 } 371 372 reftable_block_release(&block); ··· 378 reftable_record_release(&recs[i]); 379 } 380 381 + void test_reftable_block__iterator(void) 382 { 383 struct reftable_block_source source = { 0 }; 384 struct block_writer writer = { ··· 393 394 data.len = 1024; 395 REFTABLE_CALLOC_ARRAY(data.buf, data.len); 396 + cl_assert(data.buf != NULL); 397 398 + err = block_writer_init(&writer, REFTABLE_BLOCK_TYPE_REF, 399 + (uint8_t *) data.buf, 
data.len, 400 0, hash_size(REFTABLE_HASH_SHA1)); 401 + cl_assert(!err); 402 403 for (size_t i = 0; i < ARRAY_SIZE(expected_refs); i++) { 404 expected_refs[i] = (struct reftable_record) { ··· 411 memset(expected_refs[i].u.ref.value.val1, i, REFTABLE_HASH_SIZE_SHA1); 412 413 err = block_writer_add(&writer, &expected_refs[i]); 414 + cl_assert_equal_i(err, 0); 415 } 416 417 err = block_writer_finish(&writer); 418 + cl_assert(err > 0); 419 420 block_source_from_buf(&source, &data); 421 reftable_block_init(&block, &source, 0, 0, data.len, 422 REFTABLE_HASH_SIZE_SHA1, REFTABLE_BLOCK_TYPE_REF); 423 424 err = reftable_block_init_iterator(&block, &it); 425 + cl_assert_equal_i(err, 0); 426 427 for (size_t i = 0; ; i++) { 428 err = reftable_iterator_next_ref(&it, &ref); 429 if (err > 0) { 430 + cl_assert_equal_i(i, ARRAY_SIZE(expected_refs)); 431 break; 432 } 433 + cl_assert_equal_i(err, 0); 434 435 + cl_assert(reftable_ref_record_equal(&ref, 436 + &expected_refs[i].u.ref, REFTABLE_HASH_SIZE_SHA1)); 437 } 438 439 err = reftable_iterator_seek_ref(&it, "refs/heads/does-not-exist"); 440 + cl_assert_equal_i(err, 0); 441 err = reftable_iterator_next_ref(&it, &ref); 442 + cl_assert_equal_i(err, 1); 443 444 err = reftable_iterator_seek_ref(&it, "refs/heads/branch-13"); 445 + cl_assert_equal_i(err, 0); 446 err = reftable_iterator_next_ref(&it, &ref); 447 + cl_assert_equal_i(err, 0); 448 + cl_assert(reftable_ref_record_equal(&ref, 449 + &expected_refs[13].u.ref,REFTABLE_HASH_SIZE_SHA1)); 450 451 for (size_t i = 0; i < ARRAY_SIZE(expected_refs); i++) 452 reftable_free(expected_refs[i].u.ref.refname); ··· 456 block_writer_release(&writer); 457 reftable_buf_release(&data); 458 }
+69 -91
t/unit-tests/t-reftable-merged.c t/unit-tests/u-reftable-merged.c
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 - #include "test-lib.h" 10 #include "lib-reftable.h" 11 #include "reftable/blocksource.h" 12 #include "reftable/constants.h" ··· 29 int err; 30 31 REFTABLE_CALLOC_ARRAY(*tables, n); 32 - check(*tables != NULL); 33 REFTABLE_CALLOC_ARRAY(*source, n); 34 - check(*source != NULL); 35 36 for (size_t i = 0; i < n; i++) { 37 - t_reftable_write_to_buf(&buf[i], refs[i], sizes[i], NULL, 0, &opts); 38 block_source_from_buf(&(*source)[i], &buf[i]); 39 40 err = reftable_table_new(&(*tables)[i], &(*source)[i], 41 "name"); 42 - check(!err); 43 } 44 45 err = reftable_merged_table_new(&mt, *tables, n, REFTABLE_HASH_SHA1); 46 - check(!err); 47 return mt; 48 } 49 ··· 54 reftable_free(tables); 55 } 56 57 - static void t_merged_single_record(void) 58 { 59 struct reftable_ref_record r1[] = { { 60 .refname = (char *) "b", ··· 85 int err; 86 87 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_REF); 88 - check(!err); 89 err = reftable_iterator_seek_ref(&it, "a"); 90 - check(!err); 91 92 err = reftable_iterator_next_ref(&it, &ref); 93 - check(!err); 94 - check(reftable_ref_record_equal(&r2[0], &ref, REFTABLE_HASH_SIZE_SHA1)); 95 reftable_ref_record_release(&ref); 96 reftable_iterator_destroy(&it); 97 tables_destroy(tables, 3); ··· 101 reftable_free(bs); 102 } 103 104 - static void t_merged_refs(void) 105 { 106 struct reftable_ref_record r1[] = { 107 { ··· 165 size_t i; 166 167 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_REF); 168 - check(!err); 169 err = reftable_iterator_seek_ref(&it, "a"); 170 - check(!err); 171 - check_int(reftable_merged_table_hash_id(mt), ==, REFTABLE_HASH_SHA1); 172 - check_int(reftable_merged_table_min_update_index(mt), ==, 1); 173 - check_int(reftable_merged_table_max_update_index(mt), ==, 3); 174 175 while (len < 100) { /* cap loops/recursion. 
*/ 176 struct reftable_ref_record ref = { 0 }; ··· 178 if (err > 0) 179 break; 180 181 - check(!REFTABLE_ALLOC_GROW(out, len + 1, cap)); 182 out[len++] = ref; 183 } 184 reftable_iterator_destroy(&it); 185 186 - check_int(ARRAY_SIZE(want), ==, len); 187 for (i = 0; i < len; i++) 188 - check(reftable_ref_record_equal(want[i], &out[i], 189 - REFTABLE_HASH_SIZE_SHA1)); 190 for (i = 0; i < len; i++) 191 reftable_ref_record_release(&out[i]); 192 reftable_free(out); ··· 198 reftable_free(bs); 199 } 200 201 - static void t_merged_seek_multiple_times(void) 202 { 203 struct reftable_ref_record r1[] = { 204 { ··· 248 249 for (size_t i = 0; i < 5; i++) { 250 int err = reftable_iterator_seek_ref(&it, "c"); 251 - check(!err); 252 253 - err = reftable_iterator_next_ref(&it, &rec); 254 - check(!err); 255 - err = reftable_ref_record_equal(&rec, &r1[1], REFTABLE_HASH_SIZE_SHA1); 256 - check(err == 1); 257 258 - err = reftable_iterator_next_ref(&it, &rec); 259 - check(!err); 260 - err = reftable_ref_record_equal(&rec, &r2[1], REFTABLE_HASH_SIZE_SHA1); 261 - check(err == 1); 262 263 - err = reftable_iterator_next_ref(&it, &rec); 264 - check(err > 0); 265 } 266 267 for (size_t i = 0; i < ARRAY_SIZE(bufs); i++) ··· 273 reftable_free(sources); 274 } 275 276 - static void t_merged_seek_multiple_times_without_draining(void) 277 { 278 struct reftable_ref_record r1[] = { 279 { ··· 317 struct reftable_ref_record rec = { 0 }; 318 struct reftable_iterator it = { 0 }; 319 struct reftable_merged_table *mt; 320 - int err; 321 322 mt = merged_table_from_records(refs, &sources, &tables, sizes, bufs, 2); 323 merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_REF); 324 325 - err = reftable_iterator_seek_ref(&it, "b"); 326 - check(!err); 327 - err = reftable_iterator_next_ref(&it, &rec); 328 - check(!err); 329 - err = reftable_ref_record_equal(&rec, &r2[0], REFTABLE_HASH_SIZE_SHA1); 330 - check(err == 1); 331 332 - err = reftable_iterator_seek_ref(&it, "a"); 333 - check(!err); 334 - err = 
reftable_iterator_next_ref(&it, &rec); 335 - check(!err); 336 - err = reftable_ref_record_equal(&rec, &r1[0], REFTABLE_HASH_SIZE_SHA1); 337 - check(err == 1); 338 339 for (size_t i = 0; i < ARRAY_SIZE(bufs); i++) 340 reftable_buf_release(&bufs[i]); ··· 359 int err; 360 361 REFTABLE_CALLOC_ARRAY(*tables, n); 362 - check(*tables != NULL); 363 REFTABLE_CALLOC_ARRAY(*source, n); 364 - check(*source != NULL); 365 366 for (size_t i = 0; i < n; i++) { 367 - t_reftable_write_to_buf(&buf[i], NULL, 0, logs[i], sizes[i], &opts); 368 block_source_from_buf(&(*source)[i], &buf[i]); 369 370 err = reftable_table_new(&(*tables)[i], &(*source)[i], 371 "name"); 372 - check(!err); 373 } 374 375 err = reftable_merged_table_new(&mt, *tables, n, REFTABLE_HASH_SHA1); 376 - check(!err); 377 return mt; 378 } 379 380 - static void t_merged_logs(void) 381 { 382 struct reftable_log_record r1[] = { 383 { ··· 439 struct reftable_merged_table *mt = merged_table_from_log_records( 440 logs, &bs, &tables, sizes, bufs, 3); 441 struct reftable_iterator it = { 0 }; 442 - int err; 443 struct reftable_log_record *out = NULL; 444 size_t len = 0; 445 size_t cap = 0; 446 size_t i; 447 448 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_LOG); 449 - check(!err); 450 err = reftable_iterator_seek_log(&it, "a"); 451 - check(!err); 452 - check_int(reftable_merged_table_hash_id(mt), ==, REFTABLE_HASH_SHA1); 453 - check_int(reftable_merged_table_min_update_index(mt), ==, 1); 454 - check_int(reftable_merged_table_max_update_index(mt), ==, 3); 455 456 while (len < 100) { /* cap loops/recursion. 
*/ 457 struct reftable_log_record log = { 0 }; ··· 459 if (err > 0) 460 break; 461 462 - check(!REFTABLE_ALLOC_GROW(out, len + 1, cap)); 463 out[len++] = log; 464 } 465 reftable_iterator_destroy(&it); 466 467 - check_int(ARRAY_SIZE(want), ==, len); 468 for (i = 0; i < len; i++) 469 - check(reftable_log_record_equal(want[i], &out[i], 470 - REFTABLE_HASH_SIZE_SHA1)); 471 472 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_LOG); 473 - check(!err); 474 err = reftable_iterator_seek_log_at(&it, "a", 2); 475 - check(!err); 476 reftable_log_record_release(&out[0]); 477 - err = reftable_iterator_next_log(&it, &out[0]); 478 - check(!err); 479 - check(reftable_log_record_equal(&out[0], &r3[0], REFTABLE_HASH_SIZE_SHA1)); 480 reftable_iterator_destroy(&it); 481 482 for (i = 0; i < len; i++) ··· 490 reftable_free(bs); 491 } 492 493 - static void t_default_write_opts(void) 494 { 495 struct reftable_write_options opts = { 0 }; 496 struct reftable_buf buf = REFTABLE_BUF_INIT; 497 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 498 struct reftable_ref_record rec = { 499 .refname = (char *) "master", 500 .update_index = 1, ··· 507 508 reftable_writer_set_limits(w, 1, 1); 509 510 - err = reftable_writer_add_ref(w, &rec); 511 - check(!err); 512 513 - err = reftable_writer_close(w); 514 - check(!err); 515 reftable_writer_free(w); 516 517 block_source_from_buf(&source, &buf); 518 519 err = reftable_table_new(&table, &source, "filename"); 520 - check(!err); 521 522 hash_id = reftable_table_hash_id(table); 523 - check_int(hash_id, ==, REFTABLE_HASH_SHA1); 524 525 err = reftable_merged_table_new(&merged, &table, 1, REFTABLE_HASH_SHA256); 526 - check_int(err, ==, REFTABLE_FORMAT_ERROR); 527 err = reftable_merged_table_new(&merged, &table, 1, REFTABLE_HASH_SHA1); 528 - check(!err); 529 530 reftable_table_decref(table); 531 reftable_merged_table_free(merged); 532 reftable_buf_release(&buf); 533 } 534 - 535 - 536 - int cmd_main(int argc UNUSED, const char *argv[] 
UNUSED) 537 - { 538 - TEST(t_default_write_opts(), "merged table with default write opts"); 539 - TEST(t_merged_logs(), "merged table with multiple log updates for same ref"); 540 - TEST(t_merged_refs(), "merged table with multiple updates to same ref"); 541 - TEST(t_merged_seek_multiple_times(), "merged table can seek multiple times"); 542 - TEST(t_merged_seek_multiple_times_without_draining(), "merged table can seek multiple times without draining"); 543 - TEST(t_merged_single_record(), "ref occurring in only one record can be fetched"); 544 - 545 - return test_done(); 546 - }
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 + #include "unit-test.h" 10 #include "lib-reftable.h" 11 #include "reftable/blocksource.h" 12 #include "reftable/constants.h" ··· 29 int err; 30 31 REFTABLE_CALLOC_ARRAY(*tables, n); 32 + cl_assert(*tables != NULL); 33 REFTABLE_CALLOC_ARRAY(*source, n); 34 + cl_assert(*source != NULL); 35 36 for (size_t i = 0; i < n; i++) { 37 + cl_reftable_write_to_buf(&buf[i], refs[i], sizes[i], NULL, 0, &opts); 38 block_source_from_buf(&(*source)[i], &buf[i]); 39 40 err = reftable_table_new(&(*tables)[i], &(*source)[i], 41 "name"); 42 + cl_assert(!err); 43 } 44 45 err = reftable_merged_table_new(&mt, *tables, n, REFTABLE_HASH_SHA1); 46 + cl_assert(!err); 47 return mt; 48 } 49 ··· 54 reftable_free(tables); 55 } 56 57 + void test_reftable_merged__single_record(void) 58 { 59 struct reftable_ref_record r1[] = { { 60 .refname = (char *) "b", ··· 85 int err; 86 87 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_REF); 88 + cl_assert(!err); 89 err = reftable_iterator_seek_ref(&it, "a"); 90 + cl_assert(!err); 91 92 err = reftable_iterator_next_ref(&it, &ref); 93 + cl_assert(!err); 94 + cl_assert(reftable_ref_record_equal(&r2[0], &ref, 95 + REFTABLE_HASH_SIZE_SHA1) != 0); 96 reftable_ref_record_release(&ref); 97 reftable_iterator_destroy(&it); 98 tables_destroy(tables, 3); ··· 102 reftable_free(bs); 103 } 104 105 + void test_reftable_merged__refs(void) 106 { 107 struct reftable_ref_record r1[] = { 108 { ··· 166 size_t i; 167 168 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_REF); 169 + cl_assert(!err); 170 err = reftable_iterator_seek_ref(&it, "a"); 171 + cl_assert(err == 0); 172 + cl_assert_equal_i(reftable_merged_table_hash_id(mt), REFTABLE_HASH_SHA1); 173 + cl_assert_equal_i(reftable_merged_table_min_update_index(mt), 1); 174 + cl_assert_equal_i(reftable_merged_table_max_update_index(mt), 3); 175 176 while (len < 100) { /* cap loops/recursion. 
*/ 177 struct reftable_ref_record ref = { 0 }; ··· 179 if (err > 0) 180 break; 181 182 + cl_assert(REFTABLE_ALLOC_GROW(out, len + 1, cap) == 0); 183 out[len++] = ref; 184 } 185 reftable_iterator_destroy(&it); 186 187 + cl_assert_equal_i(ARRAY_SIZE(want), len); 188 for (i = 0; i < len; i++) 189 + cl_assert(reftable_ref_record_equal(want[i], &out[i], 190 + REFTABLE_HASH_SIZE_SHA1) != 0); 191 for (i = 0; i < len; i++) 192 reftable_ref_record_release(&out[i]); 193 reftable_free(out); ··· 199 reftable_free(bs); 200 } 201 202 + void test_reftable_merged__seek_multiple_times(void) 203 { 204 struct reftable_ref_record r1[] = { 205 { ··· 249 250 for (size_t i = 0; i < 5; i++) { 251 int err = reftable_iterator_seek_ref(&it, "c"); 252 + cl_assert(!err); 253 254 + cl_assert(reftable_iterator_next_ref(&it, &rec) == 0); 255 + cl_assert_equal_i(reftable_ref_record_equal(&rec, &r1[1], 256 + REFTABLE_HASH_SIZE_SHA1), 1); 257 258 + cl_assert(reftable_iterator_next_ref(&it, &rec) == 0); 259 + cl_assert_equal_i(reftable_ref_record_equal(&rec, &r2[1], 260 + REFTABLE_HASH_SIZE_SHA1), 1); 261 262 + cl_assert(reftable_iterator_next_ref(&it, &rec) > 0); 263 } 264 265 for (size_t i = 0; i < ARRAY_SIZE(bufs); i++) ··· 271 reftable_free(sources); 272 } 273 274 + void test_reftable_merged__seek_multiple_times_no_drain(void) 275 { 276 struct reftable_ref_record r1[] = { 277 { ··· 315 struct reftable_ref_record rec = { 0 }; 316 struct reftable_iterator it = { 0 }; 317 struct reftable_merged_table *mt; 318 319 mt = merged_table_from_records(refs, &sources, &tables, sizes, bufs, 2); 320 merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_REF); 321 322 + cl_assert(reftable_iterator_seek_ref(&it, "b") == 0); 323 + cl_assert(reftable_iterator_next_ref(&it, &rec) == 0); 324 + cl_assert_equal_i(reftable_ref_record_equal(&rec, &r2[0], 325 + REFTABLE_HASH_SIZE_SHA1), 1); 326 327 + cl_assert(reftable_iterator_seek_ref(&it, "a") == 0); 328 + cl_assert(reftable_iterator_next_ref(&it, &rec) == 0); 329 + 
cl_assert_equal_i(reftable_ref_record_equal(&rec, &r1[0], 330 + REFTABLE_HASH_SIZE_SHA1), 1); 331 332 for (size_t i = 0; i < ARRAY_SIZE(bufs); i++) 333 reftable_buf_release(&bufs[i]); ··· 352 int err; 353 354 REFTABLE_CALLOC_ARRAY(*tables, n); 355 + cl_assert(*tables != NULL); 356 REFTABLE_CALLOC_ARRAY(*source, n); 357 + cl_assert(*source != NULL); 358 359 for (size_t i = 0; i < n; i++) { 360 + cl_reftable_write_to_buf(&buf[i], NULL, 0, logs[i], sizes[i], &opts); 361 block_source_from_buf(&(*source)[i], &buf[i]); 362 363 err = reftable_table_new(&(*tables)[i], &(*source)[i], 364 "name"); 365 + cl_assert(!err); 366 } 367 368 err = reftable_merged_table_new(&mt, *tables, n, REFTABLE_HASH_SHA1); 369 + cl_assert(!err); 370 return mt; 371 } 372 373 + void test_reftable_merged__logs(void) 374 { 375 struct reftable_log_record r1[] = { 376 { ··· 432 struct reftable_merged_table *mt = merged_table_from_log_records( 433 logs, &bs, &tables, sizes, bufs, 3); 434 struct reftable_iterator it = { 0 }; 435 struct reftable_log_record *out = NULL; 436 size_t len = 0; 437 size_t cap = 0; 438 size_t i; 439 + int err; 440 441 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_LOG); 442 + cl_assert(!err); 443 err = reftable_iterator_seek_log(&it, "a"); 444 + cl_assert(!err); 445 + cl_assert_equal_i(reftable_merged_table_hash_id(mt), REFTABLE_HASH_SHA1); 446 + cl_assert_equal_i(reftable_merged_table_min_update_index(mt), 1); 447 + cl_assert_equal_i(reftable_merged_table_max_update_index(mt), 3); 448 449 while (len < 100) { /* cap loops/recursion. 
*/ 450 struct reftable_log_record log = { 0 }; ··· 452 if (err > 0) 453 break; 454 455 + cl_assert(REFTABLE_ALLOC_GROW(out, len + 1, cap) == 0); 456 out[len++] = log; 457 } 458 reftable_iterator_destroy(&it); 459 460 + cl_assert_equal_i(ARRAY_SIZE(want), len); 461 for (i = 0; i < len; i++) 462 + cl_assert(reftable_log_record_equal(want[i], &out[i], 463 + REFTABLE_HASH_SIZE_SHA1) != 0); 464 465 err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_LOG); 466 + cl_assert(!err); 467 err = reftable_iterator_seek_log_at(&it, "a", 2); 468 + cl_assert(!err); 469 reftable_log_record_release(&out[0]); 470 + cl_assert(reftable_iterator_next_log(&it, &out[0]) == 0); 471 + cl_assert(reftable_log_record_equal(&out[0], &r3[0], 472 + REFTABLE_HASH_SIZE_SHA1) != 0); 473 reftable_iterator_destroy(&it); 474 475 for (i = 0; i < len; i++) ··· 483 reftable_free(bs); 484 } 485 486 + void test_reftable_merged__default_write_opts(void) 487 { 488 struct reftable_write_options opts = { 0 }; 489 struct reftable_buf buf = REFTABLE_BUF_INIT; 490 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, &opts); 491 struct reftable_ref_record rec = { 492 .refname = (char *) "master", 493 .update_index = 1, ··· 500 501 reftable_writer_set_limits(w, 1, 1); 502 503 + cl_assert_equal_i(reftable_writer_add_ref(w, &rec), 0); 504 505 + cl_assert_equal_i(reftable_writer_close(w), 0); 506 reftable_writer_free(w); 507 508 block_source_from_buf(&source, &buf); 509 510 err = reftable_table_new(&table, &source, "filename"); 511 + cl_assert(!err); 512 513 hash_id = reftable_table_hash_id(table); 514 + cl_assert_equal_i(hash_id, REFTABLE_HASH_SHA1); 515 516 err = reftable_merged_table_new(&merged, &table, 1, REFTABLE_HASH_SHA256); 517 + cl_assert_equal_i(err, REFTABLE_FORMAT_ERROR); 518 err = reftable_merged_table_new(&merged, &table, 1, REFTABLE_HASH_SHA1); 519 + cl_assert(!err); 520 521 reftable_table_decref(table); 522 reftable_merged_table_free(merged); 523 reftable_buf_release(&buf); 524 }
+27 -32
t/unit-tests/t-reftable-pq.c t/unit-tests/u-reftable-pq.c
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 - #include "test-lib.h" 10 #include "reftable/constants.h" 11 #include "reftable/pq.h" 12 #include "strbuf.h" ··· 15 { 16 for (size_t i = 1; i < pq->len; i++) { 17 size_t parent = (i - 1) / 2; 18 - check(pq_less(&pq->heap[parent], &pq->heap[i])); 19 } 20 } 21 22 static int pq_entry_equal(struct pq_entry *a, struct pq_entry *b) 23 { 24 int cmp; 25 - check(!reftable_record_cmp(a->rec, b->rec, &cmp)); 26 return !cmp && (a->index == b->index); 27 } 28 29 - static void t_pq_record(void) 30 { 31 struct merged_iter_pqueue pq = { 0 }; 32 struct reftable_record recs[54]; ··· 34 char *last = NULL; 35 36 for (i = 0; i < N; i++) { 37 - check(!reftable_record_init(&recs[i], REFTABLE_BLOCK_TYPE_REF)); 38 recs[i].u.ref.refname = xstrfmt("%02"PRIuMAX, (uintmax_t)i); 39 } 40 ··· 53 struct pq_entry top = merged_iter_pqueue_top(pq); 54 struct pq_entry e; 55 56 - check(!merged_iter_pqueue_remove(&pq, &e)); 57 merged_iter_pqueue_check(&pq); 58 59 - check(pq_entry_equal(&top, &e)); 60 - check(reftable_record_type(e.rec) == REFTABLE_BLOCK_TYPE_REF); 61 if (last) 62 - check_int(strcmp(last, e.rec->u.ref.refname), <, 0); 63 last = e.rec->u.ref.refname; 64 } 65 ··· 68 merged_iter_pqueue_release(&pq); 69 } 70 71 - static void t_pq_index(void) 72 { 73 struct merged_iter_pqueue pq = { 0 }; 74 struct reftable_record recs[13]; ··· 76 size_t N = ARRAY_SIZE(recs), i; 77 78 for (i = 0; i < N; i++) { 79 - check(!reftable_record_init(&recs[i], REFTABLE_BLOCK_TYPE_REF)); 80 recs[i].u.ref.refname = (char *) "refs/heads/master"; 81 } 82 ··· 96 struct pq_entry top = merged_iter_pqueue_top(pq); 97 struct pq_entry e; 98 99 - check(!merged_iter_pqueue_remove(&pq, &e)); 100 merged_iter_pqueue_check(&pq); 101 102 - check(pq_entry_equal(&top, &e)); 103 - check(reftable_record_type(e.rec) == REFTABLE_BLOCK_TYPE_REF); 104 - check_int(e.index, ==, i); 105 if (last) 106 - check_str(last, e.rec->u.ref.refname); 107 last = e.rec->u.ref.refname; 
108 } 109 110 merged_iter_pqueue_release(&pq); 111 } 112 113 - static void t_merged_iter_pqueue_top(void) 114 { 115 struct merged_iter_pqueue pq = { 0 }; 116 struct reftable_record recs[13]; 117 size_t N = ARRAY_SIZE(recs), i; 118 119 for (i = 0; i < N; i++) { 120 - check(!reftable_record_init(&recs[i], REFTABLE_BLOCK_TYPE_REF)); 121 recs[i].u.ref.refname = (char *) "refs/heads/master"; 122 } 123 ··· 137 struct pq_entry top = merged_iter_pqueue_top(pq); 138 struct pq_entry e; 139 140 - check(!merged_iter_pqueue_remove(&pq, &e)); 141 142 merged_iter_pqueue_check(&pq); 143 - check(pq_entry_equal(&top, &e)); 144 - check(reftable_record_equal(top.rec, &recs[i], REFTABLE_HASH_SIZE_SHA1)); 145 for (size_t j = 0; i < pq.len; j++) { 146 - check(pq_less(&top, &pq.heap[j])); 147 - check_int(top.index, >, j); 148 } 149 } 150 151 merged_iter_pqueue_release(&pq); 152 } 153 - 154 - int cmd_main(int argc UNUSED, const char *argv[] UNUSED) 155 - { 156 - TEST(t_pq_record(), "pq works with record-based comparison"); 157 - TEST(t_pq_index(), "pq works with index-based comparison"); 158 - TEST(t_merged_iter_pqueue_top(), "merged_iter_pqueue_top works"); 159 - 160 - return test_done(); 161 - }
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 + #include "unit-test.h" 10 + #include "lib-reftable.h" 11 #include "reftable/constants.h" 12 #include "reftable/pq.h" 13 #include "strbuf.h" ··· 16 { 17 for (size_t i = 1; i < pq->len; i++) { 18 size_t parent = (i - 1) / 2; 19 + cl_assert(pq_less(&pq->heap[parent], &pq->heap[i]) != 0); 20 } 21 } 22 23 static int pq_entry_equal(struct pq_entry *a, struct pq_entry *b) 24 { 25 int cmp; 26 + cl_assert_equal_i(reftable_record_cmp(a->rec, b->rec, &cmp), 0); 27 return !cmp && (a->index == b->index); 28 } 29 30 + void test_reftable_pq__record(void) 31 { 32 struct merged_iter_pqueue pq = { 0 }; 33 struct reftable_record recs[54]; ··· 35 char *last = NULL; 36 37 for (i = 0; i < N; i++) { 38 + cl_assert(!reftable_record_init(&recs[i], 39 + REFTABLE_BLOCK_TYPE_REF)); 40 recs[i].u.ref.refname = xstrfmt("%02"PRIuMAX, (uintmax_t)i); 41 } 42 ··· 55 struct pq_entry top = merged_iter_pqueue_top(pq); 56 struct pq_entry e; 57 58 + cl_assert_equal_i(merged_iter_pqueue_remove(&pq, &e), 0); 59 merged_iter_pqueue_check(&pq); 60 61 + cl_assert(pq_entry_equal(&top, &e)); 62 + cl_assert(reftable_record_type(e.rec) == REFTABLE_BLOCK_TYPE_REF); 63 if (last) 64 + cl_assert(strcmp(last, e.rec->u.ref.refname) < 0); 65 last = e.rec->u.ref.refname; 66 } 67 ··· 70 merged_iter_pqueue_release(&pq); 71 } 72 73 + void test_reftable_pq__index(void) 74 { 75 struct merged_iter_pqueue pq = { 0 }; 76 struct reftable_record recs[13]; ··· 78 size_t N = ARRAY_SIZE(recs), i; 79 80 for (i = 0; i < N; i++) { 81 + cl_assert(!reftable_record_init(&recs[i], 82 + REFTABLE_BLOCK_TYPE_REF)); 83 recs[i].u.ref.refname = (char *) "refs/heads/master"; 84 } 85 ··· 99 struct pq_entry top = merged_iter_pqueue_top(pq); 100 struct pq_entry e; 101 102 + cl_assert_equal_i(merged_iter_pqueue_remove(&pq, &e), 0); 103 merged_iter_pqueue_check(&pq); 104 105 + cl_assert(pq_entry_equal(&top, &e)); 106 + cl_assert(reftable_record_type(e.rec) == 
REFTABLE_BLOCK_TYPE_REF); 107 + cl_assert_equal_i(e.index, i); 108 if (last) 109 + cl_assert_equal_s(last, e.rec->u.ref.refname); 110 last = e.rec->u.ref.refname; 111 } 112 113 merged_iter_pqueue_release(&pq); 114 } 115 116 + void test_reftable_pq__merged_iter_pqueue_top(void) 117 { 118 struct merged_iter_pqueue pq = { 0 }; 119 struct reftable_record recs[13]; 120 size_t N = ARRAY_SIZE(recs), i; 121 122 for (i = 0; i < N; i++) { 123 + cl_assert(!reftable_record_init(&recs[i], 124 + REFTABLE_BLOCK_TYPE_REF)); 125 recs[i].u.ref.refname = (char *) "refs/heads/master"; 126 } 127 ··· 141 struct pq_entry top = merged_iter_pqueue_top(pq); 142 struct pq_entry e; 143 144 + cl_assert_equal_i(merged_iter_pqueue_remove(&pq, &e), 0); 145 146 merged_iter_pqueue_check(&pq); 147 + cl_assert(pq_entry_equal(&top, &e) != 0); 148 + cl_assert(reftable_record_equal(top.rec, &recs[i], REFTABLE_HASH_SIZE_SHA1) != 0); 149 for (size_t j = 0; i < pq.len; j++) { 150 + cl_assert(pq_less(&top, &pq.heap[j]) != 0); 151 + cl_assert(top.index > j); 152 } 153 } 154 155 merged_iter_pqueue_release(&pq); 156 }
+176 -227
t/unit-tests/t-reftable-readwrite.c t/unit-tests/u-reftable-readwrite.c
··· 8 9 #define DISABLE_SIGN_COMPARE_WARNINGS 10 11 - #include "test-lib.h" 12 #include "lib-reftable.h" 13 #include "reftable/basics.h" 14 #include "reftable/blocksource.h" ··· 19 20 static const int update_index = 5; 21 22 - static void t_buffer(void) 23 { 24 struct reftable_buf buf = REFTABLE_BUF_INIT; 25 struct reftable_block_source source = { 0 }; 26 struct reftable_block_data out = { 0 }; 27 int n; 28 uint8_t in[] = "hello"; 29 - check(!reftable_buf_add(&buf, in, sizeof(in))); 30 block_source_from_buf(&source, &buf); 31 - check_int(block_source_size(&source), ==, 6); 32 n = block_source_read_data(&source, &out, 0, sizeof(in)); 33 - check_int(n, ==, sizeof(in)); 34 - check(!memcmp(in, out.data, n)); 35 block_source_release_data(&out); 36 37 n = block_source_read_data(&source, &out, 1, 2); 38 - check_int(n, ==, 2); 39 - check(!memcmp(out.data, "el", 2)); 40 41 block_source_release_data(&out); 42 block_source_close(&source); ··· 55 int i; 56 57 REFTABLE_CALLOC_ARRAY(*names, N + 1); 58 - check(*names != NULL); 59 REFTABLE_CALLOC_ARRAY(refs, N); 60 - check(refs != NULL); 61 REFTABLE_CALLOC_ARRAY(logs, N); 62 - check(logs != NULL); 63 64 for (i = 0; i < N; i++) { 65 refs[i].refname = (*names)[i] = xstrfmt("refs/heads/branch%02d", i); 66 refs[i].update_index = update_index; 67 refs[i].value_type = REFTABLE_REF_VAL1; 68 - t_reftable_set_hash(refs[i].value.val1, i, REFTABLE_HASH_SHA1); 69 } 70 71 for (i = 0; i < N; i++) { 72 logs[i].refname = (*names)[i]; 73 logs[i].update_index = update_index; 74 logs[i].value_type = REFTABLE_LOG_UPDATE; 75 - t_reftable_set_hash(logs[i].value.update.new_hash, i, 76 - REFTABLE_HASH_SHA1); 77 logs[i].value.update.message = (char *) "message"; 78 } 79 80 - t_reftable_write_to_buf(buf, refs, N, logs, N, &opts); 81 82 reftable_free(refs); 83 reftable_free(logs); 84 } 85 86 - static void t_log_buffer_size(void) 87 { 88 struct reftable_buf buf = REFTABLE_BUF_INIT; 89 struct reftable_write_options opts = { 90 .block_size = 4096, 91 }; 92 - 
int err; 93 int i; 94 struct reftable_log_record 95 log = { .refname = (char *) "refs/heads/master", ··· 102 .time = 0x5e430672, 103 .message = (char *) "commit: 9\n", 104 } } }; 105 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 106 107 /* This tests buffer extension for log compression. Must use a random 108 hash, to ensure that the compressed part is larger than the original. ··· 112 log.value.update.new_hash[i] = (uint8_t)(git_rand(0) % 256); 113 } 114 reftable_writer_set_limits(w, update_index, update_index); 115 - err = reftable_writer_add_log(w, &log); 116 - check(!err); 117 - err = reftable_writer_close(w); 118 - check(!err); 119 reftable_writer_free(w); 120 reftable_buf_release(&buf); 121 } 122 123 - static void t_log_overflow(void) 124 { 125 struct reftable_buf buf = REFTABLE_BUF_INIT; 126 char msg[256] = { 0 }; 127 struct reftable_write_options opts = { 128 .block_size = ARRAY_SIZE(msg), 129 }; 130 - int err; 131 struct reftable_log_record log = { 132 .refname = (char *) "refs/heads/master", 133 .update_index = update_index, ··· 144 }, 145 }, 146 }; 147 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 148 149 memset(msg, 'x', sizeof(msg) - 1); 150 reftable_writer_set_limits(w, update_index, update_index); 151 - err = reftable_writer_add_log(w, &log); 152 - check_int(err, ==, REFTABLE_ENTRY_TOO_BIG_ERROR); 153 reftable_writer_free(w); 154 reftable_buf_release(&buf); 155 } 156 157 - static void t_log_write_limits(void) 158 { 159 struct reftable_write_options opts = { 0 }; 160 struct reftable_buf buf = REFTABLE_BUF_INIT; 161 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 162 struct reftable_log_record log = { 163 .refname = (char *)"refs/head/master", 164 .update_index = 0, ··· 174 }, 175 }, 176 }; 177 - int err; 178 179 reftable_writer_set_limits(w, 1, 1); 180 181 /* write with update_index (0) below set limits (1, 1) */ 182 - err = reftable_writer_add_log(w, &log); 183 - check_int(err, ==, 
0); 184 185 /* write with update_index (1) in the set limits (1, 1) */ 186 log.update_index = 1; 187 - err = reftable_writer_add_log(w, &log); 188 - check_int(err, ==, 0); 189 190 /* write with update_index (3) above set limits (1, 1) */ 191 log.update_index = 3; 192 - err = reftable_writer_add_log(w, &log); 193 - check_int(err, ==, REFTABLE_API_ERROR); 194 195 reftable_writer_free(w); 196 reftable_buf_release(&buf); 197 } 198 199 - static void t_log_write_read(void) 200 { 201 struct reftable_write_options opts = { 202 .block_size = 256, ··· 207 struct reftable_table *table; 208 struct reftable_block_source source = { 0 }; 209 struct reftable_buf buf = REFTABLE_BUF_INIT; 210 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 211 const struct reftable_stats *stats = NULL; 212 - int N = 2, err, i, n; 213 char **names; 214 215 names = reftable_calloc(N + 1, sizeof(*names)); 216 - check(names != NULL); 217 218 reftable_writer_set_limits(w, 0, N); 219 ··· 225 ref.refname = name; 226 ref.update_index = i; 227 228 - err = reftable_writer_add_ref(w, &ref); 229 - check(!err); 230 } 231 232 for (i = 0; i < N; i++) { ··· 235 log.refname = names[i]; 236 log.update_index = i; 237 log.value_type = REFTABLE_LOG_UPDATE; 238 - t_reftable_set_hash(log.value.update.old_hash, i, 239 - REFTABLE_HASH_SHA1); 240 - t_reftable_set_hash(log.value.update.new_hash, i + 1, 241 - REFTABLE_HASH_SHA1); 242 243 - err = reftable_writer_add_log(w, &log); 244 - check(!err); 245 } 246 247 - n = reftable_writer_close(w); 248 - check_int(n, ==, 0); 249 250 stats = reftable_writer_stats(w); 251 - check_int(stats->log_stats.blocks, >, 0); 252 reftable_writer_free(w); 253 w = NULL; 254 255 block_source_from_buf(&source, &buf); 256 257 err = reftable_table_new(&table, &source, "file.log"); 258 - check(!err); 259 260 err = reftable_table_init_ref_iterator(table, &it); 261 - check(!err); 262 263 err = reftable_iterator_seek_ref(&it, names[N - 1]); 264 - check(!err); 265 266 err = 
reftable_iterator_next_ref(&it, &ref); 267 - check(!err); 268 269 /* end of iteration. */ 270 - err = reftable_iterator_next_ref(&it, &ref); 271 - check_int(err, >, 0); 272 273 reftable_iterator_destroy(&it); 274 reftable_ref_record_release(&ref); 275 276 err = reftable_table_init_log_iterator(table, &it); 277 - check(!err); 278 err = reftable_iterator_seek_log(&it, ""); 279 - check(!err); 280 281 for (i = 0; ; i++) { 282 int err = reftable_iterator_next_log(&it, &log); 283 if (err > 0) 284 break; 285 - check(!err); 286 - check_str(names[i], log.refname); 287 - check_int(i, ==, log.update_index); 288 reftable_log_record_release(&log); 289 } 290 291 - check_int(i, ==, N); 292 reftable_iterator_destroy(&it); 293 294 /* cleanup. */ ··· 297 reftable_table_decref(table); 298 } 299 300 - static void t_log_zlib_corruption(void) 301 { 302 struct reftable_write_options opts = { 303 .block_size = 256, ··· 306 struct reftable_table *table; 307 struct reftable_block_source source = { 0 }; 308 struct reftable_buf buf = REFTABLE_BUF_INIT; 309 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 310 const struct reftable_stats *stats = NULL; 311 char message[100] = { 0 }; 312 - int err, i, n; 313 struct reftable_log_record log = { 314 .refname = (char *) "refname", 315 .value_type = REFTABLE_LOG_UPDATE, ··· 329 330 reftable_writer_set_limits(w, 1, 1); 331 332 - err = reftable_writer_add_log(w, &log); 333 - check(!err); 334 - 335 - n = reftable_writer_close(w); 336 - check_int(n, ==, 0); 337 338 stats = reftable_writer_stats(w); 339 - check_int(stats->log_stats.blocks, >, 0); 340 reftable_writer_free(w); 341 w = NULL; 342 ··· 346 block_source_from_buf(&source, &buf); 347 348 err = reftable_table_new(&table, &source, "file.log"); 349 - check(!err); 350 351 err = reftable_table_init_log_iterator(table, &it); 352 - check(!err); 353 err = reftable_iterator_seek_log(&it, "refname"); 354 - check_int(err, ==, REFTABLE_ZLIB_ERROR); 355 356 reftable_iterator_destroy(&it); 
357 ··· 360 reftable_buf_release(&buf); 361 } 362 363 - static void t_table_read_write_sequential(void) 364 { 365 char **names; 366 struct reftable_buf buf = REFTABLE_BUF_INIT; ··· 376 block_source_from_buf(&source, &buf); 377 378 err = reftable_table_new(&table, &source, "file.ref"); 379 - check(!err); 380 381 err = reftable_table_init_ref_iterator(table, &it); 382 - check(!err); 383 err = reftable_iterator_seek_ref(&it, ""); 384 - check(!err); 385 386 for (j = 0; ; j++) { 387 struct reftable_ref_record ref = { 0 }; 388 int r = reftable_iterator_next_ref(&it, &ref); 389 - check_int(r, >=, 0); 390 if (r > 0) 391 break; 392 - check_str(names[j], ref.refname); 393 - check_int(update_index, ==, ref.update_index); 394 reftable_ref_record_release(&ref); 395 } 396 - check_int(j, ==, N); 397 398 reftable_iterator_destroy(&it); 399 reftable_table_decref(table); ··· 401 free_names(names); 402 } 403 404 - static void t_table_write_small_table(void) 405 { 406 char **names; 407 struct reftable_buf buf = REFTABLE_BUF_INIT; 408 int N = 1; 409 write_table(&names, &buf, N, 4096, REFTABLE_HASH_SHA1); 410 - check_int(buf.len, <, 200); 411 reftable_buf_release(&buf); 412 free_names(names); 413 } 414 415 - static void t_table_read_api(void) 416 { 417 char **names; 418 struct reftable_buf buf = REFTABLE_BUF_INIT; 419 int N = 50; 420 struct reftable_table *table; 421 struct reftable_block_source source = { 0 }; 422 - int err; 423 struct reftable_log_record log = { 0 }; 424 struct reftable_iterator it = { 0 }; 425 426 write_table(&names, &buf, N, 256, REFTABLE_HASH_SHA1); 427 428 block_source_from_buf(&source, &buf); 429 430 err = reftable_table_new(&table, &source, "file.ref"); 431 - check(!err); 432 433 err = reftable_table_init_ref_iterator(table, &it); 434 - check(!err); 435 err = reftable_iterator_seek_ref(&it, names[0]); 436 - check(!err); 437 438 err = reftable_iterator_next_log(&it, &log); 439 - check_int(err, ==, REFTABLE_API_ERROR); 440 441 reftable_buf_release(&buf); 442 
free_names(names); ··· 464 block_source_from_buf(&source, &buf); 465 466 err = reftable_table_new(&table, &source, "file.ref"); 467 - check(!err); 468 - check_int(hash_id, ==, reftable_table_hash_id(table)); 469 470 if (!index) { 471 table->ref_offsets.index_offset = 0; 472 } else { 473 - check_int(table->ref_offsets.index_offset, >, 0); 474 } 475 476 for (i = 1; i < N; i++) { 477 err = reftable_table_init_ref_iterator(table, &it); 478 - check(!err); 479 err = reftable_iterator_seek_ref(&it, names[i]); 480 - check(!err); 481 err = reftable_iterator_next_ref(&it, &ref); 482 - check(!err); 483 - check_str(names[i], ref.refname); 484 - check_int(REFTABLE_REF_VAL1, ==, ref.value_type); 485 - check_int(i, ==, ref.value.val1[0]); 486 487 reftable_ref_record_release(&ref); 488 reftable_iterator_destroy(&it); 489 } 490 491 - check(!reftable_buf_addstr(&pastLast, names[N - 1])); 492 - check(!reftable_buf_addstr(&pastLast, "/")); 493 494 err = reftable_table_init_ref_iterator(table, &it); 495 - check(!err); 496 err = reftable_iterator_seek_ref(&it, pastLast.buf); 497 if (err == 0) { 498 struct reftable_ref_record ref = { 0 }; 499 int err = reftable_iterator_next_ref(&it, &ref); 500 - check_int(err, >, 0); 501 } else { 502 - check_int(err, >, 0); 503 } 504 505 reftable_buf_release(&pastLast); ··· 510 reftable_table_decref(table); 511 } 512 513 - static void t_table_read_write_seek_linear(void) 514 { 515 t_table_read_write_seek(0, REFTABLE_HASH_SHA1); 516 } 517 518 - static void t_table_read_write_seek_linear_sha256(void) 519 { 520 t_table_read_write_seek(0, REFTABLE_HASH_SHA256); 521 } 522 523 - static void t_table_read_write_seek_index(void) 524 { 525 t_table_read_write_seek(1, REFTABLE_HASH_SHA1); 526 } ··· 538 struct reftable_table *table; 539 struct reftable_block_source source = { 0 }; 540 struct reftable_buf buf = REFTABLE_BUF_INIT; 541 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 542 struct reftable_iterator it = { 0 }; 543 - int N = 50, n, j, 
err, i; 544 545 want_names = reftable_calloc(N + 1, sizeof(*want_names)); 546 - check(want_names != NULL); 547 548 - t_reftable_set_hash(want_hash, 4, REFTABLE_HASH_SHA1); 549 550 for (i = 0; i < N; i++) { 551 uint8_t hash[REFTABLE_HASH_SIZE_SHA1]; ··· 561 ref.refname = name; 562 563 ref.value_type = REFTABLE_REF_VAL2; 564 - t_reftable_set_hash(ref.value.val2.value, i / 4, 565 - REFTABLE_HASH_SHA1); 566 - t_reftable_set_hash(ref.value.val2.target_value, 3 + i / 4, 567 - REFTABLE_HASH_SHA1); 568 569 /* 80 bytes / entry, so 3 entries per block. Yields 17 570 */ 571 /* blocks. */ 572 - n = reftable_writer_add_ref(w, &ref); 573 - check_int(n, ==, 0); 574 575 if (!memcmp(ref.value.val2.value, want_hash, REFTABLE_HASH_SIZE_SHA1) || 576 !memcmp(ref.value.val2.target_value, want_hash, REFTABLE_HASH_SIZE_SHA1)) 577 want_names[want_names_len++] = xstrdup(name); 578 } 579 580 - n = reftable_writer_close(w); 581 - check_int(n, ==, 0); 582 583 reftable_writer_free(w); 584 w = NULL; ··· 586 block_source_from_buf(&source, &buf); 587 588 err = reftable_table_new(&table, &source, "file.ref"); 589 - check(!err); 590 if (!indexed) 591 table->obj_offsets.is_present = 0; 592 593 err = reftable_table_init_ref_iterator(table, &it); 594 - check(!err); 595 err = reftable_iterator_seek_ref(&it, ""); 596 - check(!err); 597 reftable_iterator_destroy(&it); 598 599 err = reftable_table_refs_for(table, &it, want_hash); 600 - check(!err); 601 602 for (j = 0; ; j++) { 603 int err = reftable_iterator_next_ref(&it, &ref); 604 - check_int(err, >=, 0); 605 if (err > 0) 606 break; 607 - check_int(j, <, want_names_len); 608 - check_str(ref.refname, want_names[j]); 609 reftable_ref_record_release(&ref); 610 } 611 - check_int(j, ==, want_names_len); 612 613 reftable_buf_release(&buf); 614 free_names(want_names); ··· 616 reftable_table_decref(table); 617 } 618 619 - static void t_table_refs_for_no_index(void) 620 { 621 t_table_refs_for(0); 622 } 623 624 - static void t_table_refs_for_obj_index(void) 625 { 
626 t_table_refs_for(1); 627 } 628 629 - static void t_write_empty_table(void) 630 { 631 struct reftable_write_options opts = { 0 }; 632 struct reftable_buf buf = REFTABLE_BUF_INIT; 633 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 634 struct reftable_block_source source = { 0 }; 635 struct reftable_table *table = NULL; 636 struct reftable_ref_record rec = { 0 }; ··· 639 640 reftable_writer_set_limits(w, 1, 1); 641 642 - err = reftable_writer_close(w); 643 - check_int(err, ==, REFTABLE_EMPTY_TABLE_ERROR); 644 reftable_writer_free(w); 645 646 - check_uint(buf.len, ==, header_size(1) + footer_size(1)); 647 648 block_source_from_buf(&source, &buf); 649 650 err = reftable_table_new(&table, &source, "filename"); 651 - check(!err); 652 653 err = reftable_table_init_ref_iterator(table, &it); 654 - check(!err); 655 err = reftable_iterator_seek_ref(&it, ""); 656 - check(!err); 657 658 err = reftable_iterator_next_ref(&it, &rec); 659 - check_int(err, >, 0); 660 661 reftable_iterator_destroy(&it); 662 reftable_table_decref(table); 663 reftable_buf_release(&buf); 664 } 665 666 - static void t_write_object_id_min_length(void) 667 { 668 struct reftable_write_options opts = { 669 .block_size = 75, 670 }; 671 struct reftable_buf buf = REFTABLE_BUF_INIT; 672 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 673 struct reftable_ref_record ref = { 674 .update_index = 1, 675 .value_type = REFTABLE_REF_VAL1, 676 .value.val1 = {42}, 677 }; 678 - int err; 679 int i; 680 681 reftable_writer_set_limits(w, 1, 1); ··· 686 char name[256]; 687 snprintf(name, sizeof(name), "ref%05d", i); 688 ref.refname = name; 689 - err = reftable_writer_add_ref(w, &ref); 690 - check(!err); 691 } 692 693 - err = reftable_writer_close(w); 694 - check(!err); 695 - check_int(reftable_writer_stats(w)->object_id_len, ==, 2); 696 reftable_writer_free(w); 697 reftable_buf_release(&buf); 698 } 699 700 - static void t_write_object_id_length(void) 701 { 702 struct 
reftable_write_options opts = { 703 .block_size = 75, 704 }; 705 struct reftable_buf buf = REFTABLE_BUF_INIT; 706 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 707 struct reftable_ref_record ref = { 708 .update_index = 1, 709 .value_type = REFTABLE_REF_VAL1, 710 .value.val1 = {42}, 711 }; 712 - int err; 713 int i; 714 715 reftable_writer_set_limits(w, 1, 1); ··· 721 snprintf(name, sizeof(name), "ref%05d", i); 722 ref.refname = name; 723 ref.value.val1[15] = i; 724 - err = reftable_writer_add_ref(w, &ref); 725 - check(!err); 726 } 727 728 - err = reftable_writer_close(w); 729 - check(!err); 730 - check_int(reftable_writer_stats(w)->object_id_len, ==, 16); 731 reftable_writer_free(w); 732 reftable_buf_release(&buf); 733 } 734 735 - static void t_write_empty_key(void) 736 { 737 struct reftable_write_options opts = { 0 }; 738 struct reftable_buf buf = REFTABLE_BUF_INIT; 739 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 740 struct reftable_ref_record ref = { 741 .refname = (char *) "", 742 .update_index = 1, 743 .value_type = REFTABLE_REF_DELETION, 744 }; 745 - int err; 746 747 reftable_writer_set_limits(w, 1, 1); 748 - err = reftable_writer_add_ref(w, &ref); 749 - check_int(err, ==, REFTABLE_API_ERROR); 750 - 751 - err = reftable_writer_close(w); 752 - check_int(err, ==, REFTABLE_EMPTY_TABLE_ERROR); 753 reftable_writer_free(w); 754 reftable_buf_release(&buf); 755 } 756 757 - static void t_write_key_order(void) 758 { 759 struct reftable_write_options opts = { 0 }; 760 struct reftable_buf buf = REFTABLE_BUF_INIT; 761 - struct reftable_writer *w = t_reftable_strbuf_writer(&buf, &opts); 762 struct reftable_ref_record refs[2] = { 763 { 764 .refname = (char *) "b", ··· 776 }, 777 } 778 }; 779 - int err; 780 781 reftable_writer_set_limits(w, 1, 1); 782 - err = reftable_writer_add_ref(w, &refs[0]); 783 - check(!err); 784 - err = reftable_writer_add_ref(w, &refs[1]); 785 - check_int(err, ==, REFTABLE_API_ERROR); 786 787 
refs[0].update_index = 2; 788 - err = reftable_writer_add_ref(w, &refs[0]); 789 - check_int(err, ==, REFTABLE_API_ERROR); 790 791 reftable_writer_close(w); 792 reftable_writer_free(w); 793 reftable_buf_release(&buf); 794 } 795 796 - static void t_write_multiple_indices(void) 797 { 798 struct reftable_write_options opts = { 799 .block_size = 100, ··· 805 struct reftable_writer *writer; 806 struct reftable_table *table; 807 char buf[128]; 808 - int err, i; 809 810 - writer = t_reftable_strbuf_writer(&writer_buf, &opts); 811 reftable_writer_set_limits(writer, 1, 1); 812 for (i = 0; i < 100; i++) { 813 struct reftable_ref_record ref = { ··· 819 snprintf(buf, sizeof(buf), "refs/heads/%04d", i); 820 ref.refname = buf; 821 822 - err = reftable_writer_add_ref(writer, &ref); 823 - check(!err); 824 } 825 826 for (i = 0; i < 100; i++) { ··· 836 snprintf(buf, sizeof(buf), "refs/heads/%04d", i); 837 log.refname = buf; 838 839 - err = reftable_writer_add_log(writer, &log); 840 - check(!err); 841 } 842 843 reftable_writer_close(writer); ··· 847 * for each of the block types. 848 */ 849 stats = reftable_writer_stats(writer); 850 - check_int(stats->ref_stats.index_offset, >, 0); 851 - check_int(stats->obj_stats.index_offset, >, 0); 852 - check_int(stats->log_stats.index_offset, >, 0); 853 854 block_source_from_buf(&source, &writer_buf); 855 err = reftable_table_new(&table, &source, "filename"); 856 - check(!err); 857 858 /* 859 * Seeking the log uses the log index now. In case there is any 860 * confusion regarding indices we would notice here. 
861 */ 862 err = reftable_table_init_log_iterator(table, &it); 863 - check(!err); 864 err = reftable_iterator_seek_log(&it, ""); 865 - check(!err); 866 867 reftable_iterator_destroy(&it); 868 reftable_writer_free(writer); ··· 870 reftable_buf_release(&writer_buf); 871 } 872 873 - static void t_write_multi_level_index(void) 874 { 875 struct reftable_write_options opts = { 876 .block_size = 100, ··· 883 struct reftable_table *table; 884 int err; 885 886 - writer = t_reftable_strbuf_writer(&writer_buf, &opts); 887 reftable_writer_set_limits(writer, 1, 1); 888 for (size_t i = 0; i < 200; i++) { 889 struct reftable_ref_record ref = { ··· 896 snprintf(buf, sizeof(buf), "refs/heads/%03" PRIuMAX, (uintmax_t)i); 897 ref.refname = buf; 898 899 - err = reftable_writer_add_ref(writer, &ref); 900 - check(!err); 901 } 902 reftable_writer_close(writer); 903 ··· 906 * multi-level index. 907 */ 908 stats = reftable_writer_stats(writer); 909 - check_int(stats->ref_stats.max_index_level, ==, 2); 910 911 block_source_from_buf(&source, &writer_buf); 912 err = reftable_table_new(&table, &source, "filename"); 913 - check(!err); 914 915 /* 916 * Seeking the last ref should work as expected. 
917 */ 918 err = reftable_table_init_ref_iterator(table, &it); 919 - check(!err); 920 err = reftable_iterator_seek_ref(&it, "refs/heads/199"); 921 - check(!err); 922 923 reftable_iterator_destroy(&it); 924 reftable_writer_free(writer); ··· 927 reftable_buf_release(&buf); 928 } 929 930 - static void t_corrupt_table_empty(void) 931 { 932 struct reftable_buf buf = REFTABLE_BUF_INIT; 933 struct reftable_block_source source = { 0 }; ··· 936 937 block_source_from_buf(&source, &buf); 938 err = reftable_table_new(&table, &source, "file.log"); 939 - check_int(err, ==, REFTABLE_FORMAT_ERROR); 940 } 941 942 - static void t_corrupt_table(void) 943 { 944 uint8_t zeros[1024] = { 0 }; 945 struct reftable_buf buf = REFTABLE_BUF_INIT; 946 struct reftable_block_source source = { 0 }; 947 struct reftable_table *table; 948 int err; 949 - check(!reftable_buf_add(&buf, zeros, sizeof(zeros))); 950 951 block_source_from_buf(&source, &buf); 952 err = reftable_table_new(&table, &source, "file.log"); 953 - check_int(err, ==, REFTABLE_FORMAT_ERROR); 954 955 reftable_buf_release(&buf); 956 } 957 - 958 - int cmd_main(int argc UNUSED, const char *argv[] UNUSED) 959 - { 960 - TEST(t_buffer(), "strbuf works as blocksource"); 961 - TEST(t_corrupt_table(), "read-write on corrupted table"); 962 - TEST(t_corrupt_table_empty(), "read-write on an empty table"); 963 - TEST(t_log_buffer_size(), "buffer extension for log compression"); 964 - TEST(t_log_overflow(), "log overflow returns expected error"); 965 - TEST(t_log_write_limits(), "writer limits for writing log records"); 966 - TEST(t_log_write_read(), "read-write on log records"); 967 - TEST(t_log_zlib_corruption(), "reading corrupted log record returns expected error"); 968 - TEST(t_table_read_api(), "read on a table"); 969 - TEST(t_table_read_write_seek_index(), "read-write on a table with index"); 970 - TEST(t_table_read_write_seek_linear(), "read-write on a table without index (SHA1)"); 971 - TEST(t_table_read_write_seek_linear_sha256(), 
"read-write on a table without index (SHA256)"); 972 - TEST(t_table_read_write_sequential(), "sequential read-write on a table"); 973 - TEST(t_table_refs_for_no_index(), "refs-only table with no index"); 974 - TEST(t_table_refs_for_obj_index(), "refs-only table with index"); 975 - TEST(t_table_write_small_table(), "write_table works"); 976 - TEST(t_write_empty_key(), "write on refs with empty keys"); 977 - TEST(t_write_empty_table(), "read-write on empty tables"); 978 - TEST(t_write_key_order(), "refs must be written in increasing order"); 979 - TEST(t_write_multi_level_index(), "table with multi-level index"); 980 - TEST(t_write_multiple_indices(), "table with indices for multiple block types"); 981 - TEST(t_write_object_id_length(), "prefix compression on writing refs"); 982 - TEST(t_write_object_id_min_length(), "prefix compression on writing refs"); 983 - 984 - return test_done(); 985 - }
··· 8 9 #define DISABLE_SIGN_COMPARE_WARNINGS 10 11 + #include "unit-test.h" 12 #include "lib-reftable.h" 13 #include "reftable/basics.h" 14 #include "reftable/blocksource.h" ··· 19 20 static const int update_index = 5; 21 22 + void test_reftable_readwrite__buffer(void) 23 { 24 struct reftable_buf buf = REFTABLE_BUF_INIT; 25 struct reftable_block_source source = { 0 }; 26 struct reftable_block_data out = { 0 }; 27 int n; 28 uint8_t in[] = "hello"; 29 + cl_assert_equal_i(reftable_buf_add(&buf, in, sizeof(in)), 0); 30 block_source_from_buf(&source, &buf); 31 + cl_assert_equal_i(block_source_size(&source), 6); 32 n = block_source_read_data(&source, &out, 0, sizeof(in)); 33 + cl_assert_equal_i(n, sizeof(in)); 34 + cl_assert(!memcmp(in, out.data, n)); 35 block_source_release_data(&out); 36 37 n = block_source_read_data(&source, &out, 1, 2); 38 + cl_assert_equal_i(n, 2); 39 + cl_assert(!memcmp(out.data, "el", 2)); 40 41 block_source_release_data(&out); 42 block_source_close(&source); ··· 55 int i; 56 57 REFTABLE_CALLOC_ARRAY(*names, N + 1); 58 + cl_assert(*names != NULL); 59 REFTABLE_CALLOC_ARRAY(refs, N); 60 + cl_assert(refs != NULL); 61 REFTABLE_CALLOC_ARRAY(logs, N); 62 + cl_assert(logs != NULL); 63 64 for (i = 0; i < N; i++) { 65 refs[i].refname = (*names)[i] = xstrfmt("refs/heads/branch%02d", i); 66 refs[i].update_index = update_index; 67 refs[i].value_type = REFTABLE_REF_VAL1; 68 + cl_reftable_set_hash(refs[i].value.val1, i, 69 + REFTABLE_HASH_SHA1); 70 } 71 72 for (i = 0; i < N; i++) { 73 logs[i].refname = (*names)[i]; 74 logs[i].update_index = update_index; 75 logs[i].value_type = REFTABLE_LOG_UPDATE; 76 + cl_reftable_set_hash(logs[i].value.update.new_hash, i, 77 + REFTABLE_HASH_SHA1); 78 logs[i].value.update.message = (char *) "message"; 79 } 80 81 + cl_reftable_write_to_buf(buf, refs, N, logs, N, &opts); 82 83 reftable_free(refs); 84 reftable_free(logs); 85 } 86 87 + void test_reftable_readwrite__log_buffer_size(void) 88 { 89 struct reftable_buf buf = 
REFTABLE_BUF_INIT; 90 struct reftable_write_options opts = { 91 .block_size = 4096, 92 }; 93 int i; 94 struct reftable_log_record 95 log = { .refname = (char *) "refs/heads/master", ··· 102 .time = 0x5e430672, 103 .message = (char *) "commit: 9\n", 104 } } }; 105 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, 106 + &opts); 107 108 /* This tests buffer extension for log compression. Must use a random 109 hash, to ensure that the compressed part is larger than the original. ··· 113 log.value.update.new_hash[i] = (uint8_t)(git_rand(0) % 256); 114 } 115 reftable_writer_set_limits(w, update_index, update_index); 116 + cl_assert_equal_i(reftable_writer_add_log(w, &log), 0); 117 + cl_assert_equal_i(reftable_writer_close(w), 0); 118 reftable_writer_free(w); 119 reftable_buf_release(&buf); 120 } 121 122 + void test_reftable_readwrite__log_overflow(void) 123 { 124 struct reftable_buf buf = REFTABLE_BUF_INIT; 125 char msg[256] = { 0 }; 126 struct reftable_write_options opts = { 127 .block_size = ARRAY_SIZE(msg), 128 }; 129 struct reftable_log_record log = { 130 .refname = (char *) "refs/heads/master", 131 .update_index = update_index, ··· 142 }, 143 }, 144 }; 145 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, 146 + &opts); 147 148 memset(msg, 'x', sizeof(msg) - 1); 149 reftable_writer_set_limits(w, update_index, update_index); 150 + cl_assert_equal_i(reftable_writer_add_log(w, &log), REFTABLE_ENTRY_TOO_BIG_ERROR); 151 reftable_writer_free(w); 152 reftable_buf_release(&buf); 153 } 154 155 + void test_reftable_readwrite__log_write_limits(void) 156 { 157 struct reftable_write_options opts = { 0 }; 158 struct reftable_buf buf = REFTABLE_BUF_INIT; 159 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, 160 + &opts); 161 struct reftable_log_record log = { 162 .refname = (char *)"refs/head/master", 163 .update_index = 0, ··· 173 }, 174 }, 175 }; 176 177 reftable_writer_set_limits(w, 1, 1); 178 179 /* write with update_index (0) below set 
limits (1, 1) */ 180 + cl_assert_equal_i(reftable_writer_add_log(w, &log), 0); 181 182 /* write with update_index (1) in the set limits (1, 1) */ 183 log.update_index = 1; 184 + cl_assert_equal_i(reftable_writer_add_log(w, &log), 0); 185 186 /* write with update_index (3) above set limits (1, 1) */ 187 log.update_index = 3; 188 + cl_assert_equal_i(reftable_writer_add_log(w, &log), REFTABLE_API_ERROR); 189 190 reftable_writer_free(w); 191 reftable_buf_release(&buf); 192 } 193 194 + void test_reftable_readwrite__log_write_read(void) 195 { 196 struct reftable_write_options opts = { 197 .block_size = 256, ··· 202 struct reftable_table *table; 203 struct reftable_block_source source = { 0 }; 204 struct reftable_buf buf = REFTABLE_BUF_INIT; 205 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, &opts); 206 const struct reftable_stats *stats = NULL; 207 + int N = 2, i; 208 char **names; 209 + int err; 210 211 names = reftable_calloc(N + 1, sizeof(*names)); 212 + cl_assert(names != NULL); 213 214 reftable_writer_set_limits(w, 0, N); 215 ··· 221 ref.refname = name; 222 ref.update_index = i; 223 224 + cl_assert_equal_i(reftable_writer_add_ref(w, &ref), 0); 225 } 226 227 for (i = 0; i < N; i++) { ··· 230 log.refname = names[i]; 231 log.update_index = i; 232 log.value_type = REFTABLE_LOG_UPDATE; 233 + cl_reftable_set_hash(log.value.update.old_hash, i, 234 + REFTABLE_HASH_SHA1); 235 + cl_reftable_set_hash(log.value.update.new_hash, i + 1, 236 + REFTABLE_HASH_SHA1); 237 238 + cl_assert_equal_i(reftable_writer_add_log(w, &log), 0); 239 } 240 241 + cl_assert_equal_i(reftable_writer_close(w), 0); 242 243 stats = reftable_writer_stats(w); 244 + cl_assert(stats->log_stats.blocks > 0); 245 reftable_writer_free(w); 246 w = NULL; 247 248 block_source_from_buf(&source, &buf); 249 250 err = reftable_table_new(&table, &source, "file.log"); 251 + cl_assert(!err); 252 253 err = reftable_table_init_ref_iterator(table, &it); 254 + cl_assert(!err); 255 256 err = 
reftable_iterator_seek_ref(&it, names[N - 1]); 257 + cl_assert(!err); 258 259 err = reftable_iterator_next_ref(&it, &ref); 260 + cl_assert(!err); 261 262 /* end of iteration. */ 263 + cl_assert(reftable_iterator_next_ref(&it, &ref) > 0); 264 265 reftable_iterator_destroy(&it); 266 reftable_ref_record_release(&ref); 267 268 err = reftable_table_init_log_iterator(table, &it); 269 + cl_assert(!err); 270 err = reftable_iterator_seek_log(&it, ""); 271 + cl_assert(!err); 272 273 for (i = 0; ; i++) { 274 int err = reftable_iterator_next_log(&it, &log); 275 if (err > 0) 276 break; 277 + cl_assert(!err); 278 + cl_assert_equal_s(names[i], log.refname); 279 + cl_assert_equal_i(i, log.update_index); 280 reftable_log_record_release(&log); 281 } 282 283 + cl_assert_equal_i(i, N); 284 reftable_iterator_destroy(&it); 285 286 /* cleanup. */ ··· 289 reftable_table_decref(table); 290 } 291 292 + void test_reftable_readwrite__log_zlib_corruption(void) 293 { 294 struct reftable_write_options opts = { 295 .block_size = 256, ··· 298 struct reftable_table *table; 299 struct reftable_block_source source = { 0 }; 300 struct reftable_buf buf = REFTABLE_BUF_INIT; 301 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, 302 + &opts); 303 const struct reftable_stats *stats = NULL; 304 char message[100] = { 0 }; 305 + int i; 306 + int err; 307 struct reftable_log_record log = { 308 .refname = (char *) "refname", 309 .value_type = REFTABLE_LOG_UPDATE, ··· 323 324 reftable_writer_set_limits(w, 1, 1); 325 326 + cl_assert_equal_i(reftable_writer_add_log(w, &log), 0); 327 + cl_assert_equal_i(reftable_writer_close(w), 0); 328 329 stats = reftable_writer_stats(w); 330 + cl_assert(stats->log_stats.blocks > 0); 331 reftable_writer_free(w); 332 w = NULL; 333 ··· 337 block_source_from_buf(&source, &buf); 338 339 err = reftable_table_new(&table, &source, "file.log"); 340 + cl_assert(!err); 341 342 err = reftable_table_init_log_iterator(table, &it); 343 + cl_assert(!err); 344 err = 
reftable_iterator_seek_log(&it, "refname"); 345 + cl_assert_equal_i(err, REFTABLE_ZLIB_ERROR); 346 347 reftable_iterator_destroy(&it); 348 ··· 351 reftable_buf_release(&buf); 352 } 353 354 + void test_reftable_readwrite__table_read_write_sequential(void) 355 { 356 char **names; 357 struct reftable_buf buf = REFTABLE_BUF_INIT; ··· 367 block_source_from_buf(&source, &buf); 368 369 err = reftable_table_new(&table, &source, "file.ref"); 370 + cl_assert(!err); 371 372 err = reftable_table_init_ref_iterator(table, &it); 373 + cl_assert(!err); 374 err = reftable_iterator_seek_ref(&it, ""); 375 + cl_assert(!err); 376 377 for (j = 0; ; j++) { 378 struct reftable_ref_record ref = { 0 }; 379 int r = reftable_iterator_next_ref(&it, &ref); 380 + cl_assert(r >= 0); 381 if (r > 0) 382 break; 383 + cl_assert_equal_s(names[j], ref.refname); 384 + cl_assert_equal_i(update_index, ref.update_index); 385 reftable_ref_record_release(&ref); 386 } 387 + cl_assert_equal_i(j, N); 388 389 reftable_iterator_destroy(&it); 390 reftable_table_decref(table); ··· 392 free_names(names); 393 } 394 395 + void test_reftable_readwrite__table_write_small_table(void) 396 { 397 char **names; 398 struct reftable_buf buf = REFTABLE_BUF_INIT; 399 int N = 1; 400 write_table(&names, &buf, N, 4096, REFTABLE_HASH_SHA1); 401 + cl_assert(buf.len < 200); 402 reftable_buf_release(&buf); 403 free_names(names); 404 } 405 406 + void test_reftable_readwrite__table_read_api(void) 407 { 408 char **names; 409 struct reftable_buf buf = REFTABLE_BUF_INIT; 410 int N = 50; 411 struct reftable_table *table; 412 struct reftable_block_source source = { 0 }; 413 struct reftable_log_record log = { 0 }; 414 struct reftable_iterator it = { 0 }; 415 + int err; 416 417 write_table(&names, &buf, N, 256, REFTABLE_HASH_SHA1); 418 419 block_source_from_buf(&source, &buf); 420 421 err = reftable_table_new(&table, &source, "file.ref"); 422 + cl_assert(!err); 423 424 err = reftable_table_init_ref_iterator(table, &it); 425 + cl_assert(!err); 
426 err = reftable_iterator_seek_ref(&it, names[0]); 427 + cl_assert(!err); 428 429 err = reftable_iterator_next_log(&it, &log); 430 + cl_assert_equal_i(err, REFTABLE_API_ERROR); 431 432 reftable_buf_release(&buf); 433 free_names(names); ··· 455 block_source_from_buf(&source, &buf); 456 457 err = reftable_table_new(&table, &source, "file.ref"); 458 + cl_assert(!err); 459 + cl_assert_equal_i(hash_id, reftable_table_hash_id(table)); 460 461 if (!index) { 462 table->ref_offsets.index_offset = 0; 463 } else { 464 + cl_assert(table->ref_offsets.index_offset > 0); 465 } 466 467 for (i = 1; i < N; i++) { 468 err = reftable_table_init_ref_iterator(table, &it); 469 + cl_assert(!err); 470 err = reftable_iterator_seek_ref(&it, names[i]); 471 + cl_assert(!err); 472 err = reftable_iterator_next_ref(&it, &ref); 473 + cl_assert(!err); 474 + cl_assert_equal_s(names[i], ref.refname); 475 + cl_assert_equal_i(REFTABLE_REF_VAL1, ref.value_type); 476 + cl_assert_equal_i(i, ref.value.val1[0]); 477 478 reftable_ref_record_release(&ref); 479 reftable_iterator_destroy(&it); 480 } 481 482 + cl_assert_equal_i(reftable_buf_addstr(&pastLast, names[N - 1]), 483 + 0); 484 + cl_assert_equal_i(reftable_buf_addstr(&pastLast, "/"), 0); 485 486 err = reftable_table_init_ref_iterator(table, &it); 487 + cl_assert(!err); 488 err = reftable_iterator_seek_ref(&it, pastLast.buf); 489 if (err == 0) { 490 struct reftable_ref_record ref = { 0 }; 491 int err = reftable_iterator_next_ref(&it, &ref); 492 + cl_assert(err > 0); 493 } else { 494 + cl_assert(err > 0); 495 } 496 497 reftable_buf_release(&pastLast); ··· 502 reftable_table_decref(table); 503 } 504 505 + void test_reftable_readwrite__table_read_write_seek_linear(void) 506 { 507 t_table_read_write_seek(0, REFTABLE_HASH_SHA1); 508 } 509 510 + void test_reftable_readwrite__table_read_write_seek_linear_sha256(void) 511 { 512 t_table_read_write_seek(0, REFTABLE_HASH_SHA256); 513 } 514 515 + void test_reftable_readwrite__table_read_write_seek_index(void) 516 
{ 517 t_table_read_write_seek(1, REFTABLE_HASH_SHA1); 518 } ··· 530 struct reftable_table *table; 531 struct reftable_block_source source = { 0 }; 532 struct reftable_buf buf = REFTABLE_BUF_INIT; 533 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, 534 + &opts); 535 struct reftable_iterator it = { 0 }; 536 + int N = 50, j, i; 537 + int err; 538 539 want_names = reftable_calloc(N + 1, sizeof(*want_names)); 540 + cl_assert(want_names != NULL); 541 542 + cl_reftable_set_hash(want_hash, 4, REFTABLE_HASH_SHA1); 543 544 for (i = 0; i < N; i++) { 545 uint8_t hash[REFTABLE_HASH_SIZE_SHA1]; ··· 555 ref.refname = name; 556 557 ref.value_type = REFTABLE_REF_VAL2; 558 + cl_reftable_set_hash(ref.value.val2.value, i / 4, 559 + REFTABLE_HASH_SHA1); 560 + cl_reftable_set_hash(ref.value.val2.target_value, 561 + 3 + i / 4, REFTABLE_HASH_SHA1); 562 563 /* 80 bytes / entry, so 3 entries per block. Yields 17 564 */ 565 /* blocks. */ 566 + cl_assert_equal_i(reftable_writer_add_ref(w, &ref), 0); 567 568 if (!memcmp(ref.value.val2.value, want_hash, REFTABLE_HASH_SIZE_SHA1) || 569 !memcmp(ref.value.val2.target_value, want_hash, REFTABLE_HASH_SIZE_SHA1)) 570 want_names[want_names_len++] = xstrdup(name); 571 } 572 573 + cl_assert_equal_i(reftable_writer_close(w), 0); 574 575 reftable_writer_free(w); 576 w = NULL; ··· 578 block_source_from_buf(&source, &buf); 579 580 err = reftable_table_new(&table, &source, "file.ref"); 581 + cl_assert(!err); 582 if (!indexed) 583 table->obj_offsets.is_present = 0; 584 585 err = reftable_table_init_ref_iterator(table, &it); 586 + cl_assert(!err); 587 err = reftable_iterator_seek_ref(&it, ""); 588 + cl_assert(!err); 589 reftable_iterator_destroy(&it); 590 591 err = reftable_table_refs_for(table, &it, want_hash); 592 + cl_assert(!err); 593 594 for (j = 0; ; j++) { 595 int err = reftable_iterator_next_ref(&it, &ref); 596 + cl_assert(err >= 0); 597 if (err > 0) 598 break; 599 + cl_assert(j < want_names_len); 600 + cl_assert_equal_s(ref.refname, 
want_names[j]); 601 reftable_ref_record_release(&ref); 602 } 603 + cl_assert_equal_i(j, want_names_len); 604 605 reftable_buf_release(&buf); 606 free_names(want_names); ··· 608 reftable_table_decref(table); 609 } 610 611 + void test_reftable_readwrite__table_refs_for_no_index(void) 612 { 613 t_table_refs_for(0); 614 } 615 616 + void test_reftable_readwrite__table_refs_for_obj_index(void) 617 { 618 t_table_refs_for(1); 619 } 620 621 + void test_reftable_readwrite__write_empty_table(void) 622 { 623 struct reftable_write_options opts = { 0 }; 624 struct reftable_buf buf = REFTABLE_BUF_INIT; 625 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, &opts); 626 struct reftable_block_source source = { 0 }; 627 struct reftable_table *table = NULL; 628 struct reftable_ref_record rec = { 0 }; ··· 631 632 reftable_writer_set_limits(w, 1, 1); 633 634 + cl_assert_equal_i(reftable_writer_close(w), REFTABLE_EMPTY_TABLE_ERROR); 635 reftable_writer_free(w); 636 637 + cl_assert_equal_i(buf.len, header_size(1) + footer_size(1)); 638 639 block_source_from_buf(&source, &buf); 640 641 err = reftable_table_new(&table, &source, "filename"); 642 + cl_assert(!err); 643 644 err = reftable_table_init_ref_iterator(table, &it); 645 + cl_assert(!err); 646 err = reftable_iterator_seek_ref(&it, ""); 647 + cl_assert(!err); 648 649 err = reftable_iterator_next_ref(&it, &rec); 650 + cl_assert(err > 0); 651 652 reftable_iterator_destroy(&it); 653 reftable_table_decref(table); 654 reftable_buf_release(&buf); 655 } 656 657 + void test_reftable_readwrite__write_object_id_min_length(void) 658 { 659 struct reftable_write_options opts = { 660 .block_size = 75, 661 }; 662 struct reftable_buf buf = REFTABLE_BUF_INIT; 663 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, &opts); 664 struct reftable_ref_record ref = { 665 .update_index = 1, 666 .value_type = REFTABLE_REF_VAL1, 667 .value.val1 = {42}, 668 }; 669 int i; 670 671 reftable_writer_set_limits(w, 1, 1); ··· 676 char name[256]; 677 
snprintf(name, sizeof(name), "ref%05d", i); 678 ref.refname = name; 679 + cl_assert_equal_i(reftable_writer_add_ref(w, &ref), 0); 680 } 681 682 + cl_assert_equal_i(reftable_writer_close(w), 0); 683 + cl_assert_equal_i(reftable_writer_stats(w)->object_id_len, 2); 684 reftable_writer_free(w); 685 reftable_buf_release(&buf); 686 } 687 688 + void test_reftable_readwrite__write_object_id_length(void) 689 { 690 struct reftable_write_options opts = { 691 .block_size = 75, 692 }; 693 struct reftable_buf buf = REFTABLE_BUF_INIT; 694 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, &opts); 695 struct reftable_ref_record ref = { 696 .update_index = 1, 697 .value_type = REFTABLE_REF_VAL1, 698 .value.val1 = {42}, 699 }; 700 int i; 701 702 reftable_writer_set_limits(w, 1, 1); ··· 708 snprintf(name, sizeof(name), "ref%05d", i); 709 ref.refname = name; 710 ref.value.val1[15] = i; 711 + cl_assert(reftable_writer_add_ref(w, &ref) == 0); 712 } 713 714 + cl_assert_equal_i(reftable_writer_close(w), 0); 715 + cl_assert_equal_i(reftable_writer_stats(w)->object_id_len, 16); 716 reftable_writer_free(w); 717 reftable_buf_release(&buf); 718 } 719 720 + void test_reftable_readwrite__write_empty_key(void) 721 { 722 struct reftable_write_options opts = { 0 }; 723 struct reftable_buf buf = REFTABLE_BUF_INIT; 724 + struct reftable_writer *w = cl_reftable_strbuf_writer(&buf, &opts); 725 struct reftable_ref_record ref = { 726 .refname = (char *) "", 727 .update_index = 1, 728 .value_type = REFTABLE_REF_DELETION, 729 }; 730 731 reftable_writer_set_limits(w, 1, 1); 732 + cl_assert_equal_i(reftable_writer_add_ref(w, &ref), REFTABLE_API_ERROR); 733 + cl_assert_equal_i(reftable_writer_close(w), 734 + REFTABLE_EMPTY_TABLE_ERROR); 735 reftable_writer_free(w); 736 reftable_buf_release(&buf); 737 } 738 739 + void test_reftable_readwrite__write_key_order(void) 740 { 741 struct reftable_write_options opts = { 0 }; 742 struct reftable_buf buf = REFTABLE_BUF_INIT; 743 + struct reftable_writer *w = 
cl_reftable_strbuf_writer(&buf, &opts); 744 struct reftable_ref_record refs[2] = { 745 { 746 .refname = (char *) "b", ··· 758 }, 759 } 760 }; 761 762 reftable_writer_set_limits(w, 1, 1); 763 + cl_assert_equal_i(reftable_writer_add_ref(w, &refs[0]), 0); 764 + cl_assert_equal_i(reftable_writer_add_ref(w, &refs[1]), 765 + REFTABLE_API_ERROR); 766 767 refs[0].update_index = 2; 768 + cl_assert_equal_i(reftable_writer_add_ref(w, &refs[0]), REFTABLE_API_ERROR); 769 770 reftable_writer_close(w); 771 reftable_writer_free(w); 772 reftable_buf_release(&buf); 773 } 774 775 + void test_reftable_readwrite__write_multiple_indices(void) 776 { 777 struct reftable_write_options opts = { 778 .block_size = 100, ··· 784 struct reftable_writer *writer; 785 struct reftable_table *table; 786 char buf[128]; 787 + int i; 788 + int err; 789 790 + writer = cl_reftable_strbuf_writer(&writer_buf, &opts); 791 reftable_writer_set_limits(writer, 1, 1); 792 for (i = 0; i < 100; i++) { 793 struct reftable_ref_record ref = { ··· 799 snprintf(buf, sizeof(buf), "refs/heads/%04d", i); 800 ref.refname = buf; 801 802 + cl_assert_equal_i(reftable_writer_add_ref(writer, &ref), 0); 803 } 804 805 for (i = 0; i < 100; i++) { ··· 815 snprintf(buf, sizeof(buf), "refs/heads/%04d", i); 816 log.refname = buf; 817 818 + cl_assert_equal_i(reftable_writer_add_log(writer, &log), 0); 819 } 820 821 reftable_writer_close(writer); ··· 825 * for each of the block types. 826 */ 827 stats = reftable_writer_stats(writer); 828 + cl_assert(stats->ref_stats.index_offset > 0); 829 + cl_assert(stats->obj_stats.index_offset > 0); 830 + cl_assert(stats->log_stats.index_offset > 0); 831 832 block_source_from_buf(&source, &writer_buf); 833 err = reftable_table_new(&table, &source, "filename"); 834 + cl_assert(!err); 835 836 /* 837 * Seeking the log uses the log index now. In case there is any 838 * confusion regarding indices we would notice here. 
839 */ 840 err = reftable_table_init_log_iterator(table, &it); 841 + cl_assert(!err); 842 err = reftable_iterator_seek_log(&it, ""); 843 + cl_assert(!err); 844 845 reftable_iterator_destroy(&it); 846 reftable_writer_free(writer); ··· 848 reftable_buf_release(&writer_buf); 849 } 850 851 + void test_reftable_readwrite__write_multi_level_index(void) 852 { 853 struct reftable_write_options opts = { 854 .block_size = 100, ··· 861 struct reftable_table *table; 862 int err; 863 864 + writer = cl_reftable_strbuf_writer(&writer_buf, &opts); 865 reftable_writer_set_limits(writer, 1, 1); 866 for (size_t i = 0; i < 200; i++) { 867 struct reftable_ref_record ref = { ··· 874 snprintf(buf, sizeof(buf), "refs/heads/%03" PRIuMAX, (uintmax_t)i); 875 ref.refname = buf; 876 877 + cl_assert_equal_i(reftable_writer_add_ref(writer, &ref), 0); 878 } 879 reftable_writer_close(writer); 880 ··· 883 * multi-level index. 884 */ 885 stats = reftable_writer_stats(writer); 886 + cl_assert_equal_i(stats->ref_stats.max_index_level, 2); 887 888 block_source_from_buf(&source, &writer_buf); 889 err = reftable_table_new(&table, &source, "filename"); 890 + cl_assert(!err); 891 892 /* 893 * Seeking the last ref should work as expected. 
894 */ 895 err = reftable_table_init_ref_iterator(table, &it); 896 + cl_assert(!err); 897 err = reftable_iterator_seek_ref(&it, "refs/heads/199"); 898 + cl_assert(!err); 899 900 reftable_iterator_destroy(&it); 901 reftable_writer_free(writer); ··· 904 reftable_buf_release(&buf); 905 } 906 907 + void test_reftable_readwrite__corrupt_table_empty(void) 908 { 909 struct reftable_buf buf = REFTABLE_BUF_INIT; 910 struct reftable_block_source source = { 0 }; ··· 913 914 block_source_from_buf(&source, &buf); 915 err = reftable_table_new(&table, &source, "file.log"); 916 + cl_assert_equal_i(err, REFTABLE_FORMAT_ERROR); 917 } 918 919 + void test_reftable_readwrite__corrupt_table(void) 920 { 921 uint8_t zeros[1024] = { 0 }; 922 struct reftable_buf buf = REFTABLE_BUF_INIT; 923 struct reftable_block_source source = { 0 }; 924 struct reftable_table *table; 925 int err; 926 + 927 + cl_assert(!reftable_buf_add(&buf, zeros, sizeof(zeros))); 928 929 block_source_from_buf(&source, &buf); 930 err = reftable_table_new(&table, &source, "file.log"); 931 + cl_assert_equal_i(err, REFTABLE_FORMAT_ERROR); 932 933 reftable_buf_release(&buf); 934 }
+130 -120
t/unit-tests/t-reftable-record.c t/unit-tests/u-reftable-record.c
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 - #include "test-lib.h" 10 #include "reftable/basics.h" 11 #include "reftable/constants.h" 12 #include "reftable/record.h" ··· 17 uint8_t typ; 18 19 typ = reftable_record_type(rec); 20 - check(!reftable_record_init(&copy, typ)); 21 reftable_record_copy_from(&copy, rec, REFTABLE_HASH_SIZE_SHA1); 22 /* do it twice to catch memory leaks */ 23 reftable_record_copy_from(&copy, rec, REFTABLE_HASH_SIZE_SHA1); 24 - check(reftable_record_equal(rec, &copy, REFTABLE_HASH_SIZE_SHA1)); 25 26 reftable_record_release(&copy); 27 } 28 29 - static void t_varint_roundtrip(void) 30 { 31 uint64_t inputs[] = { 0, 32 1, ··· 49 int n = put_var_int(&out, in); 50 uint64_t got = 0; 51 52 - check_int(n, >, 0); 53 out.len = n; 54 n = get_var_int(&got, &out); 55 - check_int(n, >, 0); 56 57 - check_int(got, ==, in); 58 } 59 } 60 61 - static void t_varint_overflow(void) 62 { 63 unsigned char buf[] = { 64 0xFF, 0xFF, 0xFF, 0xFF, ··· 70 .len = sizeof(buf), 71 }; 72 uint64_t value; 73 - int err = get_var_int(&value, &view); 74 - check_int(err, ==, -1); 75 } 76 77 static void set_hash(uint8_t *h, int j) ··· 80 h[i] = (j >> i) & 0xff; 81 } 82 83 - static void t_reftable_ref_record_comparison(void) 84 { 85 struct reftable_record in[3] = { 86 { ··· 102 }; 103 int cmp; 104 105 - check(!reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 106 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 107 - check(!cmp); 108 109 - check(!reftable_record_equal(&in[1], &in[2], REFTABLE_HASH_SIZE_SHA1)); 110 - check(!reftable_record_cmp(&in[1], &in[2], &cmp)); 111 - check_int(cmp, >, 0); 112 113 in[1].u.ref.value_type = in[0].u.ref.value_type; 114 - check(reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 115 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 116 - check(!cmp); 117 } 118 119 - static void t_reftable_ref_record_compare_name(void) 120 { 121 struct reftable_ref_record recs[3] = { 122 { ··· 130 }, 131 }; 
132 133 - check_int(reftable_ref_record_compare_name(&recs[0], &recs[1]), <, 0); 134 - check_int(reftable_ref_record_compare_name(&recs[1], &recs[0]), >, 0); 135 - check_int(reftable_ref_record_compare_name(&recs[0], &recs[2]), ==, 0); 136 } 137 138 - static void t_reftable_ref_record_roundtrip(void) 139 { 140 struct reftable_buf scratch = REFTABLE_BUF_INIT; 141 ··· 172 173 t_copy(&in); 174 175 - check_int(reftable_record_val_type(&in), ==, i); 176 - check_int(reftable_record_is_deletion(&in), ==, i == REFTABLE_REF_DELETION); 177 178 reftable_record_key(&in, &key); 179 n = reftable_record_encode(&in, dest, REFTABLE_HASH_SIZE_SHA1); 180 - check_int(n, >, 0); 181 182 /* decode into a non-zero reftable_record to test for leaks. */ 183 m = reftable_record_decode(&out, key, i, dest, REFTABLE_HASH_SIZE_SHA1, &scratch); 184 - check_int(n, ==, m); 185 186 - check(reftable_ref_record_equal(&in.u.ref, &out.u.ref, 187 - REFTABLE_HASH_SIZE_SHA1)); 188 reftable_record_release(&in); 189 190 reftable_buf_release(&key); ··· 194 reftable_buf_release(&scratch); 195 } 196 197 - static void t_reftable_log_record_comparison(void) 198 { 199 struct reftable_record in[3] = { 200 { ··· 215 }; 216 int cmp; 217 218 - check(!reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 219 - check(!reftable_record_equal(&in[1], &in[2], REFTABLE_HASH_SIZE_SHA1)); 220 - check(!reftable_record_cmp(&in[1], &in[2], &cmp)); 221 - check_int(cmp, >, 0); 222 /* comparison should be reversed for equal keys, because 223 * comparison is now performed on the basis of update indices */ 224 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 225 - check_int(cmp, <, 0); 226 227 in[1].u.log.update_index = in[0].u.log.update_index; 228 - check(reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 229 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 230 } 231 232 - static void t_reftable_log_record_compare_key(void) 233 { 234 struct reftable_log_record logs[3] = { 235 { ··· 246 }, 247 }; 248 
249 - check_int(reftable_log_record_compare_key(&logs[0], &logs[1]), <, 0); 250 - check_int(reftable_log_record_compare_key(&logs[1], &logs[0]), >, 0); 251 252 logs[1].update_index = logs[0].update_index; 253 - check_int(reftable_log_record_compare_key(&logs[0], &logs[1]), <, 0); 254 255 - check_int(reftable_log_record_compare_key(&logs[0], &logs[2]), >, 0); 256 - check_int(reftable_log_record_compare_key(&logs[2], &logs[0]), <, 0); 257 logs[2].update_index = logs[0].update_index; 258 - check_int(reftable_log_record_compare_key(&logs[0], &logs[2]), ==, 0); 259 } 260 261 - static void t_reftable_log_record_roundtrip(void) 262 { 263 struct reftable_log_record in[] = { 264 { ··· 292 set_hash(in[2].value.update.new_hash, 3); 293 set_hash(in[2].value.update.old_hash, 4); 294 295 - check(!reftable_log_record_is_deletion(&in[0])); 296 - check(reftable_log_record_is_deletion(&in[1])); 297 - check(!reftable_log_record_is_deletion(&in[2])); 298 299 for (size_t i = 0; i < ARRAY_SIZE(in); i++) { 300 struct reftable_record rec = { .type = REFTABLE_BLOCK_TYPE_LOG }; ··· 328 reftable_record_key(&rec, &key); 329 330 n = reftable_record_encode(&rec, dest, REFTABLE_HASH_SIZE_SHA1); 331 - check_int(n, >=, 0); 332 valtype = reftable_record_val_type(&rec); 333 m = reftable_record_decode(&out, key, valtype, dest, 334 REFTABLE_HASH_SIZE_SHA1, &scratch); 335 - check_int(n, ==, m); 336 337 - check(reftable_log_record_equal(&in[i], &out.u.log, 338 - REFTABLE_HASH_SIZE_SHA1)); 339 reftable_log_record_release(&in[i]); 340 reftable_buf_release(&key); 341 reftable_record_release(&out); ··· 344 reftable_buf_release(&scratch); 345 } 346 347 - static void t_key_roundtrip(void) 348 { 349 uint8_t buffer[1024] = { 0 }; 350 struct string_view dest = { ··· 359 int n, m; 360 uint8_t rt_extra; 361 362 - check(!reftable_buf_addstr(&last_key, "refs/heads/master")); 363 - check(!reftable_buf_addstr(&key, "refs/tags/bla")); 364 extra = 6; 365 n = reftable_encode_key(&restart, dest, last_key, key, extra); 366 
- check(!restart); 367 - check_int(n, >, 0); 368 369 - check(!reftable_buf_addstr(&roundtrip, "refs/heads/master")); 370 m = reftable_decode_key(&roundtrip, &rt_extra, dest); 371 - check_int(n, ==, m); 372 - check(!reftable_buf_cmp(&key, &roundtrip)); 373 - check_int(rt_extra, ==, extra); 374 375 reftable_buf_release(&last_key); 376 reftable_buf_release(&key); 377 reftable_buf_release(&roundtrip); 378 } 379 380 - static void t_reftable_obj_record_comparison(void) 381 { 382 383 uint8_t id_bytes[] = { 0, 1, 2, 3, 4, 5, 6 }; ··· 405 }; 406 int cmp; 407 408 - check(!reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 409 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 410 - check(!cmp); 411 412 - check(!reftable_record_equal(&in[1], &in[2], REFTABLE_HASH_SIZE_SHA1)); 413 - check(!reftable_record_cmp(&in[1], &in[2], &cmp)); 414 - check_int(cmp, >, 0); 415 416 in[1].u.obj.offset_len = in[0].u.obj.offset_len; 417 - check(reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 418 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 419 - check(!cmp); 420 } 421 422 - static void t_reftable_obj_record_roundtrip(void) 423 { 424 uint8_t testHash1[REFTABLE_HASH_SIZE_SHA1] = { 1, 2, 3, 4, 0 }; 425 uint64_t till9[] = { 1, 2, 3, 4, 500, 600, 700, 800, 9000 }; ··· 460 int n, m; 461 uint8_t extra; 462 463 - check(!reftable_record_is_deletion(&in)); 464 t_copy(&in); 465 reftable_record_key(&in, &key); 466 n = reftable_record_encode(&in, dest, REFTABLE_HASH_SIZE_SHA1); 467 - check_int(n, >, 0); 468 extra = reftable_record_val_type(&in); 469 m = reftable_record_decode(&out, key, extra, dest, 470 REFTABLE_HASH_SIZE_SHA1, &scratch); 471 - check_int(n, ==, m); 472 473 - check(reftable_record_equal(&in, &out, REFTABLE_HASH_SIZE_SHA1)); 474 reftable_buf_release(&key); 475 reftable_record_release(&out); 476 } ··· 478 reftable_buf_release(&scratch); 479 } 480 481 - static void t_reftable_index_record_comparison(void) 482 { 483 struct reftable_record in[3] = { 484 { 
··· 499 }; 500 int cmp; 501 502 - check(!reftable_buf_addstr(&in[0].u.idx.last_key, "refs/heads/master")); 503 - check(!reftable_buf_addstr(&in[1].u.idx.last_key, "refs/heads/master")); 504 - check(!reftable_buf_addstr(&in[2].u.idx.last_key, "refs/heads/branch")); 505 506 - check(!reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 507 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 508 - check(!cmp); 509 510 - check(!reftable_record_equal(&in[1], &in[2], REFTABLE_HASH_SIZE_SHA1)); 511 - check(!reftable_record_cmp(&in[1], &in[2], &cmp)); 512 - check_int(cmp, >, 0); 513 514 in[1].u.idx.offset = in[0].u.idx.offset; 515 - check(reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1)); 516 - check(!reftable_record_cmp(&in[0], &in[1], &cmp)); 517 - check(!cmp); 518 519 for (size_t i = 0; i < ARRAY_SIZE(in); i++) 520 reftable_record_release(&in[i]); 521 } 522 523 - static void t_reftable_index_record_roundtrip(void) 524 { 525 struct reftable_record in = { 526 .type = REFTABLE_BLOCK_TYPE_INDEX, ··· 543 int n, m; 544 uint8_t extra; 545 546 - check(!reftable_buf_addstr(&in.u.idx.last_key, "refs/heads/master")); 547 reftable_record_key(&in, &key); 548 t_copy(&in); 549 550 - check(!reftable_record_is_deletion(&in)); 551 - check(!reftable_buf_cmp(&key, &in.u.idx.last_key)); 552 n = reftable_record_encode(&in, dest, REFTABLE_HASH_SIZE_SHA1); 553 - check_int(n, >, 0); 554 555 extra = reftable_record_val_type(&in); 556 - m = reftable_record_decode(&out, key, extra, dest, REFTABLE_HASH_SIZE_SHA1, 557 - &scratch); 558 - check_int(m, ==, n); 559 560 - check(reftable_record_equal(&in, &out, REFTABLE_HASH_SIZE_SHA1)); 561 562 reftable_record_release(&out); 563 reftable_buf_release(&key); 564 reftable_buf_release(&scratch); 565 reftable_buf_release(&in.u.idx.last_key); 566 } 567 - 568 - int cmd_main(int argc UNUSED, const char *argv[] UNUSED) 569 - { 570 - TEST(t_reftable_ref_record_comparison(), "comparison operations work on ref record"); 571 - 
TEST(t_reftable_log_record_comparison(), "comparison operations work on log record"); 572 - TEST(t_reftable_index_record_comparison(), "comparison operations work on index record"); 573 - TEST(t_reftable_obj_record_comparison(), "comparison operations work on obj record"); 574 - TEST(t_reftable_ref_record_compare_name(), "reftable_ref_record_compare_name works"); 575 - TEST(t_reftable_log_record_compare_key(), "reftable_log_record_compare_key works"); 576 - TEST(t_reftable_log_record_roundtrip(), "record operations work on log record"); 577 - TEST(t_reftable_ref_record_roundtrip(), "record operations work on ref record"); 578 - TEST(t_varint_roundtrip(), "put_var_int and get_var_int work"); 579 - TEST(t_varint_overflow(), "get_var_int notices an integer overflow"); 580 - TEST(t_key_roundtrip(), "reftable_encode_key and reftable_decode_key work"); 581 - TEST(t_reftable_obj_record_roundtrip(), "record operations work on obj record"); 582 - TEST(t_reftable_index_record_roundtrip(), "record operations work on index record"); 583 - 584 - return test_done(); 585 - }
··· 6 https://developers.google.com/open-source/licenses/bsd 7 */ 8 9 + #include "unit-test.h" 10 + #include "lib-reftable.h" 11 #include "reftable/basics.h" 12 #include "reftable/constants.h" 13 #include "reftable/record.h" ··· 18 uint8_t typ; 19 20 typ = reftable_record_type(rec); 21 + cl_assert_equal_i(reftable_record_init(&copy, typ), 0); 22 reftable_record_copy_from(&copy, rec, REFTABLE_HASH_SIZE_SHA1); 23 /* do it twice to catch memory leaks */ 24 reftable_record_copy_from(&copy, rec, REFTABLE_HASH_SIZE_SHA1); 25 + cl_assert(reftable_record_equal(rec, &copy, 26 + REFTABLE_HASH_SIZE_SHA1) != 0); 27 28 reftable_record_release(&copy); 29 } 30 31 + void test_reftable_record__varint_roundtrip(void) 32 { 33 uint64_t inputs[] = { 0, 34 1, ··· 51 int n = put_var_int(&out, in); 52 uint64_t got = 0; 53 54 + cl_assert(n > 0); 55 out.len = n; 56 n = get_var_int(&got, &out); 57 + cl_assert(n > 0); 58 59 + cl_assert_equal_i(got, in); 60 } 61 } 62 63 + void test_reftable_record__varint_overflow(void) 64 { 65 unsigned char buf[] = { 66 0xFF, 0xFF, 0xFF, 0xFF, ··· 72 .len = sizeof(buf), 73 }; 74 uint64_t value; 75 + cl_assert_equal_i(get_var_int(&value, &view), -1); 76 } 77 78 static void set_hash(uint8_t *h, int j) ··· 81 h[i] = (j >> i) & 0xff; 82 } 83 84 + void test_reftable_record__ref_record_comparison(void) 85 { 86 struct reftable_record in[3] = { 87 { ··· 103 }; 104 int cmp; 105 106 + cl_assert(reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1) == 0); 107 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 108 + cl_assert(!cmp); 109 110 + cl_assert(reftable_record_equal(&in[1], &in[2], 111 + REFTABLE_HASH_SIZE_SHA1) == 0); 112 + cl_assert_equal_i(reftable_record_cmp(&in[1], &in[2], &cmp), 0); 113 + cl_assert(cmp > 0); 114 115 in[1].u.ref.value_type = in[0].u.ref.value_type; 116 + cl_assert(reftable_record_equal(&in[0], &in[1], 117 + REFTABLE_HASH_SIZE_SHA1) != 0); 118 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 119 + 
cl_assert(!cmp); 120 } 121 122 + void test_reftable_record__ref_record_compare_name(void) 123 { 124 struct reftable_ref_record recs[3] = { 125 { ··· 133 }, 134 }; 135 136 + cl_assert(reftable_ref_record_compare_name(&recs[0], 137 + &recs[1]) < 0); 138 + cl_assert(reftable_ref_record_compare_name(&recs[1], 139 + &recs[0]) > 0); 140 + cl_assert_equal_i(reftable_ref_record_compare_name(&recs[0], 141 + &recs[2]), 0); 142 } 143 144 + void test_reftable_record__ref_record_roundtrip(void) 145 { 146 struct reftable_buf scratch = REFTABLE_BUF_INIT; 147 ··· 178 179 t_copy(&in); 180 181 + cl_assert_equal_i(reftable_record_val_type(&in), i); 182 + cl_assert_equal_i(reftable_record_is_deletion(&in), 183 + i == REFTABLE_REF_DELETION); 184 185 reftable_record_key(&in, &key); 186 n = reftable_record_encode(&in, dest, REFTABLE_HASH_SIZE_SHA1); 187 + cl_assert(n > 0); 188 189 /* decode into a non-zero reftable_record to test for leaks. */ 190 m = reftable_record_decode(&out, key, i, dest, REFTABLE_HASH_SIZE_SHA1, &scratch); 191 + cl_assert_equal_i(n, m); 192 193 + cl_assert(reftable_ref_record_equal(&in.u.ref, 194 + &out.u.ref, 195 + REFTABLE_HASH_SIZE_SHA1) != 0); 196 reftable_record_release(&in); 197 198 reftable_buf_release(&key); ··· 202 reftable_buf_release(&scratch); 203 } 204 205 + void test_reftable_record__log_record_comparison(void) 206 { 207 struct reftable_record in[3] = { 208 { ··· 223 }; 224 int cmp; 225 226 + cl_assert_equal_i(reftable_record_equal(&in[0], &in[1], 227 + REFTABLE_HASH_SIZE_SHA1), 0); 228 + cl_assert_equal_i(reftable_record_equal(&in[1], &in[2], 229 + REFTABLE_HASH_SIZE_SHA1), 0); 230 + cl_assert_equal_i(reftable_record_cmp(&in[1], &in[2], &cmp), 0); 231 + cl_assert(cmp > 0); 232 /* comparison should be reversed for equal keys, because 233 * comparison is now performed on the basis of update indices */ 234 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 235 + cl_assert(cmp < 0); 236 237 in[1].u.log.update_index = 
in[0].u.log.update_index; 238 + cl_assert(reftable_record_equal(&in[0], &in[1], 239 + REFTABLE_HASH_SIZE_SHA1) != 0); 240 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 241 } 242 243 + void test_reftable_record__log_record_compare_key(void) 244 { 245 struct reftable_log_record logs[3] = { 246 { ··· 257 }, 258 }; 259 260 + cl_assert(reftable_log_record_compare_key(&logs[0], 261 + &logs[1]) < 0); 262 + cl_assert(reftable_log_record_compare_key(&logs[1], 263 + &logs[0]) > 0); 264 265 logs[1].update_index = logs[0].update_index; 266 + cl_assert(reftable_log_record_compare_key(&logs[0], 267 + &logs[1]) < 0); 268 269 + cl_assert(reftable_log_record_compare_key(&logs[0], 270 + &logs[2]) > 0); 271 + cl_assert(reftable_log_record_compare_key(&logs[2], 272 + &logs[0]) < 0); 273 logs[2].update_index = logs[0].update_index; 274 + cl_assert_equal_i(reftable_log_record_compare_key(&logs[0], &logs[2]), 0); 275 } 276 277 + void test_reftable_record__log_record_roundtrip(void) 278 { 279 struct reftable_log_record in[] = { 280 { ··· 308 set_hash(in[2].value.update.new_hash, 3); 309 set_hash(in[2].value.update.old_hash, 4); 310 311 + cl_assert_equal_i(reftable_log_record_is_deletion(&in[0]), 0); 312 + cl_assert(reftable_log_record_is_deletion(&in[1]) != 0); 313 + cl_assert_equal_i(reftable_log_record_is_deletion(&in[2]), 0); 314 315 for (size_t i = 0; i < ARRAY_SIZE(in); i++) { 316 struct reftable_record rec = { .type = REFTABLE_BLOCK_TYPE_LOG }; ··· 344 reftable_record_key(&rec, &key); 345 346 n = reftable_record_encode(&rec, dest, REFTABLE_HASH_SIZE_SHA1); 347 + cl_assert(n >= 0); 348 valtype = reftable_record_val_type(&rec); 349 m = reftable_record_decode(&out, key, valtype, dest, 350 REFTABLE_HASH_SIZE_SHA1, &scratch); 351 + cl_assert_equal_i(n, m); 352 353 + cl_assert(reftable_log_record_equal(&in[i], &out.u.log, 354 + REFTABLE_HASH_SIZE_SHA1) != 0); 355 reftable_log_record_release(&in[i]); 356 reftable_buf_release(&key); 357 reftable_record_release(&out); 
··· 360 reftable_buf_release(&scratch); 361 } 362 363 + void test_reftable_record__key_roundtrip(void) 364 { 365 uint8_t buffer[1024] = { 0 }; 366 struct string_view dest = { ··· 375 int n, m; 376 uint8_t rt_extra; 377 378 + cl_assert_equal_i(reftable_buf_addstr(&last_key, 379 + "refs/heads/master"), 0); 380 + cl_assert_equal_i(reftable_buf_addstr(&key, 381 + "refs/tags/bla"), 0); 382 extra = 6; 383 n = reftable_encode_key(&restart, dest, last_key, key, extra); 384 + cl_assert(!restart); 385 + cl_assert(n > 0); 386 387 + cl_assert_equal_i(reftable_buf_addstr(&roundtrip, 388 + "refs/heads/master"), 0); 389 m = reftable_decode_key(&roundtrip, &rt_extra, dest); 390 + cl_assert_equal_i(n, m); 391 + cl_assert_equal_i(reftable_buf_cmp(&key, &roundtrip), 0); 392 + cl_assert_equal_i(rt_extra, extra); 393 394 reftable_buf_release(&last_key); 395 reftable_buf_release(&key); 396 reftable_buf_release(&roundtrip); 397 } 398 399 + void test_reftable_record__obj_record_comparison(void) 400 { 401 402 uint8_t id_bytes[] = { 0, 1, 2, 3, 4, 5, 6 }; ··· 424 }; 425 int cmp; 426 427 + cl_assert_equal_i(reftable_record_equal(&in[0], &in[1], 428 + REFTABLE_HASH_SIZE_SHA1), 0); 429 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 430 + cl_assert(!cmp); 431 432 + cl_assert_equal_i(reftable_record_equal(&in[1], &in[2], 433 + REFTABLE_HASH_SIZE_SHA1), 0); 434 + cl_assert_equal_i(reftable_record_cmp(&in[1], &in[2], &cmp), 0); 435 + cl_assert(cmp > 0); 436 437 in[1].u.obj.offset_len = in[0].u.obj.offset_len; 438 + cl_assert(reftable_record_equal(&in[0], &in[1], REFTABLE_HASH_SIZE_SHA1) != 0); 439 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 440 + cl_assert(!cmp); 441 } 442 443 + void test_reftable_record__obj_record_roundtrip(void) 444 { 445 uint8_t testHash1[REFTABLE_HASH_SIZE_SHA1] = { 1, 2, 3, 4, 0 }; 446 uint64_t till9[] = { 1, 2, 3, 4, 500, 600, 700, 800, 9000 }; ··· 481 int n, m; 482 uint8_t extra; 483 484 + 
cl_assert_equal_i(reftable_record_is_deletion(&in), 0); 485 t_copy(&in); 486 reftable_record_key(&in, &key); 487 n = reftable_record_encode(&in, dest, REFTABLE_HASH_SIZE_SHA1); 488 + cl_assert(n > 0); 489 extra = reftable_record_val_type(&in); 490 m = reftable_record_decode(&out, key, extra, dest, 491 REFTABLE_HASH_SIZE_SHA1, &scratch); 492 + cl_assert_equal_i(n, m); 493 494 + cl_assert(reftable_record_equal(&in, &out, 495 + REFTABLE_HASH_SIZE_SHA1) != 0); 496 reftable_buf_release(&key); 497 reftable_record_release(&out); 498 } ··· 500 reftable_buf_release(&scratch); 501 } 502 503 + void test_reftable_record__index_record_comparison(void) 504 { 505 struct reftable_record in[3] = { 506 { ··· 521 }; 522 int cmp; 523 524 + cl_assert_equal_i(reftable_buf_addstr(&in[0].u.idx.last_key, 525 + "refs/heads/master"), 0); 526 + cl_assert_equal_i(reftable_buf_addstr(&in[1].u.idx.last_key, "refs/heads/master"), 0); 527 + cl_assert(reftable_buf_addstr(&in[2].u.idx.last_key, 528 + "refs/heads/branch") == 0); 529 530 + cl_assert_equal_i(reftable_record_equal(&in[0], &in[1], 531 + REFTABLE_HASH_SIZE_SHA1), 0); 532 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 533 + cl_assert(!cmp); 534 535 + cl_assert_equal_i(reftable_record_equal(&in[1], &in[2], 536 + REFTABLE_HASH_SIZE_SHA1), 0); 537 + cl_assert_equal_i(reftable_record_cmp(&in[1], &in[2], &cmp), 0); 538 + cl_assert(cmp > 0); 539 540 in[1].u.idx.offset = in[0].u.idx.offset; 541 + cl_assert(reftable_record_equal(&in[0], &in[1], 542 + REFTABLE_HASH_SIZE_SHA1) != 0); 543 + cl_assert_equal_i(reftable_record_cmp(&in[0], &in[1], &cmp), 0); 544 + cl_assert(!cmp); 545 546 for (size_t i = 0; i < ARRAY_SIZE(in); i++) 547 reftable_record_release(&in[i]); 548 } 549 550 + void test_reftable_record__index_record_roundtrip(void) 551 { 552 struct reftable_record in = { 553 .type = REFTABLE_BLOCK_TYPE_INDEX, ··· 570 int n, m; 571 uint8_t extra; 572 573 + cl_assert_equal_i(reftable_buf_addstr(&in.u.idx.last_key, 574 + 
"refs/heads/master"), 0); 575 reftable_record_key(&in, &key); 576 t_copy(&in); 577 578 + cl_assert_equal_i(reftable_record_is_deletion(&in), 0); 579 + cl_assert_equal_i(reftable_buf_cmp(&key, &in.u.idx.last_key), 0); 580 n = reftable_record_encode(&in, dest, REFTABLE_HASH_SIZE_SHA1); 581 + cl_assert(n > 0); 582 583 extra = reftable_record_val_type(&in); 584 + m = reftable_record_decode(&out, key, extra, dest, 585 + REFTABLE_HASH_SIZE_SHA1, &scratch); 586 + cl_assert_equal_i(m, n); 587 588 + cl_assert(reftable_record_equal(&in, &out, 589 + REFTABLE_HASH_SIZE_SHA1) != 0); 590 591 reftable_record_release(&out); 592 reftable_buf_release(&key); 593 reftable_buf_release(&scratch); 594 reftable_buf_release(&in.u.idx.last_key); 595 }
-1451
t/unit-tests/t-reftable-stack.c
··· 1 - /* 2 - Copyright 2020 Google LLC 3 - 4 - Use of this source code is governed by a BSD-style 5 - license that can be found in the LICENSE file or at 6 - https://developers.google.com/open-source/licenses/bsd 7 - */ 8 - 9 - #define DISABLE_SIGN_COMPARE_WARNINGS 10 - 11 - #include "test-lib.h" 12 - #include "lib-reftable.h" 13 - #include "dir.h" 14 - #include "reftable/merged.h" 15 - #include "reftable/reftable-error.h" 16 - #include "reftable/stack.h" 17 - #include "reftable/table.h" 18 - #include "strbuf.h" 19 - #include "tempfile.h" 20 - #include <dirent.h> 21 - 22 - static void clear_dir(const char *dirname) 23 - { 24 - struct strbuf path = REFTABLE_BUF_INIT; 25 - strbuf_addstr(&path, dirname); 26 - remove_dir_recursively(&path, 0); 27 - strbuf_release(&path); 28 - } 29 - 30 - static int count_dir_entries(const char *dirname) 31 - { 32 - DIR *dir = opendir(dirname); 33 - int len = 0; 34 - struct dirent *d; 35 - if (!dir) 36 - return 0; 37 - 38 - while ((d = readdir(dir))) { 39 - /* 40 - * Besides skipping over "." and "..", we also need to 41 - * skip over other files that have a leading ".". This 42 - * is due to behaviour of NFS, which will rename files 43 - * to ".nfs*" to emulate delete-on-last-close. 44 - * 45 - * In any case this should be fine as the reftable 46 - * library will never write files with leading dots 47 - * anyway. 48 - */ 49 - if (starts_with(d->d_name, ".")) 50 - continue; 51 - len++; 52 - } 53 - closedir(dir); 54 - return len; 55 - } 56 - 57 - /* 58 - * Work linenumber into the tempdir, so we can see which tests forget to 59 - * cleanup. 60 - */ 61 - static char *get_tmp_template(int linenumber) 62 - { 63 - const char *tmp = getenv("TMPDIR"); 64 - static char template[1024]; 65 - snprintf(template, sizeof(template) - 1, "%s/stack_test-%d.XXXXXX", 66 - tmp ? 
tmp : "/tmp", linenumber); 67 - return template; 68 - } 69 - 70 - static char *get_tmp_dir(int linenumber) 71 - { 72 - char *dir = get_tmp_template(linenumber); 73 - check(mkdtemp(dir) != NULL); 74 - return dir; 75 - } 76 - 77 - static void t_read_file(void) 78 - { 79 - char *fn = get_tmp_template(__LINE__); 80 - struct tempfile *tmp = mks_tempfile(fn); 81 - int fd = get_tempfile_fd(tmp); 82 - char out[1024] = "line1\n\nline2\nline3"; 83 - int n, err; 84 - char **names = NULL; 85 - const char *want[] = { "line1", "line2", "line3" }; 86 - 87 - check_int(fd, >, 0); 88 - n = write_in_full(fd, out, strlen(out)); 89 - check_int(n, ==, strlen(out)); 90 - err = close(fd); 91 - check_int(err, >=, 0); 92 - 93 - err = read_lines(fn, &names); 94 - check(!err); 95 - 96 - for (size_t i = 0; names[i]; i++) 97 - check_str(want[i], names[i]); 98 - free_names(names); 99 - (void) remove(fn); 100 - delete_tempfile(&tmp); 101 - } 102 - 103 - static int write_test_ref(struct reftable_writer *wr, void *arg) 104 - { 105 - struct reftable_ref_record *ref = arg; 106 - check(!reftable_writer_set_limits(wr, ref->update_index, 107 - ref->update_index)); 108 - return reftable_writer_add_ref(wr, ref); 109 - } 110 - 111 - static void write_n_ref_tables(struct reftable_stack *st, 112 - size_t n) 113 - { 114 - int disable_auto_compact; 115 - int err; 116 - 117 - disable_auto_compact = st->opts.disable_auto_compact; 118 - st->opts.disable_auto_compact = 1; 119 - 120 - for (size_t i = 0; i < n; i++) { 121 - struct reftable_ref_record ref = { 122 - .update_index = reftable_stack_next_update_index(st), 123 - .value_type = REFTABLE_REF_VAL1, 124 - }; 125 - char buf[128]; 126 - 127 - snprintf(buf, sizeof(buf), "refs/heads/branch-%04"PRIuMAX, (uintmax_t)i); 128 - ref.refname = buf; 129 - t_reftable_set_hash(ref.value.val1, i, REFTABLE_HASH_SHA1); 130 - 131 - err = reftable_stack_add(st, &write_test_ref, &ref); 132 - check(!err); 133 - } 134 - 135 - st->opts.disable_auto_compact = disable_auto_compact; 
136 - } 137 - 138 - struct write_log_arg { 139 - struct reftable_log_record *log; 140 - uint64_t update_index; 141 - }; 142 - 143 - static int write_test_log(struct reftable_writer *wr, void *arg) 144 - { 145 - struct write_log_arg *wla = arg; 146 - 147 - check(!reftable_writer_set_limits(wr, wla->update_index, 148 - wla->update_index)); 149 - return reftable_writer_add_log(wr, wla->log); 150 - } 151 - 152 - static void t_reftable_stack_add_one(void) 153 - { 154 - char *dir = get_tmp_dir(__LINE__); 155 - struct reftable_buf scratch = REFTABLE_BUF_INIT; 156 - int mask = umask(002); 157 - struct reftable_write_options opts = { 158 - .default_permissions = 0660, 159 - }; 160 - struct reftable_stack *st = NULL; 161 - int err; 162 - struct reftable_ref_record ref = { 163 - .refname = (char *) "HEAD", 164 - .update_index = 1, 165 - .value_type = REFTABLE_REF_SYMREF, 166 - .value.symref = (char *) "master", 167 - }; 168 - struct reftable_ref_record dest = { 0 }; 169 - struct stat stat_result = { 0 }; 170 - err = reftable_new_stack(&st, dir, &opts); 171 - check(!err); 172 - 173 - err = reftable_stack_add(st, write_test_ref, &ref); 174 - check(!err); 175 - 176 - err = reftable_stack_read_ref(st, ref.refname, &dest); 177 - check(!err); 178 - check(reftable_ref_record_equal(&ref, &dest, REFTABLE_HASH_SIZE_SHA1)); 179 - check_int(st->tables_len, >, 0); 180 - 181 - #ifndef GIT_WINDOWS_NATIVE 182 - check(!reftable_buf_addstr(&scratch, dir)); 183 - check(!reftable_buf_addstr(&scratch, "/tables.list")); 184 - err = stat(scratch.buf, &stat_result); 185 - check(!err); 186 - check_int((stat_result.st_mode & 0777), ==, opts.default_permissions); 187 - 188 - reftable_buf_reset(&scratch); 189 - check(!reftable_buf_addstr(&scratch, dir)); 190 - check(!reftable_buf_addstr(&scratch, "/")); 191 - /* do not try at home; not an external API for reftable. 
*/ 192 - check(!reftable_buf_addstr(&scratch, st->tables[0]->name)); 193 - err = stat(scratch.buf, &stat_result); 194 - check(!err); 195 - check_int((stat_result.st_mode & 0777), ==, opts.default_permissions); 196 - #else 197 - (void) stat_result; 198 - #endif 199 - 200 - reftable_ref_record_release(&dest); 201 - reftable_stack_destroy(st); 202 - reftable_buf_release(&scratch); 203 - clear_dir(dir); 204 - umask(mask); 205 - } 206 - 207 - static void t_reftable_stack_uptodate(void) 208 - { 209 - struct reftable_write_options opts = { 0 }; 210 - struct reftable_stack *st1 = NULL; 211 - struct reftable_stack *st2 = NULL; 212 - char *dir = get_tmp_dir(__LINE__); 213 - 214 - int err; 215 - struct reftable_ref_record ref1 = { 216 - .refname = (char *) "HEAD", 217 - .update_index = 1, 218 - .value_type = REFTABLE_REF_SYMREF, 219 - .value.symref = (char *) "master", 220 - }; 221 - struct reftable_ref_record ref2 = { 222 - .refname = (char *) "branch2", 223 - .update_index = 2, 224 - .value_type = REFTABLE_REF_SYMREF, 225 - .value.symref = (char *) "master", 226 - }; 227 - 228 - 229 - /* simulate multi-process access to the same stack 230 - by creating two stacks for the same directory. 
231 - */ 232 - err = reftable_new_stack(&st1, dir, &opts); 233 - check(!err); 234 - 235 - err = reftable_new_stack(&st2, dir, &opts); 236 - check(!err); 237 - 238 - err = reftable_stack_add(st1, write_test_ref, &ref1); 239 - check(!err); 240 - 241 - err = reftable_stack_add(st2, write_test_ref, &ref2); 242 - check_int(err, ==, REFTABLE_OUTDATED_ERROR); 243 - 244 - err = reftable_stack_reload(st2); 245 - check(!err); 246 - 247 - err = reftable_stack_add(st2, write_test_ref, &ref2); 248 - check(!err); 249 - reftable_stack_destroy(st1); 250 - reftable_stack_destroy(st2); 251 - clear_dir(dir); 252 - } 253 - 254 - static void t_reftable_stack_transaction_api(void) 255 - { 256 - char *dir = get_tmp_dir(__LINE__); 257 - struct reftable_write_options opts = { 0 }; 258 - struct reftable_stack *st = NULL; 259 - int err; 260 - struct reftable_addition *add = NULL; 261 - 262 - struct reftable_ref_record ref = { 263 - .refname = (char *) "HEAD", 264 - .update_index = 1, 265 - .value_type = REFTABLE_REF_SYMREF, 266 - .value.symref = (char *) "master", 267 - }; 268 - struct reftable_ref_record dest = { 0 }; 269 - 270 - err = reftable_new_stack(&st, dir, &opts); 271 - check(!err); 272 - 273 - reftable_addition_destroy(add); 274 - 275 - err = reftable_stack_new_addition(&add, st, 0); 276 - check(!err); 277 - 278 - err = reftable_addition_add(add, write_test_ref, &ref); 279 - check(!err); 280 - 281 - err = reftable_addition_commit(add); 282 - check(!err); 283 - 284 - reftable_addition_destroy(add); 285 - 286 - err = reftable_stack_read_ref(st, ref.refname, &dest); 287 - check(!err); 288 - check_int(REFTABLE_REF_SYMREF, ==, dest.value_type); 289 - check(reftable_ref_record_equal(&ref, &dest, REFTABLE_HASH_SIZE_SHA1)); 290 - 291 - reftable_ref_record_release(&dest); 292 - reftable_stack_destroy(st); 293 - clear_dir(dir); 294 - } 295 - 296 - static void t_reftable_stack_transaction_with_reload(void) 297 - { 298 - char *dir = get_tmp_dir(__LINE__); 299 - struct reftable_stack *st1 = 
NULL, *st2 = NULL; 300 - int err; 301 - struct reftable_addition *add = NULL; 302 - struct reftable_ref_record refs[2] = { 303 - { 304 - .refname = (char *) "refs/heads/a", 305 - .update_index = 1, 306 - .value_type = REFTABLE_REF_VAL1, 307 - .value.val1 = { '1' }, 308 - }, 309 - { 310 - .refname = (char *) "refs/heads/b", 311 - .update_index = 2, 312 - .value_type = REFTABLE_REF_VAL1, 313 - .value.val1 = { '1' }, 314 - }, 315 - }; 316 - struct reftable_ref_record ref = { 0 }; 317 - 318 - err = reftable_new_stack(&st1, dir, NULL); 319 - check(!err); 320 - err = reftable_new_stack(&st2, dir, NULL); 321 - check(!err); 322 - 323 - err = reftable_stack_new_addition(&add, st1, 0); 324 - check(!err); 325 - err = reftable_addition_add(add, write_test_ref, &refs[0]); 326 - check(!err); 327 - err = reftable_addition_commit(add); 328 - check(!err); 329 - reftable_addition_destroy(add); 330 - 331 - /* 332 - * The second stack is now outdated, which we should notice. We do not 333 - * create the addition and lock the stack by default, but allow the 334 - * reload to happen when REFTABLE_STACK_NEW_ADDITION_RELOAD is set. 
335 - */ 336 - err = reftable_stack_new_addition(&add, st2, 0); 337 - check_int(err, ==, REFTABLE_OUTDATED_ERROR); 338 - err = reftable_stack_new_addition(&add, st2, REFTABLE_STACK_NEW_ADDITION_RELOAD); 339 - check(!err); 340 - err = reftable_addition_add(add, write_test_ref, &refs[1]); 341 - check(!err); 342 - err = reftable_addition_commit(add); 343 - check(!err); 344 - reftable_addition_destroy(add); 345 - 346 - for (size_t i = 0; i < ARRAY_SIZE(refs); i++) { 347 - err = reftable_stack_read_ref(st2, refs[i].refname, &ref); 348 - check(!err); 349 - check(reftable_ref_record_equal(&refs[i], &ref, REFTABLE_HASH_SIZE_SHA1)); 350 - } 351 - 352 - reftable_ref_record_release(&ref); 353 - reftable_stack_destroy(st1); 354 - reftable_stack_destroy(st2); 355 - clear_dir(dir); 356 - } 357 - 358 - static void t_reftable_stack_transaction_api_performs_auto_compaction(void) 359 - { 360 - char *dir = get_tmp_dir(__LINE__); 361 - struct reftable_write_options opts = {0}; 362 - struct reftable_addition *add = NULL; 363 - struct reftable_stack *st = NULL; 364 - size_t n = 20; 365 - int err; 366 - 367 - err = reftable_new_stack(&st, dir, &opts); 368 - check(!err); 369 - 370 - for (size_t i = 0; i <= n; i++) { 371 - struct reftable_ref_record ref = { 372 - .update_index = reftable_stack_next_update_index(st), 373 - .value_type = REFTABLE_REF_SYMREF, 374 - .value.symref = (char *) "master", 375 - }; 376 - char name[100]; 377 - 378 - snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 379 - ref.refname = name; 380 - 381 - /* 382 - * Disable auto-compaction for all but the last runs. Like this 383 - * we can ensure that we indeed honor this setting and have 384 - * better control over when exactly auto compaction runs. 
385 - */ 386 - st->opts.disable_auto_compact = i != n; 387 - 388 - err = reftable_stack_new_addition(&add, st, 0); 389 - check(!err); 390 - 391 - err = reftable_addition_add(add, write_test_ref, &ref); 392 - check(!err); 393 - 394 - err = reftable_addition_commit(add); 395 - check(!err); 396 - 397 - reftable_addition_destroy(add); 398 - 399 - /* 400 - * The stack length should grow continuously for all runs where 401 - * auto compaction is disabled. When enabled, we should merge 402 - * all tables in the stack. 403 - */ 404 - if (i != n) 405 - check_int(st->merged->tables_len, ==, i + 1); 406 - else 407 - check_int(st->merged->tables_len, ==, 1); 408 - } 409 - 410 - reftable_stack_destroy(st); 411 - clear_dir(dir); 412 - } 413 - 414 - static void t_reftable_stack_auto_compaction_fails_gracefully(void) 415 - { 416 - struct reftable_ref_record ref = { 417 - .refname = (char *) "refs/heads/master", 418 - .update_index = 1, 419 - .value_type = REFTABLE_REF_VAL1, 420 - .value.val1 = {0x01}, 421 - }; 422 - struct reftable_write_options opts = { 0 }; 423 - struct reftable_stack *st; 424 - struct reftable_buf table_path = REFTABLE_BUF_INIT; 425 - char *dir = get_tmp_dir(__LINE__); 426 - int err; 427 - 428 - err = reftable_new_stack(&st, dir, &opts); 429 - check(!err); 430 - 431 - err = reftable_stack_add(st, write_test_ref, &ref); 432 - check(!err); 433 - check_int(st->merged->tables_len, ==, 1); 434 - check_int(st->stats.attempts, ==, 0); 435 - check_int(st->stats.failures, ==, 0); 436 - 437 - /* 438 - * Lock the newly written table such that it cannot be compacted. 439 - * Adding a new table to the stack should not be impacted by this, even 440 - * though auto-compaction will now fail. 
441 - */ 442 - check(!reftable_buf_addstr(&table_path, dir)); 443 - check(!reftable_buf_addstr(&table_path, "/")); 444 - check(!reftable_buf_addstr(&table_path, st->tables[0]->name)); 445 - check(!reftable_buf_addstr(&table_path, ".lock")); 446 - write_file_buf(table_path.buf, "", 0); 447 - 448 - ref.update_index = 2; 449 - err = reftable_stack_add(st, write_test_ref, &ref); 450 - check(!err); 451 - check_int(st->merged->tables_len, ==, 2); 452 - check_int(st->stats.attempts, ==, 1); 453 - check_int(st->stats.failures, ==, 1); 454 - 455 - reftable_stack_destroy(st); 456 - reftable_buf_release(&table_path); 457 - clear_dir(dir); 458 - } 459 - 460 - static int write_error(struct reftable_writer *wr UNUSED, void *arg) 461 - { 462 - return *((int *)arg); 463 - } 464 - 465 - static void t_reftable_stack_update_index_check(void) 466 - { 467 - char *dir = get_tmp_dir(__LINE__); 468 - struct reftable_write_options opts = { 0 }; 469 - struct reftable_stack *st = NULL; 470 - int err; 471 - struct reftable_ref_record ref1 = { 472 - .refname = (char *) "name1", 473 - .update_index = 1, 474 - .value_type = REFTABLE_REF_SYMREF, 475 - .value.symref = (char *) "master", 476 - }; 477 - struct reftable_ref_record ref2 = { 478 - .refname = (char *) "name2", 479 - .update_index = 1, 480 - .value_type = REFTABLE_REF_SYMREF, 481 - .value.symref = (char *) "master", 482 - }; 483 - 484 - err = reftable_new_stack(&st, dir, &opts); 485 - check(!err); 486 - 487 - err = reftable_stack_add(st, write_test_ref, &ref1); 488 - check(!err); 489 - 490 - err = reftable_stack_add(st, write_test_ref, &ref2); 491 - check_int(err, ==, REFTABLE_API_ERROR); 492 - reftable_stack_destroy(st); 493 - clear_dir(dir); 494 - } 495 - 496 - static void t_reftable_stack_lock_failure(void) 497 - { 498 - char *dir = get_tmp_dir(__LINE__); 499 - struct reftable_write_options opts = { 0 }; 500 - struct reftable_stack *st = NULL; 501 - int err, i; 502 - 503 - err = reftable_new_stack(&st, dir, &opts); 504 - check(!err); 
505 - for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--) { 506 - err = reftable_stack_add(st, write_error, &i); 507 - check_int(err, ==, i); 508 - } 509 - 510 - reftable_stack_destroy(st); 511 - clear_dir(dir); 512 - } 513 - 514 - static void t_reftable_stack_add(void) 515 - { 516 - int err = 0; 517 - struct reftable_write_options opts = { 518 - .exact_log_message = 1, 519 - .default_permissions = 0660, 520 - .disable_auto_compact = 1, 521 - }; 522 - struct reftable_stack *st = NULL; 523 - char *dir = get_tmp_dir(__LINE__); 524 - struct reftable_ref_record refs[2] = { 0 }; 525 - struct reftable_log_record logs[2] = { 0 }; 526 - struct reftable_buf path = REFTABLE_BUF_INIT; 527 - struct stat stat_result; 528 - size_t i, N = ARRAY_SIZE(refs); 529 - 530 - err = reftable_new_stack(&st, dir, &opts); 531 - check(!err); 532 - 533 - for (i = 0; i < N; i++) { 534 - char buf[256]; 535 - snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i); 536 - refs[i].refname = xstrdup(buf); 537 - refs[i].update_index = i + 1; 538 - refs[i].value_type = REFTABLE_REF_VAL1; 539 - t_reftable_set_hash(refs[i].value.val1, i, REFTABLE_HASH_SHA1); 540 - 541 - logs[i].refname = xstrdup(buf); 542 - logs[i].update_index = N + i + 1; 543 - logs[i].value_type = REFTABLE_LOG_UPDATE; 544 - logs[i].value.update.email = xstrdup("identity@invalid"); 545 - t_reftable_set_hash(logs[i].value.update.new_hash, i, REFTABLE_HASH_SHA1); 546 - } 547 - 548 - for (i = 0; i < N; i++) { 549 - int err = reftable_stack_add(st, write_test_ref, &refs[i]); 550 - check(!err); 551 - } 552 - 553 - for (i = 0; i < N; i++) { 554 - struct write_log_arg arg = { 555 - .log = &logs[i], 556 - .update_index = reftable_stack_next_update_index(st), 557 - }; 558 - int err = reftable_stack_add(st, write_test_log, &arg); 559 - check(!err); 560 - } 561 - 562 - err = reftable_stack_compact_all(st, NULL); 563 - check(!err); 564 - 565 - for (i = 0; i < N; i++) { 566 - struct reftable_ref_record dest = { 0 }; 567 - 568 - int err = 
reftable_stack_read_ref(st, refs[i].refname, &dest); 569 - check(!err); 570 - check(reftable_ref_record_equal(&dest, refs + i, 571 - REFTABLE_HASH_SIZE_SHA1)); 572 - reftable_ref_record_release(&dest); 573 - } 574 - 575 - for (i = 0; i < N; i++) { 576 - struct reftable_log_record dest = { 0 }; 577 - int err = reftable_stack_read_log(st, refs[i].refname, &dest); 578 - check(!err); 579 - check(reftable_log_record_equal(&dest, logs + i, 580 - REFTABLE_HASH_SIZE_SHA1)); 581 - reftable_log_record_release(&dest); 582 - } 583 - 584 - #ifndef GIT_WINDOWS_NATIVE 585 - check(!reftable_buf_addstr(&path, dir)); 586 - check(!reftable_buf_addstr(&path, "/tables.list")); 587 - err = stat(path.buf, &stat_result); 588 - check(!err); 589 - check_int((stat_result.st_mode & 0777), ==, opts.default_permissions); 590 - 591 - reftable_buf_reset(&path); 592 - check(!reftable_buf_addstr(&path, dir)); 593 - check(!reftable_buf_addstr(&path, "/")); 594 - /* do not try at home; not an external API for reftable. */ 595 - check(!reftable_buf_addstr(&path, st->tables[0]->name)); 596 - err = stat(path.buf, &stat_result); 597 - check(!err); 598 - check_int((stat_result.st_mode & 0777), ==, opts.default_permissions); 599 - #else 600 - (void) stat_result; 601 - #endif 602 - 603 - /* cleanup */ 604 - reftable_stack_destroy(st); 605 - for (i = 0; i < N; i++) { 606 - reftable_ref_record_release(&refs[i]); 607 - reftable_log_record_release(&logs[i]); 608 - } 609 - reftable_buf_release(&path); 610 - clear_dir(dir); 611 - } 612 - 613 - static void t_reftable_stack_iterator(void) 614 - { 615 - struct reftable_write_options opts = { 0 }; 616 - struct reftable_stack *st = NULL; 617 - char *dir = get_tmp_dir(__LINE__); 618 - struct reftable_ref_record refs[10] = { 0 }; 619 - struct reftable_log_record logs[10] = { 0 }; 620 - struct reftable_iterator it = { 0 }; 621 - size_t N = ARRAY_SIZE(refs), i; 622 - int err; 623 - 624 - err = reftable_new_stack(&st, dir, &opts); 625 - check(!err); 626 - 627 - for (i = 0; 
i < N; i++) { 628 - refs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); 629 - refs[i].update_index = i + 1; 630 - refs[i].value_type = REFTABLE_REF_VAL1; 631 - t_reftable_set_hash(refs[i].value.val1, i, REFTABLE_HASH_SHA1); 632 - 633 - logs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); 634 - logs[i].update_index = i + 1; 635 - logs[i].value_type = REFTABLE_LOG_UPDATE; 636 - logs[i].value.update.email = xstrdup("johndoe@invalid"); 637 - logs[i].value.update.message = xstrdup("commit\n"); 638 - t_reftable_set_hash(logs[i].value.update.new_hash, i, REFTABLE_HASH_SHA1); 639 - } 640 - 641 - for (i = 0; i < N; i++) { 642 - err = reftable_stack_add(st, write_test_ref, &refs[i]); 643 - check(!err); 644 - } 645 - 646 - for (i = 0; i < N; i++) { 647 - struct write_log_arg arg = { 648 - .log = &logs[i], 649 - .update_index = reftable_stack_next_update_index(st), 650 - }; 651 - 652 - err = reftable_stack_add(st, write_test_log, &arg); 653 - check(!err); 654 - } 655 - 656 - reftable_stack_init_ref_iterator(st, &it); 657 - reftable_iterator_seek_ref(&it, refs[0].refname); 658 - for (i = 0; ; i++) { 659 - struct reftable_ref_record ref = { 0 }; 660 - 661 - err = reftable_iterator_next_ref(&it, &ref); 662 - if (err > 0) 663 - break; 664 - check(!err); 665 - check(reftable_ref_record_equal(&ref, &refs[i], REFTABLE_HASH_SIZE_SHA1)); 666 - reftable_ref_record_release(&ref); 667 - } 668 - check_int(i, ==, N); 669 - 670 - reftable_iterator_destroy(&it); 671 - 672 - err = reftable_stack_init_log_iterator(st, &it); 673 - check(!err); 674 - 675 - reftable_iterator_seek_log(&it, logs[0].refname); 676 - for (i = 0; ; i++) { 677 - struct reftable_log_record log = { 0 }; 678 - 679 - err = reftable_iterator_next_log(&it, &log); 680 - if (err > 0) 681 - break; 682 - check(!err); 683 - check(reftable_log_record_equal(&log, &logs[i], REFTABLE_HASH_SIZE_SHA1)); 684 - reftable_log_record_release(&log); 685 - } 686 - check_int(i, ==, N); 687 - 688 - reftable_stack_destroy(st); 689 
- reftable_iterator_destroy(&it); 690 - for (i = 0; i < N; i++) { 691 - reftable_ref_record_release(&refs[i]); 692 - reftable_log_record_release(&logs[i]); 693 - } 694 - clear_dir(dir); 695 - } 696 - 697 - static void t_reftable_stack_log_normalize(void) 698 - { 699 - int err = 0; 700 - struct reftable_write_options opts = { 701 - 0, 702 - }; 703 - struct reftable_stack *st = NULL; 704 - char *dir = get_tmp_dir(__LINE__); 705 - struct reftable_log_record input = { 706 - .refname = (char *) "branch", 707 - .update_index = 1, 708 - .value_type = REFTABLE_LOG_UPDATE, 709 - .value = { 710 - .update = { 711 - .new_hash = { 1 }, 712 - .old_hash = { 2 }, 713 - }, 714 - }, 715 - }; 716 - struct reftable_log_record dest = { 717 - .update_index = 0, 718 - }; 719 - struct write_log_arg arg = { 720 - .log = &input, 721 - .update_index = 1, 722 - }; 723 - 724 - err = reftable_new_stack(&st, dir, &opts); 725 - check(!err); 726 - 727 - input.value.update.message = (char *) "one\ntwo"; 728 - err = reftable_stack_add(st, write_test_log, &arg); 729 - check_int(err, ==, REFTABLE_API_ERROR); 730 - 731 - input.value.update.message = (char *) "one"; 732 - err = reftable_stack_add(st, write_test_log, &arg); 733 - check(!err); 734 - 735 - err = reftable_stack_read_log(st, input.refname, &dest); 736 - check(!err); 737 - check_str(dest.value.update.message, "one\n"); 738 - 739 - input.value.update.message = (char *) "two\n"; 740 - arg.update_index = 2; 741 - err = reftable_stack_add(st, write_test_log, &arg); 742 - check(!err); 743 - err = reftable_stack_read_log(st, input.refname, &dest); 744 - check(!err); 745 - check_str(dest.value.update.message, "two\n"); 746 - 747 - /* cleanup */ 748 - reftable_stack_destroy(st); 749 - reftable_log_record_release(&dest); 750 - clear_dir(dir); 751 - } 752 - 753 - static void t_reftable_stack_tombstone(void) 754 - { 755 - char *dir = get_tmp_dir(__LINE__); 756 - struct reftable_write_options opts = { 0 }; 757 - struct reftable_stack *st = NULL; 758 - 
int err; 759 - struct reftable_ref_record refs[2] = { 0 }; 760 - struct reftable_log_record logs[2] = { 0 }; 761 - size_t i, N = ARRAY_SIZE(refs); 762 - struct reftable_ref_record dest = { 0 }; 763 - struct reftable_log_record log_dest = { 0 }; 764 - 765 - err = reftable_new_stack(&st, dir, &opts); 766 - check(!err); 767 - 768 - /* even entries add the refs, odd entries delete them. */ 769 - for (i = 0; i < N; i++) { 770 - const char *buf = "branch"; 771 - refs[i].refname = xstrdup(buf); 772 - refs[i].update_index = i + 1; 773 - if (i % 2 == 0) { 774 - refs[i].value_type = REFTABLE_REF_VAL1; 775 - t_reftable_set_hash(refs[i].value.val1, i, 776 - REFTABLE_HASH_SHA1); 777 - } 778 - 779 - logs[i].refname = xstrdup(buf); 780 - /* 781 - * update_index is part of the key so should be constant. 782 - * The value itself should be less than the writer's upper 783 - * limit. 784 - */ 785 - logs[i].update_index = 1; 786 - if (i % 2 == 0) { 787 - logs[i].value_type = REFTABLE_LOG_UPDATE; 788 - t_reftable_set_hash(logs[i].value.update.new_hash, i, 789 - REFTABLE_HASH_SHA1); 790 - logs[i].value.update.email = 791 - xstrdup("identity@invalid"); 792 - } 793 - } 794 - for (i = 0; i < N; i++) { 795 - int err = reftable_stack_add(st, write_test_ref, &refs[i]); 796 - check(!err); 797 - } 798 - 799 - for (i = 0; i < N; i++) { 800 - struct write_log_arg arg = { 801 - .log = &logs[i], 802 - .update_index = reftable_stack_next_update_index(st), 803 - }; 804 - int err = reftable_stack_add(st, write_test_log, &arg); 805 - check(!err); 806 - } 807 - 808 - err = reftable_stack_read_ref(st, "branch", &dest); 809 - check_int(err, ==, 1); 810 - reftable_ref_record_release(&dest); 811 - 812 - err = reftable_stack_read_log(st, "branch", &log_dest); 813 - check_int(err, ==, 1); 814 - reftable_log_record_release(&log_dest); 815 - 816 - err = reftable_stack_compact_all(st, NULL); 817 - check(!err); 818 - 819 - err = reftable_stack_read_ref(st, "branch", &dest); 820 - check_int(err, ==, 1); 821 - 822 
- err = reftable_stack_read_log(st, "branch", &log_dest); 823 - check_int(err, ==, 1); 824 - reftable_ref_record_release(&dest); 825 - reftable_log_record_release(&log_dest); 826 - 827 - /* cleanup */ 828 - reftable_stack_destroy(st); 829 - for (i = 0; i < N; i++) { 830 - reftable_ref_record_release(&refs[i]); 831 - reftable_log_record_release(&logs[i]); 832 - } 833 - clear_dir(dir); 834 - } 835 - 836 - static void t_reftable_stack_hash_id(void) 837 - { 838 - char *dir = get_tmp_dir(__LINE__); 839 - struct reftable_write_options opts = { 0 }; 840 - struct reftable_stack *st = NULL; 841 - int err; 842 - 843 - struct reftable_ref_record ref = { 844 - .refname = (char *) "master", 845 - .value_type = REFTABLE_REF_SYMREF, 846 - .value.symref = (char *) "target", 847 - .update_index = 1, 848 - }; 849 - struct reftable_write_options opts32 = { .hash_id = REFTABLE_HASH_SHA256 }; 850 - struct reftable_stack *st32 = NULL; 851 - struct reftable_write_options opts_default = { 0 }; 852 - struct reftable_stack *st_default = NULL; 853 - struct reftable_ref_record dest = { 0 }; 854 - 855 - err = reftable_new_stack(&st, dir, &opts); 856 - check(!err); 857 - 858 - err = reftable_stack_add(st, write_test_ref, &ref); 859 - check(!err); 860 - 861 - /* can't read it with the wrong hash ID. */ 862 - err = reftable_new_stack(&st32, dir, &opts32); 863 - check_int(err, ==, REFTABLE_FORMAT_ERROR); 864 - 865 - /* check that we can read it back with default opts too. 
*/ 866 - err = reftable_new_stack(&st_default, dir, &opts_default); 867 - check(!err); 868 - 869 - err = reftable_stack_read_ref(st_default, "master", &dest); 870 - check(!err); 871 - 872 - check(reftable_ref_record_equal(&ref, &dest, REFTABLE_HASH_SIZE_SHA1)); 873 - reftable_ref_record_release(&dest); 874 - reftable_stack_destroy(st); 875 - reftable_stack_destroy(st_default); 876 - clear_dir(dir); 877 - } 878 - 879 - static void t_suggest_compaction_segment(void) 880 - { 881 - uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 }; 882 - struct segment min = 883 - suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2); 884 - check_int(min.start, ==, 1); 885 - check_int(min.end, ==, 10); 886 - } 887 - 888 - static void t_suggest_compaction_segment_nothing(void) 889 - { 890 - uint64_t sizes[] = { 64, 32, 16, 8, 4, 2 }; 891 - struct segment result = 892 - suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2); 893 - check_int(result.start, ==, result.end); 894 - } 895 - 896 - static void t_reflog_expire(void) 897 - { 898 - char *dir = get_tmp_dir(__LINE__); 899 - struct reftable_write_options opts = { 0 }; 900 - struct reftable_stack *st = NULL; 901 - struct reftable_log_record logs[20] = { 0 }; 902 - size_t i, N = ARRAY_SIZE(logs) - 1; 903 - int err; 904 - struct reftable_log_expiry_config expiry = { 905 - .time = 10, 906 - }; 907 - struct reftable_log_record log = { 0 }; 908 - 909 - err = reftable_new_stack(&st, dir, &opts); 910 - check(!err); 911 - 912 - for (i = 1; i <= N; i++) { 913 - char buf[256]; 914 - snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i); 915 - 916 - logs[i].refname = xstrdup(buf); 917 - logs[i].update_index = i; 918 - logs[i].value_type = REFTABLE_LOG_UPDATE; 919 - logs[i].value.update.time = i; 920 - logs[i].value.update.email = xstrdup("identity@invalid"); 921 - t_reftable_set_hash(logs[i].value.update.new_hash, i, 922 - REFTABLE_HASH_SHA1); 923 - } 924 - 925 - for (i = 1; i <= N; i++) { 926 - struct write_log_arg arg = { 927 
- .log = &logs[i], 928 - .update_index = reftable_stack_next_update_index(st), 929 - }; 930 - int err = reftable_stack_add(st, write_test_log, &arg); 931 - check(!err); 932 - } 933 - 934 - err = reftable_stack_compact_all(st, NULL); 935 - check(!err); 936 - 937 - err = reftable_stack_compact_all(st, &expiry); 938 - check(!err); 939 - 940 - err = reftable_stack_read_log(st, logs[9].refname, &log); 941 - check_int(err, ==, 1); 942 - 943 - err = reftable_stack_read_log(st, logs[11].refname, &log); 944 - check(!err); 945 - 946 - expiry.min_update_index = 15; 947 - err = reftable_stack_compact_all(st, &expiry); 948 - check(!err); 949 - 950 - err = reftable_stack_read_log(st, logs[14].refname, &log); 951 - check_int(err, ==, 1); 952 - 953 - err = reftable_stack_read_log(st, logs[16].refname, &log); 954 - check(!err); 955 - 956 - /* cleanup */ 957 - reftable_stack_destroy(st); 958 - for (i = 0; i <= N; i++) 959 - reftable_log_record_release(&logs[i]); 960 - clear_dir(dir); 961 - reftable_log_record_release(&log); 962 - } 963 - 964 - static int write_nothing(struct reftable_writer *wr, void *arg UNUSED) 965 - { 966 - check(!reftable_writer_set_limits(wr, 1, 1)); 967 - return 0; 968 - } 969 - 970 - static void t_empty_add(void) 971 - { 972 - struct reftable_write_options opts = { 0 }; 973 - struct reftable_stack *st = NULL; 974 - int err; 975 - char *dir = get_tmp_dir(__LINE__); 976 - struct reftable_stack *st2 = NULL; 977 - 978 - err = reftable_new_stack(&st, dir, &opts); 979 - check(!err); 980 - 981 - err = reftable_stack_add(st, write_nothing, NULL); 982 - check(!err); 983 - 984 - err = reftable_new_stack(&st2, dir, &opts); 985 - check(!err); 986 - clear_dir(dir); 987 - reftable_stack_destroy(st); 988 - reftable_stack_destroy(st2); 989 - } 990 - 991 - static int fastlogN(uint64_t sz, uint64_t N) 992 - { 993 - int l = 0; 994 - if (sz == 0) 995 - return 0; 996 - for (; sz; sz /= N) 997 - l++; 998 - return l - 1; 999 - } 1000 - 1001 - static void 
t_reftable_stack_auto_compaction(void) 1002 - { 1003 - struct reftable_write_options opts = { 1004 - .disable_auto_compact = 1, 1005 - }; 1006 - struct reftable_stack *st = NULL; 1007 - char *dir = get_tmp_dir(__LINE__); 1008 - int err; 1009 - size_t i, N = 100; 1010 - 1011 - err = reftable_new_stack(&st, dir, &opts); 1012 - check(!err); 1013 - 1014 - for (i = 0; i < N; i++) { 1015 - char name[100]; 1016 - struct reftable_ref_record ref = { 1017 - .refname = name, 1018 - .update_index = reftable_stack_next_update_index(st), 1019 - .value_type = REFTABLE_REF_SYMREF, 1020 - .value.symref = (char *) "master", 1021 - }; 1022 - snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 1023 - 1024 - err = reftable_stack_add(st, write_test_ref, &ref); 1025 - check(!err); 1026 - 1027 - err = reftable_stack_auto_compact(st); 1028 - check(!err); 1029 - check(i < 2 || st->merged->tables_len < 2 * fastlogN(i, 2)); 1030 - } 1031 - 1032 - check_int(reftable_stack_compaction_stats(st)->entries_written, <, 1033 - (uint64_t)(N * fastlogN(N, 2))); 1034 - 1035 - reftable_stack_destroy(st); 1036 - clear_dir(dir); 1037 - } 1038 - 1039 - static void t_reftable_stack_auto_compaction_factor(void) 1040 - { 1041 - struct reftable_write_options opts = { 1042 - .auto_compaction_factor = 5, 1043 - }; 1044 - struct reftable_stack *st = NULL; 1045 - char *dir = get_tmp_dir(__LINE__); 1046 - int err; 1047 - size_t N = 100; 1048 - 1049 - err = reftable_new_stack(&st, dir, &opts); 1050 - check(!err); 1051 - 1052 - for (size_t i = 0; i < N; i++) { 1053 - char name[20]; 1054 - struct reftable_ref_record ref = { 1055 - .refname = name, 1056 - .update_index = reftable_stack_next_update_index(st), 1057 - .value_type = REFTABLE_REF_VAL1, 1058 - }; 1059 - xsnprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 1060 - 1061 - err = reftable_stack_add(st, &write_test_ref, &ref); 1062 - check(!err); 1063 - 1064 - check(i < 5 || st->merged->tables_len < 5 * fastlogN(i, 5)); 1065 - } 1066 - 1067 
- reftable_stack_destroy(st); 1068 - clear_dir(dir); 1069 - } 1070 - 1071 - static void t_reftable_stack_auto_compaction_with_locked_tables(void) 1072 - { 1073 - struct reftable_write_options opts = { 1074 - .disable_auto_compact = 1, 1075 - }; 1076 - struct reftable_stack *st = NULL; 1077 - struct reftable_buf buf = REFTABLE_BUF_INIT; 1078 - char *dir = get_tmp_dir(__LINE__); 1079 - int err; 1080 - 1081 - err = reftable_new_stack(&st, dir, &opts); 1082 - check(!err); 1083 - 1084 - write_n_ref_tables(st, 5); 1085 - check_int(st->merged->tables_len, ==, 5); 1086 - 1087 - /* 1088 - * Given that all tables we have written should be roughly the same 1089 - * size, we expect that auto-compaction will want to compact all of the 1090 - * tables. Locking any of the tables will keep it from doing so. 1091 - */ 1092 - check(!reftable_buf_addstr(&buf, dir)); 1093 - check(!reftable_buf_addstr(&buf, "/")); 1094 - check(!reftable_buf_addstr(&buf, st->tables[2]->name)); 1095 - check(!reftable_buf_addstr(&buf, ".lock")); 1096 - write_file_buf(buf.buf, "", 0); 1097 - 1098 - /* 1099 - * When parts of the stack are locked, then auto-compaction does a best 1100 - * effort compaction of those tables which aren't locked. So while this 1101 - * would in theory compact all tables, due to the preexisting lock we 1102 - * only compact the newest two tables. 
1103 - */ 1104 - err = reftable_stack_auto_compact(st); 1105 - check(!err); 1106 - check_int(st->stats.failures, ==, 0); 1107 - check_int(st->merged->tables_len, ==, 4); 1108 - 1109 - reftable_stack_destroy(st); 1110 - reftable_buf_release(&buf); 1111 - clear_dir(dir); 1112 - } 1113 - 1114 - static void t_reftable_stack_add_performs_auto_compaction(void) 1115 - { 1116 - struct reftable_write_options opts = { 0 }; 1117 - struct reftable_stack *st = NULL; 1118 - char *dir = get_tmp_dir(__LINE__); 1119 - int err; 1120 - size_t i, n = 20; 1121 - 1122 - err = reftable_new_stack(&st, dir, &opts); 1123 - check(!err); 1124 - 1125 - for (i = 0; i <= n; i++) { 1126 - struct reftable_ref_record ref = { 1127 - .update_index = reftable_stack_next_update_index(st), 1128 - .value_type = REFTABLE_REF_SYMREF, 1129 - .value.symref = (char *) "master", 1130 - }; 1131 - char buf[128]; 1132 - 1133 - /* 1134 - * Disable auto-compaction for all but the last runs. Like this 1135 - * we can ensure that we indeed honor this setting and have 1136 - * better control over when exactly auto compaction runs. 1137 - */ 1138 - st->opts.disable_auto_compact = i != n; 1139 - 1140 - snprintf(buf, sizeof(buf), "branch-%04"PRIuMAX, (uintmax_t)i); 1141 - ref.refname = buf; 1142 - 1143 - err = reftable_stack_add(st, write_test_ref, &ref); 1144 - check(!err); 1145 - 1146 - /* 1147 - * The stack length should grow continuously for all runs where 1148 - * auto compaction is disabled. When enabled, we should merge 1149 - * all tables in the stack. 
1150 - */ 1151 - if (i != n) 1152 - check_int(st->merged->tables_len, ==, i + 1); 1153 - else 1154 - check_int(st->merged->tables_len, ==, 1); 1155 - } 1156 - 1157 - reftable_stack_destroy(st); 1158 - clear_dir(dir); 1159 - } 1160 - 1161 - static void t_reftable_stack_compaction_with_locked_tables(void) 1162 - { 1163 - struct reftable_write_options opts = { 1164 - .disable_auto_compact = 1, 1165 - }; 1166 - struct reftable_stack *st = NULL; 1167 - struct reftable_buf buf = REFTABLE_BUF_INIT; 1168 - char *dir = get_tmp_dir(__LINE__); 1169 - int err; 1170 - 1171 - err = reftable_new_stack(&st, dir, &opts); 1172 - check(!err); 1173 - 1174 - write_n_ref_tables(st, 3); 1175 - check_int(st->merged->tables_len, ==, 3); 1176 - 1177 - /* Lock one of the tables that we're about to compact. */ 1178 - check(!reftable_buf_addstr(&buf, dir)); 1179 - check(!reftable_buf_addstr(&buf, "/")); 1180 - check(!reftable_buf_addstr(&buf, st->tables[1]->name)); 1181 - check(!reftable_buf_addstr(&buf, ".lock")); 1182 - write_file_buf(buf.buf, "", 0); 1183 - 1184 - /* 1185 - * Compaction is expected to fail given that we were not able to 1186 - * compact all tables. 
1187 - */ 1188 - err = reftable_stack_compact_all(st, NULL); 1189 - check_int(err, ==, REFTABLE_LOCK_ERROR); 1190 - check_int(st->stats.failures, ==, 1); 1191 - check_int(st->merged->tables_len, ==, 3); 1192 - 1193 - reftable_stack_destroy(st); 1194 - reftable_buf_release(&buf); 1195 - clear_dir(dir); 1196 - } 1197 - 1198 - static void t_reftable_stack_compaction_concurrent(void) 1199 - { 1200 - struct reftable_write_options opts = { 0 }; 1201 - struct reftable_stack *st1 = NULL, *st2 = NULL; 1202 - char *dir = get_tmp_dir(__LINE__); 1203 - int err; 1204 - 1205 - err = reftable_new_stack(&st1, dir, &opts); 1206 - check(!err); 1207 - write_n_ref_tables(st1, 3); 1208 - 1209 - err = reftable_new_stack(&st2, dir, &opts); 1210 - check(!err); 1211 - 1212 - err = reftable_stack_compact_all(st1, NULL); 1213 - check(!err); 1214 - 1215 - reftable_stack_destroy(st1); 1216 - reftable_stack_destroy(st2); 1217 - 1218 - check_int(count_dir_entries(dir), ==, 2); 1219 - clear_dir(dir); 1220 - } 1221 - 1222 - static void unclean_stack_close(struct reftable_stack *st) 1223 - { 1224 - /* break abstraction boundary to simulate unclean shutdown. 
*/ 1225 - for (size_t i = 0; i < st->tables_len; i++) 1226 - reftable_table_decref(st->tables[i]); 1227 - st->tables_len = 0; 1228 - REFTABLE_FREE_AND_NULL(st->tables); 1229 - } 1230 - 1231 - static void t_reftable_stack_compaction_concurrent_clean(void) 1232 - { 1233 - struct reftable_write_options opts = { 0 }; 1234 - struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL; 1235 - char *dir = get_tmp_dir(__LINE__); 1236 - int err; 1237 - 1238 - err = reftable_new_stack(&st1, dir, &opts); 1239 - check(!err); 1240 - write_n_ref_tables(st1, 3); 1241 - 1242 - err = reftable_new_stack(&st2, dir, &opts); 1243 - check(!err); 1244 - 1245 - err = reftable_stack_compact_all(st1, NULL); 1246 - check(!err); 1247 - 1248 - unclean_stack_close(st1); 1249 - unclean_stack_close(st2); 1250 - 1251 - err = reftable_new_stack(&st3, dir, &opts); 1252 - check(!err); 1253 - 1254 - err = reftable_stack_clean(st3); 1255 - check(!err); 1256 - check_int(count_dir_entries(dir), ==, 2); 1257 - 1258 - reftable_stack_destroy(st1); 1259 - reftable_stack_destroy(st2); 1260 - reftable_stack_destroy(st3); 1261 - 1262 - clear_dir(dir); 1263 - } 1264 - 1265 - static void t_reftable_stack_read_across_reload(void) 1266 - { 1267 - struct reftable_write_options opts = { 0 }; 1268 - struct reftable_stack *st1 = NULL, *st2 = NULL; 1269 - struct reftable_ref_record rec = { 0 }; 1270 - struct reftable_iterator it = { 0 }; 1271 - char *dir = get_tmp_dir(__LINE__); 1272 - int err; 1273 - 1274 - /* Create a first stack and set up an iterator for it. */ 1275 - err = reftable_new_stack(&st1, dir, &opts); 1276 - check(!err); 1277 - write_n_ref_tables(st1, 2); 1278 - check_int(st1->merged->tables_len, ==, 2); 1279 - reftable_stack_init_ref_iterator(st1, &it); 1280 - err = reftable_iterator_seek_ref(&it, ""); 1281 - check(!err); 1282 - 1283 - /* Set up a second stack for the same directory and compact it. 
*/ 1284 - err = reftable_new_stack(&st2, dir, &opts); 1285 - check(!err); 1286 - check_int(st2->merged->tables_len, ==, 2); 1287 - err = reftable_stack_compact_all(st2, NULL); 1288 - check(!err); 1289 - check_int(st2->merged->tables_len, ==, 1); 1290 - 1291 - /* 1292 - * Verify that we can continue to use the old iterator even after we 1293 - * have reloaded its stack. 1294 - */ 1295 - err = reftable_stack_reload(st1); 1296 - check(!err); 1297 - check_int(st1->merged->tables_len, ==, 1); 1298 - err = reftable_iterator_next_ref(&it, &rec); 1299 - check(!err); 1300 - check_str(rec.refname, "refs/heads/branch-0000"); 1301 - err = reftable_iterator_next_ref(&it, &rec); 1302 - check(!err); 1303 - check_str(rec.refname, "refs/heads/branch-0001"); 1304 - err = reftable_iterator_next_ref(&it, &rec); 1305 - check_int(err, >, 0); 1306 - 1307 - reftable_ref_record_release(&rec); 1308 - reftable_iterator_destroy(&it); 1309 - reftable_stack_destroy(st1); 1310 - reftable_stack_destroy(st2); 1311 - clear_dir(dir); 1312 - } 1313 - 1314 - static void t_reftable_stack_reload_with_missing_table(void) 1315 - { 1316 - struct reftable_write_options opts = { 0 }; 1317 - struct reftable_stack *st = NULL; 1318 - struct reftable_ref_record rec = { 0 }; 1319 - struct reftable_iterator it = { 0 }; 1320 - struct reftable_buf table_path = REFTABLE_BUF_INIT, content = REFTABLE_BUF_INIT; 1321 - char *dir = get_tmp_dir(__LINE__); 1322 - int err; 1323 - 1324 - /* Create a first stack and set up an iterator for it. */ 1325 - err = reftable_new_stack(&st, dir, &opts); 1326 - check(!err); 1327 - write_n_ref_tables(st, 2); 1328 - check_int(st->merged->tables_len, ==, 2); 1329 - reftable_stack_init_ref_iterator(st, &it); 1330 - err = reftable_iterator_seek_ref(&it, ""); 1331 - check(!err); 1332 - 1333 - /* 1334 - * Update the tables.list file with some garbage data, while reusing 1335 - * our old tables. This should trigger a partial reload of the stack, 1336 - * where we try to reuse our old tables. 
1337 - */ 1338 - check(!reftable_buf_addstr(&content, st->tables[0]->name)); 1339 - check(!reftable_buf_addstr(&content, "\n")); 1340 - check(!reftable_buf_addstr(&content, st->tables[1]->name)); 1341 - check(!reftable_buf_addstr(&content, "\n")); 1342 - check(!reftable_buf_addstr(&content, "garbage\n")); 1343 - check(!reftable_buf_addstr(&table_path, st->list_file)); 1344 - check(!reftable_buf_addstr(&table_path, ".lock")); 1345 - write_file_buf(table_path.buf, content.buf, content.len); 1346 - err = rename(table_path.buf, st->list_file); 1347 - check(!err); 1348 - 1349 - err = reftable_stack_reload(st); 1350 - check_int(err, ==, -4); 1351 - check_int(st->merged->tables_len, ==, 2); 1352 - 1353 - /* 1354 - * Even though the reload has failed, we should be able to continue 1355 - * using the iterator. 1356 - */ 1357 - err = reftable_iterator_next_ref(&it, &rec); 1358 - check(!err); 1359 - check_str(rec.refname, "refs/heads/branch-0000"); 1360 - err = reftable_iterator_next_ref(&it, &rec); 1361 - check(!err); 1362 - check_str(rec.refname, "refs/heads/branch-0001"); 1363 - err = reftable_iterator_next_ref(&it, &rec); 1364 - check_int(err, >, 0); 1365 - 1366 - reftable_ref_record_release(&rec); 1367 - reftable_iterator_destroy(&it); 1368 - reftable_stack_destroy(st); 1369 - reftable_buf_release(&table_path); 1370 - reftable_buf_release(&content); 1371 - clear_dir(dir); 1372 - } 1373 - 1374 - static int write_limits_after_ref(struct reftable_writer *wr, void *arg) 1375 - { 1376 - struct reftable_ref_record *ref = arg; 1377 - check(!reftable_writer_set_limits(wr, ref->update_index, ref->update_index)); 1378 - check(!reftable_writer_add_ref(wr, ref)); 1379 - return reftable_writer_set_limits(wr, ref->update_index, ref->update_index); 1380 - } 1381 - 1382 - static void t_reftable_invalid_limit_updates(void) 1383 - { 1384 - struct reftable_ref_record ref = { 1385 - .refname = (char *) "HEAD", 1386 - .update_index = 1, 1387 - .value_type = REFTABLE_REF_SYMREF, 1388 - 
.value.symref = (char *) "master", 1389 - }; 1390 - struct reftable_write_options opts = { 1391 - .default_permissions = 0660, 1392 - }; 1393 - struct reftable_addition *add = NULL; 1394 - char *dir = get_tmp_dir(__LINE__); 1395 - struct reftable_stack *st = NULL; 1396 - int err; 1397 - 1398 - err = reftable_new_stack(&st, dir, &opts); 1399 - check(!err); 1400 - 1401 - reftable_addition_destroy(add); 1402 - 1403 - err = reftable_stack_new_addition(&add, st, 0); 1404 - check(!err); 1405 - 1406 - /* 1407 - * write_limits_after_ref also updates the update indexes after adding 1408 - * the record. This should cause an err to be returned, since the limits 1409 - * must be set at the start. 1410 - */ 1411 - err = reftable_addition_add(add, write_limits_after_ref, &ref); 1412 - check_int(err, ==, REFTABLE_API_ERROR); 1413 - 1414 - reftable_addition_destroy(add); 1415 - reftable_stack_destroy(st); 1416 - clear_dir(dir); 1417 - } 1418 - 1419 - int cmd_main(int argc UNUSED, const char *argv[] UNUSED) 1420 - { 1421 - TEST(t_empty_add(), "empty addition to stack"); 1422 - TEST(t_read_file(), "read_lines works"); 1423 - TEST(t_reflog_expire(), "expire reflog entries"); 1424 - TEST(t_reftable_invalid_limit_updates(), "prevent limit updates after adding records"); 1425 - TEST(t_reftable_stack_add(), "add multiple refs and logs to stack"); 1426 - TEST(t_reftable_stack_add_one(), "add a single ref record to stack"); 1427 - TEST(t_reftable_stack_add_performs_auto_compaction(), "addition to stack triggers auto-compaction"); 1428 - TEST(t_reftable_stack_auto_compaction(), "stack must form geometric sequence after compaction"); 1429 - TEST(t_reftable_stack_auto_compaction_factor(), "auto-compaction with non-default geometric factor"); 1430 - TEST(t_reftable_stack_auto_compaction_fails_gracefully(), "failure on auto-compaction"); 1431 - TEST(t_reftable_stack_auto_compaction_with_locked_tables(), "auto compaction with locked tables"); 1432 - TEST(t_reftable_stack_compaction_concurrent(), 
"compaction with concurrent stack"); 1433 - TEST(t_reftable_stack_compaction_concurrent_clean(), "compaction with unclean stack shutdown"); 1434 - TEST(t_reftable_stack_compaction_with_locked_tables(), "compaction with locked tables"); 1435 - TEST(t_reftable_stack_hash_id(), "read stack with wrong hash ID"); 1436 - TEST(t_reftable_stack_iterator(), "log and ref iterator for reftable stack"); 1437 - TEST(t_reftable_stack_lock_failure(), "stack addition with lockfile failure"); 1438 - TEST(t_reftable_stack_log_normalize(), "log messages should be normalized"); 1439 - TEST(t_reftable_stack_read_across_reload(), "stack iterators work across reloads"); 1440 - TEST(t_reftable_stack_reload_with_missing_table(), "stack iteration with garbage tables"); 1441 - TEST(t_reftable_stack_tombstone(), "'tombstone' refs in stack"); 1442 - TEST(t_reftable_stack_transaction_api(), "update transaction to stack"); 1443 - TEST(t_reftable_stack_transaction_with_reload(), "transaction with reload"); 1444 - TEST(t_reftable_stack_transaction_api_performs_auto_compaction(), "update transaction triggers auto-compaction"); 1445 - TEST(t_reftable_stack_update_index_check(), "update transactions with equal update indices"); 1446 - TEST(t_reftable_stack_uptodate(), "stack must be reloaded before ref update"); 1447 - TEST(t_suggest_compaction_segment(), "suggest_compaction_segment with basic input"); 1448 - TEST(t_suggest_compaction_segment_nothing(), "suggest_compaction_segment with pre-compacted input"); 1449 - 1450 - return test_done(); 1451 - }
···
+34 -39
t/unit-tests/t-reftable-table.c t/unit-tests/u-reftable-table.c
··· 1 - #include "test-lib.h" 2 #include "lib-reftable.h" 3 #include "reftable/blocksource.h" 4 #include "reftable/constants.h" ··· 6 #include "reftable/table.h" 7 #include "strbuf.h" 8 9 - static int t_table_seek_once(void) 10 { 11 struct reftable_ref_record records[] = { 12 { ··· 22 struct reftable_buf buf = REFTABLE_BUF_INIT; 23 int ret; 24 25 - t_reftable_write_to_buf(&buf, records, ARRAY_SIZE(records), NULL, 0, NULL); 26 block_source_from_buf(&source, &buf); 27 28 ret = reftable_table_new(&table, &source, "name"); 29 - check(!ret); 30 31 reftable_table_init_ref_iterator(table, &it); 32 ret = reftable_iterator_seek_ref(&it, ""); 33 - check(!ret); 34 ret = reftable_iterator_next_ref(&it, &ref); 35 - check(!ret); 36 37 - ret = reftable_ref_record_equal(&ref, &records[0], REFTABLE_HASH_SIZE_SHA1); 38 - check_int(ret, ==, 1); 39 40 ret = reftable_iterator_next_ref(&it, &ref); 41 - check_int(ret, ==, 1); 42 43 reftable_ref_record_release(&ref); 44 reftable_iterator_destroy(&it); 45 reftable_table_decref(table); 46 reftable_buf_release(&buf); 47 - return 0; 48 } 49 50 - static int t_table_reseek(void) 51 { 52 struct reftable_ref_record records[] = { 53 { ··· 63 struct reftable_buf buf = REFTABLE_BUF_INIT; 64 int ret; 65 66 - t_reftable_write_to_buf(&buf, records, ARRAY_SIZE(records), NULL, 0, NULL); 67 block_source_from_buf(&source, &buf); 68 69 ret = reftable_table_new(&table, &source, "name"); 70 - check(!ret); 71 72 reftable_table_init_ref_iterator(table, &it); 73 74 for (size_t i = 0; i < 5; i++) { 75 ret = reftable_iterator_seek_ref(&it, ""); 76 - check(!ret); 77 ret = reftable_iterator_next_ref(&it, &ref); 78 - check(!ret); 79 80 ret = reftable_ref_record_equal(&ref, &records[0], REFTABLE_HASH_SIZE_SHA1); 81 - check_int(ret, ==, 1); 82 83 ret = reftable_iterator_next_ref(&it, &ref); 84 - check_int(ret, ==, 1); 85 } 86 87 reftable_ref_record_release(&ref); 88 reftable_iterator_destroy(&it); 89 reftable_table_decref(table); 90 reftable_buf_release(&buf); 91 - 
return 0; 92 } 93 94 - static int t_table_block_iterator(void) 95 { 96 struct reftable_block_source source = { 0 }; 97 struct reftable_table_iterator it = { 0 }; ··· 147 (uintmax_t) i); 148 } 149 150 - t_reftable_write_to_buf(&buf, records, nrecords, NULL, 0, NULL); 151 block_source_from_buf(&source, &buf); 152 153 ret = reftable_table_new(&table, &source, "name"); 154 - check(!ret); 155 156 ret = reftable_table_iterator_init(&it, table); 157 - check(!ret); 158 159 for (size_t i = 0; i < ARRAY_SIZE(expected_blocks); i++) { 160 struct reftable_iterator record_it = { 0 }; ··· 163 }; 164 165 ret = reftable_table_iterator_next(&it, &block); 166 - check(!ret); 167 168 - check_int(block->block_type, ==, expected_blocks[i].block_type); 169 - check_int(block->header_off, ==, expected_blocks[i].header_off); 170 - check_int(block->restart_count, ==, expected_blocks[i].restart_count); 171 172 ret = reftable_block_init_iterator(block, &record_it); 173 - check(!ret); 174 175 for (size_t j = 0; ; j++) { 176 ret = iterator_next(&record_it, &record); 177 if (ret > 0) { 178 - check_int(j, ==, expected_blocks[i].record_count); 179 break; 180 } 181 - check(!ret); 182 } 183 184 reftable_iterator_destroy(&record_it); ··· 186 } 187 188 ret = reftable_table_iterator_next(&it, &block); 189 - check_int(ret, ==, 1); 190 191 for (size_t i = 0; i < nrecords; i++) 192 reftable_free(records[i].refname); ··· 194 reftable_table_decref(table); 195 reftable_buf_release(&buf); 196 reftable_free(records); 197 - return 0; 198 - } 199 - 200 - int cmd_main(int argc UNUSED, const char *argv[] UNUSED) 201 - { 202 - TEST(t_table_seek_once(), "table can seek once"); 203 - TEST(t_table_reseek(), "table can reseek multiple times"); 204 - TEST(t_table_block_iterator(), "table can iterate through blocks"); 205 - return test_done(); 206 }
··· 1 + #include "unit-test.h" 2 #include "lib-reftable.h" 3 #include "reftable/blocksource.h" 4 #include "reftable/constants.h" ··· 6 #include "reftable/table.h" 7 #include "strbuf.h" 8 9 + void test_reftable_table__seek_once(void) 10 { 11 struct reftable_ref_record records[] = { 12 { ··· 22 struct reftable_buf buf = REFTABLE_BUF_INIT; 23 int ret; 24 25 + cl_reftable_write_to_buf(&buf, records, ARRAY_SIZE(records), NULL, 0, NULL); 26 block_source_from_buf(&source, &buf); 27 28 ret = reftable_table_new(&table, &source, "name"); 29 + cl_assert(!ret); 30 31 reftable_table_init_ref_iterator(table, &it); 32 ret = reftable_iterator_seek_ref(&it, ""); 33 + cl_assert(!ret); 34 ret = reftable_iterator_next_ref(&it, &ref); 35 + cl_assert(!ret); 36 37 + ret = reftable_ref_record_equal(&ref, &records[0], 38 + REFTABLE_HASH_SIZE_SHA1); 39 + cl_assert_equal_i(ret, 1); 40 41 ret = reftable_iterator_next_ref(&it, &ref); 42 + cl_assert_equal_i(ret, 1); 43 44 reftable_ref_record_release(&ref); 45 reftable_iterator_destroy(&it); 46 reftable_table_decref(table); 47 reftable_buf_release(&buf); 48 } 49 50 + void test_reftable_table__reseek(void) 51 { 52 struct reftable_ref_record records[] = { 53 { ··· 63 struct reftable_buf buf = REFTABLE_BUF_INIT; 64 int ret; 65 66 + cl_reftable_write_to_buf(&buf, records, ARRAY_SIZE(records), 67 + NULL, 0, NULL); 68 block_source_from_buf(&source, &buf); 69 70 ret = reftable_table_new(&table, &source, "name"); 71 + cl_assert(!ret); 72 73 reftable_table_init_ref_iterator(table, &it); 74 75 for (size_t i = 0; i < 5; i++) { 76 ret = reftable_iterator_seek_ref(&it, ""); 77 + cl_assert(!ret); 78 ret = reftable_iterator_next_ref(&it, &ref); 79 + cl_assert(!ret); 80 81 ret = reftable_ref_record_equal(&ref, &records[0], REFTABLE_HASH_SIZE_SHA1); 82 + cl_assert_equal_i(ret, 1); 83 84 ret = reftable_iterator_next_ref(&it, &ref); 85 + cl_assert_equal_i(ret, 1); 86 } 87 88 reftable_ref_record_release(&ref); 89 reftable_iterator_destroy(&it); 90 
reftable_table_decref(table); 91 reftable_buf_release(&buf); 92 } 93 94 + void test_reftable_table__block_iterator(void) 95 { 96 struct reftable_block_source source = { 0 }; 97 struct reftable_table_iterator it = { 0 }; ··· 147 (uintmax_t) i); 148 } 149 150 + cl_reftable_write_to_buf(&buf, records, nrecords, NULL, 0, NULL); 151 block_source_from_buf(&source, &buf); 152 153 ret = reftable_table_new(&table, &source, "name"); 154 + cl_assert(!ret); 155 156 ret = reftable_table_iterator_init(&it, table); 157 + cl_assert(!ret); 158 159 for (size_t i = 0; i < ARRAY_SIZE(expected_blocks); i++) { 160 struct reftable_iterator record_it = { 0 }; ··· 163 }; 164 165 ret = reftable_table_iterator_next(&it, &block); 166 + cl_assert(!ret); 167 168 + cl_assert_equal_i(block->block_type, 169 + expected_blocks[i].block_type); 170 + cl_assert_equal_i(block->header_off, 171 + expected_blocks[i].header_off); 172 + cl_assert_equal_i(block->restart_count, 173 + expected_blocks[i].restart_count); 174 175 ret = reftable_block_init_iterator(block, &record_it); 176 + cl_assert(!ret); 177 178 for (size_t j = 0; ; j++) { 179 ret = iterator_next(&record_it, &record); 180 if (ret > 0) { 181 + cl_assert_equal_i(j, 182 + expected_blocks[i].record_count); 183 break; 184 } 185 + cl_assert(!ret); 186 } 187 188 reftable_iterator_destroy(&record_it); ··· 190 } 191 192 ret = reftable_table_iterator_next(&it, &block); 193 + cl_assert_equal_i(ret, 1); 194 195 for (size_t i = 0; i < nrecords; i++) 196 reftable_free(records[i].refname); ··· 198 reftable_table_decref(table); 199 reftable_buf_release(&buf); 200 reftable_free(records); 201 }
+227
t/unit-tests/u-reftable-basics.c
···
··· 1 + /* 2 + Copyright 2020 Google LLC 3 + 4 + Use of this source code is governed by a BSD-style 5 + license that can be found in the LICENSE file or at 6 + https://developers.google.com/open-source/licenses/bsd 7 + */ 8 + 9 + #include "unit-test.h" 10 + #include "lib-reftable.h" 11 + #include "reftable/basics.h" 12 + 13 + struct integer_needle_lesseq_args { 14 + int needle; 15 + int *haystack; 16 + }; 17 + 18 + static int integer_needle_lesseq(size_t i, void *_args) 19 + { 20 + struct integer_needle_lesseq_args *args = _args; 21 + return args->needle <= args->haystack[i]; 22 + } 23 + 24 + static void *realloc_stub(void *p UNUSED, size_t size UNUSED) 25 + { 26 + return NULL; 27 + } 28 + 29 + void test_reftable_basics__binsearch(void) 30 + { 31 + int haystack[] = { 2, 4, 6, 8, 10 }; 32 + struct { 33 + int needle; 34 + size_t expected_idx; 35 + } testcases[] = { 36 + {-9000, 0}, 37 + {-1, 0}, 38 + {0, 0}, 39 + {2, 0}, 40 + {3, 1}, 41 + {4, 1}, 42 + {7, 3}, 43 + {9, 4}, 44 + {10, 4}, 45 + {11, 5}, 46 + {9000, 5}, 47 + }; 48 + 49 + for (size_t i = 0; i < ARRAY_SIZE(testcases); i++) { 50 + struct integer_needle_lesseq_args args = { 51 + .haystack = haystack, 52 + .needle = testcases[i].needle, 53 + }; 54 + size_t idx; 55 + 56 + idx = binsearch(ARRAY_SIZE(haystack), 57 + &integer_needle_lesseq, &args); 58 + cl_assert_equal_i(idx, testcases[i].expected_idx); 59 + } 60 + } 61 + 62 + void test_reftable_basics__names_length(void) 63 + { 64 + const char *a[] = { "a", "b", NULL }; 65 + cl_assert_equal_i(names_length(a), 2); 66 + } 67 + 68 + void test_reftable_basics__names_equal(void) 69 + { 70 + const char *a[] = { "a", "b", "c", NULL }; 71 + const char *b[] = { "a", "b", "d", NULL }; 72 + const char *c[] = { "a", "b", NULL }; 73 + 74 + cl_assert(names_equal(a, a)); 75 + cl_assert(!names_equal(a, b)); 76 + cl_assert(!names_equal(a, c)); 77 + } 78 + 79 + void test_reftable_basics__parse_names(void) 80 + { 81 + char in1[] = "line\n"; 82 + char in2[] = "a\nb\nc"; 83 + char 
**out = parse_names(in1, strlen(in1)); 84 + cl_assert(out != NULL); 85 + cl_assert_equal_s(out[0], "line"); 86 + cl_assert(!out[1]); 87 + free_names(out); 88 + 89 + out = parse_names(in2, strlen(in2)); 90 + cl_assert(out != NULL); 91 + cl_assert_equal_s(out[0], "a"); 92 + cl_assert_equal_s(out[1], "b"); 93 + cl_assert_equal_s(out[2], "c"); 94 + cl_assert(!out[3]); 95 + free_names(out); 96 + } 97 + 98 + void test_reftable_basics__parse_names_drop_empty_string(void) 99 + { 100 + char in[] = "a\n\nb\n"; 101 + char **out = parse_names(in, strlen(in)); 102 + cl_assert(out != NULL); 103 + cl_assert_equal_s(out[0], "a"); 104 + /* simply '\n' should be dropped as empty string */ 105 + cl_assert_equal_s(out[1], "b"); 106 + cl_assert(out[2] == NULL); 107 + free_names(out); 108 + } 109 + 110 + void test_reftable_basics__common_prefix_size(void) 111 + { 112 + struct reftable_buf a = REFTABLE_BUF_INIT; 113 + struct reftable_buf b = REFTABLE_BUF_INIT; 114 + struct { 115 + const char *a, *b; 116 + int want; 117 + } cases[] = { 118 + {"abcdef", "abc", 3}, 119 + { "abc", "ab", 2 }, 120 + { "", "abc", 0 }, 121 + { "abc", "abd", 2 }, 122 + { "abc", "pqr", 0 }, 123 + }; 124 + 125 + for (size_t i = 0; i < ARRAY_SIZE(cases); i++) { 126 + cl_assert_equal_i(reftable_buf_addstr(&a, cases[i].a), 0); 127 + cl_assert_equal_i(reftable_buf_addstr(&b, cases[i].b), 0); 128 + cl_assert_equal_i(common_prefix_size(&a, &b), cases[i].want); 129 + reftable_buf_reset(&a); 130 + reftable_buf_reset(&b); 131 + } 132 + reftable_buf_release(&a); 133 + reftable_buf_release(&b); 134 + } 135 + 136 + void test_reftable_basics__put_get_be64(void) 137 + { 138 + uint64_t in = 0x1122334455667788; 139 + uint8_t dest[8]; 140 + uint64_t out; 141 + reftable_put_be64(dest, in); 142 + out = reftable_get_be64(dest); 143 + cl_assert(in == out); 144 + } 145 + 146 + void test_reftable_basics__put_get_be32(void) 147 + { 148 + uint32_t in = 0x11223344; 149 + uint8_t dest[4]; 150 + uint32_t out; 151 + reftable_put_be32(dest, 
in); 152 + out = reftable_get_be32(dest); 153 + cl_assert_equal_i(in, out); 154 + } 155 + 156 + void test_reftable_basics__put_get_be24(void) 157 + { 158 + uint32_t in = 0x112233; 159 + uint8_t dest[3]; 160 + uint32_t out; 161 + reftable_put_be24(dest, in); 162 + out = reftable_get_be24(dest); 163 + cl_assert_equal_i(in, out); 164 + } 165 + 166 + void test_reftable_basics__put_get_be16(void) 167 + { 168 + uint32_t in = 0xfef1; 169 + uint8_t dest[3]; 170 + uint32_t out; 171 + reftable_put_be16(dest, in); 172 + out = reftable_get_be16(dest); 173 + cl_assert_equal_i(in, out); 174 + } 175 + 176 + void test_reftable_basics__alloc_grow(void) 177 + { 178 + int *arr = NULL, *old_arr; 179 + size_t alloc = 0, old_alloc; 180 + 181 + cl_assert_equal_i(REFTABLE_ALLOC_GROW(arr, 1, alloc), 0); 182 + cl_assert(arr != NULL); 183 + cl_assert(alloc >= 1); 184 + arr[0] = 42; 185 + 186 + old_alloc = alloc; 187 + old_arr = arr; 188 + reftable_set_alloc(NULL, realloc_stub, NULL); 189 + cl_assert(REFTABLE_ALLOC_GROW(arr, old_alloc + 1, alloc)); 190 + cl_assert(arr == old_arr); 191 + cl_assert_equal_i(alloc, old_alloc); 192 + 193 + old_alloc = alloc; 194 + reftable_set_alloc(NULL, NULL, NULL); 195 + cl_assert_equal_i(REFTABLE_ALLOC_GROW(arr, old_alloc + 1, alloc), 0); 196 + cl_assert(arr != NULL); 197 + cl_assert(alloc > old_alloc); 198 + arr[alloc - 1] = 42; 199 + 200 + reftable_free(arr); 201 + } 202 + 203 + void test_reftable_basics__alloc_grow_or_null(void) 204 + { 205 + int *arr = NULL; 206 + size_t alloc = 0, old_alloc; 207 + 208 + REFTABLE_ALLOC_GROW_OR_NULL(arr, 1, alloc); 209 + cl_assert(arr != NULL); 210 + cl_assert(alloc >= 1); 211 + arr[0] = 42; 212 + 213 + old_alloc = alloc; 214 + REFTABLE_ALLOC_GROW_OR_NULL(arr, old_alloc + 1, alloc); 215 + cl_assert(arr != NULL); 216 + cl_assert(alloc > old_alloc); 217 + arr[alloc - 1] = 42; 218 + 219 + old_alloc = alloc; 220 + reftable_set_alloc(NULL, realloc_stub, NULL); 221 + REFTABLE_ALLOC_GROW_OR_NULL(arr, old_alloc + 1, alloc); 222 + 
cl_assert(arr == NULL); 223 + cl_assert_equal_i(alloc, 0); 224 + reftable_set_alloc(NULL, NULL, NULL); 225 + 226 + reftable_free(arr); 227 + }
t/unit-tests/u-reftable-stack.c  (+1331 lines)
···
··· 1 + /* 2 + Copyright 2020 Google LLC 3 + 4 + Use of this source code is governed by a BSD-style 5 + license that can be found in the LICENSE file or at 6 + https://developers.google.com/open-source/licenses/bsd 7 + */ 8 + 9 + #define DISABLE_SIGN_COMPARE_WARNINGS 10 + 11 + #include "unit-test.h" 12 + #include "dir.h" 13 + #include "lib-reftable.h" 14 + #include "reftable/merged.h" 15 + #include "reftable/reftable-error.h" 16 + #include "reftable/stack.h" 17 + #include "reftable/table.h" 18 + #include "strbuf.h" 19 + #include "tempfile.h" 20 + #include <dirent.h> 21 + 22 + static void clear_dir(const char *dirname) 23 + { 24 + struct strbuf path = REFTABLE_BUF_INIT; 25 + strbuf_addstr(&path, dirname); 26 + remove_dir_recursively(&path, 0); 27 + strbuf_release(&path); 28 + } 29 + 30 + static int count_dir_entries(const char *dirname) 31 + { 32 + DIR *dir = opendir(dirname); 33 + int len = 0; 34 + struct dirent *d; 35 + if (!dir) 36 + return 0; 37 + 38 + while ((d = readdir(dir))) { 39 + /* 40 + * Besides skipping over "." and "..", we also need to 41 + * skip over other files that have a leading ".". This 42 + * is due to behaviour of NFS, which will rename files 43 + * to ".nfs*" to emulate delete-on-last-close. 44 + * 45 + * In any case this should be fine as the reftable 46 + * library will never write files with leading dots 47 + * anyway. 48 + */ 49 + if (starts_with(d->d_name, ".")) 50 + continue; 51 + len++; 52 + } 53 + closedir(dir); 54 + return len; 55 + } 56 + 57 + /* 58 + * Work linenumber into the tempdir, so we can see which tests forget to 59 + * cleanup. 60 + */ 61 + static char *get_tmp_template(int linenumber) 62 + { 63 + const char *tmp = getenv("TMPDIR"); 64 + static char template[1024]; 65 + snprintf(template, sizeof(template) - 1, "%s/stack_test-%d.XXXXXX", 66 + tmp ? 
tmp : "/tmp", linenumber); 67 + return template; 68 + } 69 + 70 + static char *get_tmp_dir(int linenumber) 71 + { 72 + char *dir = get_tmp_template(linenumber); 73 + cl_assert(mkdtemp(dir) != NULL); 74 + return dir; 75 + } 76 + 77 + void test_reftable_stack__read_file(void) 78 + { 79 + char *fn = get_tmp_template(__LINE__); 80 + struct tempfile *tmp = mks_tempfile(fn); 81 + int fd = get_tempfile_fd(tmp); 82 + char out[1024] = "line1\n\nline2\nline3"; 83 + int n, err; 84 + char **names = NULL; 85 + const char *want[] = { "line1", "line2", "line3" }; 86 + 87 + cl_assert(fd > 0); 88 + n = write_in_full(fd, out, strlen(out)); 89 + cl_assert_equal_i(n, strlen(out)); 90 + err = close(fd); 91 + cl_assert(err >= 0); 92 + 93 + err = read_lines(fn, &names); 94 + cl_assert(!err); 95 + 96 + for (size_t i = 0; names[i]; i++) 97 + cl_assert_equal_s(want[i], names[i]); 98 + free_names(names); 99 + (void) remove(fn); 100 + delete_tempfile(&tmp); 101 + } 102 + 103 + static int write_test_ref(struct reftable_writer *wr, void *arg) 104 + { 105 + struct reftable_ref_record *ref = arg; 106 + cl_assert_equal_i(reftable_writer_set_limits(wr, 107 + ref->update_index, ref->update_index), 0); 108 + return reftable_writer_add_ref(wr, ref); 109 + } 110 + 111 + static void write_n_ref_tables(struct reftable_stack *st, 112 + size_t n) 113 + { 114 + int disable_auto_compact; 115 + 116 + disable_auto_compact = st->opts.disable_auto_compact; 117 + st->opts.disable_auto_compact = 1; 118 + 119 + for (size_t i = 0; i < n; i++) { 120 + struct reftable_ref_record ref = { 121 + .update_index = reftable_stack_next_update_index(st), 122 + .value_type = REFTABLE_REF_VAL1, 123 + }; 124 + char buf[128]; 125 + 126 + snprintf(buf, sizeof(buf), "refs/heads/branch-%04"PRIuMAX, (uintmax_t)i); 127 + ref.refname = buf; 128 + cl_reftable_set_hash(ref.value.val1, i, REFTABLE_HASH_SHA1); 129 + 130 + cl_assert_equal_i(reftable_stack_add(st, 131 + &write_test_ref, &ref), 0); 132 + } 133 + 134 + 
st->opts.disable_auto_compact = disable_auto_compact; 135 + } 136 + 137 + struct write_log_arg { 138 + struct reftable_log_record *log; 139 + uint64_t update_index; 140 + }; 141 + 142 + static int write_test_log(struct reftable_writer *wr, void *arg) 143 + { 144 + struct write_log_arg *wla = arg; 145 + 146 + cl_assert_equal_i(reftable_writer_set_limits(wr, 147 + wla->update_index, 148 + wla->update_index), 0); 149 + return reftable_writer_add_log(wr, wla->log); 150 + } 151 + 152 + void test_reftable_stack__add_one(void) 153 + { 154 + char *dir = get_tmp_dir(__LINE__); 155 + struct reftable_buf scratch = REFTABLE_BUF_INIT; 156 + int mask = umask(002); 157 + struct reftable_write_options opts = { 158 + .default_permissions = 0660, 159 + }; 160 + struct reftable_stack *st = NULL; 161 + struct reftable_ref_record ref = { 162 + .refname = (char *) "HEAD", 163 + .update_index = 1, 164 + .value_type = REFTABLE_REF_SYMREF, 165 + .value.symref = (char *) "master", 166 + }; 167 + struct reftable_ref_record dest = { 0 }; 168 + struct stat stat_result = { 0 }; 169 + int err; 170 + 171 + err = reftable_new_stack(&st, dir, &opts); 172 + cl_assert(!err); 173 + 174 + err = reftable_stack_add(st, write_test_ref, &ref); 175 + cl_assert(!err); 176 + 177 + err = reftable_stack_read_ref(st, ref.refname, &dest); 178 + cl_assert(!err); 179 + cl_assert(reftable_ref_record_equal(&ref, &dest, 180 + REFTABLE_HASH_SIZE_SHA1)); 181 + cl_assert(st->tables_len > 0); 182 + 183 + #ifndef GIT_WINDOWS_NATIVE 184 + cl_assert_equal_i(reftable_buf_addstr(&scratch, dir), 0); 185 + cl_assert_equal_i(reftable_buf_addstr(&scratch, 186 + "/tables.list"), 0); 187 + cl_assert_equal_i(stat(scratch.buf, &stat_result), 0); 188 + cl_assert_equal_i((stat_result.st_mode & 0777), 189 + opts.default_permissions); 190 + 191 + reftable_buf_reset(&scratch); 192 + cl_assert_equal_i(reftable_buf_addstr(&scratch, dir), 0); 193 + cl_assert_equal_i(reftable_buf_addstr(&scratch, "/"), 0); 194 + /* do not try at home; not an 
external API for reftable. */ 195 + cl_assert(!reftable_buf_addstr(&scratch, st->tables[0]->name)); 196 + err = stat(scratch.buf, &stat_result); 197 + cl_assert(!err); 198 + cl_assert_equal_i((stat_result.st_mode & 0777), 199 + opts.default_permissions); 200 + #else 201 + (void) stat_result; 202 + #endif 203 + 204 + reftable_ref_record_release(&dest); 205 + reftable_stack_destroy(st); 206 + reftable_buf_release(&scratch); 207 + clear_dir(dir); 208 + umask(mask); 209 + } 210 + 211 + void test_reftable_stack__uptodate(void) 212 + { 213 + struct reftable_write_options opts = { 0 }; 214 + struct reftable_stack *st1 = NULL; 215 + struct reftable_stack *st2 = NULL; 216 + char *dir = get_tmp_dir(__LINE__); 217 + 218 + struct reftable_ref_record ref1 = { 219 + .refname = (char *) "HEAD", 220 + .update_index = 1, 221 + .value_type = REFTABLE_REF_SYMREF, 222 + .value.symref = (char *) "master", 223 + }; 224 + struct reftable_ref_record ref2 = { 225 + .refname = (char *) "branch2", 226 + .update_index = 2, 227 + .value_type = REFTABLE_REF_SYMREF, 228 + .value.symref = (char *) "master", 229 + }; 230 + 231 + 232 + /* simulate multi-process access to the same stack 233 + by creating two stacks for the same directory. 
234 + */ 235 + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); 236 + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); 237 + cl_assert_equal_i(reftable_stack_add(st1, write_test_ref, 238 + &ref1), 0); 239 + cl_assert_equal_i(reftable_stack_add(st2, write_test_ref, 240 + &ref2), REFTABLE_OUTDATED_ERROR); 241 + cl_assert_equal_i(reftable_stack_reload(st2), 0); 242 + cl_assert_equal_i(reftable_stack_add(st2, write_test_ref, 243 + &ref2), 0); 244 + reftable_stack_destroy(st1); 245 + reftable_stack_destroy(st2); 246 + clear_dir(dir); 247 + } 248 + 249 + void test_reftable_stack__transaction_api(void) 250 + { 251 + char *dir = get_tmp_dir(__LINE__); 252 + struct reftable_write_options opts = { 0 }; 253 + struct reftable_stack *st = NULL; 254 + struct reftable_addition *add = NULL; 255 + 256 + struct reftable_ref_record ref = { 257 + .refname = (char *) "HEAD", 258 + .update_index = 1, 259 + .value_type = REFTABLE_REF_SYMREF, 260 + .value.symref = (char *) "master", 261 + }; 262 + struct reftable_ref_record dest = { 0 }; 263 + 264 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 265 + 266 + reftable_addition_destroy(add); 267 + 268 + cl_assert_equal_i(reftable_stack_new_addition(&add, st, 0), 0); 269 + cl_assert_equal_i(reftable_addition_add(add, write_test_ref, 270 + &ref), 0); 271 + cl_assert_equal_i(reftable_addition_commit(add), 0); 272 + 273 + reftable_addition_destroy(add); 274 + 275 + cl_assert_equal_i(reftable_stack_read_ref(st, ref.refname, 276 + &dest), 0); 277 + cl_assert_equal_i(REFTABLE_REF_SYMREF, dest.value_type); 278 + cl_assert(reftable_ref_record_equal(&ref, &dest, 279 + REFTABLE_HASH_SIZE_SHA1) != 0); 280 + 281 + reftable_ref_record_release(&dest); 282 + reftable_stack_destroy(st); 283 + clear_dir(dir); 284 + } 285 + 286 + void test_reftable_stack__transaction_with_reload(void) 287 + { 288 + char *dir = get_tmp_dir(__LINE__); 289 + struct reftable_stack *st1 = NULL, *st2 = NULL; 290 + struct reftable_addition *add = 
NULL; 291 + struct reftable_ref_record refs[2] = { 292 + { 293 + .refname = (char *) "refs/heads/a", 294 + .update_index = 1, 295 + .value_type = REFTABLE_REF_VAL1, 296 + .value.val1 = { '1' }, 297 + }, 298 + { 299 + .refname = (char *) "refs/heads/b", 300 + .update_index = 2, 301 + .value_type = REFTABLE_REF_VAL1, 302 + .value.val1 = { '1' }, 303 + }, 304 + }; 305 + struct reftable_ref_record ref = { 0 }; 306 + 307 + cl_assert_equal_i(reftable_new_stack(&st1, dir, NULL), 0); 308 + cl_assert_equal_i(reftable_new_stack(&st2, dir, NULL), 0); 309 + cl_assert_equal_i(reftable_stack_new_addition(&add, st1, 0), 0); 310 + cl_assert_equal_i(reftable_addition_add(add, write_test_ref, 311 + &refs[0]), 0); 312 + cl_assert_equal_i(reftable_addition_commit(add), 0); 313 + reftable_addition_destroy(add); 314 + 315 + /* 316 + * The second stack is now outdated, which we should notice. We do not 317 + * create the addition and lock the stack by default, but allow the 318 + * reload to happen when REFTABLE_STACK_NEW_ADDITION_RELOAD is set. 
319 + */ 320 + cl_assert_equal_i(reftable_stack_new_addition(&add, st2, 0), 321 + REFTABLE_OUTDATED_ERROR); 322 + cl_assert_equal_i(reftable_stack_new_addition(&add, st2, 323 + REFTABLE_STACK_NEW_ADDITION_RELOAD), 0); 324 + cl_assert_equal_i(reftable_addition_add(add, write_test_ref, 325 + &refs[1]), 0); 326 + cl_assert_equal_i(reftable_addition_commit(add), 0); 327 + reftable_addition_destroy(add); 328 + 329 + for (size_t i = 0; i < ARRAY_SIZE(refs); i++) { 330 + cl_assert_equal_i(reftable_stack_read_ref(st2, 331 + refs[i].refname, &ref) , 0); 332 + cl_assert(reftable_ref_record_equal(&refs[i], &ref, 333 + REFTABLE_HASH_SIZE_SHA1) != 0); 334 + } 335 + 336 + reftable_ref_record_release(&ref); 337 + reftable_stack_destroy(st1); 338 + reftable_stack_destroy(st2); 339 + clear_dir(dir); 340 + } 341 + 342 + void test_reftable_stack__transaction_api_performs_auto_compaction(void) 343 + { 344 + char *dir = get_tmp_dir(__LINE__); 345 + struct reftable_write_options opts = {0}; 346 + struct reftable_addition *add = NULL; 347 + struct reftable_stack *st = NULL; 348 + size_t n = 20; 349 + 350 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 351 + 352 + for (size_t i = 0; i <= n; i++) { 353 + struct reftable_ref_record ref = { 354 + .update_index = reftable_stack_next_update_index(st), 355 + .value_type = REFTABLE_REF_SYMREF, 356 + .value.symref = (char *) "master", 357 + }; 358 + char name[100]; 359 + 360 + snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 361 + ref.refname = name; 362 + 363 + /* 364 + * Disable auto-compaction for all but the last runs. Like this 365 + * we can ensure that we indeed honor this setting and have 366 + * better control over when exactly auto compaction runs. 
367 + */ 368 + st->opts.disable_auto_compact = i != n; 369 + 370 + cl_assert_equal_i(reftable_stack_new_addition(&add, 371 + st, 0), 0); 372 + cl_assert_equal_i(reftable_addition_add(add, 373 + write_test_ref, &ref), 0); 374 + cl_assert_equal_i(reftable_addition_commit(add), 0); 375 + 376 + reftable_addition_destroy(add); 377 + 378 + /* 379 + * The stack length should grow continuously for all runs where 380 + * auto compaction is disabled. When enabled, we should merge 381 + * all tables in the stack. 382 + */ 383 + if (i != n) 384 + cl_assert_equal_i(st->merged->tables_len, i + 1); 385 + else 386 + cl_assert_equal_i(st->merged->tables_len, 1); 387 + } 388 + 389 + reftable_stack_destroy(st); 390 + clear_dir(dir); 391 + } 392 + 393 + void test_reftable_stack__auto_compaction_fails_gracefully(void) 394 + { 395 + struct reftable_ref_record ref = { 396 + .refname = (char *) "refs/heads/master", 397 + .update_index = 1, 398 + .value_type = REFTABLE_REF_VAL1, 399 + .value.val1 = {0x01}, 400 + }; 401 + struct reftable_write_options opts = { 0 }; 402 + struct reftable_stack *st; 403 + struct reftable_buf table_path = REFTABLE_BUF_INIT; 404 + char *dir = get_tmp_dir(__LINE__); 405 + int err; 406 + 407 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 408 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 409 + &ref), 0); 410 + cl_assert_equal_i(st->merged->tables_len, 1); 411 + cl_assert_equal_i(st->stats.attempts, 0); 412 + cl_assert_equal_i(st->stats.failures, 0); 413 + 414 + /* 415 + * Lock the newly written table such that it cannot be compacted. 416 + * Adding a new table to the stack should not be impacted by this, even 417 + * though auto-compaction will now fail. 
418 + */ 419 + cl_assert(!reftable_buf_addstr(&table_path, dir)); 420 + cl_assert(!reftable_buf_addstr(&table_path, "/")); 421 + cl_assert(!reftable_buf_addstr(&table_path, 422 + st->tables[0]->name)); 423 + cl_assert(!reftable_buf_addstr(&table_path, ".lock")); 424 + write_file_buf(table_path.buf, "", 0); 425 + 426 + ref.update_index = 2; 427 + err = reftable_stack_add(st, write_test_ref, &ref); 428 + cl_assert(!err); 429 + cl_assert_equal_i(st->merged->tables_len, 2); 430 + cl_assert_equal_i(st->stats.attempts, 1); 431 + cl_assert_equal_i(st->stats.failures, 1); 432 + 433 + reftable_stack_destroy(st); 434 + reftable_buf_release(&table_path); 435 + clear_dir(dir); 436 + } 437 + 438 + static int write_error(struct reftable_writer *wr UNUSED, void *arg) 439 + { 440 + return *((int *)arg); 441 + } 442 + 443 + void test_reftable_stack__update_index_check(void) 444 + { 445 + char *dir = get_tmp_dir(__LINE__); 446 + struct reftable_write_options opts = { 0 }; 447 + struct reftable_stack *st = NULL; 448 + struct reftable_ref_record ref1 = { 449 + .refname = (char *) "name1", 450 + .update_index = 1, 451 + .value_type = REFTABLE_REF_SYMREF, 452 + .value.symref = (char *) "master", 453 + }; 454 + struct reftable_ref_record ref2 = { 455 + .refname = (char *) "name2", 456 + .update_index = 1, 457 + .value_type = REFTABLE_REF_SYMREF, 458 + .value.symref = (char *) "master", 459 + }; 460 + 461 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 462 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 463 + &ref1), 0); 464 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 465 + &ref2), REFTABLE_API_ERROR); 466 + reftable_stack_destroy(st); 467 + clear_dir(dir); 468 + } 469 + 470 + void test_reftable_stack__lock_failure(void) 471 + { 472 + char *dir = get_tmp_dir(__LINE__); 473 + struct reftable_write_options opts = { 0 }; 474 + struct reftable_stack *st = NULL; 475 + int i; 476 + 477 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 478 + for 
(i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--) 479 + cl_assert_equal_i(reftable_stack_add(st, write_error, 480 + &i), i); 481 + 482 + reftable_stack_destroy(st); 483 + clear_dir(dir); 484 + } 485 + 486 + void test_reftable_stack__add(void) 487 + { 488 + struct reftable_write_options opts = { 489 + .exact_log_message = 1, 490 + .default_permissions = 0660, 491 + .disable_auto_compact = 1, 492 + }; 493 + struct reftable_stack *st = NULL; 494 + char *dir = get_tmp_dir(__LINE__); 495 + struct reftable_ref_record refs[2] = { 0 }; 496 + struct reftable_log_record logs[2] = { 0 }; 497 + struct reftable_buf path = REFTABLE_BUF_INIT; 498 + struct stat stat_result; 499 + size_t i, N = ARRAY_SIZE(refs); 500 + int err = 0; 501 + 502 + err = reftable_new_stack(&st, dir, &opts); 503 + cl_assert(!err); 504 + 505 + for (i = 0; i < N; i++) { 506 + char buf[256]; 507 + snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i); 508 + refs[i].refname = xstrdup(buf); 509 + refs[i].update_index = i + 1; 510 + refs[i].value_type = REFTABLE_REF_VAL1; 511 + cl_reftable_set_hash(refs[i].value.val1, i, 512 + REFTABLE_HASH_SHA1); 513 + 514 + logs[i].refname = xstrdup(buf); 515 + logs[i].update_index = N + i + 1; 516 + logs[i].value_type = REFTABLE_LOG_UPDATE; 517 + logs[i].value.update.email = xstrdup("identity@invalid"); 518 + cl_reftable_set_hash(logs[i].value.update.new_hash, i, 519 + REFTABLE_HASH_SHA1); 520 + } 521 + 522 + for (i = 0; i < N; i++) 523 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 524 + &refs[i]), 0); 525 + 526 + for (i = 0; i < N; i++) { 527 + struct write_log_arg arg = { 528 + .log = &logs[i], 529 + .update_index = reftable_stack_next_update_index(st), 530 + }; 531 + cl_assert_equal_i(reftable_stack_add(st, write_test_log, 532 + &arg), 0); 533 + } 534 + 535 + cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); 536 + 537 + for (i = 0; i < N; i++) { 538 + struct reftable_ref_record dest = { 0 }; 539 + 540 + 
cl_assert_equal_i(reftable_stack_read_ref(st, 541 + refs[i].refname, &dest), 0); 542 + cl_assert(reftable_ref_record_equal(&dest, refs + i, 543 + REFTABLE_HASH_SIZE_SHA1) != 0); 544 + reftable_ref_record_release(&dest); 545 + } 546 + 547 + for (i = 0; i < N; i++) { 548 + struct reftable_log_record dest = { 0 }; 549 + cl_assert_equal_i(reftable_stack_read_log(st, 550 + refs[i].refname, &dest), 0); 551 + cl_assert(reftable_log_record_equal(&dest, logs + i, 552 + REFTABLE_HASH_SIZE_SHA1) != 0); 553 + reftable_log_record_release(&dest); 554 + } 555 + 556 + #ifndef GIT_WINDOWS_NATIVE 557 + cl_assert_equal_i(reftable_buf_addstr(&path, dir), 0); 558 + cl_assert_equal_i(reftable_buf_addstr(&path, "/tables.list"), 0); 559 + cl_assert_equal_i(stat(path.buf, &stat_result), 0); 560 + cl_assert_equal_i((stat_result.st_mode & 0777), opts.default_permissions); 561 + 562 + reftable_buf_reset(&path); 563 + cl_assert_equal_i(reftable_buf_addstr(&path, dir), 0); 564 + cl_assert_equal_i(reftable_buf_addstr(&path, "/"), 0); 565 + /* do not try at home; not an external API for reftable. 
*/ 566 + cl_assert(!reftable_buf_addstr(&path, st->tables[0]->name)); 567 + err = stat(path.buf, &stat_result); 568 + cl_assert(!err); 569 + cl_assert_equal_i((stat_result.st_mode & 0777), 570 + opts.default_permissions); 571 + #else 572 + (void) stat_result; 573 + #endif 574 + 575 + /* cleanup */ 576 + reftable_stack_destroy(st); 577 + for (i = 0; i < N; i++) { 578 + reftable_ref_record_release(&refs[i]); 579 + reftable_log_record_release(&logs[i]); 580 + } 581 + reftable_buf_release(&path); 582 + clear_dir(dir); 583 + } 584 + 585 + void test_reftable_stack__iterator(void) 586 + { 587 + struct reftable_write_options opts = { 0 }; 588 + struct reftable_stack *st = NULL; 589 + char *dir = get_tmp_dir(__LINE__); 590 + struct reftable_ref_record refs[10] = { 0 }; 591 + struct reftable_log_record logs[10] = { 0 }; 592 + struct reftable_iterator it = { 0 }; 593 + size_t N = ARRAY_SIZE(refs), i; 594 + int err; 595 + 596 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 597 + 598 + for (i = 0; i < N; i++) { 599 + refs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); 600 + refs[i].update_index = i + 1; 601 + refs[i].value_type = REFTABLE_REF_VAL1; 602 + cl_reftable_set_hash(refs[i].value.val1, i, 603 + REFTABLE_HASH_SHA1); 604 + 605 + logs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); 606 + logs[i].update_index = i + 1; 607 + logs[i].value_type = REFTABLE_LOG_UPDATE; 608 + logs[i].value.update.email = xstrdup("johndoe@invalid"); 609 + logs[i].value.update.message = xstrdup("commit\n"); 610 + cl_reftable_set_hash(logs[i].value.update.new_hash, i, 611 + REFTABLE_HASH_SHA1); 612 + } 613 + 614 + for (i = 0; i < N; i++) 615 + cl_assert_equal_i(reftable_stack_add(st, 616 + write_test_ref, &refs[i]), 0); 617 + 618 + for (i = 0; i < N; i++) { 619 + struct write_log_arg arg = { 620 + .log = &logs[i], 621 + .update_index = reftable_stack_next_update_index(st), 622 + }; 623 + 624 + cl_assert_equal_i(reftable_stack_add(st, 625 + write_test_log, &arg), 0); 
626 + } 627 + 628 + reftable_stack_init_ref_iterator(st, &it); 629 + reftable_iterator_seek_ref(&it, refs[0].refname); 630 + for (i = 0; ; i++) { 631 + struct reftable_ref_record ref = { 0 }; 632 + 633 + err = reftable_iterator_next_ref(&it, &ref); 634 + if (err > 0) 635 + break; 636 + cl_assert(!err); 637 + cl_assert(reftable_ref_record_equal(&ref, &refs[i], 638 + REFTABLE_HASH_SIZE_SHA1) != 0); 639 + reftable_ref_record_release(&ref); 640 + } 641 + cl_assert_equal_i(i, N); 642 + 643 + reftable_iterator_destroy(&it); 644 + 645 + cl_assert_equal_i(reftable_stack_init_log_iterator(st, &it), 0); 646 + 647 + reftable_iterator_seek_log(&it, logs[0].refname); 648 + for (i = 0; ; i++) { 649 + struct reftable_log_record log = { 0 }; 650 + 651 + err = reftable_iterator_next_log(&it, &log); 652 + if (err > 0) 653 + break; 654 + cl_assert(!err); 655 + cl_assert(reftable_log_record_equal(&log, &logs[i], 656 + REFTABLE_HASH_SIZE_SHA1) != 0); 657 + reftable_log_record_release(&log); 658 + } 659 + cl_assert_equal_i(i, N); 660 + 661 + reftable_stack_destroy(st); 662 + reftable_iterator_destroy(&it); 663 + for (i = 0; i < N; i++) { 664 + reftable_ref_record_release(&refs[i]); 665 + reftable_log_record_release(&logs[i]); 666 + } 667 + clear_dir(dir); 668 + } 669 + 670 + void test_reftable_stack__log_normalize(void) 671 + { 672 + struct reftable_write_options opts = { 673 + 0, 674 + }; 675 + struct reftable_stack *st = NULL; 676 + char *dir = get_tmp_dir(__LINE__); 677 + struct reftable_log_record input = { 678 + .refname = (char *) "branch", 679 + .update_index = 1, 680 + .value_type = REFTABLE_LOG_UPDATE, 681 + .value = { 682 + .update = { 683 + .new_hash = { 1 }, 684 + .old_hash = { 2 }, 685 + }, 686 + }, 687 + }; 688 + struct reftable_log_record dest = { 689 + .update_index = 0, 690 + }; 691 + struct write_log_arg arg = { 692 + .log = &input, 693 + .update_index = 1, 694 + }; 695 + 696 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 697 + 698 + 
input.value.update.message = (char *) "one\ntwo"; 699 + cl_assert_equal_i(reftable_stack_add(st, write_test_log, 700 + &arg), REFTABLE_API_ERROR); 701 + 702 + input.value.update.message = (char *) "one"; 703 + cl_assert_equal_i(reftable_stack_add(st, write_test_log, 704 + &arg), 0); 705 + cl_assert_equal_i(reftable_stack_read_log(st, input.refname, 706 + &dest), 0); 707 + cl_assert_equal_s(dest.value.update.message, "one\n"); 708 + 709 + input.value.update.message = (char *) "two\n"; 710 + arg.update_index = 2; 711 + cl_assert_equal_i(reftable_stack_add(st, write_test_log, 712 + &arg), 0); 713 + cl_assert_equal_i(reftable_stack_read_log(st, input.refname, 714 + &dest), 0); 715 + cl_assert_equal_s(dest.value.update.message, "two\n"); 716 + 717 + /* cleanup */ 718 + reftable_stack_destroy(st); 719 + reftable_log_record_release(&dest); 720 + clear_dir(dir); 721 + } 722 + 723 + void test_reftable_stack__tombstone(void) 724 + { 725 + char *dir = get_tmp_dir(__LINE__); 726 + struct reftable_write_options opts = { 0 }; 727 + struct reftable_stack *st = NULL; 728 + struct reftable_ref_record refs[2] = { 0 }; 729 + struct reftable_log_record logs[2] = { 0 }; 730 + size_t i, N = ARRAY_SIZE(refs); 731 + struct reftable_ref_record dest = { 0 }; 732 + struct reftable_log_record log_dest = { 0 }; 733 + 734 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 735 + 736 + /* even entries add the refs, odd entries delete them. */ 737 + for (i = 0; i < N; i++) { 738 + const char *buf = "branch"; 739 + refs[i].refname = xstrdup(buf); 740 + refs[i].update_index = i + 1; 741 + if (i % 2 == 0) { 742 + refs[i].value_type = REFTABLE_REF_VAL1; 743 + cl_reftable_set_hash(refs[i].value.val1, i, 744 + REFTABLE_HASH_SHA1); 745 + } 746 + 747 + logs[i].refname = xstrdup(buf); 748 + /* 749 + * update_index is part of the key so should be constant. 750 + * The value itself should be less than the writer's upper 751 + * limit. 
752 + */ 753 + logs[i].update_index = 1; 754 + if (i % 2 == 0) { 755 + logs[i].value_type = REFTABLE_LOG_UPDATE; 756 + cl_reftable_set_hash(logs[i].value.update.new_hash, i, REFTABLE_HASH_SHA1); 757 + logs[i].value.update.email = 758 + xstrdup("identity@invalid"); 759 + } 760 + } 761 + for (i = 0; i < N; i++) 762 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, &refs[i]), 0); 763 + 764 + for (i = 0; i < N; i++) { 765 + struct write_log_arg arg = { 766 + .log = &logs[i], 767 + .update_index = reftable_stack_next_update_index(st), 768 + }; 769 + cl_assert_equal_i(reftable_stack_add(st, 770 + write_test_log, &arg), 0); 771 + } 772 + 773 + cl_assert_equal_i(reftable_stack_read_ref(st, "branch", 774 + &dest), 1); 775 + reftable_ref_record_release(&dest); 776 + 777 + cl_assert_equal_i(reftable_stack_read_log(st, "branch", 778 + &log_dest), 1); 779 + reftable_log_record_release(&log_dest); 780 + 781 + cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); 782 + cl_assert_equal_i(reftable_stack_read_ref(st, "branch", 783 + &dest), 1); 784 + cl_assert_equal_i(reftable_stack_read_log(st, "branch", 785 + &log_dest), 1); 786 + reftable_ref_record_release(&dest); 787 + reftable_log_record_release(&log_dest); 788 + 789 + /* cleanup */ 790 + reftable_stack_destroy(st); 791 + for (i = 0; i < N; i++) { 792 + reftable_ref_record_release(&refs[i]); 793 + reftable_log_record_release(&logs[i]); 794 + } 795 + clear_dir(dir); 796 + } 797 + 798 + void test_reftable_stack__hash_id(void) 799 + { 800 + char *dir = get_tmp_dir(__LINE__); 801 + struct reftable_write_options opts = { 0 }; 802 + struct reftable_stack *st = NULL; 803 + 804 + struct reftable_ref_record ref = { 805 + .refname = (char *) "master", 806 + .value_type = REFTABLE_REF_SYMREF, 807 + .value.symref = (char *) "target", 808 + .update_index = 1, 809 + }; 810 + struct reftable_write_options opts32 = { .hash_id = REFTABLE_HASH_SHA256 }; 811 + struct reftable_stack *st32 = NULL; 812 + struct reftable_write_options 
opts_default = { 0 }; 813 + struct reftable_stack *st_default = NULL; 814 + struct reftable_ref_record dest = { 0 }; 815 + 816 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 817 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 818 + &ref), 0); 819 + 820 + /* can't read it with the wrong hash ID. */ 821 + cl_assert_equal_i(reftable_new_stack(&st32, dir, 822 + &opts32), REFTABLE_FORMAT_ERROR); 823 + 824 + /* check that we can read it back with default opts too. */ 825 + cl_assert_equal_i(reftable_new_stack(&st_default, dir, 826 + &opts_default), 0); 827 + cl_assert_equal_i(reftable_stack_read_ref(st_default, "master", 828 + &dest), 0); 829 + cl_assert(reftable_ref_record_equal(&ref, &dest, 830 + REFTABLE_HASH_SIZE_SHA1) != 0); 831 + reftable_ref_record_release(&dest); 832 + reftable_stack_destroy(st); 833 + reftable_stack_destroy(st_default); 834 + clear_dir(dir); 835 + } 836 + 837 + void test_reftable_stack__suggest_compaction_segment(void) 838 + { 839 + uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 }; 840 + struct segment min = 841 + suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2); 842 + cl_assert_equal_i(min.start, 1); 843 + cl_assert_equal_i(min.end, 10); 844 + } 845 + 846 + void test_reftable_stack__suggest_compaction_segment_nothing(void) 847 + { 848 + uint64_t sizes[] = { 64, 32, 16, 8, 4, 2 }; 849 + struct segment result = 850 + suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2); 851 + cl_assert_equal_i(result.start, result.end); 852 + } 853 + 854 + void test_reftable_stack__reflog_expire(void) 855 + { 856 + char *dir = get_tmp_dir(__LINE__); 857 + struct reftable_write_options opts = { 0 }; 858 + struct reftable_stack *st = NULL; 859 + struct reftable_log_record logs[20] = { 0 }; 860 + size_t i, N = ARRAY_SIZE(logs) - 1; 861 + struct reftable_log_expiry_config expiry = { 862 + .time = 10, 863 + }; 864 + struct reftable_log_record log = { 0 }; 865 + 866 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 
0); 867 + 868 + for (i = 1; i <= N; i++) { 869 + char buf[256]; 870 + snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i); 871 + 872 + logs[i].refname = xstrdup(buf); 873 + logs[i].update_index = i; 874 + logs[i].value_type = REFTABLE_LOG_UPDATE; 875 + logs[i].value.update.time = i; 876 + logs[i].value.update.email = xstrdup("identity@invalid"); 877 + cl_reftable_set_hash(logs[i].value.update.new_hash, i, 878 + REFTABLE_HASH_SHA1); 879 + } 880 + 881 + for (i = 1; i <= N; i++) { 882 + struct write_log_arg arg = { 883 + .log = &logs[i], 884 + .update_index = reftable_stack_next_update_index(st), 885 + }; 886 + cl_assert_equal_i(reftable_stack_add(st, write_test_log, 887 + &arg), 0); 888 + } 889 + 890 + cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); 891 + cl_assert_equal_i(reftable_stack_compact_all(st, &expiry), 0); 892 + cl_assert_equal_i(reftable_stack_read_log(st, logs[9].refname, 893 + &log), 1); 894 + cl_assert_equal_i(reftable_stack_read_log(st, logs[11].refname, 895 + &log), 0); 896 + 897 + expiry.min_update_index = 15; 898 + cl_assert_equal_i(reftable_stack_compact_all(st, &expiry), 0); 899 + cl_assert_equal_i(reftable_stack_read_log(st, logs[14].refname, 900 + &log), 1); 901 + cl_assert_equal_i(reftable_stack_read_log(st, logs[16].refname, 902 + &log), 0); 903 + 904 + /* cleanup */ 905 + reftable_stack_destroy(st); 906 + for (i = 0; i <= N; i++) 907 + reftable_log_record_release(&logs[i]); 908 + clear_dir(dir); 909 + reftable_log_record_release(&log); 910 + } 911 + 912 + static int write_nothing(struct reftable_writer *wr, void *arg UNUSED) 913 + { 914 + cl_assert_equal_i(reftable_writer_set_limits(wr, 1, 1), 0); 915 + return 0; 916 + } 917 + 918 + void test_reftable_stack__empty_add(void) 919 + { 920 + struct reftable_write_options opts = { 0 }; 921 + struct reftable_stack *st = NULL; 922 + char *dir = get_tmp_dir(__LINE__); 923 + struct reftable_stack *st2 = NULL; 924 + 925 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 
926 + cl_assert_equal_i(reftable_stack_add(st, write_nothing, 927 + NULL), 0); 928 + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); 929 + clear_dir(dir); 930 + reftable_stack_destroy(st); 931 + reftable_stack_destroy(st2); 932 + } 933 + 934 + static int fastlogN(uint64_t sz, uint64_t N) 935 + { 936 + int l = 0; 937 + if (sz == 0) 938 + return 0; 939 + for (; sz; sz /= N) 940 + l++; 941 + return l - 1; 942 + } 943 + 944 + void test_reftable_stack__auto_compaction(void) 945 + { 946 + struct reftable_write_options opts = { 947 + .disable_auto_compact = 1, 948 + }; 949 + struct reftable_stack *st = NULL; 950 + char *dir = get_tmp_dir(__LINE__); 951 + size_t i, N = 100; 952 + int err; 953 + 954 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 955 + 956 + for (i = 0; i < N; i++) { 957 + char name[100]; 958 + struct reftable_ref_record ref = { 959 + .refname = name, 960 + .update_index = reftable_stack_next_update_index(st), 961 + .value_type = REFTABLE_REF_SYMREF, 962 + .value.symref = (char *) "master", 963 + }; 964 + snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 965 + 966 + err = reftable_stack_add(st, write_test_ref, &ref); 967 + cl_assert(!err); 968 + 969 + err = reftable_stack_auto_compact(st); 970 + cl_assert(!err); 971 + cl_assert(i < 2 || st->merged->tables_len < 2 * fastlogN(i, 2)); 972 + } 973 + 974 + cl_assert(reftable_stack_compaction_stats(st)->entries_written < 975 + (uint64_t)(N * fastlogN(N, 2))); 976 + 977 + reftable_stack_destroy(st); 978 + clear_dir(dir); 979 + } 980 + 981 + void test_reftable_stack__auto_compaction_factor(void) 982 + { 983 + struct reftable_write_options opts = { 984 + .auto_compaction_factor = 5, 985 + }; 986 + struct reftable_stack *st = NULL; 987 + char *dir = get_tmp_dir(__LINE__); 988 + size_t N = 100; 989 + int err; 990 + 991 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 992 + 993 + for (size_t i = 0; i < N; i++) { 994 + char name[20]; 995 + struct reftable_ref_record ref 
= { 996 + .refname = name, 997 + .update_index = reftable_stack_next_update_index(st), 998 + .value_type = REFTABLE_REF_VAL1, 999 + }; 1000 + xsnprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 1001 + 1002 + err = reftable_stack_add(st, &write_test_ref, &ref); 1003 + cl_assert(!err); 1004 + 1005 + cl_assert(i < 5 || st->merged->tables_len < 5 * fastlogN(i, 5)); 1006 + } 1007 + 1008 + reftable_stack_destroy(st); 1009 + clear_dir(dir); 1010 + } 1011 + 1012 + void test_reftable_stack__auto_compaction_with_locked_tables(void) 1013 + { 1014 + struct reftable_write_options opts = { 1015 + .disable_auto_compact = 1, 1016 + }; 1017 + struct reftable_stack *st = NULL; 1018 + struct reftable_buf buf = REFTABLE_BUF_INIT; 1019 + char *dir = get_tmp_dir(__LINE__); 1020 + int err; 1021 + 1022 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 1023 + 1024 + write_n_ref_tables(st, 5); 1025 + cl_assert_equal_i(st->merged->tables_len, 5); 1026 + 1027 + /* 1028 + * Given that all tables we have written should be roughly the same 1029 + * size, we expect that auto-compaction will want to compact all of the 1030 + * tables. Locking any of the tables will keep it from doing so. 1031 + */ 1032 + cl_assert(!reftable_buf_addstr(&buf, dir)); 1033 + cl_assert(!reftable_buf_addstr(&buf, "/")); 1034 + cl_assert(!reftable_buf_addstr(&buf, st->tables[2]->name)); 1035 + cl_assert(!reftable_buf_addstr(&buf, ".lock")); 1036 + write_file_buf(buf.buf, "", 0); 1037 + 1038 + /* 1039 + * When parts of the stack are locked, then auto-compaction does a best 1040 + * effort compaction of those tables which aren't locked. So while this 1041 + * would in theory compact all tables, due to the preexisting lock we 1042 + * only compact the newest two tables. 
1043 + */ 1044 + err = reftable_stack_auto_compact(st); 1045 + cl_assert(!err); 1046 + cl_assert_equal_i(st->stats.failures, 0); 1047 + cl_assert_equal_i(st->merged->tables_len, 4); 1048 + 1049 + reftable_stack_destroy(st); 1050 + reftable_buf_release(&buf); 1051 + clear_dir(dir); 1052 + } 1053 + 1054 + void test_reftable_stack__add_performs_auto_compaction(void) 1055 + { 1056 + struct reftable_write_options opts = { 0 }; 1057 + struct reftable_stack *st = NULL; 1058 + char *dir = get_tmp_dir(__LINE__); 1059 + size_t i, n = 20; 1060 + 1061 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 1062 + 1063 + for (i = 0; i <= n; i++) { 1064 + struct reftable_ref_record ref = { 1065 + .update_index = reftable_stack_next_update_index(st), 1066 + .value_type = REFTABLE_REF_SYMREF, 1067 + .value.symref = (char *) "master", 1068 + }; 1069 + char buf[128]; 1070 + 1071 + /* 1072 + * Disable auto-compaction for all but the last runs. Like this 1073 + * we can ensure that we indeed honor this setting and have 1074 + * better control over when exactly auto compaction runs. 1075 + */ 1076 + st->opts.disable_auto_compact = i != n; 1077 + 1078 + snprintf(buf, sizeof(buf), "branch-%04"PRIuMAX, (uintmax_t)i); 1079 + ref.refname = buf; 1080 + 1081 + cl_assert_equal_i(reftable_stack_add(st, 1082 + write_test_ref, &ref), 0); 1083 + 1084 + /* 1085 + * The stack length should grow continuously for all runs where 1086 + * auto compaction is disabled. When enabled, we should merge 1087 + * all tables in the stack. 
1088 + */ 1089 + if (i != n) 1090 + cl_assert_equal_i(st->merged->tables_len, i + 1); 1091 + else 1092 + cl_assert_equal_i(st->merged->tables_len, 1); 1093 + } 1094 + 1095 + reftable_stack_destroy(st); 1096 + clear_dir(dir); 1097 + } 1098 + 1099 + void test_reftable_stack__compaction_with_locked_tables(void) 1100 + { 1101 + struct reftable_write_options opts = { 1102 + .disable_auto_compact = 1, 1103 + }; 1104 + struct reftable_stack *st = NULL; 1105 + struct reftable_buf buf = REFTABLE_BUF_INIT; 1106 + char *dir = get_tmp_dir(__LINE__); 1107 + int err; 1108 + 1109 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 1110 + 1111 + write_n_ref_tables(st, 3); 1112 + cl_assert_equal_i(st->merged->tables_len, 3); 1113 + 1114 + /* Lock one of the tables that we're about to compact. */ 1115 + cl_assert(!reftable_buf_addstr(&buf, dir)); 1116 + cl_assert(!reftable_buf_addstr(&buf, "/")); 1117 + cl_assert(!reftable_buf_addstr(&buf, st->tables[1]->name)); 1118 + cl_assert(!reftable_buf_addstr(&buf, ".lock")); 1119 + write_file_buf(buf.buf, "", 0); 1120 + 1121 + /* 1122 + * Compaction is expected to fail given that we were not able to 1123 + * compact all tables. 
1124 + */ 1125 + err = reftable_stack_compact_all(st, NULL); 1126 + cl_assert_equal_i(err, REFTABLE_LOCK_ERROR); 1127 + cl_assert_equal_i(st->stats.failures, 1); 1128 + cl_assert_equal_i(st->merged->tables_len, 3); 1129 + 1130 + reftable_stack_destroy(st); 1131 + reftable_buf_release(&buf); 1132 + clear_dir(dir); 1133 + } 1134 + 1135 + void test_reftable_stack__compaction_concurrent(void) 1136 + { 1137 + struct reftable_write_options opts = { 0 }; 1138 + struct reftable_stack *st1 = NULL, *st2 = NULL; 1139 + char *dir = get_tmp_dir(__LINE__); 1140 + 1141 + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); 1142 + write_n_ref_tables(st1, 3); 1143 + 1144 + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); 1145 + cl_assert_equal_i(reftable_stack_compact_all(st1, NULL), 0); 1146 + 1147 + reftable_stack_destroy(st1); 1148 + reftable_stack_destroy(st2); 1149 + 1150 + cl_assert_equal_i(count_dir_entries(dir), 2); 1151 + clear_dir(dir); 1152 + } 1153 + 1154 + static void unclean_stack_close(struct reftable_stack *st) 1155 + { 1156 + /* break abstraction boundary to simulate unclean shutdown. 
*/ 1157 + for (size_t i = 0; i < st->tables_len; i++) 1158 + reftable_table_decref(st->tables[i]); 1159 + st->tables_len = 0; 1160 + REFTABLE_FREE_AND_NULL(st->tables); 1161 + } 1162 + 1163 + void test_reftable_stack__compaction_concurrent_clean(void) 1164 + { 1165 + struct reftable_write_options opts = { 0 }; 1166 + struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL; 1167 + char *dir = get_tmp_dir(__LINE__); 1168 + 1169 + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); 1170 + write_n_ref_tables(st1, 3); 1171 + 1172 + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); 1173 + cl_assert_equal_i(reftable_stack_compact_all(st1, NULL), 0); 1174 + 1175 + unclean_stack_close(st1); 1176 + unclean_stack_close(st2); 1177 + 1178 + cl_assert_equal_i(reftable_new_stack(&st3, dir, &opts), 0); 1179 + cl_assert_equal_i(reftable_stack_clean(st3), 0); 1180 + cl_assert_equal_i(count_dir_entries(dir), 2); 1181 + 1182 + reftable_stack_destroy(st1); 1183 + reftable_stack_destroy(st2); 1184 + reftable_stack_destroy(st3); 1185 + 1186 + clear_dir(dir); 1187 + } 1188 + 1189 + void test_reftable_stack__read_across_reload(void) 1190 + { 1191 + struct reftable_write_options opts = { 0 }; 1192 + struct reftable_stack *st1 = NULL, *st2 = NULL; 1193 + struct reftable_ref_record rec = { 0 }; 1194 + struct reftable_iterator it = { 0 }; 1195 + char *dir = get_tmp_dir(__LINE__); 1196 + int err; 1197 + 1198 + /* Create a first stack and set up an iterator for it. */ 1199 + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); 1200 + write_n_ref_tables(st1, 2); 1201 + cl_assert_equal_i(st1->merged->tables_len, 2); 1202 + reftable_stack_init_ref_iterator(st1, &it); 1203 + cl_assert_equal_i(reftable_iterator_seek_ref(&it, ""), 0); 1204 + 1205 + /* Set up a second stack for the same directory and compact it. 
*/ 1206 + err = reftable_new_stack(&st2, dir, &opts); 1207 + cl_assert(!err); 1208 + cl_assert_equal_i(st2->merged->tables_len, 2); 1209 + err = reftable_stack_compact_all(st2, NULL); 1210 + cl_assert(!err); 1211 + cl_assert_equal_i(st2->merged->tables_len, 1); 1212 + 1213 + /* 1214 + * Verify that we can continue to use the old iterator even after we 1215 + * have reloaded its stack. 1216 + */ 1217 + err = reftable_stack_reload(st1); 1218 + cl_assert(!err); 1219 + cl_assert_equal_i(st1->merged->tables_len, 1); 1220 + err = reftable_iterator_next_ref(&it, &rec); 1221 + cl_assert(!err); 1222 + cl_assert_equal_s(rec.refname, "refs/heads/branch-0000"); 1223 + err = reftable_iterator_next_ref(&it, &rec); 1224 + cl_assert(!err); 1225 + cl_assert_equal_s(rec.refname, "refs/heads/branch-0001"); 1226 + err = reftable_iterator_next_ref(&it, &rec); 1227 + cl_assert(err > 0); 1228 + 1229 + reftable_ref_record_release(&rec); 1230 + reftable_iterator_destroy(&it); 1231 + reftable_stack_destroy(st1); 1232 + reftable_stack_destroy(st2); 1233 + clear_dir(dir); 1234 + } 1235 + 1236 + void test_reftable_stack__reload_with_missing_table(void) 1237 + { 1238 + struct reftable_write_options opts = { 0 }; 1239 + struct reftable_stack *st = NULL; 1240 + struct reftable_ref_record rec = { 0 }; 1241 + struct reftable_iterator it = { 0 }; 1242 + struct reftable_buf table_path = REFTABLE_BUF_INIT, content = REFTABLE_BUF_INIT; 1243 + char *dir = get_tmp_dir(__LINE__); 1244 + int err; 1245 + 1246 + /* Create a first stack and set up an iterator for it. */ 1247 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 1248 + write_n_ref_tables(st, 2); 1249 + cl_assert_equal_i(st->merged->tables_len, 2); 1250 + reftable_stack_init_ref_iterator(st, &it); 1251 + cl_assert_equal_i(reftable_iterator_seek_ref(&it, ""), 0); 1252 + 1253 + /* 1254 + * Update the tables.list file with some garbage data, while reusing 1255 + * our old tables. 
This should trigger a partial reload of the stack, 1256 + * where we try to reuse our old tables. 1257 + */ 1258 + cl_assert(!reftable_buf_addstr(&content, st->tables[0]->name)); 1259 + cl_assert(!reftable_buf_addstr(&content, "\n")); 1260 + cl_assert(!reftable_buf_addstr(&content, st->tables[1]->name)); 1261 + cl_assert(!reftable_buf_addstr(&content, "\n")); 1262 + cl_assert(!reftable_buf_addstr(&content, "garbage\n")); 1263 + cl_assert(!reftable_buf_addstr(&table_path, st->list_file)); 1264 + cl_assert(!reftable_buf_addstr(&table_path, ".lock")); 1265 + write_file_buf(table_path.buf, content.buf, content.len); 1266 + cl_assert_equal_i(rename(table_path.buf, st->list_file), 0); 1267 + 1268 + err = reftable_stack_reload(st); 1269 + cl_assert_equal_i(err, -4); 1270 + cl_assert_equal_i(st->merged->tables_len, 2); 1271 + 1272 + /* 1273 + * Even though the reload has failed, we should be able to continue 1274 + * using the iterator. 1275 + */ 1276 + cl_assert_equal_i(reftable_iterator_next_ref(&it, &rec), 0); 1277 + cl_assert_equal_s(rec.refname, "refs/heads/branch-0000"); 1278 + cl_assert_equal_i(reftable_iterator_next_ref(&it, &rec), 0); 1279 + cl_assert_equal_s(rec.refname, "refs/heads/branch-0001"); 1280 + cl_assert(reftable_iterator_next_ref(&it, &rec) > 0); 1281 + 1282 + reftable_ref_record_release(&rec); 1283 + reftable_iterator_destroy(&it); 1284 + reftable_stack_destroy(st); 1285 + reftable_buf_release(&table_path); 1286 + reftable_buf_release(&content); 1287 + clear_dir(dir); 1288 + } 1289 + 1290 + static int write_limits_after_ref(struct reftable_writer *wr, void *arg) 1291 + { 1292 + struct reftable_ref_record *ref = arg; 1293 + cl_assert_equal_i(reftable_writer_set_limits(wr, 1294 + ref->update_index, ref->update_index), 0); 1295 + cl_assert_equal_i(reftable_writer_add_ref(wr, ref), 0); 1296 + return reftable_writer_set_limits(wr, ref->update_index, ref->update_index); 1297 + } 1298 + 1299 + void test_reftable_stack__invalid_limit_updates(void) 1300 + { 
1301 + struct reftable_ref_record ref = { 1302 + .refname = (char *) "HEAD", 1303 + .update_index = 1, 1304 + .value_type = REFTABLE_REF_SYMREF, 1305 + .value.symref = (char *) "master", 1306 + }; 1307 + struct reftable_write_options opts = { 1308 + .default_permissions = 0660, 1309 + }; 1310 + struct reftable_addition *add = NULL; 1311 + char *dir = get_tmp_dir(__LINE__); 1312 + struct reftable_stack *st = NULL; 1313 + 1314 + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 1315 + 1316 + reftable_addition_destroy(add); 1317 + 1318 + cl_assert_equal_i(reftable_stack_new_addition(&add, st, 0), 0); 1319 + 1320 + /* 1321 + * write_limits_after_ref also updates the update indexes after adding 1322 + * the record. This should cause an err to be returned, since the limits 1323 + * must be set at the start. 1324 + */ 1325 + cl_assert_equal_i(reftable_addition_add(add, 1326 + write_limits_after_ref, &ref), REFTABLE_API_ERROR); 1327 + 1328 + reftable_addition_destroy(add); 1329 + reftable_stack_destroy(st); 1330 + clear_dir(dir); 1331 + }