/* Unit tests for the reftable stack implementation (Git fork). */
1/*
2Copyright 2020 Google LLC
3
4Use of this source code is governed by a BSD-style
5license that can be found in the LICENSE file or at
6https://developers.google.com/open-source/licenses/bsd
7*/
8
9#define DISABLE_SIGN_COMPARE_WARNINGS
10
11#include "unit-test.h"
12#include "dir.h"
13#include "lib-reftable.h"
14#include "reftable/merged.h"
15#include "reftable/reftable-error.h"
16#include "reftable/stack.h"
17#include "reftable/table.h"
18#include "strbuf.h"
19#include "tempfile.h"
20#include <dirent.h>
21
22static void clear_dir(const char *dirname)
23{
24 struct strbuf path = REFTABLE_BUF_INIT;
25 strbuf_addstr(&path, dirname);
26 remove_dir_recursively(&path, 0);
27 strbuf_release(&path);
28}
29
30static int count_dir_entries(const char *dirname)
31{
32 DIR *dir = opendir(dirname);
33 int len = 0;
34 struct dirent *d;
35 if (!dir)
36 return 0;
37
38 while ((d = readdir(dir))) {
39 /*
40 * Besides skipping over "." and "..", we also need to
41 * skip over other files that have a leading ".". This
42 * is due to behaviour of NFS, which will rename files
43 * to ".nfs*" to emulate delete-on-last-close.
44 *
45 * In any case this should be fine as the reftable
46 * library will never write files with leading dots
47 * anyway.
48 */
49 if (starts_with(d->d_name, "."))
50 continue;
51 len++;
52 }
53 closedir(dir);
54 return len;
55}
56
57/*
58 * Work linenumber into the tempdir, so we can see which tests forget to
59 * cleanup.
60 */
/*
 * Build a mkdtemp()/mks_tempfile() template located in $TMPDIR (or
 * /tmp when unset). The caller's line number is baked into the name so
 * that leftover directories can be traced back to tests which forget
 * to clean up after themselves.
 *
 * Returns a pointer to a static buffer that is overwritten on each
 * call.
 */
static char *get_tmp_template(int linenumber)
{
	static char template[1024];
	const char *base = getenv("TMPDIR");

	if (!base)
		base = "/tmp";
	snprintf(template, sizeof(template) - 1, "%s/stack_test-%d.XXXXXX",
		 base, linenumber);
	return template;
}
69
/*
 * Create a fresh temporary directory and return its path (a static
 * buffer, see get_tmp_template()). The test's line number becomes part
 * of the directory name. Aborts the test when mkdtemp() fails.
 */
static char *get_tmp_dir(int linenumber)
{
	char *path = get_tmp_template(linenumber);
	cl_assert(mkdtemp(path) != NULL);
	return path;
}
76
/*
 * read_lines() must split a file's contents on newlines, dropping
 * empty lines, and return a NULL-terminated vector of strings.
 */
void test_reftable_stack__read_file(void)
{
	char *fn = get_tmp_template(__LINE__);
	struct tempfile *tmp = mks_tempfile(fn);
	int fd = get_tempfile_fd(tmp);
	/* Note the empty line: it must not show up in the result. */
	char out[1024] = "line1\n\nline2\nline3";
	int n, err;
	char **names = NULL;
	const char *want[] = { "line1", "line2", "line3" };

	cl_assert(fd > 0);
	n = write_in_full(fd, out, strlen(out));
	cl_assert_equal_i(n, strlen(out));
	err = close(fd);
	cl_assert(err >= 0);

	err = read_lines(fn, &names);
	cl_assert(!err);

	/* The vector is NULL-terminated, so this visits every entry. */
	for (size_t i = 0; names[i]; i++)
		cl_assert_equal_s(want[i], names[i]);
	free_names(names);
	(void) remove(fn);
	delete_tempfile(&tmp);
}
102
103static int write_test_ref(struct reftable_writer *wr, void *arg)
104{
105 struct reftable_ref_record *ref = arg;
106 cl_assert_equal_i(reftable_writer_set_limits(wr,
107 ref->update_index, ref->update_index), 0);
108 return reftable_writer_add_ref(wr, ref);
109}
110
/*
 * Append "n" single-ref tables to the stack, one reftable_stack_add()
 * call per ref, with auto-compaction temporarily disabled so that the
 * stack grows by exactly "n" tables. The previous auto-compaction
 * setting is restored before returning.
 */
static void write_n_ref_tables(struct reftable_stack *st,
			       size_t n)
{
	int disable_auto_compact;

	/* Save the caller's setting so it can be restored below. */
	disable_auto_compact = st->opts.disable_auto_compact;
	st->opts.disable_auto_compact = 1;

	for (size_t i = 0; i < n; i++) {
		struct reftable_ref_record ref = {
			.update_index = reftable_stack_next_update_index(st),
			.value_type = REFTABLE_REF_VAL1,
		};
		char buf[128];

		snprintf(buf, sizeof(buf), "refs/heads/branch-%04"PRIuMAX, (uintmax_t)i);
		ref.refname = buf;
		/* Derive a unique object ID from the loop counter. */
		cl_reftable_set_hash(ref.value.val1, i, REFTABLE_HASH_SHA1);

		cl_assert_equal_i(reftable_stack_add(st,
						     &write_test_ref, &ref, 0), 0);
	}

	st->opts.disable_auto_compact = disable_auto_compact;
}
136
/*
 * Argument bundle for write_test_log(): the log record to write plus
 * the update_index to use for the writer's limits.
 */
struct write_log_arg {
	struct reftable_log_record *log;   /* record to append */
	uint64_t update_index;             /* writer's min/max update index */
};
141
142static int write_test_log(struct reftable_writer *wr, void *arg)
143{
144 struct write_log_arg *wla = arg;
145
146 cl_assert_equal_i(reftable_writer_set_limits(wr,
147 wla->update_index,
148 wla->update_index), 0);
149 return reftable_writer_add_log(wr, wla->log);
150}
151
/*
 * Adding a single ref to a fresh stack must make it readable again,
 * and both "tables.list" and the table file itself must be created
 * with the configured default permissions (0660) regardless of the
 * process umask.
 */
void test_reftable_stack__add_one(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_buf scratch = REFTABLE_BUF_INIT;
	/* Tighten the umask to verify default_permissions wins over it. */
	int mask = umask(002);
	struct reftable_write_options opts = {
		.default_permissions = 0660,
	};
	struct reftable_stack *st = NULL;
	struct reftable_ref_record ref = {
		.refname = (char *) "HEAD",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	struct reftable_ref_record dest = { 0 };
	struct stat stat_result = { 0 };
	int err;

	err = reftable_new_stack(&st, dir, &opts);
	cl_assert(!err);

	err = reftable_stack_add(st, write_test_ref, &ref, 0);
	cl_assert(!err);

	/* Reading the ref back must yield an identical record. */
	err = reftable_stack_read_ref(st, ref.refname, &dest);
	cl_assert(!err);
	cl_assert(reftable_ref_record_equal(&ref, &dest,
					    REFTABLE_HASH_SIZE_SHA1));
	cl_assert(st->tables_len > 0);

/* Windows has no POSIX permission bits, so skip the mode checks there. */
#ifndef GIT_WINDOWS_NATIVE
	cl_assert_equal_i(reftable_buf_addstr(&scratch, dir), 0);
	cl_assert_equal_i(reftable_buf_addstr(&scratch,
					      "/tables.list"), 0);
	cl_assert_equal_i(stat(scratch.buf, &stat_result), 0);
	cl_assert_equal_i((stat_result.st_mode & 0777),
			  opts.default_permissions);

	reftable_buf_reset(&scratch);
	cl_assert_equal_i(reftable_buf_addstr(&scratch, dir), 0);
	cl_assert_equal_i(reftable_buf_addstr(&scratch, "/"), 0);
	/* do not try at home; not an external API for reftable. */
	cl_assert(!reftable_buf_addstr(&scratch, st->tables[0]->name));
	err = stat(scratch.buf, &stat_result);
	cl_assert(!err);
	cl_assert_equal_i((stat_result.st_mode & 0777),
			  opts.default_permissions);
#else
	(void) stat_result;
#endif

	reftable_ref_record_release(&dest);
	reftable_stack_destroy(st);
	reftable_buf_release(&scratch);
	clear_dir(dir);
	umask(mask);
}
210
/*
 * Two stack handles on the same directory simulate concurrent
 * processes: a write through one handle must make the other one
 * outdated until it reloads.
 */
void test_reftable_stack__uptodate(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st1 = NULL;
	struct reftable_stack *st2 = NULL;
	char *dir = get_tmp_dir(__LINE__);

	struct reftable_ref_record ref1 = {
		.refname = (char *) "HEAD",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	struct reftable_ref_record ref2 = {
		.refname = (char *) "branch2",
		.update_index = 2,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};


	/* simulate multi-process access to the same stack
	   by creating two stacks for the same directory.
	 */
	cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0);
	cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0);
	cl_assert_equal_i(reftable_stack_add(st1, write_test_ref,
					     &ref1, 0), 0);
	/* st2 has not seen st1's write, so its add must be rejected... */
	cl_assert_equal_i(reftable_stack_add(st2, write_test_ref,
					     &ref2, 0), REFTABLE_OUTDATED_ERROR);
	/* ...until it reloads the on-disk state. */
	cl_assert_equal_i(reftable_stack_reload(st2), 0);
	cl_assert_equal_i(reftable_stack_add(st2, write_test_ref,
					     &ref2, 0), 0);
	reftable_stack_destroy(st1);
	reftable_stack_destroy(st2);
	clear_dir(dir);
}
248
/*
 * Exercise the transaction API: destroying a NULL addition must be a
 * no-op, and a committed addition must make the written ref visible to
 * readers of the stack.
 */
void test_reftable_stack__transaction_api(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	struct reftable_addition *add = NULL;

	struct reftable_ref_record ref = {
		.refname = (char *) "HEAD",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	struct reftable_ref_record dest = { 0 };

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	/* "add" is still NULL here: destroy must tolerate that. */
	reftable_addition_destroy(add);

	cl_assert_equal_i(reftable_stack_new_addition(&add, st, 0), 0);
	cl_assert_equal_i(reftable_addition_add(add, write_test_ref,
						&ref), 0);
	cl_assert_equal_i(reftable_addition_commit(add), 0);

	reftable_addition_destroy(add);

	/* The committed ref must now round-trip through the stack. */
	cl_assert_equal_i(reftable_stack_read_ref(st, ref.refname,
						  &dest), 0);
	cl_assert_equal_i(REFTABLE_REF_SYMREF, dest.value_type);
	cl_assert(reftable_ref_record_equal(&ref, &dest,
					    REFTABLE_HASH_SIZE_SHA1) != 0);

	reftable_ref_record_release(&dest);
	reftable_stack_destroy(st);
	clear_dir(dir);
}
285
/*
 * A stack that raced against another writer must fail to start a
 * transaction unless REFTABLE_STACK_NEW_ADDITION_RELOAD is given, in
 * which case the stack reloads transparently and both refs become
 * visible afterwards.
 */
void test_reftable_stack__transaction_with_reload(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_stack *st1 = NULL, *st2 = NULL;
	struct reftable_addition *add = NULL;
	struct reftable_ref_record refs[2] = {
		{
			.refname = (char *) "refs/heads/a",
			.update_index = 1,
			.value_type = REFTABLE_REF_VAL1,
			.value.val1 = { '1' },
		},
		{
			.refname = (char *) "refs/heads/b",
			.update_index = 2,
			.value_type = REFTABLE_REF_VAL1,
			.value.val1 = { '1' },
		},
	};
	struct reftable_ref_record ref = { 0 };

	cl_assert_equal_i(reftable_new_stack(&st1, dir, NULL), 0);
	cl_assert_equal_i(reftable_new_stack(&st2, dir, NULL), 0);
	cl_assert_equal_i(reftable_stack_new_addition(&add, st1, 0), 0);
	cl_assert_equal_i(reftable_addition_add(add, write_test_ref,
						&refs[0]), 0);
	cl_assert_equal_i(reftable_addition_commit(add), 0);
	reftable_addition_destroy(add);

	/*
	 * The second stack is now outdated, which we should notice. We do not
	 * create the addition and lock the stack by default, but allow the
	 * reload to happen when REFTABLE_STACK_NEW_ADDITION_RELOAD is set.
	 */
	cl_assert_equal_i(reftable_stack_new_addition(&add, st2, 0),
			  REFTABLE_OUTDATED_ERROR);
	cl_assert_equal_i(reftable_stack_new_addition(&add, st2,
						      REFTABLE_STACK_NEW_ADDITION_RELOAD), 0);
	cl_assert_equal_i(reftable_addition_add(add, write_test_ref,
						&refs[1]), 0);
	cl_assert_equal_i(reftable_addition_commit(add), 0);
	reftable_addition_destroy(add);

	/* Both refs must now be readable through the reloaded stack. */
	for (size_t i = 0; i < ARRAY_SIZE(refs); i++) {
		cl_assert_equal_i(reftable_stack_read_ref(st2,
							  refs[i].refname, &ref) , 0);
		cl_assert(reftable_ref_record_equal(&refs[i], &ref,
						    REFTABLE_HASH_SIZE_SHA1) != 0);
	}

	reftable_ref_record_release(&ref);
	reftable_stack_destroy(st1);
	reftable_stack_destroy(st2);
	clear_dir(dir);
}
341
/*
 * Commits made through the transaction API must honor the stack's
 * auto-compaction setting: the stack grows by one table per commit
 * while compaction is disabled and collapses to a single table once it
 * is enabled again.
 */
void test_reftable_stack__transaction_api_performs_auto_compaction(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = {0};
	struct reftable_addition *add = NULL;
	struct reftable_stack *st = NULL;
	size_t n = 20;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	for (size_t i = 0; i <= n; i++) {
		struct reftable_ref_record ref = {
			.update_index = reftable_stack_next_update_index(st),
			.value_type = REFTABLE_REF_SYMREF,
			.value.symref = (char *) "master",
		};
		char name[100];

		snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i);
		ref.refname = name;

		/*
		 * Disable auto-compaction for all but the last runs. Like this
		 * we can ensure that we indeed honor this setting and have
		 * better control over when exactly auto compaction runs.
		 */
		st->opts.disable_auto_compact = i != n;

		cl_assert_equal_i(reftable_stack_new_addition(&add,
							      st, 0), 0);
		cl_assert_equal_i(reftable_addition_add(add,
							write_test_ref, &ref), 0);
		cl_assert_equal_i(reftable_addition_commit(add), 0);

		reftable_addition_destroy(add);

		/*
		 * The stack length should grow continuously for all runs where
		 * auto compaction is disabled. When enabled, we should merge
		 * all tables in the stack.
		 */
		if (i != n)
			cl_assert_equal_i(st->merged->tables_len, i + 1);
		else
			cl_assert_equal_i(st->merged->tables_len, 1);
	}

	reftable_stack_destroy(st);
	clear_dir(dir);
}
392
/*
 * A failing auto-compaction (here: provoked by a stale ".lock" file
 * for the first table) must not cause the triggering addition to fail;
 * it must only be recorded in the compaction statistics.
 */
void test_reftable_stack__auto_compaction_fails_gracefully(void)
{
	struct reftable_ref_record ref = {
		.refname = (char *) "refs/heads/master",
		.update_index = 1,
		.value_type = REFTABLE_REF_VAL1,
		.value.val1 = {0x01},
	};
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st;
	struct reftable_buf table_path = REFTABLE_BUF_INIT;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
	cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
					     &ref, 0), 0);
	cl_assert_equal_i(st->merged->tables_len, 1);
	/* No compaction has been attempted yet. */
	cl_assert_equal_i(st->stats.attempts, 0);
	cl_assert_equal_i(st->stats.failures, 0);

	/*
	 * Lock the newly written table such that it cannot be compacted.
	 * Adding a new table to the stack should not be impacted by this, even
	 * though auto-compaction will now fail.
	 */
	cl_assert(!reftable_buf_addstr(&table_path, dir));
	cl_assert(!reftable_buf_addstr(&table_path, "/"));
	cl_assert(!reftable_buf_addstr(&table_path,
				       st->tables[0]->name));
	cl_assert(!reftable_buf_addstr(&table_path, ".lock"));
	write_file_buf(table_path.buf, "", 0);

	ref.update_index = 2;
	err = reftable_stack_add(st, write_test_ref, &ref, 0);
	cl_assert(!err);
	/* The add succeeded; the compaction attempt failed and was counted. */
	cl_assert_equal_i(st->merged->tables_len, 2);
	cl_assert_equal_i(st->stats.attempts, 1);
	cl_assert_equal_i(st->stats.failures, 1);

	reftable_stack_destroy(st);
	reftable_buf_release(&table_path);
	clear_dir(dir);
}
437
438static int write_error(struct reftable_writer *wr UNUSED, void *arg)
439{
440 return *((int *)arg);
441}
442
/*
 * Adding a record whose update_index does not exceed the stack's
 * current update index must be rejected with REFTABLE_API_ERROR.
 */
void test_reftable_stack__update_index_check(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	struct reftable_ref_record ref1 = {
		.refname = (char *) "name1",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	/* Same update_index as ref1, which makes the second add invalid. */
	struct reftable_ref_record ref2 = {
		.refname = (char *) "name2",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
	cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
					     &ref1, 0), 0);
	cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
					     &ref2, 0), REFTABLE_API_ERROR);
	reftable_stack_destroy(st);
	clear_dir(dir);
}
469
/*
 * Errors returned by the write callback must be propagated verbatim by
 * reftable_stack_add(). Iterate over every negative error code down to
 * (but excluding) REFTABLE_EMPTY_TABLE_ERROR.
 */
void test_reftable_stack__lock_failure(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	int i;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
	for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--)
		cl_assert_equal_i(reftable_stack_add(st, write_error,
						     &i, 0), i);

	reftable_stack_destroy(st);
	clear_dir(dir);
}
485
/*
 * End-to-end test: write several refs and logs, compact the whole
 * stack, and verify that all records survive compaction and that the
 * resulting files carry the configured default permissions.
 */
void test_reftable_stack__add(void)
{
	struct reftable_write_options opts = {
		.exact_log_message = 1,
		.default_permissions = 0660,
		.disable_auto_compact = 1,
	};
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_ref_record refs[2] = { 0 };
	struct reftable_log_record logs[2] = { 0 };
	struct reftable_buf path = REFTABLE_BUF_INIT;
	struct stat stat_result;
	size_t i, N = ARRAY_SIZE(refs);
	int err = 0;

	err = reftable_new_stack(&st, dir, &opts);
	cl_assert(!err);

	/* Prepare matching ref and log records; logs use later indices. */
	for (i = 0; i < N; i++) {
		char buf[256];
		snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i);
		refs[i].refname = xstrdup(buf);
		refs[i].update_index = i + 1;
		refs[i].value_type = REFTABLE_REF_VAL1;
		cl_reftable_set_hash(refs[i].value.val1, i,
				     REFTABLE_HASH_SHA1);

		logs[i].refname = xstrdup(buf);
		logs[i].update_index = N + i + 1;
		logs[i].value_type = REFTABLE_LOG_UPDATE;
		logs[i].value.update.email = xstrdup("identity@invalid");
		cl_reftable_set_hash(logs[i].value.update.new_hash, i,
				     REFTABLE_HASH_SHA1);
	}

	for (i = 0; i < N; i++)
		cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
						     &refs[i], 0), 0);

	for (i = 0; i < N; i++) {
		struct write_log_arg arg = {
			.log = &logs[i],
			.update_index = reftable_stack_next_update_index(st),
		};
		cl_assert_equal_i(reftable_stack_add(st, write_test_log,
						     &arg, 0), 0);
	}

	/* Merge everything into a single table. */
	cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0);

	/* All refs must still be readable after compaction... */
	for (i = 0; i < N; i++) {
		struct reftable_ref_record dest = { 0 };

		cl_assert_equal_i(reftable_stack_read_ref(st,
							  refs[i].refname, &dest), 0);
		cl_assert(reftable_ref_record_equal(&dest, refs + i,
						    REFTABLE_HASH_SIZE_SHA1) != 0);
		reftable_ref_record_release(&dest);
	}

	/* ...and so must all logs. */
	for (i = 0; i < N; i++) {
		struct reftable_log_record dest = { 0 };
		cl_assert_equal_i(reftable_stack_read_log(st,
							  refs[i].refname, &dest), 0);
		cl_assert(reftable_log_record_equal(&dest, logs + i,
						    REFTABLE_HASH_SIZE_SHA1) != 0);
		reftable_log_record_release(&dest);
	}

/* Windows has no POSIX permission bits, so skip the mode checks there. */
#ifndef GIT_WINDOWS_NATIVE
	cl_assert_equal_i(reftable_buf_addstr(&path, dir), 0);
	cl_assert_equal_i(reftable_buf_addstr(&path, "/tables.list"), 0);
	cl_assert_equal_i(stat(path.buf, &stat_result), 0);
	cl_assert_equal_i((stat_result.st_mode & 0777), opts.default_permissions);

	reftable_buf_reset(&path);
	cl_assert_equal_i(reftable_buf_addstr(&path, dir), 0);
	cl_assert_equal_i(reftable_buf_addstr(&path, "/"), 0);
	/* do not try at home; not an external API for reftable. */
	cl_assert(!reftable_buf_addstr(&path, st->tables[0]->name));
	err = stat(path.buf, &stat_result);
	cl_assert(!err);
	cl_assert_equal_i((stat_result.st_mode & 0777),
			  opts.default_permissions);
#else
	(void) stat_result;
#endif

	/* cleanup */
	reftable_stack_destroy(st);
	for (i = 0; i < N; i++) {
		reftable_ref_record_release(&refs[i]);
		reftable_log_record_release(&logs[i]);
	}
	reftable_buf_release(&path);
	clear_dir(dir);
}
584
/*
 * Iterating over a stack with multiple tables must return all ref and
 * log records in order, terminating with a positive "end of iteration"
 * return value once all records have been yielded.
 */
void test_reftable_stack__iterator(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_ref_record refs[10] = { 0 };
	struct reftable_log_record logs[10] = { 0 };
	struct reftable_iterator it = { 0 };
	size_t N = ARRAY_SIZE(refs), i;
	int err;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	for (i = 0; i < N; i++) {
		refs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i);
		refs[i].update_index = i + 1;
		refs[i].value_type = REFTABLE_REF_VAL1;
		cl_reftable_set_hash(refs[i].value.val1, i,
				     REFTABLE_HASH_SHA1);

		logs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i);
		logs[i].update_index = i + 1;
		logs[i].value_type = REFTABLE_LOG_UPDATE;
		logs[i].value.update.email = xstrdup("johndoe@invalid");
		logs[i].value.update.message = xstrdup("commit\n");
		cl_reftable_set_hash(logs[i].value.update.new_hash, i,
				     REFTABLE_HASH_SHA1);
	}

	for (i = 0; i < N; i++)
		cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
						     &refs[i], 0), 0);

	for (i = 0; i < N; i++) {
		struct write_log_arg arg = {
			.log = &logs[i],
			.update_index = reftable_stack_next_update_index(st),
		};

		cl_assert_equal_i(reftable_stack_add(st, write_test_log,
						     &arg, 0), 0);
	}

	/* Walk all refs starting from the first one; expect exactly N. */
	reftable_stack_init_ref_iterator(st, &it);
	reftable_iterator_seek_ref(&it, refs[0].refname);
	for (i = 0; ; i++) {
		struct reftable_ref_record ref = { 0 };

		err = reftable_iterator_next_ref(&it, &ref);
		/* A positive return value signals the end of iteration. */
		if (err > 0)
			break;
		cl_assert(!err);
		cl_assert(reftable_ref_record_equal(&ref, &refs[i],
						    REFTABLE_HASH_SIZE_SHA1) != 0);
		reftable_ref_record_release(&ref);
	}
	cl_assert_equal_i(i, N);

	reftable_iterator_destroy(&it);

	cl_assert_equal_i(reftable_stack_init_log_iterator(st, &it), 0);

	/* Same walk for the logs. */
	reftable_iterator_seek_log(&it, logs[0].refname);
	for (i = 0; ; i++) {
		struct reftable_log_record log = { 0 };

		err = reftable_iterator_next_log(&it, &log);
		if (err > 0)
			break;
		cl_assert(!err);
		cl_assert(reftable_log_record_equal(&log, &logs[i],
						    REFTABLE_HASH_SIZE_SHA1) != 0);
		reftable_log_record_release(&log);
	}
	cl_assert_equal_i(i, N);

	reftable_stack_destroy(st);
	reftable_iterator_destroy(&it);
	for (i = 0; i < N; i++) {
		reftable_ref_record_release(&refs[i]);
		reftable_log_record_release(&logs[i]);
	}
	clear_dir(dir);
}
669
/*
 * Log messages are normalized on write: multi-line messages are
 * rejected with REFTABLE_API_ERROR, and a single-line message gets a
 * trailing newline appended if it lacks one.
 */
void test_reftable_stack__log_normalize(void)
{
	struct reftable_write_options opts = {
		0,
	};
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_log_record input = {
		.refname = (char *) "branch",
		.update_index = 1,
		.value_type = REFTABLE_LOG_UPDATE,
		.value = {
			.update = {
				.new_hash = { 1 },
				.old_hash = { 2 },
			},
		},
	};
	struct reftable_log_record dest = {
		.update_index = 0,
	};
	struct write_log_arg arg = {
		.log = &input,
		.update_index = 1,
	};

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	/* Embedded newlines are not allowed. */
	input.value.update.message = (char *) "one\ntwo";
	cl_assert_equal_i(reftable_stack_add(st, write_test_log,
					     &arg, 0), REFTABLE_API_ERROR);

	/* A missing trailing newline is added on write. */
	input.value.update.message = (char *) "one";
	cl_assert_equal_i(reftable_stack_add(st, write_test_log,
					     &arg, 0), 0);
	cl_assert_equal_i(reftable_stack_read_log(st, input.refname,
						  &dest), 0);
	cl_assert_equal_s(dest.value.update.message, "one\n");

	/* An already-terminated message is stored unchanged. */
	input.value.update.message = (char *) "two\n";
	arg.update_index = 2;
	cl_assert_equal_i(reftable_stack_add(st, write_test_log,
					     &arg, 0), 0);
	cl_assert_equal_i(reftable_stack_read_log(st, input.refname,
						  &dest), 0);
	cl_assert_equal_s(dest.value.update.message, "two\n");

	/* cleanup */
	reftable_stack_destroy(st);
	reftable_log_record_release(&dest);
	clear_dir(dir);
}
722
/*
 * Deletion records (tombstones) must hide earlier records for the same
 * key: reading the ref or log afterwards yields "not found" (positive
 * return value), both before and after compaction.
 */
void test_reftable_stack__tombstone(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	struct reftable_ref_record refs[2] = { 0 };
	struct reftable_log_record logs[2] = { 0 };
	size_t i, N = ARRAY_SIZE(refs);
	struct reftable_ref_record dest = { 0 };
	struct reftable_log_record log_dest = { 0 };

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	/* even entries add the refs, odd entries delete them. */
	for (i = 0; i < N; i++) {
		const char *buf = "branch";
		refs[i].refname = xstrdup(buf);
		refs[i].update_index = i + 1;
		if (i % 2 == 0) {
			refs[i].value_type = REFTABLE_REF_VAL1;
			cl_reftable_set_hash(refs[i].value.val1, i,
					     REFTABLE_HASH_SHA1);
		}

		logs[i].refname = xstrdup(buf);
		/*
		 * update_index is part of the key so should be constant.
		 * The value itself should be less than the writer's upper
		 * limit.
		 */
		logs[i].update_index = 1;
		if (i % 2 == 0) {
			logs[i].value_type = REFTABLE_LOG_UPDATE;
			cl_reftable_set_hash(logs[i].value.update.new_hash, i, REFTABLE_HASH_SHA1);
			logs[i].value.update.email =
				xstrdup("identity@invalid");
		}
	}
	for (i = 0; i < N; i++)
		cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
						     &refs[i], 0), 0);

	for (i = 0; i < N; i++) {
		struct write_log_arg arg = {
			.log = &logs[i],
			.update_index = reftable_stack_next_update_index(st),
		};
		cl_assert_equal_i(reftable_stack_add(st, write_test_log,
						     &arg, 0), 0);
	}

	/* The tombstone hides the ref: "not found" is a positive return. */
	cl_assert_equal_i(reftable_stack_read_ref(st, "branch",
						  &dest), 1);
	reftable_ref_record_release(&dest);

	cl_assert_equal_i(reftable_stack_read_log(st, "branch",
						  &log_dest), 1);
	reftable_log_record_release(&log_dest);

	/* Compaction must not resurrect the deleted records. */
	cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0);
	cl_assert_equal_i(reftable_stack_read_ref(st, "branch",
						  &dest), 1);
	cl_assert_equal_i(reftable_stack_read_log(st, "branch",
						  &log_dest), 1);
	reftable_ref_record_release(&dest);
	reftable_log_record_release(&log_dest);

	/* cleanup */
	reftable_stack_destroy(st);
	for (i = 0; i < N; i++) {
		reftable_ref_record_release(&refs[i]);
		reftable_log_record_release(&logs[i]);
	}
	clear_dir(dir);
}
798
/*
 * A stack written with SHA-1 must refuse to be reopened with a SHA-256
 * configuration (REFTABLE_FORMAT_ERROR), while reopening with default
 * options must succeed and read back the same record.
 */
void test_reftable_stack__hash_id(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;

	struct reftable_ref_record ref = {
		.refname = (char *) "master",
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "target",
		.update_index = 1,
	};
	struct reftable_write_options opts32 = { .hash_id = REFTABLE_HASH_SHA256 };
	struct reftable_stack *st32 = NULL;
	struct reftable_write_options opts_default = { 0 };
	struct reftable_stack *st_default = NULL;
	struct reftable_ref_record dest = { 0 };

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
	cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
					     &ref, 0), 0);

	/* can't read it with the wrong hash ID. */
	cl_assert_equal_i(reftable_new_stack(&st32, dir,
					     &opts32), REFTABLE_FORMAT_ERROR);

	/* check that we can read it back with default opts too. */
	cl_assert_equal_i(reftable_new_stack(&st_default, dir,
					     &opts_default), 0);
	cl_assert_equal_i(reftable_stack_read_ref(st_default, "master",
						  &dest), 0);
	cl_assert(reftable_ref_record_equal(&ref, &dest,
					    REFTABLE_HASH_SIZE_SHA1) != 0);
	reftable_ref_record_release(&dest);
	reftable_stack_destroy(st);
	reftable_stack_destroy(st_default);
	clear_dir(dir);
}
837
/*
 * Given table sizes that violate the geometric sequence with factor 2,
 * suggest_compaction_segment() must propose compacting the offending
 * suffix (indices 1 through 10, excluding the large first table).
 */
void test_reftable_stack__suggest_compaction_segment(void)
{
	uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
	struct segment min =
		suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2);
	cl_assert_equal_i(min.start, 1);
	cl_assert_equal_i(min.end, 10);
}
846
/*
 * Table sizes that already form a proper geometric sequence must yield
 * an empty suggestion (start == end means nothing to compact).
 */
void test_reftable_stack__suggest_compaction_segment_nothing(void)
{
	uint64_t sizes[] = { 64, 32, 16, 8, 4, 2 };
	struct segment result =
		suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2);
	cl_assert_equal_i(result.start, result.end);
}
854
/*
 * Compacting with an expiry policy must drop log records below the
 * configured thresholds: first by timestamp (time <= 10), then by
 * minimum update index (min_update_index = 15). Expired records read
 * back as "not found" (positive return value).
 */
void test_reftable_stack__reflog_expire(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	struct reftable_log_record logs[20] = { 0 };
	size_t i, N = ARRAY_SIZE(logs) - 1;
	struct reftable_log_expiry_config expiry = {
		.time = 10,
	};
	struct reftable_log_record log = { 0 };

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	/* logs[0] stays unused; records use index == update_index == time. */
	for (i = 1; i <= N; i++) {
		char buf[256];
		snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i);

		logs[i].refname = xstrdup(buf);
		logs[i].update_index = i;
		logs[i].value_type = REFTABLE_LOG_UPDATE;
		logs[i].value.update.time = i;
		logs[i].value.update.email = xstrdup("identity@invalid");
		cl_reftable_set_hash(logs[i].value.update.new_hash, i,
				     REFTABLE_HASH_SHA1);
	}

	for (i = 1; i <= N; i++) {
		struct write_log_arg arg = {
			.log = &logs[i],
			.update_index = reftable_stack_next_update_index(st),
		};
		cl_assert_equal_i(reftable_stack_add(st, write_test_log,
						     &arg, 0), 0);
	}

	cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0);
	cl_assert_equal_i(reftable_stack_compact_all(st, &expiry), 0);
	/* Entries at or below the time threshold are gone... */
	cl_assert_equal_i(reftable_stack_read_log(st, logs[9].refname,
						  &log), 1);
	/* ...while newer ones survive. */
	cl_assert_equal_i(reftable_stack_read_log(st, logs[11].refname,
						  &log), 0);

	expiry.min_update_index = 15;
	cl_assert_equal_i(reftable_stack_compact_all(st, &expiry), 0);
	cl_assert_equal_i(reftable_stack_read_log(st, logs[14].refname,
						  &log), 1);
	cl_assert_equal_i(reftable_stack_read_log(st, logs[16].refname,
						  &log), 0);

	/* cleanup */
	reftable_stack_destroy(st);
	for (i = 0; i <= N; i++)
		reftable_log_record_release(&logs[i]);
	clear_dir(dir);
	reftable_log_record_release(&log);
}
912
/*
 * reftable_stack_add() callback that sets the writer's limits but adds
 * no records at all, producing an empty table.
 */
static int write_nothing(struct reftable_writer *wr, void *arg UNUSED)
{
	cl_assert_equal_i(reftable_writer_set_limits(wr, 1, 1), 0);
	return 0;
}
918
/*
 * An addition that writes no records must still succeed, and the
 * resulting stack must remain readable by a second, fresh handle.
 */
void test_reftable_stack__empty_add(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_stack *st2 = NULL;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
	cl_assert_equal_i(reftable_stack_add(st, write_nothing,
					     NULL, 0), 0);
	cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0);
	/*
	 * NOTE(review): the directory is removed while both stacks are
	 * still open; destruction afterwards must cope with that.
	 */
	clear_dir(dir);
	reftable_stack_destroy(st);
	reftable_stack_destroy(st2);
}
934
/*
 * Compute floor(log_N(sz)) by repeated integer division; returns 0 for
 * sz == 0. Requires N >= 2.
 */
static int fastlogN(uint64_t sz, uint64_t N)
{
	int result = 0;

	if (!sz)
		return 0;
	while (sz >= N) {
		sz /= N;
		result++;
	}
	return result;
}
944
/*
 * Explicit auto-compaction after each add must keep the stack's table
 * count logarithmic in the number of adds, and the total number of
 * rewritten entries must stay below N * log2(N).
 */
void test_reftable_stack__auto_compaction(void)
{
	struct reftable_write_options opts = {
		.disable_auto_compact = 1,
	};
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	size_t i, N = 100;
	int err;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	for (i = 0; i < N; i++) {
		char name[100];
		struct reftable_ref_record ref = {
			.refname = name,
			.update_index = reftable_stack_next_update_index(st),
			.value_type = REFTABLE_REF_SYMREF,
			.value.symref = (char *) "master",
		};
		snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i);

		err = reftable_stack_add(st, write_test_ref, &ref, 0);
		cl_assert(!err);

		/* Compact manually; opts disabled the automatic trigger. */
		err = reftable_stack_auto_compact(st);
		cl_assert(!err);
		cl_assert(i < 2 || st->merged->tables_len < 2 * fastlogN(i, 2));
	}

	cl_assert(reftable_stack_compaction_stats(st)->entries_written <
		  (uint64_t)(N * fastlogN(N, 2)));

	reftable_stack_destroy(st);
	clear_dir(dir);
}
981
/*
 * With a custom auto_compaction_factor of 5, the stack's table count
 * must stay bounded by the corresponding base-5 logarithm of the
 * number of additions.
 */
void test_reftable_stack__auto_compaction_factor(void)
{
	struct reftable_write_options opts = {
		.auto_compaction_factor = 5,
	};
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	size_t N = 100;
	int err;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	for (size_t i = 0; i < N; i++) {
		char name[20];
		struct reftable_ref_record ref = {
			.refname = name,
			.update_index = reftable_stack_next_update_index(st),
			.value_type = REFTABLE_REF_VAL1,
		};
		xsnprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i);

		err = reftable_stack_add(st, &write_test_ref, &ref, 0);
		cl_assert(!err);

		cl_assert(i < 5 || st->merged->tables_len < 5 * fastlogN(i, 5));
	}

	reftable_stack_destroy(st);
	clear_dir(dir);
}
1012
/*
 * Auto-compaction must degrade gracefully when part of the stack is
 * locked: it compacts what it can (the tables newer than the lock)
 * without recording a failure.
 */
void test_reftable_stack__auto_compaction_with_locked_tables(void)
{
	struct reftable_write_options opts = {
		.disable_auto_compact = 1,
	};
	struct reftable_stack *st = NULL;
	struct reftable_buf buf = REFTABLE_BUF_INIT;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);

	write_n_ref_tables(st, 5);
	cl_assert_equal_i(st->merged->tables_len, 5);

	/*
	 * Given that all tables we have written should be roughly the same
	 * size, we expect that auto-compaction will want to compact all of the
	 * tables. Locking any of the tables will keep it from doing so.
	 */
	cl_assert(!reftable_buf_addstr(&buf, dir));
	cl_assert(!reftable_buf_addstr(&buf, "/"));
	cl_assert(!reftable_buf_addstr(&buf, st->tables[2]->name));
	cl_assert(!reftable_buf_addstr(&buf, ".lock"));
	write_file_buf(buf.buf, "", 0);

	/*
	 * When parts of the stack are locked, then auto-compaction does a best
	 * effort compaction of those tables which aren't locked. So while this
	 * would in theory compact all tables, due to the preexisting lock we
	 * only compact the newest two tables.
	 */
	err = reftable_stack_auto_compact(st);
	cl_assert(!err);
	cl_assert_equal_i(st->stats.failures, 0);
	cl_assert_equal_i(st->merged->tables_len, 4);

	reftable_stack_destroy(st);
	reftable_buf_release(&buf);
	clear_dir(dir);
}
1054
1055void test_reftable_stack__add_performs_auto_compaction(void)
1056{
1057 struct reftable_write_options opts = { 0 };
1058 struct reftable_stack *st = NULL;
1059 char *dir = get_tmp_dir(__LINE__);
1060 size_t i, n = 20;
1061
1062 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
1063
1064 for (i = 0; i <= n; i++) {
1065 struct reftable_ref_record ref = {
1066 .update_index = reftable_stack_next_update_index(st),
1067 .value_type = REFTABLE_REF_SYMREF,
1068 .value.symref = (char *) "master",
1069 };
1070 char buf[128];
1071
1072 /*
1073 * Disable auto-compaction for all but the last runs. Like this
1074 * we can ensure that we indeed honor this setting and have
1075 * better control over when exactly auto compaction runs.
1076 */
1077 st->opts.disable_auto_compact = i != n;
1078
1079 snprintf(buf, sizeof(buf), "branch-%04"PRIuMAX, (uintmax_t)i);
1080 ref.refname = buf;
1081
1082 cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
1083 &ref, 0), 0);
1084
1085 /*
1086 * The stack length should grow continuously for all runs where
1087 * auto compaction is disabled. When enabled, we should merge
1088 * all tables in the stack.
1089 */
1090 if (i != n)
1091 cl_assert_equal_i(st->merged->tables_len, i + 1);
1092 else
1093 cl_assert_equal_i(st->merged->tables_len, 1);
1094 }
1095
1096 reftable_stack_destroy(st);
1097 clear_dir(dir);
1098}
1099
1100void test_reftable_stack__compaction_with_locked_tables(void)
1101{
1102 struct reftable_write_options opts = {
1103 .disable_auto_compact = 1,
1104 };
1105 struct reftable_stack *st = NULL;
1106 struct reftable_buf buf = REFTABLE_BUF_INIT;
1107 char *dir = get_tmp_dir(__LINE__);
1108 int err;
1109
1110 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
1111
1112 write_n_ref_tables(st, 3);
1113 cl_assert_equal_i(st->merged->tables_len, 3);
1114
1115 /* Lock one of the tables that we're about to compact. */
1116 cl_assert(!reftable_buf_addstr(&buf, dir));
1117 cl_assert(!reftable_buf_addstr(&buf, "/"));
1118 cl_assert(!reftable_buf_addstr(&buf, st->tables[1]->name));
1119 cl_assert(!reftable_buf_addstr(&buf, ".lock"));
1120 write_file_buf(buf.buf, "", 0);
1121
1122 /*
1123 * Compaction is expected to fail given that we were not able to
1124 * compact all tables.
1125 */
1126 err = reftable_stack_compact_all(st, NULL);
1127 cl_assert_equal_i(err, REFTABLE_LOCK_ERROR);
1128 cl_assert_equal_i(st->stats.failures, 1);
1129 cl_assert_equal_i(st->merged->tables_len, 3);
1130
1131 reftable_stack_destroy(st);
1132 reftable_buf_release(&buf);
1133 clear_dir(dir);
1134}
1135
1136void test_reftable_stack__compaction_concurrent(void)
1137{
1138 struct reftable_write_options opts = { 0 };
1139 struct reftable_stack *st1 = NULL, *st2 = NULL;
1140 char *dir = get_tmp_dir(__LINE__);
1141
1142 cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0);
1143 write_n_ref_tables(st1, 3);
1144
1145 cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0);
1146 cl_assert_equal_i(reftable_stack_compact_all(st1, NULL), 0);
1147
1148 reftable_stack_destroy(st1);
1149 reftable_stack_destroy(st2);
1150
1151 cl_assert_equal_i(count_dir_entries(dir), 2);
1152 clear_dir(dir);
1153}
1154
/*
 * Drop a stack's loaded tables without going through the regular shutdown
 * path, leaving any on-disk state behind as if the process had died.
 */
static void unclean_stack_close(struct reftable_stack *st)
{
	/* break abstraction boundary to simulate unclean shutdown. */
	for (size_t i = 0; i < st->tables_len; i++)
		reftable_table_decref(st->tables[i]);
	st->tables_len = 0;
	REFTABLE_FREE_AND_NULL(st->tables);
}
1163
void test_reftable_stack__compaction_concurrent_clean(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL;
	char *dir = get_tmp_dir(__LINE__);

	/* First stack writes tables, second one opens the same directory. */
	cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0);
	write_n_ref_tables(st1, 3);

	cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0);
	cl_assert_equal_i(reftable_stack_compact_all(st1, NULL), 0);

	/* Both stacks go away without releasing their tables cleanly. */
	unclean_stack_close(st1);
	unclean_stack_close(st2);

	/*
	 * A fresh stack must still be able to clean up whatever the unclean
	 * shutdowns left behind, leaving just two entries in the directory.
	 */
	cl_assert_equal_i(reftable_new_stack(&st3, dir, &opts), 0);
	cl_assert_equal_i(reftable_stack_clean(st3), 0);
	cl_assert_equal_i(count_dir_entries(dir), 2);

	reftable_stack_destroy(st1);
	reftable_stack_destroy(st2);
	reftable_stack_destroy(st3);

	clear_dir(dir);
}
1189
1190void test_reftable_stack__read_across_reload(void)
1191{
1192 struct reftable_write_options opts = { 0 };
1193 struct reftable_stack *st1 = NULL, *st2 = NULL;
1194 struct reftable_ref_record rec = { 0 };
1195 struct reftable_iterator it = { 0 };
1196 char *dir = get_tmp_dir(__LINE__);
1197 int err;
1198
1199 /* Create a first stack and set up an iterator for it. */
1200 cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0);
1201 write_n_ref_tables(st1, 2);
1202 cl_assert_equal_i(st1->merged->tables_len, 2);
1203 reftable_stack_init_ref_iterator(st1, &it);
1204 cl_assert_equal_i(reftable_iterator_seek_ref(&it, ""), 0);
1205
1206 /* Set up a second stack for the same directory and compact it. */
1207 err = reftable_new_stack(&st2, dir, &opts);
1208 cl_assert(!err);
1209 cl_assert_equal_i(st2->merged->tables_len, 2);
1210 err = reftable_stack_compact_all(st2, NULL);
1211 cl_assert(!err);
1212 cl_assert_equal_i(st2->merged->tables_len, 1);
1213
1214 /*
1215 * Verify that we can continue to use the old iterator even after we
1216 * have reloaded its stack.
1217 */
1218 err = reftable_stack_reload(st1);
1219 cl_assert(!err);
1220 cl_assert_equal_i(st1->merged->tables_len, 1);
1221 err = reftable_iterator_next_ref(&it, &rec);
1222 cl_assert(!err);
1223 cl_assert_equal_s(rec.refname, "refs/heads/branch-0000");
1224 err = reftable_iterator_next_ref(&it, &rec);
1225 cl_assert(!err);
1226 cl_assert_equal_s(rec.refname, "refs/heads/branch-0001");
1227 err = reftable_iterator_next_ref(&it, &rec);
1228 cl_assert(err > 0);
1229
1230 reftable_ref_record_release(&rec);
1231 reftable_iterator_destroy(&it);
1232 reftable_stack_destroy(st1);
1233 reftable_stack_destroy(st2);
1234 clear_dir(dir);
1235}
1236
void test_reftable_stack__reload_with_missing_table(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	struct reftable_ref_record rec = { 0 };
	struct reftable_iterator it = { 0 };
	struct reftable_buf table_path = REFTABLE_BUF_INIT, content = REFTABLE_BUF_INIT;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	/* Create a first stack and set up an iterator for it. */
	cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
	write_n_ref_tables(st, 2);
	cl_assert_equal_i(st->merged->tables_len, 2);
	reftable_stack_init_ref_iterator(st, &it);
	cl_assert_equal_i(reftable_iterator_seek_ref(&it, ""), 0);

	/*
	 * Update the tables.list file with some garbage data, while reusing
	 * our old tables. This should trigger a partial reload of the stack,
	 * where we try to reuse our old tables.
	 */
	cl_assert(!reftable_buf_addstr(&content, st->tables[0]->name));
	cl_assert(!reftable_buf_addstr(&content, "\n"));
	cl_assert(!reftable_buf_addstr(&content, st->tables[1]->name));
	cl_assert(!reftable_buf_addstr(&content, "\n"));
	cl_assert(!reftable_buf_addstr(&content, "garbage\n"));
	/* Write to a ".lock" sibling first, then rename over tables.list. */
	cl_assert(!reftable_buf_addstr(&table_path, st->list_file));
	cl_assert(!reftable_buf_addstr(&table_path, ".lock"));
	write_file_buf(table_path.buf, content.buf, content.len);
	cl_assert_equal_i(rename(table_path.buf, st->list_file), 0);

	/*
	 * The reload must fail because the "garbage" table does not exist on
	 * disk, and the stack keeps its previously-loaded two tables.
	 * NOTE(review): -4 presumably matches REFTABLE_NOT_EXIST_ERROR —
	 * confirm against reftable-error.h and prefer the named constant.
	 */
	err = reftable_stack_reload(st);
	cl_assert_equal_i(err, -4);
	cl_assert_equal_i(st->merged->tables_len, 2);

	/*
	 * Even though the reload has failed, we should be able to continue
	 * using the iterator.
	 */
	cl_assert_equal_i(reftable_iterator_next_ref(&it, &rec), 0);
	cl_assert_equal_s(rec.refname, "refs/heads/branch-0000");
	cl_assert_equal_i(reftable_iterator_next_ref(&it, &rec), 0);
	cl_assert_equal_s(rec.refname, "refs/heads/branch-0001");
	/* A positive return value signals the end of iteration. */
	cl_assert(reftable_iterator_next_ref(&it, &rec) > 0);

	reftable_ref_record_release(&rec);
	reftable_iterator_destroy(&it);
	reftable_stack_destroy(st);
	reftable_buf_release(&table_path);
	reftable_buf_release(&content);
	clear_dir(dir);
}
1290
1291static int write_limits_after_ref(struct reftable_writer *wr, void *arg)
1292{
1293 struct reftable_ref_record *ref = arg;
1294 cl_assert_equal_i(reftable_writer_set_limits(wr,
1295 ref->update_index, ref->update_index), 0);
1296 cl_assert_equal_i(reftable_writer_add_ref(wr, ref), 0);
1297 return reftable_writer_set_limits(wr, ref->update_index, ref->update_index);
1298}
1299
1300void test_reftable_stack__invalid_limit_updates(void)
1301{
1302 struct reftable_ref_record ref = {
1303 .refname = (char *) "HEAD",
1304 .update_index = 1,
1305 .value_type = REFTABLE_REF_SYMREF,
1306 .value.symref = (char *) "master",
1307 };
1308 struct reftable_write_options opts = {
1309 .default_permissions = 0660,
1310 };
1311 struct reftable_addition *add = NULL;
1312 char *dir = get_tmp_dir(__LINE__);
1313 struct reftable_stack *st = NULL;
1314
1315 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
1316
1317 reftable_addition_destroy(add);
1318
1319 cl_assert_equal_i(reftable_stack_new_addition(&add, st, 0), 0);
1320
1321 /*
1322 * write_limits_after_ref also updates the update indexes after adding
1323 * the record. This should cause an err to be returned, since the limits
1324 * must be set at the start.
1325 */
1326 cl_assert_equal_i(reftable_addition_add(add,
1327 write_limits_after_ref, &ref), REFTABLE_API_ERROR);
1328
1329 reftable_addition_destroy(add);
1330 reftable_stack_destroy(st);
1331 clear_dir(dir);
1332}