/* Git fork of the reftable library writer. */
1/*
2 * Copyright 2020 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://developers.google.com/open-source/licenses/bsd
7 */
8
9#include "writer.h"
10
11#include "system.h"
12
13#include "block.h"
14#include "constants.h"
15#include "record.h"
16#include "tree.h"
17#include "reftable-error.h"
18
19/* finishes a block, and writes it to storage */
20static int writer_flush_block(struct reftable_writer *w);
21
22/* deallocates memory related to the index */
23static void writer_clear_index(struct reftable_writer *w);
24
25/* finishes writing a 'r' (refs) or 'g' (reflogs) section */
26static int writer_finish_public_section(struct reftable_writer *w);
27
28static struct reftable_block_stats *
29writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
30{
31 switch (typ) {
32 case 'r':
33 return &w->stats.ref_stats;
34 case 'o':
35 return &w->stats.obj_stats;
36 case 'i':
37 return &w->stats.idx_stats;
38 case 'g':
39 return &w->stats.log_stats;
40 }
41 abort();
42 return NULL;
43}
44
45/* write data, queuing the padding for the next write. Returns negative for
46 * error. */
47static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
48 int padding)
49{
50 int n = 0;
51 if (w->pending_padding > 0) {
52 uint8_t *zeroed;
53 int n;
54
55 zeroed = reftable_calloc(w->pending_padding, sizeof(*zeroed));
56 if (!zeroed)
57 return -1;
58
59 n = w->write(w->write_arg, zeroed, w->pending_padding);
60 if (n < 0) {
61 reftable_free(zeroed);
62 return n;
63 }
64
65 w->pending_padding = 0;
66 reftable_free(zeroed);
67 }
68
69 w->pending_padding = padding;
70 n = w->write(w->write_arg, data, len);
71 if (n < 0)
72 return n;
73 n += padding;
74 return 0;
75}
76
77static void options_set_defaults(struct reftable_write_options *opts)
78{
79 if (opts->restart_interval == 0) {
80 opts->restart_interval = 16;
81 }
82
83 if (opts->hash_id == 0) {
84 opts->hash_id = REFTABLE_HASH_SHA1;
85 }
86 if (opts->block_size == 0) {
87 opts->block_size = DEFAULT_BLOCK_SIZE;
88 }
89}
90
91static int writer_version(struct reftable_writer *w)
92{
93 return (w->opts.hash_id == 0 || w->opts.hash_id == REFTABLE_HASH_SHA1) ?
94 1 :
95 2;
96}
97
/*
 * Serialize the reftable header into `dest`:
 *
 *   bytes 0-3   magic "REFT"
 *   byte  4     format version (1 or 2)
 *   bytes 5-7   block size (24-bit big-endian)
 *   bytes 8-15  min_update_index
 *   bytes 16-23 max_update_index
 *   bytes 24-27 hash format ID (version 2 only)
 *
 * Returns the number of header bytes written, or -1 when the configured
 * hash is not representable in the version-2 header.
 */
static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
{
	memcpy(dest, "REFT", 4);

	dest[4] = writer_version(w);

	reftable_put_be24(dest + 5, w->opts.block_size);
	reftable_put_be64(dest + 8, w->min_update_index);
	reftable_put_be64(dest + 16, w->max_update_index);
	if (writer_version(w) == 2) {
		uint32_t hash_id;

		switch (w->opts.hash_id) {
		case REFTABLE_HASH_SHA1:
			hash_id = REFTABLE_FORMAT_ID_SHA1;
			break;
		case REFTABLE_HASH_SHA256:
			hash_id = REFTABLE_FORMAT_ID_SHA256;
			break;
		default:
			return -1;
		}

		reftable_put_be32(dest + 24, hash_id);
	}

	return header_size(writer_version(w));
}
126
127static int writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
128{
129 int block_start = 0, ret;
130
131 if (w->next == 0)
132 block_start = header_size(writer_version(w));
133
134 reftable_buf_reset(&w->last_key);
135 ret = block_writer_init(&w->block_writer_data, typ, w->block,
136 w->opts.block_size, block_start,
137 hash_size(w->opts.hash_id));
138 if (ret < 0)
139 return ret;
140
141 w->block_writer = &w->block_writer_data;
142 w->block_writer->restart_interval = w->opts.restart_interval;
143
144 return 0;
145}
146
147int reftable_writer_new(struct reftable_writer **out,
148 ssize_t (*writer_func)(void *, const void *, size_t),
149 int (*flush_func)(void *),
150 void *writer_arg, const struct reftable_write_options *_opts)
151{
152 struct reftable_write_options opts = {0};
153 struct reftable_writer *wp;
154
155 wp = reftable_calloc(1, sizeof(*wp));
156 if (!wp)
157 return REFTABLE_OUT_OF_MEMORY_ERROR;
158
159 if (_opts)
160 opts = *_opts;
161 options_set_defaults(&opts);
162 if (opts.block_size >= (1 << 24))
163 return REFTABLE_API_ERROR;
164
165 reftable_buf_init(&wp->block_writer_data.last_key);
166 reftable_buf_init(&wp->last_key);
167 reftable_buf_init(&wp->scratch);
168 REFTABLE_CALLOC_ARRAY(wp->block, opts.block_size);
169 if (!wp->block) {
170 reftable_free(wp);
171 return REFTABLE_OUT_OF_MEMORY_ERROR;
172 }
173 wp->write = writer_func;
174 wp->write_arg = writer_arg;
175 wp->opts = opts;
176 wp->flush = flush_func;
177 writer_reinit_block_writer(wp, REFTABLE_BLOCK_TYPE_REF);
178
179 *out = wp;
180
181 return 0;
182}
183
184int reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
185 uint64_t max)
186{
187 /*
188 * Set the min/max update index limits for the reftable writer.
189 * This must be called before adding any records, since:
190 * - The 'next' field gets set after writing the first block.
191 * - The 'last_key' field updates with each new record (but resets
192 * after sections).
193 * Returns REFTABLE_API_ERROR if called after writing has begun.
194 */
195 if (w->next || w->last_key.len)
196 return REFTABLE_API_ERROR;
197
198 w->min_update_index = min;
199 w->max_update_index = max;
200
201 return 0;
202}
203
204static void writer_release(struct reftable_writer *w)
205{
206 if (w) {
207 reftable_free(w->block);
208 w->block = NULL;
209 block_writer_release(&w->block_writer_data);
210 w->block_writer = NULL;
211 writer_clear_index(w);
212 reftable_buf_release(&w->last_key);
213 reftable_buf_release(&w->scratch);
214 }
215}
216
/* Release all resources of the writer and the writer itself. NULL-safe. */
void reftable_writer_free(struct reftable_writer *w)
{
	writer_release(w);
	reftable_free(w);
}
222
/*
 * One entry of the object index: an object hash together with the offsets
 * of all blocks that contain a reference pointing at it. Nodes live in the
 * writer's `obj_index_tree`, sorted by hash.
 */
struct obj_index_tree_node {
	struct reftable_buf hash;
	uint64_t *offsets;
	size_t offset_len;
	size_t offset_cap; /* allocated capacity of `offsets` */
};

#define OBJ_INDEX_TREE_NODE_INIT \
	{ \
		.hash = REFTABLE_BUF_INIT \
	}
234
235static int obj_index_tree_node_compare(const void *a, const void *b)
236{
237 return reftable_buf_cmp(&((const struct obj_index_tree_node *)a)->hash,
238 &((const struct obj_index_tree_node *)b)->hash);
239}
240
/*
 * Record that the object `hash` is referenced by the block that starts at
 * the current write offset (`w->next`). Offsets are collected per hash in
 * a binary tree so the object index can later be emitted in sorted order.
 *
 * Returns 0 on success or a negative reftable error code.
 */
static int writer_index_hash(struct reftable_writer *w, struct reftable_buf *hash)
{
	uint64_t off = w->next;
	struct obj_index_tree_node want = { .hash = *hash };
	struct obj_index_tree_node *key;
	struct tree_node *node;

	node = tree_search(w->obj_index_tree, &want, &obj_index_tree_node_compare);
	if (!node) {
		/* First occurrence of this hash: insert a fresh node. */
		struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
		int err;

		key = reftable_malloc(sizeof(*key));
		if (!key)
			return REFTABLE_OUT_OF_MEMORY_ERROR;

		*key = empty;

		reftable_buf_reset(&key->hash);
		err = reftable_buf_add(&key->hash, hash->buf, hash->len);
		if (err < 0) {
			reftable_free(key);
			return err;
		}
		tree_insert(&w->obj_index_tree, key,
			    &obj_index_tree_node_compare);
	} else {
		key = node->key;
	}

	/*
	 * Blocks are written in increasing offset order, so a duplicate for
	 * the current block can only be the most recently appended offset.
	 */
	if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off)
		return 0;

	REFTABLE_ALLOC_GROW_OR_NULL(key->offsets, key->offset_len + 1,
				    key->offset_cap);
	if (!key->offsets)
		return REFTABLE_OUT_OF_MEMORY_ERROR;
	key->offsets[key->offset_len++] = off;

	return 0;
}
282
/*
 * Append one record to the table, transparently flushing the current block
 * and starting a new one when the record does not fit anymore. Records
 * must arrive in strictly increasing key order within a section.
 *
 * Returns 0 on success or a negative reftable error code (in particular
 * REFTABLE_API_ERROR for out-of-order keys or mismatched record types, and
 * REFTABLE_ENTRY_TOO_BIG_ERROR when the record cannot fit even into a
 * fresh block).
 */
static int writer_add_record(struct reftable_writer *w,
			     struct reftable_record *rec)
{
	int err;

	err = reftable_record_key(rec, &w->scratch);
	if (err < 0)
		goto done;

	/* Enforce strictly ascending key order. */
	if (reftable_buf_cmp(&w->last_key, &w->scratch) >= 0) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	reftable_buf_reset(&w->last_key);
	err = reftable_buf_add(&w->last_key, w->scratch.buf, w->scratch.len);
	if (err < 0)
		goto done;

	if (!w->block_writer) {
		err = writer_reinit_block_writer(w, reftable_record_type(rec));
		if (err < 0)
			goto done;
	}

	/* Mixing record types within one section is a caller error. */
	if (block_writer_type(w->block_writer) != reftable_record_type(rec))
		return REFTABLE_API_ERROR;

	/*
	 * Try to add the record to the writer. If this succeeds then we're
	 * done. Otherwise the block writer may have hit the block size limit
	 * and needs to be flushed.
	 */
	err = block_writer_add(w->block_writer, rec);
	if (err == 0)
		goto done;

	if (err != REFTABLE_ENTRY_TOO_BIG_ERROR)
		goto done;
	/*
	 * The current block is full, so we need to flush and reinitialize the
	 * writer to start writing the next block.
	 */
	err = writer_flush_block(w);
	if (err < 0)
		goto done;
	err = writer_reinit_block_writer(w, reftable_record_type(rec));
	if (err < 0)
		goto done;

	/*
	 * Try to add the record to the writer again. If this still fails then
	 * the record does not fit into the block size.
	 */
	err = block_writer_add(w->block_writer, rec);
	if (err)
		goto done;

done:
	return err;
}
344
345int reftable_writer_add_ref(struct reftable_writer *w,
346 struct reftable_ref_record *ref)
347{
348 struct reftable_record rec = {
349 .type = REFTABLE_BLOCK_TYPE_REF,
350 .u = {
351 .ref = *ref
352 },
353 };
354 int err;
355
356 if (!ref->refname ||
357 ref->update_index < w->min_update_index ||
358 ref->update_index > w->max_update_index)
359 return REFTABLE_API_ERROR;
360
361 rec.u.ref.update_index -= w->min_update_index;
362
363 err = writer_add_record(w, &rec);
364 if (err < 0)
365 goto out;
366
367 if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
368 reftable_buf_reset(&w->scratch);
369 err = reftable_buf_add(&w->scratch, (char *)reftable_ref_record_val1(ref),
370 hash_size(w->opts.hash_id));
371 if (err < 0)
372 goto out;
373
374 err = writer_index_hash(w, &w->scratch);
375 if (err < 0)
376 goto out;
377 }
378
379 if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
380 reftable_buf_reset(&w->scratch);
381 err = reftable_buf_add(&w->scratch, reftable_ref_record_val2(ref),
382 hash_size(w->opts.hash_id));
383 if (err < 0)
384 goto out;
385
386 err = writer_index_hash(w, &w->scratch);
387 if (err < 0)
388 goto out;
389 }
390
391 err = 0;
392
393out:
394 return err;
395}
396
397int reftable_writer_add_refs(struct reftable_writer *w,
398 struct reftable_ref_record *refs, size_t n)
399{
400 int err = 0;
401
402 if (n)
403 qsort(refs, n, sizeof(*refs), reftable_ref_record_compare_name);
404
405 for (size_t i = 0; err == 0 && i < n; i++)
406 err = reftable_writer_add_ref(w, &refs[i]);
407
408 return err;
409}
410
/*
 * Add a log record without any message normalization. Finishes the refs
 * section first if it is still open, since log records live in their own
 * section. Queued padding is dropped because log blocks are written
 * unpadded.
 */
static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
					    struct reftable_log_record *log)
{
	struct reftable_record rec = {
		.type = REFTABLE_BLOCK_TYPE_LOG,
		.u = {
			.log = *log,
		},
	};
	if (w->block_writer &&
	    block_writer_type(w->block_writer) == REFTABLE_BLOCK_TYPE_REF) {
		int err = writer_finish_public_section(w);
		if (err < 0)
			return err;
	}

	/* Log blocks are unpadded: cancel any padding queued by padded_write. */
	w->next -= w->pending_padding;
	w->pending_padding = 0;
	return writer_add_record(w, &rec);
}
431
432int reftable_writer_add_log(struct reftable_writer *w,
433 struct reftable_log_record *log)
434{
435 char *input_log_message = NULL;
436 struct reftable_buf cleaned_message = REFTABLE_BUF_INIT;
437 int err = 0;
438
439 if (log->value_type == REFTABLE_LOG_DELETION)
440 return reftable_writer_add_log_verbatim(w, log);
441
442 /*
443 * Verify only the upper limit of the update_index. Each reflog entry
444 * is tied to a specific update_index. Entries in the reflog can be
445 * replaced by adding a new entry with the same update_index,
446 * effectively canceling the old one.
447 *
448 * Consequently, reflog updates may include update_index values lower
449 * than the writer's min_update_index.
450 */
451 if (log->update_index > w->max_update_index)
452 return REFTABLE_API_ERROR;
453
454 if (!log->refname)
455 return REFTABLE_API_ERROR;
456
457 input_log_message = log->value.update.message;
458 if (!w->opts.exact_log_message && log->value.update.message) {
459 err = reftable_buf_addstr(&cleaned_message, log->value.update.message);
460 if (err < 0)
461 goto done;
462
463 while (cleaned_message.len &&
464 cleaned_message.buf[cleaned_message.len - 1] == '\n') {
465 err = reftable_buf_setlen(&cleaned_message,
466 cleaned_message.len - 1);
467 if (err < 0)
468 goto done;
469 }
470 if (strchr(cleaned_message.buf, '\n')) {
471 /* multiple lines not allowed. */
472 err = REFTABLE_API_ERROR;
473 goto done;
474 }
475
476 err = reftable_buf_addstr(&cleaned_message, "\n");
477 if (err < 0)
478 goto done;
479
480 log->value.update.message = cleaned_message.buf;
481 }
482
483 err = reftable_writer_add_log_verbatim(w, log);
484 log->value.update.message = input_log_message;
485done:
486 reftable_buf_release(&cleaned_message);
487 return err;
488}
489
490int reftable_writer_add_logs(struct reftable_writer *w,
491 struct reftable_log_record *logs, size_t n)
492{
493 int err = 0;
494
495 if (n)
496 qsort(logs, n, sizeof(*logs), reftable_log_record_compare_key);
497
498 for (size_t i = 0; err == 0 && i < n; i++)
499 err = reftable_writer_add_log(w, &logs[i]);
500
501 return err;
502}
503
/*
 * Flush the current block and, when enough blocks have accumulated, write
 * the (possibly multi-level) index for the section. Updates the per-type
 * block statistics with index offset, index block count and maximum index
 * level. Returns 0 on success or a negative reftable error code.
 */
static int writer_finish_section(struct reftable_writer *w)
{
	struct reftable_block_stats *bstats = NULL;
	uint8_t typ = block_writer_type(w->block_writer);
	uint64_t index_start = 0;
	int max_level = 0;
	/* With padding enabled, small indices are not worth their own blocks. */
	size_t threshold = w->opts.unpadded ? 1 : 3;
	int before_blocks = w->stats.idx_stats.blocks;
	int err;

	err = writer_flush_block(w);
	if (err < 0)
		return err;

	/*
	 * When the section we are about to index has a lot of blocks then the
	 * index itself may span across multiple blocks, as well. This would
	 * require a linear scan over index blocks only to find the desired
	 * indexed block, which is inefficient. Instead, we write a multi-level
	 * index where index records of level N+1 will refer to index blocks of
	 * level N. This isn't constant time, either, but at least logarithmic.
	 *
	 * This loop handles writing this multi-level index. Note that we write
	 * the lowest-level index pointing to the indexed blocks first. We then
	 * continue writing additional index levels until the current level has
	 * less blocks than the threshold so that the highest level will be at
	 * the end of the index section.
	 *
	 * Readers are thus required to start reading the index section from
	 * its end, which is why we set `index_start` to the beginning of the
	 * last index section.
	 */
	while (w->index_len > threshold) {
		struct reftable_index_record *idx = NULL;
		size_t i, idx_len;

		max_level++;
		index_start = w->next;
		err = writer_reinit_block_writer(w, REFTABLE_BLOCK_TYPE_INDEX);
		if (err < 0)
			return err;

		/*
		 * Take ownership of the current level's records; flushing the
		 * index blocks below repopulates `w->index` with the records
		 * of the next level up.
		 */
		idx = w->index;
		idx_len = w->index_len;

		w->index = NULL;
		w->index_len = 0;
		w->index_cap = 0;
		for (i = 0; i < idx_len; i++) {
			struct reftable_record rec = {
				.type = REFTABLE_BLOCK_TYPE_INDEX,
				.u = {
					.idx = idx[i],
				},
			};

			err = writer_add_record(w, &rec);
			if (err < 0)
				return err;
		}

		err = writer_flush_block(w);
		if (err < 0)
			return err;

		for (i = 0; i < idx_len; i++)
			reftable_buf_release(&idx[i].last_key);
		reftable_free(idx);
	}

	/*
	 * The index may still contain a number of index blocks lower than the
	 * threshold. Clear it so that these entries don't leak into the next
	 * index section.
	 */
	writer_clear_index(w);

	bstats = writer_reftable_block_stats(w, typ);
	bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
	bstats->index_offset = index_start;
	bstats->max_index_level = max_level;

	/* Reinit lastKey, as the next section can start with any key. */
	reftable_buf_reset(&w->last_key);

	return 0;
}
591
/*
 * Accumulator for the infix walk over the object index: tracks the
 * previously visited hash and the longest common prefix seen so far.
 */
struct common_prefix_arg {
	struct reftable_buf *last;
	size_t max;
};
596
597static void update_common(void *void_arg, void *key)
598{
599 struct common_prefix_arg *arg = void_arg;
600 struct obj_index_tree_node *entry = key;
601 if (arg->last) {
602 size_t n = common_prefix_size(&entry->hash, arg->last);
603 if (n > arg->max)
604 arg->max = n;
605 }
606 arg->last = &entry->hash;
607}
608
/* Closure for write_object_record(): the writer plus the first error seen. */
struct write_record_arg {
	struct reftable_writer *w;
	int err;
};
613
/*
 * Tree-walk callback: emit one object record, abbreviated to the computed
 * object_id_len. On overflow the block is flushed and the record retried;
 * if it still does not fit, the offset list is dropped (an empty offset
 * list is valid and forces readers to scan). Errors are latched into
 * arg->err, and once set all further invocations are no-ops.
 */
static void write_object_record(void *void_arg, void *key)
{
	struct write_record_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	struct reftable_record
		rec = { .type = REFTABLE_BLOCK_TYPE_OBJ,
			.u.obj = {
				.hash_prefix = (uint8_t *)entry->hash.buf,
				.hash_prefix_len = arg->w->stats.object_id_len,
				.offsets = entry->offsets,
				.offset_len = entry->offset_len,
			} };
	if (arg->err < 0)
		goto done;

	/*
	 * Try to add the record to the writer. If this succeeds then we're
	 * done. Otherwise the block writer may have hit the block size limit
	 * and needs to be flushed.
	 */
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	if (arg->err != REFTABLE_ENTRY_TOO_BIG_ERROR)
		goto done;

	/*
	 * The current block is full, so we need to flush and reinitialize the
	 * writer to start writing the next block.
	 */
	arg->err = writer_flush_block(arg->w);
	if (arg->err < 0)
		goto done;

	arg->err = writer_reinit_block_writer(arg->w, REFTABLE_BLOCK_TYPE_OBJ);
	if (arg->err < 0)
		goto done;

	/*
	 * If this still fails then we may need to reset record's offset
	 * length to reduce the data size to be written.
	 */
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	if (arg->err != REFTABLE_ENTRY_TOO_BIG_ERROR)
		goto done;

	rec.u.obj.offset_len = 0;
	arg->err = block_writer_add(arg->w->block_writer, &rec);

	/* Should be able to write into a fresh block. */
	assert(arg->err == 0);

done:;
}
672
673static void object_record_free(void *void_arg REFTABLE_UNUSED, void *key)
674{
675 struct obj_index_tree_node *entry = key;
676
677 REFTABLE_FREE_AND_NULL(entry->offsets);
678 reftable_buf_release(&entry->hash);
679 reftable_free(entry);
680}
681
/*
 * Write the object ('o') section: determine the shortest hash prefix
 * length that still distinguishes all indexed objects, then emit one
 * record per object listing the blocks that reference it.
 */
static int writer_dump_object_index(struct reftable_writer *w)
{
	struct write_record_arg closure = { .w = w };
	struct common_prefix_arg common = {
		.max = 1, /* obj_id_len should be >= 2. */
	};
	int err;

	/* Longest common prefix plus one gives a unique abbreviation. */
	if (w->obj_index_tree)
		infix_walk(w->obj_index_tree, &update_common, &common);
	w->stats.object_id_len = common.max + 1;

	err = writer_reinit_block_writer(w, REFTABLE_BLOCK_TYPE_OBJ);
	if (err < 0)
		return err;

	if (w->obj_index_tree)
		infix_walk(w->obj_index_tree, &write_object_record, &closure);

	if (closure.err < 0)
		return closure.err;
	return writer_finish_section(w);
}
705
/*
 * Finish the currently open 'r' (refs) or 'g' (reflogs) section: flush it,
 * write its index, and — after an indexed refs section — also write the
 * object index. Releases the object index tree afterwards. A no-op when no
 * section is open.
 */
static int writer_finish_public_section(struct reftable_writer *w)
{
	uint8_t typ = 0;
	int err = 0;

	if (!w->block_writer)
		return 0;

	typ = block_writer_type(w->block_writer);
	err = writer_finish_section(w);
	if (err < 0)
		return err;
	/*
	 * The object index only pays off when the refs section itself was
	 * big enough to need an index.
	 */
	if (typ == REFTABLE_BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
	    w->stats.ref_stats.index_blocks > 0) {
		err = writer_dump_object_index(w);
		if (err < 0)
			return err;
	}

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &object_record_free, NULL);
		tree_free(w->obj_index_tree);
		w->obj_index_tree = NULL;
	}

	w->block_writer = NULL;
	return 0;
}
734
/*
 * Finish the table: close any open section and write the footer, which
 * repeats the header and appends the section offsets plus a CRC32:
 *
 *   header (24 or 28 bytes) | ref index offset | obj offset<<5 | obj id len
 *   | obj index offset | log offset | log index offset | CRC32
 *
 * Returns REFTABLE_EMPTY_TABLE_ERROR when nothing was written (the file
 * then contains just a header and footer). Releases the writer's resources
 * in all cases; the caller still has to free the writer itself.
 */
int reftable_writer_close(struct reftable_writer *w)
{
	uint8_t footer[72]; /* big enough for the version-2 footer */
	uint8_t *p = footer;
	int err = writer_finish_public_section(w);
	int empty_table = w->next == 0;
	if (err != 0)
		goto done;
	/* Trailing padding of the last block is dropped, not written. */
	w->pending_padding = 0;
	if (empty_table) {
		/* Empty tables need a header anyway. */
		uint8_t header[28];
		int n = writer_write_header(w, header);
		err = padded_write(w, header, n, 0);
		if (err < 0)
			goto done;
	}

	p += writer_write_header(w, footer);
	reftable_put_be64(p, w->stats.ref_stats.index_offset);
	p += 8;
	/* Object section offset and abbreviation length share one word. */
	reftable_put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
	p += 8;
	reftable_put_be64(p, w->stats.obj_stats.index_offset);
	p += 8;

	reftable_put_be64(p, w->stats.log_stats.offset);
	p += 8;
	reftable_put_be64(p, w->stats.log_stats.index_offset);
	p += 8;

	reftable_put_be32(p, crc32(0, footer, p - footer));
	p += 4;

	/* Make sure all block data hits storage before the footer does. */
	err = w->flush(w->write_arg);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = padded_write(w, footer, footer_size(writer_version(w)), 0);
	if (err < 0)
		goto done;

	if (empty_table) {
		err = REFTABLE_EMPTY_TABLE_ERROR;
		goto done;
	}

done:
	writer_release(w);
	return err;
}
788
789static void writer_clear_index(struct reftable_writer *w)
790{
791 for (size_t i = 0; w->index && i < w->index_len; i++)
792 reftable_buf_release(&w->index[i].last_key);
793 REFTABLE_FREE_AND_NULL(w->index);
794 w->index_len = 0;
795 w->index_cap = 0;
796}
797
/*
 * Serialize the current (non-empty) block to storage, pad it up to the
 * block size where required, update the per-type statistics, and queue an
 * index record pointing at it. On return the embedded block writer is
 * detached; a new block must be started before adding further records.
 */
static int writer_flush_nonempty_block(struct reftable_writer *w)
{
	struct reftable_index_record index_record = {
		.last_key = REFTABLE_BUF_INIT,
	};
	uint8_t typ = block_writer_type(w->block_writer);
	struct reftable_block_stats *bstats;
	int raw_bytes, padding = 0, err;
	uint64_t block_typ_off;

	/*
	 * Finish the current block. This will cause the block writer to emit
	 * restart points and potentially compress records in case we are
	 * writing a log block.
	 *
	 * Note that this is still happening in memory.
	 */
	raw_bytes = block_writer_finish(w->block_writer);
	if (raw_bytes < 0)
		return raw_bytes;

	/*
	 * By default, all records except for log records are padded to the
	 * block size.
	 */
	if (!w->opts.unpadded && typ != REFTABLE_BLOCK_TYPE_LOG)
		padding = w->opts.block_size - raw_bytes;

	/* Remember where the first block of each type starts. */
	bstats = writer_reftable_block_stats(w, typ);
	block_typ_off = (bstats->blocks == 0) ? w->next : 0;
	if (block_typ_off > 0)
		bstats->offset = block_typ_off;
	bstats->entries += w->block_writer->entries;
	bstats->restarts += w->block_writer->restart_len;
	bstats->blocks++;
	w->stats.blocks++;

	/*
	 * If this is the first block we're writing to the table then we need
	 * to also write the reftable header.
	 */
	if (!w->next)
		writer_write_header(w, w->block);

	err = padded_write(w, w->block, raw_bytes, padding);
	if (err < 0)
		return err;

	/*
	 * Add an index record for every block that we're writing. If we end up
	 * having more than a threshold of index records we will end up writing
	 * an index section in `writer_finish_section()`. Each index record
	 * contains the last record key of the block it is indexing as well as
	 * the offset of that block.
	 *
	 * Note that this also applies when flushing index blocks, in which
	 * case we will end up with a multi-level index.
	 */
	REFTABLE_ALLOC_GROW_OR_NULL(w->index, w->index_len + 1, w->index_cap);
	if (!w->index)
		return REFTABLE_OUT_OF_MEMORY_ERROR;

	index_record.offset = w->next;
	reftable_buf_reset(&index_record.last_key);
	err = reftable_buf_add(&index_record.last_key, w->block_writer->last_key.buf,
			       w->block_writer->last_key.len);
	if (err < 0)
		return err;
	w->index[w->index_len] = index_record;
	w->index_len++;

	/* Advance past the block; padding itself is written lazily. */
	w->next += padding + raw_bytes;
	w->block_writer = NULL;

	return 0;
}
874
875static int writer_flush_block(struct reftable_writer *w)
876{
877 if (!w->block_writer)
878 return 0;
879 if (w->block_writer->entries == 0)
880 return 0;
881 return writer_flush_nonempty_block(w);
882}
883
/* Expose the statistics accumulated while writing; owned by the writer. */
const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w)
{
	return &w->stats;
}