Git fork

Merge branch 'ps/reftable-libgit2-cleanup'

Code clean-ups.

* ps/reftable-libgit2-cleanup:
refs/reftable: always reload stacks when creating lock
reftable: don't second-guess errors from flock interface
reftable/stack: handle outdated stacks when compacting
reftable/stack: allow passing flags to `reftable_stack_add()`
reftable/stack: fix compiler warning due to missing braces
reftable/stack: reorder code to avoid forward declarations
reftable/writer: drop Git-specific `QSORT()` macro
reftable/writer: fix type used for number of records

+281 -284
+13 -12
refs/reftable-backend.c
··· 1012 1012 if (!arg) { 1013 1013 struct reftable_addition *addition; 1014 1014 1015 - ret = reftable_stack_reload(be->stack); 1016 - if (ret) 1017 - return ret; 1018 - 1019 1015 ret = reftable_stack_new_addition(&addition, be->stack, 1020 1016 REFTABLE_STACK_NEW_ADDITION_RELOAD); 1021 1017 if (ret) { ··· 1974 1970 ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1); 1975 1971 if (ret) 1976 1972 goto done; 1977 - ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg); 1973 + ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg, 1974 + REFTABLE_STACK_NEW_ADDITION_RELOAD); 1978 1975 1979 1976 done: 1980 1977 assert(ret != REFTABLE_API_ERROR); ··· 2003 2000 ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1); 2004 2001 if (ret) 2005 2002 goto done; 2006 - ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg); 2003 + ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg, 2004 + REFTABLE_STACK_NEW_ADDITION_RELOAD); 2007 2005 2008 2006 done: 2009 2007 assert(ret != REFTABLE_API_ERROR); ··· 2375 2373 goto done; 2376 2374 arg.stack = be->stack; 2377 2375 2378 - ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg); 2376 + ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg, 2377 + REFTABLE_STACK_NEW_ADDITION_RELOAD); 2379 2378 2380 2379 done: 2381 2380 return ret; ··· 2446 2445 return ret; 2447 2446 arg.stack = be->stack; 2448 2447 2449 - ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg); 2448 + ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg, 2449 + REFTABLE_STACK_NEW_ADDITION_RELOAD); 2450 2450 2451 2451 assert(ret != REFTABLE_API_ERROR); 2452 2452 return ret; ··· 2567 2567 if (ret < 0) 2568 2568 goto done; 2569 2569 2570 + ret = reftable_stack_new_addition(&add, be->stack, 2571 + REFTABLE_STACK_NEW_ADDITION_RELOAD); 2572 + if (ret < 0) 2573 + goto done; 2574 + 2570 2575 ret = reftable_stack_init_log_iterator(be->stack, &it); 2571 
2576 if (ret < 0) 2572 2577 goto done; 2573 2578 2574 2579 ret = reftable_iterator_seek_log(&it, refname); 2575 - if (ret < 0) 2576 - goto done; 2577 - 2578 - ret = reftable_stack_new_addition(&add, be->stack, 0); 2579 2580 if (ret < 0) 2580 2581 goto done; 2581 2582
+6 -3
reftable/reftable-stack.h
··· 68 68 * transaction. Releases the lock if held. */ 69 69 void reftable_addition_destroy(struct reftable_addition *add); 70 70 71 - /* add a new table to the stack. The write_table function must call 72 - * reftable_writer_set_limits, add refs and return an error value. */ 71 + /* 72 + * Add a new table to the stack. The write_table function must call 73 + * reftable_writer_set_limits, add refs and return an error value. 74 + * The flags are passed through to `reftable_stack_new_addition()`. 75 + */ 73 76 int reftable_stack_add(struct reftable_stack *st, 74 77 int (*write_table)(struct reftable_writer *wr, 75 78 void *write_arg), 76 - void *write_arg); 79 + void *write_arg, unsigned flags); 77 80 78 81 struct reftable_iterator; 79 82
+2 -2
reftable/reftable-writer.h
··· 156 156 the records before adding them, reordering the records array passed in. 157 157 */ 158 158 int reftable_writer_add_refs(struct reftable_writer *w, 159 - struct reftable_ref_record *refs, int n); 159 + struct reftable_ref_record *refs, size_t n); 160 160 161 161 /* 162 162 adds reftable_log_records. Log records are keyed by (refname, decreasing ··· 171 171 the records before adding them, reordering records array passed in. 172 172 */ 173 173 int reftable_writer_add_logs(struct reftable_writer *w, 174 - struct reftable_log_record *logs, int n); 174 + struct reftable_log_record *logs, size_t n); 175 175 176 176 /* reftable_writer_close finalizes the reftable. The writer is retained so 177 177 * statistics can be inspected. */
+213 -226
reftable/stack.c
··· 17 17 #include "table.h" 18 18 #include "writer.h" 19 19 20 - static int stack_try_add(struct reftable_stack *st, 21 - int (*write_table)(struct reftable_writer *wr, 22 - void *arg), 23 - void *arg); 24 - static int stack_write_compact(struct reftable_stack *st, 25 - struct reftable_writer *wr, 26 - size_t first, size_t last, 27 - struct reftable_log_expiry_config *config); 28 - static void reftable_addition_close(struct reftable_addition *add); 29 - static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st, 30 - int reuse_open); 31 - 32 20 static int stack_filename(struct reftable_buf *dest, struct reftable_stack *st, 33 21 const char *name) 34 22 { ··· 82 70 { 83 71 struct fd_writer *writer = arg; 84 72 return stack_fsync(writer->opts, writer->fd); 85 - } 86 - 87 - int reftable_new_stack(struct reftable_stack **dest, const char *dir, 88 - const struct reftable_write_options *_opts) 89 - { 90 - struct reftable_buf list_file_name = REFTABLE_BUF_INIT; 91 - struct reftable_write_options opts = { 0 }; 92 - struct reftable_stack *p; 93 - int err; 94 - 95 - p = reftable_calloc(1, sizeof(*p)); 96 - if (!p) { 97 - err = REFTABLE_OUT_OF_MEMORY_ERROR; 98 - goto out; 99 - } 100 - 101 - if (_opts) 102 - opts = *_opts; 103 - if (opts.hash_id == 0) 104 - opts.hash_id = REFTABLE_HASH_SHA1; 105 - 106 - *dest = NULL; 107 - 108 - reftable_buf_reset(&list_file_name); 109 - if ((err = reftable_buf_addstr(&list_file_name, dir)) < 0 || 110 - (err = reftable_buf_addstr(&list_file_name, "/tables.list")) < 0) 111 - goto out; 112 - 113 - p->list_file = reftable_buf_detach(&list_file_name); 114 - p->list_fd = -1; 115 - p->opts = opts; 116 - p->reftable_dir = reftable_strdup(dir); 117 - if (!p->reftable_dir) { 118 - err = REFTABLE_OUT_OF_MEMORY_ERROR; 119 - goto out; 120 - } 121 - 122 - err = reftable_stack_reload_maybe_reuse(p, 1); 123 - if (err < 0) 124 - goto out; 125 - 126 - *dest = p; 127 - err = 0; 128 - 129 - out: 130 - if (err < 0) 131 - reftable_stack_destroy(p); 
132 - return err; 133 73 } 134 74 135 75 static int fd_read_lines(int fd, char ***namesp) ··· 591 531 return err; 592 532 } 593 533 594 - /* -1 = error 595 - 0 = up to date 596 - 1 = changed. */ 534 + int reftable_new_stack(struct reftable_stack **dest, const char *dir, 535 + const struct reftable_write_options *_opts) 536 + { 537 + struct reftable_buf list_file_name = REFTABLE_BUF_INIT; 538 + struct reftable_write_options opts = { 0 }; 539 + struct reftable_stack *p; 540 + int err; 541 + 542 + p = reftable_calloc(1, sizeof(*p)); 543 + if (!p) { 544 + err = REFTABLE_OUT_OF_MEMORY_ERROR; 545 + goto out; 546 + } 547 + 548 + if (_opts) 549 + opts = *_opts; 550 + if (opts.hash_id == 0) 551 + opts.hash_id = REFTABLE_HASH_SHA1; 552 + 553 + *dest = NULL; 554 + 555 + reftable_buf_reset(&list_file_name); 556 + if ((err = reftable_buf_addstr(&list_file_name, dir)) < 0 || 557 + (err = reftable_buf_addstr(&list_file_name, "/tables.list")) < 0) 558 + goto out; 559 + 560 + p->list_file = reftable_buf_detach(&list_file_name); 561 + p->list_fd = -1; 562 + p->opts = opts; 563 + p->reftable_dir = reftable_strdup(dir); 564 + if (!p->reftable_dir) { 565 + err = REFTABLE_OUT_OF_MEMORY_ERROR; 566 + goto out; 567 + } 568 + 569 + err = reftable_stack_reload_maybe_reuse(p, 1); 570 + if (err < 0) 571 + goto out; 572 + 573 + *dest = p; 574 + err = 0; 575 + 576 + out: 577 + if (err < 0) 578 + reftable_stack_destroy(p); 579 + return err; 580 + } 581 + 582 + /* 583 + * Check whether the given stack is up-to-date with what we have in memory. 584 + * Returns 0 if so, 1 if the stack is out-of-date or a negative error code 585 + * otherwise. 
586 + */ 597 587 static int stack_uptodate(struct reftable_stack *st) 598 588 { 599 589 char **names = NULL; ··· 667 657 return err; 668 658 } 669 659 670 - int reftable_stack_add(struct reftable_stack *st, 671 - int (*write)(struct reftable_writer *wr, void *arg), 672 - void *arg) 673 - { 674 - int err = stack_try_add(st, write, arg); 675 - if (err < 0) { 676 - if (err == REFTABLE_OUTDATED_ERROR) { 677 - /* Ignore error return, we want to propagate 678 - REFTABLE_OUTDATED_ERROR. 679 - */ 680 - reftable_stack_reload(st); 681 - } 682 - return err; 683 - } 684 - 685 - return 0; 686 - } 687 - 688 - static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max) 689 - { 690 - char buf[100]; 691 - uint32_t rnd = reftable_rand(); 692 - snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x", 693 - min, max, rnd); 694 - reftable_buf_reset(dest); 695 - return reftable_buf_addstr(dest, buf); 696 - } 697 - 698 660 struct reftable_addition { 699 661 struct reftable_flock tables_list_lock; 700 662 struct reftable_stack *stack; ··· 704 666 uint64_t next_update_index; 705 667 }; 706 668 707 - #define REFTABLE_ADDITION_INIT {0} 669 + static void reftable_addition_close(struct reftable_addition *add) 670 + { 671 + struct reftable_buf nm = REFTABLE_BUF_INIT; 672 + size_t i; 673 + 674 + for (i = 0; i < add->new_tables_len; i++) { 675 + if (!stack_filename(&nm, add->stack, add->new_tables[i])) 676 + unlink(nm.buf); 677 + reftable_free(add->new_tables[i]); 678 + add->new_tables[i] = NULL; 679 + } 680 + reftable_free(add->new_tables); 681 + add->new_tables = NULL; 682 + add->new_tables_len = 0; 683 + add->new_tables_cap = 0; 684 + 685 + flock_release(&add->tables_list_lock); 686 + reftable_buf_release(&nm); 687 + } 708 688 709 689 static int reftable_stack_init_addition(struct reftable_addition *add, 710 690 struct reftable_stack *st, ··· 713 693 struct reftable_buf lock_file_name = REFTABLE_BUF_INIT; 714 694 int err; 715 695 696 + memset(add, 0, 
sizeof(*add)); 716 697 add->stack = st; 717 698 718 699 err = flock_acquire(&add->tables_list_lock, st->list_file, 719 700 st->opts.lock_timeout_ms); 720 - if (err < 0) { 721 - if (errno == EEXIST) { 722 - err = REFTABLE_LOCK_ERROR; 723 - } else { 724 - err = REFTABLE_IO_ERROR; 725 - } 701 + if (err < 0) 726 702 goto done; 727 - } 703 + 728 704 if (st->opts.default_permissions) { 729 705 if (chmod(add->tables_list_lock.path, 730 706 st->opts.default_permissions) < 0) { ··· 754 730 return err; 755 731 } 756 732 757 - static void reftable_addition_close(struct reftable_addition *add) 733 + static int stack_try_add(struct reftable_stack *st, 734 + int (*write_table)(struct reftable_writer *wr, 735 + void *arg), 736 + void *arg, unsigned flags) 758 737 { 759 - struct reftable_buf nm = REFTABLE_BUF_INIT; 760 - size_t i; 738 + struct reftable_addition add; 739 + int err; 761 740 762 - for (i = 0; i < add->new_tables_len; i++) { 763 - if (!stack_filename(&nm, add->stack, add->new_tables[i])) 764 - unlink(nm.buf); 765 - reftable_free(add->new_tables[i]); 766 - add->new_tables[i] = NULL; 741 + err = reftable_stack_init_addition(&add, st, flags); 742 + if (err < 0) 743 + goto done; 744 + 745 + err = reftable_addition_add(&add, write_table, arg); 746 + if (err < 0) 747 + goto done; 748 + 749 + err = reftable_addition_commit(&add); 750 + done: 751 + reftable_addition_close(&add); 752 + return err; 753 + } 754 + 755 + int reftable_stack_add(struct reftable_stack *st, 756 + int (*write)(struct reftable_writer *wr, void *arg), 757 + void *arg, unsigned flags) 758 + { 759 + int err = stack_try_add(st, write, arg, flags); 760 + if (err < 0) { 761 + if (err == REFTABLE_OUTDATED_ERROR) { 762 + /* Ignore error return, we want to propagate 763 + REFTABLE_OUTDATED_ERROR. 
764 + */ 765 + reftable_stack_reload(st); 766 + } 767 + return err; 767 768 } 768 - reftable_free(add->new_tables); 769 - add->new_tables = NULL; 770 - add->new_tables_len = 0; 771 - add->new_tables_cap = 0; 769 + 770 + return 0; 771 + } 772 772 773 - flock_release(&add->tables_list_lock); 774 - reftable_buf_release(&nm); 773 + static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max) 774 + { 775 + char buf[100]; 776 + uint32_t rnd = reftable_rand(); 777 + snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x", 778 + min, max, rnd); 779 + reftable_buf_reset(dest); 780 + return reftable_buf_addstr(dest, buf); 775 781 } 776 782 777 783 void reftable_addition_destroy(struct reftable_addition *add) ··· 841 847 * control. It is possible that a concurrent writer is already 842 848 * trying to compact parts of the stack, which would lead to a 843 849 * `REFTABLE_LOCK_ERROR` because parts of the stack are locked 844 - * already. This is a benign error though, so we ignore it. 850 + * already. Similarly, the stack may have been rewritten by a 851 + * concurrent writer, which causes `REFTABLE_OUTDATED_ERROR`. 852 + * Both of these errors are benign, so we simply ignore them. 
845 853 */ 846 854 err = reftable_stack_auto_compact(add->stack); 847 - if (err < 0 && err != REFTABLE_LOCK_ERROR) 855 + if (err < 0 && err != REFTABLE_LOCK_ERROR && 856 + err != REFTABLE_OUTDATED_ERROR) 848 857 goto done; 849 858 err = 0; 850 859 } ··· 858 867 struct reftable_stack *st, 859 868 unsigned int flags) 860 869 { 861 - int err = 0; 862 - struct reftable_addition empty = REFTABLE_ADDITION_INIT; 870 + int err; 863 871 864 872 REFTABLE_CALLOC_ARRAY(*dest, 1); 865 873 if (!*dest) 866 874 return REFTABLE_OUT_OF_MEMORY_ERROR; 867 875 868 - **dest = empty; 869 876 err = reftable_stack_init_addition(*dest, st, flags); 870 877 if (err) { 871 878 reftable_free(*dest); 872 879 *dest = NULL; 873 880 } 874 - return err; 875 - } 876 881 877 - static int stack_try_add(struct reftable_stack *st, 878 - int (*write_table)(struct reftable_writer *wr, 879 - void *arg), 880 - void *arg) 881 - { 882 - struct reftable_addition add = REFTABLE_ADDITION_INIT; 883 - int err = reftable_stack_init_addition(&add, st, 0); 884 - if (err < 0) 885 - goto done; 886 - 887 - err = reftable_addition_add(&add, write_table, arg); 888 - if (err < 0) 889 - goto done; 890 - 891 - err = reftable_addition_commit(&add); 892 - done: 893 - reftable_addition_close(&add); 894 882 return err; 895 883 } 896 884 ··· 1007 995 return 1; 1008 996 } 1009 997 1010 - static int stack_compact_locked(struct reftable_stack *st, 1011 - size_t first, size_t last, 1012 - struct reftable_log_expiry_config *config, 1013 - struct reftable_tmpfile *tab_file_out) 1014 - { 1015 - struct reftable_buf next_name = REFTABLE_BUF_INIT; 1016 - struct reftable_buf tab_file_path = REFTABLE_BUF_INIT; 1017 - struct reftable_writer *wr = NULL; 1018 - struct fd_writer writer= { 1019 - .opts = &st->opts, 1020 - }; 1021 - struct reftable_tmpfile tab_file = REFTABLE_TMPFILE_INIT; 1022 - int err = 0; 1023 - 1024 - err = format_name(&next_name, reftable_table_min_update_index(st->tables[first]), 1025 - 
reftable_table_max_update_index(st->tables[last])); 1026 - if (err < 0) 1027 - goto done; 1028 - 1029 - err = stack_filename(&tab_file_path, st, next_name.buf); 1030 - if (err < 0) 1031 - goto done; 1032 - 1033 - err = reftable_buf_addstr(&tab_file_path, ".temp.XXXXXX"); 1034 - if (err < 0) 1035 - goto done; 1036 - 1037 - err = tmpfile_from_pattern(&tab_file, tab_file_path.buf); 1038 - if (err < 0) 1039 - goto done; 1040 - 1041 - if (st->opts.default_permissions && 1042 - chmod(tab_file.path, st->opts.default_permissions) < 0) { 1043 - err = REFTABLE_IO_ERROR; 1044 - goto done; 1045 - } 1046 - 1047 - writer.fd = tab_file.fd; 1048 - err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush, 1049 - &writer, &st->opts); 1050 - if (err < 0) 1051 - goto done; 1052 - 1053 - err = stack_write_compact(st, wr, first, last, config); 1054 - if (err < 0) 1055 - goto done; 1056 - 1057 - err = reftable_writer_close(wr); 1058 - if (err < 0) 1059 - goto done; 1060 - 1061 - err = tmpfile_close(&tab_file); 1062 - if (err < 0) 1063 - goto done; 1064 - 1065 - *tab_file_out = tab_file; 1066 - tab_file = REFTABLE_TMPFILE_INIT; 1067 - 1068 - done: 1069 - tmpfile_delete(&tab_file); 1070 - reftable_writer_free(wr); 1071 - reftable_buf_release(&next_name); 1072 - reftable_buf_release(&tab_file_path); 1073 - return err; 1074 - } 1075 - 1076 998 static int stack_write_compact(struct reftable_stack *st, 1077 999 struct reftable_writer *wr, 1078 1000 size_t first, size_t last, ··· 1172 1094 return err; 1173 1095 } 1174 1096 1097 + static int stack_compact_locked(struct reftable_stack *st, 1098 + size_t first, size_t last, 1099 + struct reftable_log_expiry_config *config, 1100 + struct reftable_tmpfile *tab_file_out) 1101 + { 1102 + struct reftable_buf next_name = REFTABLE_BUF_INIT; 1103 + struct reftable_buf tab_file_path = REFTABLE_BUF_INIT; 1104 + struct reftable_writer *wr = NULL; 1105 + struct fd_writer writer= { 1106 + .opts = &st->opts, 1107 + }; 1108 + struct reftable_tmpfile 
tab_file = REFTABLE_TMPFILE_INIT; 1109 + int err = 0; 1110 + 1111 + err = format_name(&next_name, reftable_table_min_update_index(st->tables[first]), 1112 + reftable_table_max_update_index(st->tables[last])); 1113 + if (err < 0) 1114 + goto done; 1115 + 1116 + err = stack_filename(&tab_file_path, st, next_name.buf); 1117 + if (err < 0) 1118 + goto done; 1119 + 1120 + err = reftable_buf_addstr(&tab_file_path, ".temp.XXXXXX"); 1121 + if (err < 0) 1122 + goto done; 1123 + 1124 + err = tmpfile_from_pattern(&tab_file, tab_file_path.buf); 1125 + if (err < 0) 1126 + goto done; 1127 + 1128 + if (st->opts.default_permissions && 1129 + chmod(tab_file.path, st->opts.default_permissions) < 0) { 1130 + err = REFTABLE_IO_ERROR; 1131 + goto done; 1132 + } 1133 + 1134 + writer.fd = tab_file.fd; 1135 + err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush, 1136 + &writer, &st->opts); 1137 + if (err < 0) 1138 + goto done; 1139 + 1140 + err = stack_write_compact(st, wr, first, last, config); 1141 + if (err < 0) 1142 + goto done; 1143 + 1144 + err = reftable_writer_close(wr); 1145 + if (err < 0) 1146 + goto done; 1147 + 1148 + err = tmpfile_close(&tab_file); 1149 + if (err < 0) 1150 + goto done; 1151 + 1152 + *tab_file_out = tab_file; 1153 + tab_file = REFTABLE_TMPFILE_INIT; 1154 + 1155 + done: 1156 + tmpfile_delete(&tab_file); 1157 + reftable_writer_free(wr); 1158 + reftable_buf_release(&next_name); 1159 + reftable_buf_release(&tab_file_path); 1160 + return err; 1161 + } 1162 + 1175 1163 enum stack_compact_range_flags { 1176 1164 /* 1177 1165 * Perform a best-effort compaction. That is, even if we cannot lock ··· 1219 1207 * which are part of the user-specified range. 
1220 1208 */ 1221 1209 err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms); 1222 - if (err < 0) { 1223 - if (errno == EEXIST) 1224 - err = REFTABLE_LOCK_ERROR; 1225 - else 1226 - err = REFTABLE_IO_ERROR; 1210 + if (err < 0) 1227 1211 goto done; 1228 - } 1229 1212 1213 + /* 1214 + * Check whether the stack is up-to-date. We unfortunately cannot 1215 + * handle the situation gracefully in case it's _not_ up-to-date 1216 + * because the range of tables that the user has requested us to 1217 + * compact may have been changed. So instead we abort. 1218 + * 1219 + * We could in theory improve the situation by having the caller not 1220 + * pass in a range, but instead the list of tables to compact. If so, 1221 + * we could check that relevant tables still exist. But for now it's 1222 + * good enough to just abort. 1223 + */ 1230 1224 err = stack_uptodate(st); 1231 - if (err) 1225 + if (err < 0) 1232 1226 goto done; 1227 + if (err > 0) { 1228 + err = REFTABLE_OUTDATED_ERROR; 1229 + goto done; 1230 + } 1233 1231 1234 1232 /* 1235 1233 * Lock all tables in the user-provided range. This is the slice of our ··· 1264 1262 * tables, otherwise there would be nothing to compact. 1265 1263 * In that case, we return a lock error to our caller. 1266 1264 */ 1267 - if (errno == EEXIST && last - (i - 1) >= 2 && 1265 + if (err == REFTABLE_LOCK_ERROR && last - (i - 1) >= 2 && 1268 1266 flags & STACK_COMPACT_RANGE_BEST_EFFORT) { 1269 1267 err = 0; 1270 1268 /* ··· 1276 1274 */ 1277 1275 first = (i - 1) + 1; 1278 1276 break; 1279 - } else if (errno == EEXIST) { 1280 - err = REFTABLE_LOCK_ERROR; 1281 - goto done; 1282 - } else { 1283 - err = REFTABLE_IO_ERROR; 1284 - goto done; 1285 1277 } 1278 + 1279 + goto done; 1286 1280 } 1287 1281 1288 1282 /* ··· 1291 1285 * of tables. 
1292 1286 */ 1293 1287 err = flock_close(&table_locks[nlocks++]); 1294 - if (err < 0) { 1295 - err = REFTABLE_IO_ERROR; 1288 + if (err < 0) 1296 1289 goto done; 1297 - } 1298 1290 } 1299 1291 1300 1292 /* ··· 1326 1318 * the new table. 1327 1319 */ 1328 1320 err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms); 1329 - if (err < 0) { 1330 - if (errno == EEXIST) 1331 - err = REFTABLE_LOCK_ERROR; 1332 - else 1333 - err = REFTABLE_IO_ERROR; 1321 + if (err < 0) 1334 1322 goto done; 1335 - } 1336 1323 1337 1324 if (st->opts.default_permissions) { 1338 1325 if (chmod(tables_list_lock.path,
+1 -1
reftable/system.c
··· 72 72 reftable_free(lockfile); 73 73 if (errno == EEXIST) 74 74 return REFTABLE_LOCK_ERROR; 75 - return -1; 75 + return REFTABLE_IO_ERROR; 76 76 } 77 77 78 78 l->fd = get_lock_file_fd(lockfile);
+3 -1
reftable/system.h
··· 81 81 * to acquire the lock. If `timeout_ms` is 0 we don't wait, if it is negative 82 82 * we block indefinitely. 83 83 * 84 - * Retrun 0 on success, a reftable error code on error. 84 + * Return 0 on success, a reftable error code on error. Specifically, 85 + * `REFTABLE_LOCK_ERROR` should be returned in case the target path is already 86 + * locked. 85 87 */ 86 88 int flock_acquire(struct reftable_flock *l, const char *target_path, 87 89 long timeout_ms);
+13 -10
reftable/writer.c
··· 395 395 } 396 396 397 397 int reftable_writer_add_refs(struct reftable_writer *w, 398 - struct reftable_ref_record *refs, int n) 398 + struct reftable_ref_record *refs, size_t n) 399 399 { 400 400 int err = 0; 401 - int i = 0; 402 - QSORT(refs, n, reftable_ref_record_compare_name); 403 - for (i = 0; err == 0 && i < n; i++) { 401 + 402 + if (n) 403 + qsort(refs, n, sizeof(*refs), reftable_ref_record_compare_name); 404 + 405 + for (size_t i = 0; err == 0 && i < n; i++) 404 406 err = reftable_writer_add_ref(w, &refs[i]); 405 - } 407 + 406 408 return err; 407 409 } 408 410 ··· 486 488 } 487 489 488 490 int reftable_writer_add_logs(struct reftable_writer *w, 489 - struct reftable_log_record *logs, int n) 491 + struct reftable_log_record *logs, size_t n) 490 492 { 491 493 int err = 0; 492 - int i = 0; 493 - QSORT(logs, n, reftable_log_record_compare_key); 494 + 495 + if (n) 496 + qsort(logs, n, sizeof(*logs), reftable_log_record_compare_key); 494 497 495 - for (i = 0; err == 0 && i < n; i++) { 498 + for (size_t i = 0; err == 0 && i < n; i++) 496 499 err = reftable_writer_add_log(w, &logs[i]); 497 - } 500 + 498 501 return err; 499 502 } 500 503
+30 -29
t/unit-tests/u-reftable-stack.c
··· 128 128 cl_reftable_set_hash(ref.value.val1, i, REFTABLE_HASH_SHA1); 129 129 130 130 cl_assert_equal_i(reftable_stack_add(st, 131 - &write_test_ref, &ref), 0); 131 + &write_test_ref, &ref, 0), 0); 132 132 } 133 133 134 134 st->opts.disable_auto_compact = disable_auto_compact; ··· 171 171 err = reftable_new_stack(&st, dir, &opts); 172 172 cl_assert(!err); 173 173 174 - err = reftable_stack_add(st, write_test_ref, &ref); 174 + err = reftable_stack_add(st, write_test_ref, &ref, 0); 175 175 cl_assert(!err); 176 176 177 177 err = reftable_stack_read_ref(st, ref.refname, &dest); ··· 235 235 cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); 236 236 cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); 237 237 cl_assert_equal_i(reftable_stack_add(st1, write_test_ref, 238 - &ref1), 0); 238 + &ref1, 0), 0); 239 239 cl_assert_equal_i(reftable_stack_add(st2, write_test_ref, 240 - &ref2), REFTABLE_OUTDATED_ERROR); 240 + &ref2, 0), REFTABLE_OUTDATED_ERROR); 241 241 cl_assert_equal_i(reftable_stack_reload(st2), 0); 242 242 cl_assert_equal_i(reftable_stack_add(st2, write_test_ref, 243 - &ref2), 0); 243 + &ref2, 0), 0); 244 244 reftable_stack_destroy(st1); 245 245 reftable_stack_destroy(st2); 246 246 clear_dir(dir); ··· 406 406 407 407 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 408 408 cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 409 - &ref), 0); 409 + &ref, 0), 0); 410 410 cl_assert_equal_i(st->merged->tables_len, 1); 411 411 cl_assert_equal_i(st->stats.attempts, 0); 412 412 cl_assert_equal_i(st->stats.failures, 0); ··· 424 424 write_file_buf(table_path.buf, "", 0); 425 425 426 426 ref.update_index = 2; 427 - err = reftable_stack_add(st, write_test_ref, &ref); 427 + err = reftable_stack_add(st, write_test_ref, &ref, 0); 428 428 cl_assert(!err); 429 429 cl_assert_equal_i(st->merged->tables_len, 2); 430 430 cl_assert_equal_i(st->stats.attempts, 1); ··· 460 460 461 461 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 462 462 
cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 463 - &ref1), 0); 463 + &ref1, 0), 0); 464 464 cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 465 - &ref2), REFTABLE_API_ERROR); 465 + &ref2, 0), REFTABLE_API_ERROR); 466 466 reftable_stack_destroy(st); 467 467 clear_dir(dir); 468 468 } ··· 477 477 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 478 478 for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--) 479 479 cl_assert_equal_i(reftable_stack_add(st, write_error, 480 - &i), i); 480 + &i, 0), i); 481 481 482 482 reftable_stack_destroy(st); 483 483 clear_dir(dir); ··· 521 521 522 522 for (i = 0; i < N; i++) 523 523 cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 524 - &refs[i]), 0); 524 + &refs[i], 0), 0); 525 525 526 526 for (i = 0; i < N; i++) { 527 527 struct write_log_arg arg = { ··· 529 529 .update_index = reftable_stack_next_update_index(st), 530 530 }; 531 531 cl_assert_equal_i(reftable_stack_add(st, write_test_log, 532 - &arg), 0); 532 + &arg, 0), 0); 533 533 } 534 534 535 535 cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); ··· 612 612 } 613 613 614 614 for (i = 0; i < N; i++) 615 - cl_assert_equal_i(reftable_stack_add(st, 616 - write_test_ref, &refs[i]), 0); 615 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 616 + &refs[i], 0), 0); 617 617 618 618 for (i = 0; i < N; i++) { 619 619 struct write_log_arg arg = { ··· 621 621 .update_index = reftable_stack_next_update_index(st), 622 622 }; 623 623 624 - cl_assert_equal_i(reftable_stack_add(st, 625 - write_test_log, &arg), 0); 624 + cl_assert_equal_i(reftable_stack_add(st, write_test_log, 625 + &arg, 0), 0); 626 626 } 627 627 628 628 reftable_stack_init_ref_iterator(st, &it); ··· 697 697 698 698 input.value.update.message = (char *) "one\ntwo"; 699 699 cl_assert_equal_i(reftable_stack_add(st, write_test_log, 700 - &arg), REFTABLE_API_ERROR); 700 + &arg, 0), REFTABLE_API_ERROR); 701 701 702 702 input.value.update.message = (char *) "one"; 703 703 
cl_assert_equal_i(reftable_stack_add(st, write_test_log, 704 - &arg), 0); 704 + &arg, 0), 0); 705 705 cl_assert_equal_i(reftable_stack_read_log(st, input.refname, 706 706 &dest), 0); 707 707 cl_assert_equal_s(dest.value.update.message, "one\n"); ··· 709 709 input.value.update.message = (char *) "two\n"; 710 710 arg.update_index = 2; 711 711 cl_assert_equal_i(reftable_stack_add(st, write_test_log, 712 - &arg), 0); 712 + &arg, 0), 0); 713 713 cl_assert_equal_i(reftable_stack_read_log(st, input.refname, 714 714 &dest), 0); 715 715 cl_assert_equal_s(dest.value.update.message, "two\n"); ··· 759 759 } 760 760 } 761 761 for (i = 0; i < N; i++) 762 - cl_assert_equal_i(reftable_stack_add(st, write_test_ref, &refs[i]), 0); 762 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 763 + &refs[i], 0), 0); 763 764 764 765 for (i = 0; i < N; i++) { 765 766 struct write_log_arg arg = { 766 767 .log = &logs[i], 767 768 .update_index = reftable_stack_next_update_index(st), 768 769 }; 769 - cl_assert_equal_i(reftable_stack_add(st, 770 - write_test_log, &arg), 0); 770 + cl_assert_equal_i(reftable_stack_add(st, write_test_log, 771 + &arg, 0), 0); 771 772 } 772 773 773 774 cl_assert_equal_i(reftable_stack_read_ref(st, "branch", ··· 815 816 816 817 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 817 818 cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 818 - &ref), 0); 819 + &ref, 0), 0); 819 820 820 821 /* can't read it with the wrong hash ID. 
*/ 821 822 cl_assert_equal_i(reftable_new_stack(&st32, dir, ··· 884 885 .update_index = reftable_stack_next_update_index(st), 885 886 }; 886 887 cl_assert_equal_i(reftable_stack_add(st, write_test_log, 887 - &arg), 0); 888 + &arg, 0), 0); 888 889 } 889 890 890 891 cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); ··· 924 925 925 926 cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); 926 927 cl_assert_equal_i(reftable_stack_add(st, write_nothing, 927 - NULL), 0); 928 + NULL, 0), 0); 928 929 cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); 929 930 clear_dir(dir); 930 931 reftable_stack_destroy(st); ··· 963 964 }; 964 965 snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 965 966 966 - err = reftable_stack_add(st, write_test_ref, &ref); 967 + err = reftable_stack_add(st, write_test_ref, &ref, 0); 967 968 cl_assert(!err); 968 969 969 970 err = reftable_stack_auto_compact(st); ··· 999 1000 }; 1000 1001 xsnprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); 1001 1002 1002 - err = reftable_stack_add(st, &write_test_ref, &ref); 1003 + err = reftable_stack_add(st, &write_test_ref, &ref, 0); 1003 1004 cl_assert(!err); 1004 1005 1005 1006 cl_assert(i < 5 || st->merged->tables_len < 5 * fastlogN(i, 5)); ··· 1078 1079 snprintf(buf, sizeof(buf), "branch-%04"PRIuMAX, (uintmax_t)i); 1079 1080 ref.refname = buf; 1080 1081 1081 - cl_assert_equal_i(reftable_stack_add(st, 1082 - write_test_ref, &ref), 0); 1082 + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, 1083 + &ref, 0), 0); 1083 1084 1084 1085 /* 1085 1086 * The stack length should grow continuously for all runs where