/*
 * refs/files-backend.c — the "files" reference backend: loose refs stored
 * as individual files under $GIT_DIR/refs, overlaid on the packed-refs file.
 */
1#define USE_THE_REPOSITORY_VARIABLE 2#define DISABLE_SIGN_COMPARE_WARNINGS 3 4#include "../git-compat-util.h" 5#include "../abspath.h" 6#include "../config.h" 7#include "../copy.h" 8#include "../environment.h" 9#include "../gettext.h" 10#include "../hash.h" 11#include "../hex.h" 12#include "../fsck.h" 13#include "../refs.h" 14#include "../repo-settings.h" 15#include "refs-internal.h" 16#include "ref-cache.h" 17#include "packed-backend.h" 18#include "../ident.h" 19#include "../iterator.h" 20#include "../dir-iterator.h" 21#include "../lockfile.h" 22#include "../object.h" 23#include "../path.h" 24#include "../dir.h" 25#include "../chdir-notify.h" 26#include "../setup.h" 27#include "../worktree.h" 28#include "../wrapper.h" 29#include "../write-or-die.h" 30#include "../revision.h" 31#include <wildmatch.h> 32 33/* 34 * This backend uses the following flags in `ref_update::flags` for 35 * internal bookkeeping purposes. Their numerical values must not 36 * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW, 37 * or REF_HAVE_OLD, which are also stored in `ref_update::flags`. 38 */ 39 40/* 41 * Used as a flag in ref_update::flags when a loose ref is being 42 * pruned. This flag must only be used when REF_NO_DEREF is set. 43 */ 44#define REF_IS_PRUNING (1 << 4) 45 46/* 47 * Flag passed to lock_ref_sha1_basic() telling it to tolerate broken 48 * refs (i.e., because the reference is about to be deleted anyway). 49 */ 50#define REF_DELETING (1 << 5) 51 52/* 53 * Used as a flag in ref_update::flags when the lockfile needs to be 54 * committed. 55 */ 56#define REF_NEEDS_COMMIT (1 << 6) 57 58/* 59 * Used as a flag in ref_update::flags when the ref_update was via an 60 * update to HEAD. 61 */ 62#define REF_UPDATE_VIA_HEAD (1 << 8) 63 64/* 65 * Used as a flag in ref_update::flags when a reference has been 66 * deleted and the ref's parent directories may need cleanup. 
67 */ 68#define REF_DELETED_RMDIR (1 << 9) 69 70/* 71 * Used to indicate that the reflog-only update has been created via 72 * `split_head_update()`. 73 */ 74#define REF_LOG_VIA_SPLIT (1 << 14) 75 76struct ref_lock { 77 char *ref_name; 78 struct lock_file lk; 79 struct object_id old_oid; 80 unsigned int count; /* track users of the lock (ref update + reflog updates) */ 81}; 82 83struct files_ref_store { 84 struct ref_store base; 85 unsigned int store_flags; 86 87 char *gitcommondir; 88 enum log_refs_config log_all_ref_updates; 89 int prefer_symlink_refs; 90 91 struct ref_cache *loose; 92 93 struct ref_store *packed_ref_store; 94}; 95 96static void clear_loose_ref_cache(struct files_ref_store *refs) 97{ 98 if (refs->loose) { 99 free_ref_cache(refs->loose); 100 refs->loose = NULL; 101 } 102} 103 104/* 105 * Create a new submodule ref cache and add it to the internal 106 * set of caches. 107 */ 108static struct ref_store *files_ref_store_init(struct repository *repo, 109 const char *gitdir, 110 unsigned int flags) 111{ 112 struct files_ref_store *refs = xcalloc(1, sizeof(*refs)); 113 struct ref_store *ref_store = (struct ref_store *)refs; 114 struct strbuf sb = STRBUF_INIT; 115 116 base_ref_store_init(ref_store, repo, gitdir, &refs_be_files); 117 refs->store_flags = flags; 118 get_common_dir_noenv(&sb, gitdir); 119 refs->gitcommondir = strbuf_detach(&sb, NULL); 120 refs->packed_ref_store = 121 packed_ref_store_init(repo, refs->gitcommondir, flags); 122 refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo); 123 repo_config_get_bool(repo, "core.prefersymlinkrefs", &refs->prefer_symlink_refs); 124 125 chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir); 126 chdir_notify_reparent("files-backend $GIT_COMMONDIR", 127 &refs->gitcommondir); 128 129 return ref_store; 130} 131 132/* 133 * Die if refs is not the main ref store. caller is used in any 134 * necessary error messages. 
135 */ 136static void files_assert_main_repository(struct files_ref_store *refs, 137 const char *caller) 138{ 139 if (refs->store_flags & REF_STORE_MAIN) 140 return; 141 142 BUG("operation %s only allowed for main ref store", caller); 143} 144 145/* 146 * Downcast ref_store to files_ref_store. Die if ref_store is not a 147 * files_ref_store. required_flags is compared with ref_store's 148 * store_flags to ensure the ref_store has all required capabilities. 149 * "caller" is used in any necessary error messages. 150 */ 151static struct files_ref_store *files_downcast(struct ref_store *ref_store, 152 unsigned int required_flags, 153 const char *caller) 154{ 155 struct files_ref_store *refs; 156 157 if (ref_store->be != &refs_be_files) 158 BUG("ref_store is type \"%s\" not \"files\" in %s", 159 ref_store->be->name, caller); 160 161 refs = (struct files_ref_store *)ref_store; 162 163 if ((refs->store_flags & required_flags) != required_flags) 164 BUG("operation %s requires abilities 0x%x, but only have 0x%x", 165 caller, required_flags, refs->store_flags); 166 167 return refs; 168} 169 170static void files_ref_store_release(struct ref_store *ref_store) 171{ 172 struct files_ref_store *refs = files_downcast(ref_store, 0, "release"); 173 free_ref_cache(refs->loose); 174 free(refs->gitcommondir); 175 ref_store_release(refs->packed_ref_store); 176 free(refs->packed_ref_store); 177} 178 179static void files_reflog_path(struct files_ref_store *refs, 180 struct strbuf *sb, 181 const char *refname) 182{ 183 const char *bare_refname; 184 const char *wtname; 185 int wtname_len; 186 enum ref_worktree_type wt_type = parse_worktree_ref( 187 refname, &wtname, &wtname_len, &bare_refname); 188 189 switch (wt_type) { 190 case REF_WORKTREE_CURRENT: 191 strbuf_addf(sb, "%s/logs/%s", refs->base.gitdir, refname); 192 break; 193 case REF_WORKTREE_SHARED: 194 case REF_WORKTREE_MAIN: 195 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, bare_refname); 196 break; 197 case REF_WORKTREE_OTHER: 
198 strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir, 199 wtname_len, wtname, bare_refname); 200 break; 201 default: 202 BUG("unknown ref type %d of ref %s", wt_type, refname); 203 } 204} 205 206static void files_ref_path(struct files_ref_store *refs, 207 struct strbuf *sb, 208 const char *refname) 209{ 210 const char *bare_refname; 211 const char *wtname; 212 int wtname_len; 213 enum ref_worktree_type wt_type = parse_worktree_ref( 214 refname, &wtname, &wtname_len, &bare_refname); 215 switch (wt_type) { 216 case REF_WORKTREE_CURRENT: 217 strbuf_addf(sb, "%s/%s", refs->base.gitdir, refname); 218 break; 219 case REF_WORKTREE_OTHER: 220 strbuf_addf(sb, "%s/worktrees/%.*s/%s", refs->gitcommondir, 221 wtname_len, wtname, bare_refname); 222 break; 223 case REF_WORKTREE_SHARED: 224 case REF_WORKTREE_MAIN: 225 strbuf_addf(sb, "%s/%s", refs->gitcommondir, bare_refname); 226 break; 227 default: 228 BUG("unknown ref type %d of ref %s", wt_type, refname); 229 } 230} 231 232/* 233 * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being 234 * per-worktree, might not appear in the directory listing for 235 * refs/ in the main repo. 
236 */ 237static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname) 238{ 239 const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" }; 240 int ip; 241 242 if (strcmp(dirname, "refs/")) 243 return; 244 245 for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) { 246 const char *prefix = prefixes[ip]; 247 int prefix_len = strlen(prefix); 248 struct ref_entry *child_entry; 249 int pos; 250 251 pos = search_ref_dir(dir, prefix, prefix_len); 252 if (pos >= 0) 253 continue; 254 child_entry = create_dir_entry(dir->cache, prefix, prefix_len); 255 add_entry_to_dir(dir, child_entry); 256 } 257} 258 259static void loose_fill_ref_dir_regular_file(struct files_ref_store *refs, 260 const char *refname, 261 struct ref_dir *dir) 262{ 263 struct object_id oid; 264 int flag; 265 const char *referent = refs_resolve_ref_unsafe(&refs->base, 266 refname, 267 RESOLVE_REF_READING, 268 &oid, &flag); 269 270 if (!referent) { 271 oidclr(&oid, refs->base.repo->hash_algo); 272 flag |= REF_ISBROKEN; 273 } else if (is_null_oid(&oid)) { 274 /* 275 * It is so astronomically unlikely 276 * that null_oid is the OID of an 277 * actual object that we consider its 278 * appearance in a loose reference 279 * file to be repo corruption 280 * (probably due to a software bug). 281 */ 282 flag |= REF_ISBROKEN; 283 } 284 285 if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) { 286 if (!refname_is_safe(refname)) 287 die("loose refname is dangerous: %s", refname); 288 oidclr(&oid, refs->base.repo->hash_algo); 289 flag |= REF_BAD_NAME | REF_ISBROKEN; 290 } 291 292 if (!(flag & REF_ISSYMREF)) 293 referent = NULL; 294 295 add_entry_to_dir(dir, create_ref_entry(refname, referent, &oid, flag)); 296} 297 298/* 299 * Read the loose references from the namespace dirname into dir 300 * (without recursing). dirname must end with '/'. dir must be the 301 * directory entry corresponding to dirname. 
302 */ 303static void loose_fill_ref_dir(struct ref_store *ref_store, 304 struct ref_dir *dir, const char *dirname) 305{ 306 struct files_ref_store *refs = 307 files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir"); 308 DIR *d; 309 struct dirent *de; 310 int dirnamelen = strlen(dirname); 311 struct strbuf refname; 312 struct strbuf path = STRBUF_INIT; 313 314 files_ref_path(refs, &path, dirname); 315 316 d = opendir(path.buf); 317 if (!d) { 318 strbuf_release(&path); 319 return; 320 } 321 322 strbuf_init(&refname, dirnamelen + 257); 323 strbuf_add(&refname, dirname, dirnamelen); 324 325 while ((de = readdir(d)) != NULL) { 326 unsigned char dtype; 327 328 if (de->d_name[0] == '.') 329 continue; 330 if (ends_with(de->d_name, ".lock")) 331 continue; 332 strbuf_addstr(&refname, de->d_name); 333 334 dtype = get_dtype(de, &path, 1); 335 if (dtype == DT_DIR) { 336 strbuf_addch(&refname, '/'); 337 add_entry_to_dir(dir, 338 create_dir_entry(dir->cache, refname.buf, 339 refname.len)); 340 } else if (dtype == DT_REG) { 341 loose_fill_ref_dir_regular_file(refs, refname.buf, dir); 342 } 343 strbuf_setlen(&refname, dirnamelen); 344 } 345 strbuf_release(&refname); 346 strbuf_release(&path); 347 closedir(d); 348 349 add_per_worktree_entries_to_dir(dir, dirname); 350} 351 352static int for_each_root_ref(struct files_ref_store *refs, 353 int (*cb)(const char *refname, void *cb_data), 354 void *cb_data) 355{ 356 struct strbuf path = STRBUF_INIT, refname = STRBUF_INIT; 357 const char *dirname = refs->loose->root->name; 358 struct dirent *de; 359 size_t dirnamelen; 360 int ret; 361 DIR *d; 362 363 files_ref_path(refs, &path, dirname); 364 365 d = opendir(path.buf); 366 if (!d) { 367 strbuf_release(&path); 368 return -1; 369 } 370 371 strbuf_addstr(&refname, dirname); 372 dirnamelen = refname.len; 373 374 while ((de = readdir(d)) != NULL) { 375 unsigned char dtype; 376 377 if (de->d_name[0] == '.') 378 continue; 379 if (ends_with(de->d_name, ".lock")) 380 continue; 381 
strbuf_addstr(&refname, de->d_name); 382 383 dtype = get_dtype(de, &path, 1); 384 if (dtype == DT_REG && is_root_ref(de->d_name)) { 385 ret = cb(refname.buf, cb_data); 386 if (ret) 387 goto done; 388 } 389 390 strbuf_setlen(&refname, dirnamelen); 391 } 392 393 ret = 0; 394 395done: 396 strbuf_release(&refname); 397 strbuf_release(&path); 398 closedir(d); 399 return ret; 400} 401 402struct fill_root_ref_data { 403 struct files_ref_store *refs; 404 struct ref_dir *dir; 405}; 406 407static int fill_root_ref(const char *refname, void *cb_data) 408{ 409 struct fill_root_ref_data *data = cb_data; 410 loose_fill_ref_dir_regular_file(data->refs, refname, data->dir); 411 return 0; 412} 413 414/* 415 * Add root refs to the ref dir by parsing the directory for any files which 416 * follow the root ref syntax. 417 */ 418static void add_root_refs(struct files_ref_store *refs, 419 struct ref_dir *dir) 420{ 421 struct fill_root_ref_data data = { 422 .refs = refs, 423 .dir = dir, 424 }; 425 426 for_each_root_ref(refs, fill_root_ref, &data); 427} 428 429static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs, 430 unsigned int flags) 431{ 432 if (!refs->loose) { 433 struct ref_dir *dir; 434 435 /* 436 * Mark the top-level directory complete because we 437 * are about to read the only subdirectory that can 438 * hold references: 439 */ 440 refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir); 441 442 /* We're going to fill the top level ourselves: */ 443 refs->loose->root->flag &= ~REF_INCOMPLETE; 444 445 dir = get_ref_dir(refs->loose->root); 446 447 if (flags & DO_FOR_EACH_INCLUDE_ROOT_REFS) 448 add_root_refs(refs, dir); 449 450 /* 451 * Add an incomplete entry for "refs/" (to be filled 452 * lazily): 453 */ 454 add_entry_to_dir(dir, create_dir_entry(refs->loose, "refs/", 5)); 455 } 456 return refs->loose; 457} 458 459static int read_ref_internal(struct ref_store *ref_store, const char *refname, 460 struct object_id *oid, struct strbuf *referent, 461 
unsigned int *type, int *failure_errno, int skip_packed_refs) 462{ 463 struct files_ref_store *refs = 464 files_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); 465 struct strbuf sb_contents = STRBUF_INIT; 466 struct strbuf sb_path = STRBUF_INIT; 467 const char *path; 468 const char *buf; 469 struct stat st; 470 int fd; 471 int ret = -1; 472 int remaining_retries = 3; 473 int myerr = 0; 474 475 *type = 0; 476 strbuf_reset(&sb_path); 477 478 files_ref_path(refs, &sb_path, refname); 479 480 path = sb_path.buf; 481 482stat_ref: 483 /* 484 * We might have to loop back here to avoid a race 485 * condition: first we lstat() the file, then we try 486 * to read it as a link or as a file. But if somebody 487 * changes the type of the file (file <-> directory 488 * <-> symlink) between the lstat() and reading, then 489 * we don't want to report that as an error but rather 490 * try again starting with the lstat(). 491 * 492 * We'll keep a count of the retries, though, just to avoid 493 * any confusing situation sending us into an infinite loop. 494 */ 495 496 if (remaining_retries-- <= 0) 497 goto out; 498 499 if (lstat(path, &st) < 0) { 500 int ignore_errno; 501 myerr = errno; 502 if (myerr != ENOENT || skip_packed_refs) 503 goto out; 504 if (refs_read_raw_ref(refs->packed_ref_store, refname, oid, 505 referent, type, &ignore_errno)) { 506 myerr = ENOENT; 507 goto out; 508 } 509 ret = 0; 510 goto out; 511 } 512 513 /* Follow "normalized" - ie "refs/.." 
symlinks by hand */ 514 if (S_ISLNK(st.st_mode)) { 515 strbuf_reset(&sb_contents); 516 if (strbuf_readlink(&sb_contents, path, st.st_size) < 0) { 517 myerr = errno; 518 if (myerr == ENOENT || myerr == EINVAL) 519 /* inconsistent with lstat; retry */ 520 goto stat_ref; 521 else 522 goto out; 523 } 524 if (starts_with(sb_contents.buf, "refs/") && 525 !check_refname_format(sb_contents.buf, 0)) { 526 strbuf_swap(&sb_contents, referent); 527 *type |= REF_ISSYMREF; 528 ret = 0; 529 goto out; 530 } 531 /* 532 * It doesn't look like a refname; fall through to just 533 * treating it like a non-symlink, and reading whatever it 534 * points to. 535 */ 536 } 537 538 /* Is it a directory? */ 539 if (S_ISDIR(st.st_mode)) { 540 int ignore_errno; 541 /* 542 * Even though there is a directory where the loose 543 * ref is supposed to be, there could still be a 544 * packed ref: 545 */ 546 if (skip_packed_refs || 547 refs_read_raw_ref(refs->packed_ref_store, refname, oid, 548 referent, type, &ignore_errno)) { 549 myerr = EISDIR; 550 goto out; 551 } 552 ret = 0; 553 goto out; 554 } 555 556 /* 557 * Anything else, just open it and try to use it as 558 * a ref 559 */ 560 fd = open(path, O_RDONLY); 561 if (fd < 0) { 562 myerr = errno; 563 if (myerr == ENOENT && !S_ISLNK(st.st_mode)) 564 /* inconsistent with lstat; retry */ 565 goto stat_ref; 566 else 567 goto out; 568 } 569 strbuf_reset(&sb_contents); 570 if (strbuf_read(&sb_contents, fd, 256) < 0) { 571 myerr = errno; 572 close(fd); 573 goto out; 574 } 575 close(fd); 576 strbuf_rtrim(&sb_contents); 577 buf = sb_contents.buf; 578 579 ret = parse_loose_ref_contents(ref_store->repo->hash_algo, buf, 580 oid, referent, type, NULL, &myerr); 581 582out: 583 if (ret && !myerr) 584 BUG("returning non-zero %d, should have set myerr!", ret); 585 *failure_errno = myerr; 586 587 strbuf_release(&sb_path); 588 strbuf_release(&sb_contents); 589 errno = 0; 590 return ret; 591} 592 593static int files_read_raw_ref(struct ref_store *ref_store, const char 
*refname, 594 struct object_id *oid, struct strbuf *referent, 595 unsigned int *type, int *failure_errno) 596{ 597 return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0); 598} 599 600static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname, 601 struct strbuf *referent) 602{ 603 struct object_id oid; 604 int failure_errno, ret; 605 unsigned int type; 606 607 ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1); 608 if (!ret && !(type & REF_ISSYMREF)) 609 return NOT_A_SYMREF; 610 return ret; 611} 612 613int parse_loose_ref_contents(const struct git_hash_algo *algop, 614 const char *buf, struct object_id *oid, 615 struct strbuf *referent, unsigned int *type, 616 const char **trailing, int *failure_errno) 617{ 618 const char *p; 619 if (skip_prefix(buf, "ref:", &buf)) { 620 while (isspace(*buf)) 621 buf++; 622 623 strbuf_reset(referent); 624 strbuf_addstr(referent, buf); 625 *type |= REF_ISSYMREF; 626 return 0; 627 } 628 629 /* 630 * FETCH_HEAD has additional data after the sha. 631 */ 632 if (parse_oid_hex_algop(buf, oid, &p, algop) || 633 (*p != '\0' && !isspace(*p))) { 634 *type |= REF_ISBROKEN; 635 *failure_errno = EINVAL; 636 return -1; 637 } 638 639 if (trailing) 640 *trailing = p; 641 642 return 0; 643} 644 645static void unlock_ref(struct ref_lock *lock) 646{ 647 lock->count--; 648 if (!lock->count) { 649 rollback_lock_file(&lock->lk); 650 free(lock->ref_name); 651 free(lock); 652 } 653} 654 655/* 656 * Check if the transaction has another update with a case-insensitive refname 657 * match. 658 * 659 * If the update is part of the transaction, we only check up to that index. 660 * Further updates are expected to call this function to match previous indices. 
661 */ 662static bool transaction_has_case_conflicting_update(struct ref_transaction *transaction, 663 struct ref_update *update) 664{ 665 for (size_t i = 0; i < transaction->nr; i++) { 666 if (transaction->updates[i] == update) 667 break; 668 669 if (!strcasecmp(transaction->updates[i]->refname, update->refname)) 670 return true; 671 } 672 return false; 673} 674 675/* 676 * Lock refname, without following symrefs, and set *lock_p to point 677 * at a newly-allocated lock object. Fill in lock->old_oid, referent, 678 * and type similarly to read_raw_ref(). 679 * 680 * The caller must verify that refname is a "safe" reference name (in 681 * the sense of refname_is_safe()) before calling this function. 682 * 683 * If the reference doesn't already exist, verify that refname doesn't 684 * have a D/F conflict with any existing references. extras and skip 685 * are passed to refs_verify_refname_available() for this check. 686 * 687 * If mustexist is not set and the reference is not found or is 688 * broken, lock the reference anyway but clear old_oid. 689 * 690 * Return 0 on success. On failure, write an error message to err and 691 * return REF_TRANSACTION_ERROR_NAME_CONFLICT or REF_TRANSACTION_ERROR_GENERIC. 
692 * 693 * Implementation note: This function is basically 694 * 695 * lock reference 696 * read_raw_ref() 697 * 698 * but it includes a lot more code to 699 * - Deal with possible races with other processes 700 * - Avoid calling refs_verify_refname_available() when it can be 701 * avoided, namely if we were successfully able to read the ref 702 * - Generate informative error messages in the case of failure 703 */ 704static enum ref_transaction_error lock_raw_ref(struct files_ref_store *refs, 705 struct ref_transaction *transaction, 706 size_t update_idx, 707 int mustexist, 708 struct string_list *refnames_to_check, 709 struct ref_lock **lock_p, 710 struct strbuf *referent, 711 struct strbuf *err) 712{ 713 enum ref_transaction_error ret = REF_TRANSACTION_ERROR_GENERIC; 714 struct ref_update *update = transaction->updates[update_idx]; 715 const struct string_list *extras = &transaction->refnames; 716 const char *refname = update->refname; 717 unsigned int *type = &update->type; 718 struct ref_lock *lock; 719 struct strbuf ref_file = STRBUF_INIT; 720 int attempts_remaining = 3; 721 int failure_errno; 722 723 assert(err); 724 files_assert_main_repository(refs, "lock_raw_ref"); 725 726 *type = 0; 727 728 /* First lock the file so it can't change out from under us. */ 729 730 *lock_p = CALLOC_ARRAY(lock, 1); 731 732 lock->ref_name = xstrdup(refname); 733 lock->count = 1; 734 files_ref_path(refs, &ref_file, refname); 735 736retry: 737 switch (safe_create_leading_directories(the_repository, ref_file.buf)) { 738 case SCLD_OK: 739 break; /* success */ 740 case SCLD_EXISTS: 741 /* 742 * Suppose refname is "refs/foo/bar". We just failed 743 * to create the containing directory, "refs/foo", 744 * because there was a non-directory in the way. This 745 * indicates a D/F conflict, probably because of 746 * another reference such as "refs/foo". There is no 747 * reason to expect this error to be transitory. 
748 */ 749 if (refs_verify_refname_available(&refs->base, refname, 750 extras, NULL, 0, err)) { 751 if (mustexist) { 752 /* 753 * To the user the relevant error is 754 * that the "mustexist" reference is 755 * missing: 756 */ 757 strbuf_reset(err); 758 strbuf_addf(err, "unable to resolve reference '%s'", 759 refname); 760 ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; 761 } else { 762 /* 763 * The error message set by 764 * refs_verify_refname_available() is 765 * OK. 766 */ 767 ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; 768 } 769 } else { 770 /* 771 * The file that is in the way isn't a loose 772 * reference. Report it as a low-level 773 * failure. 774 */ 775 strbuf_addf(err, "unable to create lock file %s.lock; " 776 "non-directory in the way", 777 ref_file.buf); 778 } 779 goto error_return; 780 case SCLD_VANISHED: 781 /* Maybe another process was tidying up. Try again. */ 782 if (--attempts_remaining > 0) 783 goto retry; 784 /* fall through */ 785 default: 786 strbuf_addf(err, "unable to create directory for %s", 787 ref_file.buf); 788 goto error_return; 789 } 790 791 if (hold_lock_file_for_update_timeout( 792 &lock->lk, ref_file.buf, LOCK_NO_DEREF, 793 get_files_ref_lock_timeout_ms()) < 0) { 794 int myerr = errno; 795 errno = 0; 796 if (myerr == ENOENT && --attempts_remaining > 0) { 797 /* 798 * Maybe somebody just deleted one of the 799 * directories leading to ref_file. Try 800 * again: 801 */ 802 goto retry; 803 } else { 804 unable_to_lock_message(ref_file.buf, myerr, err); 805 if (myerr == EEXIST) { 806 if (ignore_case && 807 transaction_has_case_conflicting_update(transaction, update)) { 808 /* 809 * In case-insensitive filesystems, ensure that conflicts within a 810 * given transaction are handled. Pre-existing refs on a 811 * case-insensitive system will be overridden without any issue. 
812 */ 813 ret = REF_TRANSACTION_ERROR_CASE_CONFLICT; 814 } else { 815 /* 816 * Pre-existing case-conflicting reference locks should also be 817 * specially categorized to avoid failing all batched updates. 818 */ 819 ret = REF_TRANSACTION_ERROR_CREATE_EXISTS; 820 } 821 } 822 823 goto error_return; 824 } 825 } 826 827 /* 828 * Now we hold the lock and can read the reference without 829 * fear that its value will change. 830 */ 831 832 if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent, 833 type, &failure_errno)) { 834 struct string_list_item *item; 835 836 if (failure_errno == ENOENT) { 837 if (mustexist) { 838 /* Garden variety missing reference. */ 839 strbuf_addf(err, "unable to resolve reference '%s'", 840 refname); 841 ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; 842 goto error_return; 843 } else { 844 /* 845 * Reference is missing, but that's OK. We 846 * know that there is not a conflict with 847 * another loose reference because 848 * (supposing that we are trying to lock 849 * reference "refs/foo/bar"): 850 * 851 * - We were successfully able to create 852 * the lockfile refs/foo/bar.lock, so we 853 * know there cannot be a loose reference 854 * named "refs/foo". 855 * 856 * - We got ENOENT and not EISDIR, so we 857 * know that there cannot be a loose 858 * reference named "refs/foo/bar/baz". 859 */ 860 } 861 } else if (failure_errno == EISDIR) { 862 /* 863 * There is a directory in the way. It might have 864 * contained references that have been deleted. If 865 * we don't require that the reference already 866 * exists, try to remove the directory so that it 867 * doesn't cause trouble when we want to rename the 868 * lockfile into place later. 869 */ 870 if (mustexist) { 871 /* Garden variety missing reference. 
*/ 872 strbuf_addf(err, "unable to resolve reference '%s'", 873 refname); 874 ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; 875 goto error_return; 876 } else if (remove_dir_recursively(&ref_file, 877 REMOVE_DIR_EMPTY_ONLY)) { 878 ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; 879 if (refs_verify_refname_available( 880 &refs->base, refname, 881 extras, NULL, 0, err)) { 882 /* 883 * The error message set by 884 * verify_refname_available() is OK. 885 */ 886 goto error_return; 887 } else { 888 /* 889 * Directory conflicts can occur if there 890 * is an existing lock file in the directory 891 * or if the filesystem is case-insensitive 892 * and the directory contains a valid reference 893 * but conflicts with the update. 894 */ 895 strbuf_addf(err, "there is a non-empty directory '%s' " 896 "blocking reference '%s'", 897 ref_file.buf, refname); 898 goto error_return; 899 } 900 } 901 } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) { 902 strbuf_addf(err, "unable to resolve reference '%s': " 903 "reference broken", refname); 904 goto error_return; 905 } else { 906 strbuf_addf(err, "unable to resolve reference '%s': %s", 907 refname, strerror(failure_errno)); 908 goto error_return; 909 } 910 911 /* 912 * If the ref did not exist and we are creating it, we have to 913 * make sure there is no existing packed ref that conflicts 914 * with refname. This check is deferred so that we can batch it. 915 * 916 * For case-insensitive filesystems, we should also check for F/D 917 * conflicts between 'foo' and 'Foo/bar'. So let's lowercase 918 * the refname. 
919 */ 920 if (ignore_case) { 921 struct strbuf lower = STRBUF_INIT; 922 923 strbuf_addstr(&lower, refname); 924 strbuf_tolower(&lower); 925 926 item = string_list_append_nodup(refnames_to_check, 927 strbuf_detach(&lower, NULL)); 928 } else { 929 item = string_list_append(refnames_to_check, refname); 930 } 931 932 item->util = xmalloc(sizeof(update_idx)); 933 memcpy(item->util, &update_idx, sizeof(update_idx)); 934 } 935 936 ret = 0; 937 goto out; 938 939error_return: 940 unlock_ref(lock); 941 *lock_p = NULL; 942 943out: 944 strbuf_release(&ref_file); 945 return ret; 946} 947 948struct files_ref_iterator { 949 struct ref_iterator base; 950 951 struct ref_iterator *iter0; 952 struct repository *repo; 953 unsigned int flags; 954}; 955 956static int files_ref_iterator_advance(struct ref_iterator *ref_iterator) 957{ 958 struct files_ref_iterator *iter = 959 (struct files_ref_iterator *)ref_iterator; 960 int ok; 961 962 while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) { 963 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY && 964 parse_worktree_ref(iter->iter0->refname, NULL, NULL, 965 NULL) != REF_WORKTREE_CURRENT) 966 continue; 967 968 if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) && 969 (iter->iter0->flags & REF_ISSYMREF) && 970 (iter->iter0->flags & REF_ISBROKEN)) 971 continue; 972 973 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) && 974 !ref_resolves_to_object(iter->iter0->refname, 975 iter->repo, 976 iter->iter0->oid, 977 iter->iter0->flags)) 978 continue; 979 980 iter->base.refname = iter->iter0->refname; 981 iter->base.oid = iter->iter0->oid; 982 iter->base.flags = iter->iter0->flags; 983 iter->base.referent = iter->iter0->referent; 984 985 return ITER_OK; 986 } 987 988 return ok; 989} 990 991static int files_ref_iterator_seek(struct ref_iterator *ref_iterator, 992 const char *refname, unsigned int flags) 993{ 994 struct files_ref_iterator *iter = 995 (struct files_ref_iterator *)ref_iterator; 996 return ref_iterator_seek(iter->iter0, refname, 
flags); 997} 998 999static int files_ref_iterator_peel(struct ref_iterator *ref_iterator, 1000 struct object_id *peeled) 1001{ 1002 struct files_ref_iterator *iter = 1003 (struct files_ref_iterator *)ref_iterator; 1004 1005 return ref_iterator_peel(iter->iter0, peeled); 1006} 1007 1008static void files_ref_iterator_release(struct ref_iterator *ref_iterator) 1009{ 1010 struct files_ref_iterator *iter = 1011 (struct files_ref_iterator *)ref_iterator; 1012 ref_iterator_free(iter->iter0); 1013} 1014 1015static struct ref_iterator_vtable files_ref_iterator_vtable = { 1016 .advance = files_ref_iterator_advance, 1017 .seek = files_ref_iterator_seek, 1018 .peel = files_ref_iterator_peel, 1019 .release = files_ref_iterator_release, 1020}; 1021 1022static struct ref_iterator *files_ref_iterator_begin( 1023 struct ref_store *ref_store, 1024 const char *prefix, const char **exclude_patterns, 1025 unsigned int flags) 1026{ 1027 struct files_ref_store *refs; 1028 struct ref_iterator *loose_iter, *packed_iter, *overlay_iter; 1029 struct files_ref_iterator *iter; 1030 struct ref_iterator *ref_iterator; 1031 unsigned int required_flags = REF_STORE_READ; 1032 1033 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN)) 1034 required_flags |= REF_STORE_ODB; 1035 1036 refs = files_downcast(ref_store, required_flags, "ref_iterator_begin"); 1037 1038 /* 1039 * We must make sure that all loose refs are read before 1040 * accessing the packed-refs file; this avoids a race 1041 * condition if loose refs are migrated to the packed-refs 1042 * file by a simultaneous process, but our in-memory view is 1043 * from before the migration. We ensure this as follows: 1044 * First, we call start the loose refs iteration with its 1045 * `prime_ref` argument set to true. This causes the loose 1046 * references in the subtree to be pre-read into the cache. 1047 * (If they've already been read, that's OK; we only need to 1048 * guarantee that they're read before the packed refs, not 1049 * *how much* before.) 
After that, we call 1050 * packed_ref_iterator_begin(), which internally checks 1051 * whether the packed-ref cache is up to date with what is on 1052 * disk, and re-reads it if not. 1053 */ 1054 1055 loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, flags), 1056 prefix, ref_store->repo, 1); 1057 1058 /* 1059 * The packed-refs file might contain broken references, for 1060 * example an old version of a reference that points at an 1061 * object that has since been garbage-collected. This is OK as 1062 * long as there is a corresponding loose reference that 1063 * overrides it, and we don't want to emit an error message in 1064 * this case. So ask the packed_ref_store for all of its 1065 * references, and (if needed) do our own check for broken 1066 * ones in files_ref_iterator_advance(), after we have merged 1067 * the packed and loose references. 1068 */ 1069 packed_iter = refs_ref_iterator_begin( 1070 refs->packed_ref_store, prefix, exclude_patterns, 0, 1071 DO_FOR_EACH_INCLUDE_BROKEN); 1072 1073 overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter); 1074 1075 CALLOC_ARRAY(iter, 1); 1076 ref_iterator = &iter->base; 1077 base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable); 1078 iter->iter0 = overlay_iter; 1079 iter->repo = ref_store->repo; 1080 iter->flags = flags; 1081 1082 return ref_iterator; 1083} 1084 1085/* 1086 * Callback function for raceproof_create_file(). This function is 1087 * expected to do something that makes dirname(path) permanent despite 1088 * the fact that other processes might be cleaning up empty 1089 * directories at the same time. Usually it will create a file named 1090 * path, but alternatively it could create another file in that 1091 * directory, or even chdir() into that directory. The function should 1092 * return 0 if the action was completed successfully. On error, it 1093 * should return a nonzero result and set errno. 
 * raceproof_create_file() treats two errno values specially:
 *
 * - ENOENT -- dirname(path) does not exist. In this case,
 *             raceproof_create_file() tries creating dirname(path)
 *             (and any parent directories, if necessary) and calls
 *             the function again.
 *
 * - EISDIR -- the file already exists and is a directory. In this
 *             case, raceproof_create_file() removes the directory if
 *             it is empty (and recursively any empty directories that
 *             it contains) and calls the function again.
 *
 * Any other errno causes raceproof_create_file() to fail with the
 * callback's return value and errno.
 *
 * Obviously, this function should be OK with being called again if it
 * fails with ENOENT or EISDIR. In other scenarios it will not be
 * called again.
 */
typedef int create_file_fn(const char *path, void *cb);

/*
 * Create a file in dirname(path) by calling fn, creating leading
 * directories if necessary. Retry a few times in case we are racing
 * with another process that is trying to clean up the directory that
 * contains path. See the documentation for create_file_fn for more
 * details.
 *
 * Return the value and set the errno that resulted from the most
 * recent call of fn. fn is always called at least once, and will be
 * called more than once if it returns ENOENT or EISDIR.
 */
static int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
{
	/*
	 * The number of times we will try to remove empty directories
	 * in the way of path. This is only 1 because if another
	 * process is racily creating directories that conflict with
	 * us, we don't want to fight against them.
	 */
	int remove_directories_remaining = 1;

	/*
	 * The number of times that we will try to create the
	 * directories containing path. We are willing to attempt this
	 * more than once, because another process could be trying to
	 * clean up empty directories at the same time as we are
	 * trying to create them.
	 */
	int create_directories_remaining = 3;

	/* A scratch copy of path, filled lazily if we need it: */
	struct strbuf path_copy = STRBUF_INIT;

	int ret, save_errno;

	/* Sanity check: */
	assert(*path);

retry_fn:
	ret = fn(path, cb);
	/* Capture fn()'s errno before cleanup calls can clobber it. */
	save_errno = errno;
	if (!ret)
		goto out;

	if (errno == EISDIR && remove_directories_remaining-- > 0) {
		/*
		 * A directory is in the way. Maybe it is empty; try
		 * to remove it:
		 */
		if (!path_copy.len)
			strbuf_addstr(&path_copy, path);

		if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
			goto retry_fn;
	} else if (errno == ENOENT && create_directories_remaining-- > 0) {
		/*
		 * Maybe the containing directory didn't exist, or
		 * maybe it was just deleted by a process that is
		 * racing with us to clean up empty directories. Try
		 * to create it:
		 */
		enum scld_error scld_result;

		if (!path_copy.len)
			strbuf_addstr(&path_copy, path);

		do {
			scld_result = safe_create_leading_directories(the_repository, path_copy.buf);
			if (scld_result == SCLD_OK)
				goto retry_fn;
			/* SCLD_VANISHED: somebody removed a dir under us; retry. */
		} while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
	}

out:
	strbuf_release(&path_copy);
	/* Report the errno of the most recent fn() call, not of cleanup. */
	errno = save_errno;
	return ret;
}

static int remove_empty_directories(struct strbuf *path)
{
	/*
	 * we want to create a file but there is a directory there;
	 * if that is an empty directory (or a directory that contains
	 * only empty directories), remove them.
	 */
	return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);
}

/*
 * create_file_fn callback: acquire the lockfile for `path` without
 * dereferencing symlinks, using the configured ref-lock timeout.
 */
static int create_reflock(const char *path, void *cb)
{
	struct lock_file *lk = cb;

	return hold_lock_file_for_update_timeout(
			lk, path, LOCK_NO_DEREF,
			get_files_ref_lock_timeout_ms()) < 0 ? -1 : 0;
}

/*
 * Locks a ref returning the lock on success and NULL on failure.
 */
static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
					   const char *refname,
					   struct strbuf *err)
{
	struct strbuf ref_file = STRBUF_INIT;
	struct ref_lock *lock;

	files_assert_main_repository(refs, "lock_ref_oid_basic");
	assert(err);

	CALLOC_ARRAY(lock, 1);

	files_ref_path(refs, &ref_file, refname);

	/*
	 * If the ref did not exist and we are creating it, make sure
	 * there is no existing packed ref whose name begins with our
	 * refname, nor a packed ref whose name is a proper prefix of
	 * our refname.
	 *
	 * NOTE(review): lock->old_oid was just zeroed by CALLOC_ARRAY,
	 * so is_null_oid() appears trivially true here and the
	 * availability check always runs -- confirm this is intended.
	 */
	if (is_null_oid(&lock->old_oid) &&
	    refs_verify_refname_available(refs->packed_ref_store, refname,
					  NULL, NULL, 0, err))
		goto error_return;

	lock->ref_name = xstrdup(refname);
	lock->count = 1;

	if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
		unable_to_lock_message(ref_file.buf, errno, err);
		goto error_return;
	}

	/* Record the ref's current value; zero it if the ref does not resolve. */
	if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0,
				     &lock->old_oid, NULL))
		oidclr(&lock->old_oid, refs->base.repo->hash_algo);
	goto out;

error_return:
	unlock_ref(lock);
	lock = NULL;

out:
	strbuf_release(&ref_file);
	return lock;
}

/* Linked-list node naming a loose ref to delete after it has been packed. */
struct ref_to_prune {
	struct ref_to_prune *next;
	struct object_id oid;
	char name[FLEX_ARRAY];
};

enum {
	REMOVE_EMPTY_PARENTS_REF = 0x01,
	REMOVE_EMPTY_PARENTS_REFLOG = 0x02
};

/*
 * Remove empty parent directories associated with the specified
 * reference and/or its reflog, but spare [logs/]refs/ and immediate
 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
 * REMOVE_EMPTY_PARENTS_REFLOG.
 */
static void try_remove_empty_parents(struct files_ref_store *refs,
				     const char *refname,
				     unsigned int flags)
{
	struct strbuf buf = STRBUF_INIT;
	struct strbuf sb = STRBUF_INIT;
	char *p, *q;
	int i;

	strbuf_addstr(&buf, refname);
	p = buf.buf;
	/* Skip the first two path components, which are never removed. */
	for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
		while (*p && *p != '/')
			p++;
		/* tolerate duplicate slashes; see check_refname_format() */
		while (*p == '/')
			p++;
	}
	q = buf.buf + buf.len;
	/* Walk upward one component at a time, rmdir()ing on each side. */
	while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
		while (q > p && *q != '/')
			q--;
		while (q > p && *(q-1) == '/')
			q--;
		if (q == p)
			break;
		strbuf_setlen(&buf, q - buf.buf);

		strbuf_reset(&sb);
		files_ref_path(refs, &sb, buf.buf);
		/* Once rmdir() fails (e.g. dir not empty), stop trying that side. */
		if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
			flags &= ~REMOVE_EMPTY_PARENTS_REF;

		strbuf_reset(&sb);
		files_reflog_path(refs, &sb, buf.buf);
		if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
			flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
	}
	strbuf_release(&buf);
	strbuf_release(&sb);
}

/* make sure nobody touched the ref, and unlink */
static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
{
	struct ref_transaction *transaction;
	struct strbuf err = STRBUF_INIT;
	int ret = -1;

	if (check_refname_format(r->name, 0))
		return;

	transaction = ref_store_transaction_begin(&refs->base, 0, &err);
	if (!transaction)
		goto cleanup;
	/*
	 * Delete the loose ref only if it still has the value we just
	 * packed (old_oid == r->oid); REF_IS_PRUNING marks this as
	 * post-pack cleanup for the transaction machinery.
	 */
	ref_transaction_add_update(
			transaction, r->name,
			REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
			null_oid(the_hash_algo), &r->oid, NULL, NULL, NULL, NULL);
	if (ref_transaction_commit(transaction, &err))
		goto cleanup;

	ret = 0;

cleanup:
	if (ret)
		error("%s", err.buf);
	strbuf_release(&err);
	ref_transaction_free(transaction);
	return;
}

/*
 * Prune the loose versions of the references in the linked list
 * `*refs_to_prune`, freeing the entries in the list as we go.
 */
static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
{
	while (*refs_to_prune) {
		struct ref_to_prune *r = *refs_to_prune;
		*refs_to_prune = r->next;
		prune_ref(refs, r);
		free(r);
	}
}

/*
 * Return true if the specified reference should be packed.
 */
static int should_pack_ref(struct files_ref_store *refs,
			   const char *refname,
			   const struct object_id *oid, unsigned int ref_flags,
			   struct pack_refs_opts *opts)
{
	struct string_list_item *item;

	/* Do not pack per-worktree refs: */
	if (parse_worktree_ref(refname, NULL, NULL, NULL) !=
	    REF_WORKTREE_SHARED)
		return 0;

	/* Do not pack symbolic refs: */
	if (ref_flags & REF_ISSYMREF)
		return 0;

	/* Do not pack broken refs: */
	if (!ref_resolves_to_object(refname, refs->base.repo, oid, ref_flags))
		return 0;

	if (ref_excluded(opts->exclusions, refname))
		return 0;

	/* Pack only refs matching at least one of the include patterns. */
	for_each_string_list_item(item, opts->includes)
		if (!wildmatch(item->string, refname, 0))
			return 1;

	return 0;
}

/*
 * Decide whether a PACK_REFS_AUTO request should actually repack,
 * based on the number of packable loose refs relative to the size of
 * the existing packed-refs file. Returns 1 to pack, 0 to skip.
 */
static int should_pack_refs(struct files_ref_store *refs,
			    struct pack_refs_opts *opts)
{
	struct ref_iterator *iter;
	size_t packed_size;
	size_t refcount = 0;
	size_t limit;
	int ret;

	/* Non-auto requests always pack. */
	if (!(opts->flags & PACK_REFS_AUTO))
		return 1;

	ret = packed_refs_size(refs->packed_ref_store, &packed_size);
	if (ret < 0)
		die("cannot determine packed-refs size");

	/*
	 * Packing loose references into the packed-refs file scales with the
	 * number of references we're about to write. We thus decide whether we
	 * repack refs by weighing the current size of the packed-refs file
	 * against the number of loose references. This is done such that we do
	 * not repack too often on repositories with a huge number of
	 * references, where we can expect a lot of churn in the number of
	 * references.
	 *
	 * As a heuristic, we repack if the number of loose references in the
	 * repository exceeds `log2(nr_packed_refs) * 5`, where we estimate
	 * `nr_packed_refs = packed_size / 100`, which scales as following:
	 *
	 * - 1kB ~ 10 packed refs: 16 refs
	 * - 10kB ~ 100 packed refs: 33 refs
	 * - 100kB ~ 1k packed refs: 49 refs
	 * - 1MB ~ 10k packed refs: 66 refs
	 * - 10MB ~ 100k packed refs: 82 refs
	 * - 100MB ~ 1m packed refs: 99 refs
	 *
	 * We thus allow roughly 16 additional loose refs per factor of ten of
	 * packed refs. This heuristic may be tweaked in the future, but should
	 * serve as a sufficiently good first iteration.
1438 */ 1439 limit = log2u(packed_size / 100) * 5; 1440 if (limit < 16) 1441 limit = 16; 1442 1443 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL, 1444 refs->base.repo, 0); 1445 while ((ret = ref_iterator_advance(iter)) == ITER_OK) { 1446 if (should_pack_ref(refs, iter->refname, iter->oid, 1447 iter->flags, opts)) 1448 refcount++; 1449 if (refcount >= limit) { 1450 ref_iterator_free(iter); 1451 return 1; 1452 } 1453 } 1454 1455 if (ret != ITER_DONE) 1456 die("error while iterating over references"); 1457 1458 ref_iterator_free(iter); 1459 return 0; 1460} 1461 1462static int files_pack_refs(struct ref_store *ref_store, 1463 struct pack_refs_opts *opts) 1464{ 1465 struct files_ref_store *refs = 1466 files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, 1467 "pack_refs"); 1468 struct ref_iterator *iter; 1469 int ok; 1470 struct ref_to_prune *refs_to_prune = NULL; 1471 struct strbuf err = STRBUF_INIT; 1472 struct ref_transaction *transaction; 1473 1474 if (!should_pack_refs(refs, opts)) 1475 return 0; 1476 1477 transaction = ref_store_transaction_begin(refs->packed_ref_store, 1478 0, &err); 1479 if (!transaction) 1480 return -1; 1481 1482 packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err); 1483 1484 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL, 1485 refs->base.repo, 0); 1486 while ((ok = ref_iterator_advance(iter)) == ITER_OK) { 1487 /* 1488 * If the loose reference can be packed, add an entry 1489 * in the packed ref cache. If the reference should be 1490 * pruned, also add it to refs_to_prune. 
1491 */ 1492 if (!should_pack_ref(refs, iter->refname, iter->oid, iter->flags, opts)) 1493 continue; 1494 1495 /* 1496 * Add a reference creation for this reference to the 1497 * packed-refs transaction: 1498 */ 1499 if (ref_transaction_update(transaction, iter->refname, 1500 iter->oid, NULL, NULL, NULL, 1501 REF_NO_DEREF, NULL, &err)) 1502 die("failure preparing to create packed reference %s: %s", 1503 iter->refname, err.buf); 1504 1505 /* Schedule the loose reference for pruning if requested. */ 1506 if ((opts->flags & PACK_REFS_PRUNE)) { 1507 struct ref_to_prune *n; 1508 FLEX_ALLOC_STR(n, name, iter->refname); 1509 oidcpy(&n->oid, iter->oid); 1510 n->next = refs_to_prune; 1511 refs_to_prune = n; 1512 } 1513 } 1514 if (ok != ITER_DONE) 1515 die("error while iterating over references"); 1516 1517 if (ref_transaction_commit(transaction, &err)) 1518 die("unable to write new packed-refs: %s", err.buf); 1519 1520 ref_transaction_free(transaction); 1521 1522 packed_refs_unlock(refs->packed_ref_store); 1523 1524 prune_refs(refs, &refs_to_prune); 1525 ref_iterator_free(iter); 1526 strbuf_release(&err); 1527 return 0; 1528} 1529 1530static int files_optimize(struct ref_store *ref_store, struct pack_refs_opts *opts) 1531{ 1532 /* 1533 * For the "files" backend, "optimizing" is the same as "packing". 1534 * So, we just call the existing worker function for packing. 1535 */ 1536 return files_pack_refs(ref_store, opts); 1537} 1538 1539/* 1540 * People using contrib's git-new-workdir have .git/logs/refs -> 1541 * /some/other/path/.git/logs/refs, and that may live on another device. 1542 * 1543 * IOW, to avoid cross device rename errors, the temporary renamed log must 1544 * live into logs/refs. 
 */
#define TMP_RENAMED_LOG "refs/.tmp-renamed-log"

/* State passed to rename_tmp_log_callback(). */
struct rename_cb {
	const char *tmp_renamed_log; /* path of the temporarily-renamed reflog */
	int true_errno;              /* unmodified errno of the failing rename() */
};

/*
 * create_file_fn callback: move the temporarily-renamed reflog into
 * its final location at `path`.
 */
static int rename_tmp_log_callback(const char *path, void *cb_data)
{
	struct rename_cb *cb = cb_data;

	if (rename(cb->tmp_renamed_log, path)) {
		/*
		 * rename(a, b) when b is an existing directory ought
		 * to result in ISDIR, but Solaris 5.8 gives ENOTDIR.
		 * Sheesh. Record the true errno for error reporting,
		 * but report EISDIR to raceproof_create_file() so
		 * that it knows to retry.
		 */
		cb->true_errno = errno;
		if (errno == ENOTDIR)
			errno = EISDIR;
		return -1;
	} else {
		return 0;
	}
}

/*
 * Move TMP_RENAMED_LOG to the reflog location for `newrefname`,
 * creating or clearing out directories as needed. Returns 0 on
 * success, non-zero (after printing an error) on failure.
 */
static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
{
	struct strbuf path = STRBUF_INIT;
	struct strbuf tmp = STRBUF_INIT;
	struct rename_cb cb;
	int ret;

	files_reflog_path(refs, &path, newrefname);
	files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
	cb.tmp_renamed_log = tmp.buf;
	ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
	if (ret) {
		if (errno == EISDIR)
			error("directory not empty: %s", path.buf);
		else
			error("unable to move logfile %s to %s: %s",
			      tmp.buf, path.buf,
			      strerror(cb.true_errno));
	}

	strbuf_release(&path);
	strbuf_release(&tmp);
	return ret;
}

static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs,
							struct ref_lock *lock,
							const struct object_id *oid,
							int skip_oid_verification,
							struct strbuf *err);
static int commit_ref_update(struct files_ref_store *refs,
			     struct ref_lock *lock,
			     const struct object_id *oid, const char *logmsg,
			     int flags,
			     struct strbuf *err);

/*
 * Emit a better error message than lockfile.c's
 * unable_to_lock_message() would in case there is a D/F conflict with
 * another existing reference. If there would be a conflict, emit an error
 * message and return false; otherwise, return true.
 *
 * Note that this function is not safe against all races with other
 * processes, and that's not its job. We'll emit a more verbose error on D/f
 * conflicts if we get past it into lock_ref_oid_basic().
 */
static int refs_rename_ref_available(struct ref_store *refs,
				     const char *old_refname,
				     const char *new_refname)
{
	struct string_list skip = STRING_LIST_INIT_NODUP;
	struct strbuf err = STRBUF_INIT;
	int ok;

	/* The old name is going away, so it may not conflict with the new one. */
	string_list_insert(&skip, old_refname);
	ok = !refs_verify_refname_available(refs, new_refname,
					    NULL, &skip, 0, &err);
	if (!ok)
		error("%s", err.buf);

	string_list_clear(&skip, 0);
	strbuf_release(&err);
	return ok;
}

/*
 * Worker for files_rename_ref() and files_copy_ref(): move (copy == 0)
 * or copy (copy == 1) `oldrefname` to `newrefname`, carrying the
 * reflog along and logging `logmsg`. Returns 0 on success, non-zero on
 * failure (after attempting to roll a partial rename back).
 */
static int files_copy_or_rename_ref(struct ref_store *ref_store,
				    const char *oldrefname, const char *newrefname,
				    const char *logmsg, int copy)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
	struct object_id orig_oid;
	int flag = 0, logmoved = 0;
	struct ref_lock *lock;
	struct stat loginfo;
	struct strbuf sb_oldref = STRBUF_INIT;
	struct strbuf sb_newref = STRBUF_INIT;
	struct strbuf tmp_renamed_log = STRBUF_INIT;
	int log, ret;
	struct strbuf err = STRBUF_INIT;

	files_reflog_path(refs, &sb_oldref, oldrefname);
	files_reflog_path(refs, &sb_newref, newrefname);
	files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);

	log = !lstat(sb_oldref.buf, &loginfo);
	if (log && S_ISLNK(loginfo.st_mode)) {
		ret = error("reflog for %s is a symlink", oldrefname);
		goto out;
	}

	if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
				     RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
				     &orig_oid, &flag)) {
		ret = error("refname %s not found", oldrefname);
		goto out;
	}

	if (flag & REF_ISSYMREF) {
		if (copy)
			ret = error("refname %s is a symbolic ref, copying it is not supported",
				    oldrefname);
		else
			ret = error("refname %s is a symbolic ref, renaming it is not supported",
				    oldrefname);
		goto out;
	}
	if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
		ret = 1;
		goto out;
	}

	/* Move the reflog aside so deleting the old ref does not delete it too. */
	if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
		ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
			    oldrefname, strerror(errno));
		goto out;
	}

	if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) {
		ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
			    oldrefname, strerror(errno));
		goto out;
	}

	if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
				     &orig_oid, REF_NO_DEREF)) {
		error("unable to delete old %s", oldrefname);
		goto rollback;
	}

	/*
	 * Since we are doing a shallow lookup, oid is not the
	 * correct value to pass to delete_ref as old_oid. But that
	 * doesn't matter, because an old_oid check wouldn't add to
	 * the safety anyway; we want to delete the reference whatever
	 * its current value.
	 */
	if (!copy && refs_resolve_ref_unsafe(&refs->base, newrefname,
					     RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
					     NULL, NULL) &&
	    refs_delete_ref(&refs->base, NULL, newrefname,
			    NULL, REF_NO_DEREF)) {
		if (errno == EISDIR) {
			/* An (empty) directory may be squatting on the new name. */
			struct strbuf path = STRBUF_INIT;
			int result;

			files_ref_path(refs, &path, newrefname);
			result = remove_empty_directories(&path);
			strbuf_release(&path);

			if (result) {
				error("Directory not empty: %s", newrefname);
				goto rollback;
			}
		} else {
			error("unable to delete existing %s", newrefname);
			goto rollback;
		}
	}

	if (log && rename_tmp_log(refs, newrefname))
		goto rollback;

	logmoved = log;

	lock = lock_ref_oid_basic(refs, newrefname, &err);
	if (!lock) {
		if (copy)
			error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
		else
			error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
		strbuf_release(&err);
		goto rollback;
	}
	oidcpy(&lock->old_oid, &orig_oid);

	if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
	    commit_ref_update(refs, lock, &orig_oid, logmsg, 0, &err)) {
		error("unable to write current sha1 into %s: %s", newrefname, err.buf);
		strbuf_release(&err);
		goto rollback;
	}

	ret = 0;
	goto out;

 rollback:
	/* Try to restore the old ref to its pre-rename value. */
	lock = lock_ref_oid_basic(refs, oldrefname, &err);
	if (!lock) {
		error("unable to lock %s for rollback: %s", oldrefname, err.buf);
		strbuf_release(&err);
		goto rollbacklog;
	}

	if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
	    commit_ref_update(refs, lock, &orig_oid, NULL, REF_SKIP_CREATE_REFLOG, &err)) {
		error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
		strbuf_release(&err);
	}

 rollbacklog:
	/* Put the reflog back, from whichever place it reached before failing. */
	if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
		error("unable to restore logfile %s from %s: %s",
		      oldrefname, newrefname, strerror(errno));
	if (!logmoved && log &&
	    rename(tmp_renamed_log.buf, sb_oldref.buf))
		error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
		      oldrefname, strerror(errno));
	ret = 1;
 out:
	strbuf_release(&sb_newref);
	strbuf_release(&sb_oldref);
	strbuf_release(&tmp_renamed_log);

	return ret;
}

/* ref_store callback: rename a ref, moving its reflog along. */
static int files_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	return files_copy_or_rename_ref(ref_store, oldrefname,
					newrefname, logmsg, 0);
}

/* ref_store callback: copy a ref, duplicating its reflog. */
static int files_copy_ref(struct ref_store *ref_store,
			  const char *oldrefname, const char *newrefname,
			  const char *logmsg)
{
	return files_copy_or_rename_ref(ref_store, oldrefname,
					newrefname, logmsg, 1);
}

/* Close the lockfile's fd without committing or rolling it back. */
static int close_ref_gently(struct ref_lock *lock)
{
	if (close_lock_file_gently(&lock->lk))
		return -1;
	return 0;
}

/*
 * Commit the ref's lockfile into place, clearing away an empty
 * directory that may be squatting on the ref's path. Returns 0 on
 * success, -1 on failure.
 */
static int commit_ref(struct ref_lock *lock)
{
	char *path = get_locked_file_path(&lock->lk);
	struct stat st;

	if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
		/*
		 * There is a directory at the path we want to rename
		 * the lockfile to. Hopefully it is empty; try to
		 * delete it.
		 */
		size_t len = strlen(path);
		struct strbuf sb_path = STRBUF_INIT;

		/* sb_path takes ownership of `path`; released below. */
		strbuf_attach(&sb_path, path, len, len);

		/*
		 * If this fails, commit_lock_file() will also fail
		 * and will report the problem.
		 */
		remove_empty_directories(&sb_path);
		strbuf_release(&sb_path);
	} else {
		free(path);
	}

	if (commit_lock_file(&lock->lk))
		return -1;
	return 0;
}

/* create_file_fn callback: open `path` for append, creating it if absent. */
static int open_or_create_logfile(const char *path, void *cb)
{
	int *fd = cb;

	*fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
	return (*fd < 0) ? -1 : 0;
}

/*
 * Create a reflog for a ref. If force_create = 0, only create the
 * reflog for certain refs (those for which should_autocreate_reflog
 * returns non-zero). Otherwise, create it regardless of the reference
 * name. If the logfile already existed or was created, return 0 and
 * set *logfd to the file descriptor opened for appending to the file.
 * If no logfile exists and we decided not to create one, return 0 and
 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
 * return -1.
 */
static int log_ref_setup(struct files_ref_store *refs,
			 const char *refname, int force_create,
			 int *logfd, struct strbuf *err)
{
	enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
	struct strbuf logfile_sb = STRBUF_INIT;
	char *logfile;

	/* Unconfigured: the default differs for bare vs. non-bare repos. */
	if (log_refs_cfg == LOG_REFS_UNSET)
		log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;

	files_reflog_path(refs, &logfile_sb, refname);
	logfile = strbuf_detach(&logfile_sb, NULL);

	if (force_create || should_autocreate_reflog(log_refs_cfg, refname)) {
		if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
			if (errno == ENOENT)
				strbuf_addf(err, "unable to create directory for '%s': "
					    "%s", logfile, strerror(errno));
			else if (errno == EISDIR)
				strbuf_addf(err, "there are still logs under '%s'",
					    logfile);
			else
				strbuf_addf(err, "unable to append to '%s': %s",
					    logfile, strerror(errno));

			goto error;
		}
	} else {
		/* Not auto-creating; append to the logfile only if it exists. */
		*logfd = open(logfile, O_APPEND | O_WRONLY);
		if (*logfd < 0) {
			if (errno == ENOENT || errno == EISDIR) {
				/*
				 * The logfile doesn't already exist,
				 * but that is not an error; it only
				 * means that we won't write log
				 * entries to it.
				 */
				;
			} else {
				strbuf_addf(err, "unable to append to '%s': %s",
					    logfile, strerror(errno));
				goto error;
			}
		}
	}

	if (*logfd >= 0)
		adjust_shared_perm(the_repository, logfile);

	free(logfile);
	return 0;

error:
	free(logfile);
	return -1;
}

/* ref_store callback: unconditionally create `refname`'s reflog. */
static int files_create_reflog(struct ref_store *ref_store, const char *refname,
			       struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	int fd;

	if (log_ref_setup(refs, refname, 1, &fd, err))
		return -1;

	if (fd >= 0)
		close(fd);

	return 0;
}

/*
 * Append one reflog entry ("old SP new SP committer [TAB msg] LF") to
 * the already-open reflog fd. Returns 0 on success, -1 on write error.
 */
static int log_ref_write_fd(int fd, const struct object_id *old_oid,
			    const struct object_id *new_oid,
			    const char *committer, const char *msg)
{
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;

	if (!committer)
		committer = git_committer_info(0);

	strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer);
	if (msg && *msg) {
		strbuf_addch(&sb, '\t');
		strbuf_addstr(&sb, msg);
	}
	strbuf_addch(&sb, '\n');
	if (write_in_full(fd, sb.buf, sb.len) < 0)
		ret = -1;
	strbuf_release(&sb);
	return ret;
}

/*
 * Append an entry to `refname`'s reflog, creating the reflog first if
 * configuration (or REF_FORCE_CREATE_REFLOG) calls for it. A reflog
 * that does not exist and that we decided not to create is not an
 * error. Returns 0 on success or when nothing was logged, -1 on
 * failure with *err filled in.
 */
static int files_log_ref_write(struct files_ref_store *refs,
			       const char *refname,
			       const struct object_id *old_oid,
			       const struct object_id *new_oid,
			       const char *committer_info, const char *msg,
			       int flags, struct strbuf *err)
{
	int logfd, result;

	if (flags & REF_SKIP_CREATE_REFLOG)
		return 0;

	result = log_ref_setup(refs, refname,
			       flags & REF_FORCE_CREATE_REFLOG,
			       &logfd, err);

	if (result)
		return result;

	if (logfd < 0)
		return 0;
	result = log_ref_write_fd(logfd, old_oid, new_oid, committer_info, msg);
	if (result) {
		struct strbuf sb = STRBUF_INIT;
		int save_errno = errno;

		files_reflog_path(refs, &sb, refname);
		strbuf_addf(err, "unable to append to '%s': %s",
			    sb.buf, strerror(save_errno));
		strbuf_release(&sb);
		close(logfd);
		return -1;
	}
	/* close() may surface deferred I/O errors; treat them as lost writes. */
	if (close(logfd)) {
		struct strbuf sb = STRBUF_INIT;
		int save_errno = errno;

		files_reflog_path(refs, &sb, refname);
		strbuf_addf(err, "unable to append to '%s': %s",
			    sb.buf, strerror(save_errno));
		strbuf_release(&sb);
		return -1;
	}
	return 0;
}

/*
 * Write oid into the open lockfile, then close the lockfile. On
 * errors, rollback the lockfile, fill in *err and return -1.
 */
static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs,
							struct ref_lock *lock,
							const struct object_id *oid,
							int skip_oid_verification,
							struct strbuf *err)
{
	static char term = '\n';
	struct object *o;
	int fd;

	if (!skip_oid_verification) {
		/* Refuse to write an oid that does not name a known object. */
		o = parse_object(refs->base.repo, oid);
		if (!o) {
			strbuf_addf(
				err,
				"trying to write ref '%s' with nonexistent object %s",
				lock->ref_name, oid_to_hex(oid));
			unlock_ref(lock);
			return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
		}
		/* Branch refs may only point at commit objects. */
		if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
			strbuf_addf(
				err,
				"trying to write non-commit object %s to branch '%s'",
				oid_to_hex(oid), lock->ref_name);
			unlock_ref(lock);
			return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
		}
	}
	/* Write "<hex-oid>\n", fsync per config, and close the lockfile fd. */
	fd = get_lock_file_fd(&lock->lk);
	if (write_in_full(fd, oid_to_hex(oid), refs->base.repo->hash_algo->hexsz) < 0 ||
	    write_in_full(fd, &term, 1) < 0 ||
	    fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 ||
	    close_ref_gently(lock) < 0) {
		strbuf_addf(err,
			    "couldn't write '%s'", get_lock_file_path(&lock->lk));
		unlock_ref(lock);
		return REF_TRANSACTION_ERROR_GENERIC;
	}
	return 0;
}

/*
 * Commit a change to a loose reference that has already been written
 * to the loose reference lockfile. Also update the reflogs if
 * necessary, using the specified lockmsg (which can be NULL).
 */
static int commit_ref_update(struct files_ref_store *refs,
			     struct ref_lock *lock,
			     const struct object_id *oid, const char *logmsg,
			     int flags,
			     struct strbuf *err)
{
	files_assert_main_repository(refs, "commit_ref_update");

	/* The loose-ref cache is about to become stale; drop it. */
	clear_loose_ref_cache(refs);
	if (files_log_ref_write(refs, lock->ref_name, &lock->old_oid, oid, NULL,
				logmsg, flags, err)) {
		char *old_msg = strbuf_detach(err, NULL);
		strbuf_addf(err, "cannot update the ref '%s': %s",
			    lock->ref_name, old_msg);
		free(old_msg);
		unlock_ref(lock);
		return -1;
	}

	if (strcmp(lock->ref_name, "HEAD") != 0) {
		/*
		 * Special hack: If a branch is updated directly and HEAD
		 * points to it (may happen on the remote side of a push
		 * for example) then logically the HEAD reflog should be
		 * updated too.
		 * A generic solution implies reverse symref information,
		 * but finding all symrefs pointing to the given branch
		 * would be rather costly for this rare event (the direct
		 * update of a branch) to be worth it. So let's cheat and
		 * check with HEAD only which should cover 99% of all usage
		 * scenarios (even 100% of the default ones).
		 */
		int head_flag;
		const char *head_ref;

		head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
						   RESOLVE_REF_READING,
						   NULL, &head_flag);
		if (head_ref && (head_flag & REF_ISSYMREF) &&
		    !strcmp(head_ref, lock->ref_name)) {
			struct strbuf log_err = STRBUF_INIT;
			/* A failed HEAD reflog write is reported but not fatal. */
			if (files_log_ref_write(refs, "HEAD", &lock->old_oid,
						oid, NULL, logmsg, flags,
						&log_err)) {
				error("%s", log_err.buf);
				strbuf_release(&log_err);
			}
		}
	}

	if (commit_ref(lock)) {
		strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
		unlock_ref(lock);
		return -1;
	}

	unlock_ref(lock);
	return 0;
}

#ifdef NO_SYMLINK_HEAD
#define create_ref_symlink(a, b) (-1)
#else
/*
 * Replace the ref lockfile with a symlink pointing at `target` (used
 * with core.preferSymlinkRefs). Returns non-zero if the symlink could
 * not be created, in which case the caller falls back to a textual
 * symbolic ref.
 */
static int create_ref_symlink(struct ref_lock *lock, const char *target)
{
	int ret = -1;

	char *ref_path = get_locked_file_path(&lock->lk);
	unlink(ref_path);
	ret = symlink(target, ref_path);
	free(ref_path);

	if (ret)
		fprintf(stderr, "no symlink - falling back to symbolic ref\n");
	return ret;
}
#endif

/*
 * Write "ref: <target>" into the held lockfile. Returns 0 on success,
 * -1 on failure with *err filled in.
 */
static int create_symref_lock(struct ref_lock *lock, const char *target,
			      struct strbuf *err)
{
	if (!fdopen_lock_file(&lock->lk, "w")) {
		strbuf_addf(err, "unable to fdopen %s: %s",
			    get_lock_file_path(&lock->lk), strerror(errno));
		return -1;
	}

	if (fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target) < 0) {
		strbuf_addf(err, "unable to write to %s: %s",
			    get_lock_file_path(&lock->lk), strerror(errno));
		return -1;
	}

	return 0;
}

/* Return 1 if `refname` has a reflog (a regular file), 0 otherwise. */
static int files_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct strbuf sb = STRBUF_INIT;
	struct stat st;
	int ret;

	files_reflog_path(refs, &sb, refname);
	ret =
!lstat(sb.buf, &st) && S_ISREG(st.st_mode); 2163 strbuf_release(&sb); 2164 return ret; 2165} 2166 2167static int files_delete_reflog(struct ref_store *ref_store, 2168 const char *refname) 2169{ 2170 struct files_ref_store *refs = 2171 files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog"); 2172 struct strbuf sb = STRBUF_INIT; 2173 int ret; 2174 2175 files_reflog_path(refs, &sb, refname); 2176 ret = remove_path(sb.buf); 2177 strbuf_release(&sb); 2178 return ret; 2179} 2180 2181static int show_one_reflog_ent(struct files_ref_store *refs, 2182 const char *refname, 2183 struct strbuf *sb, 2184 each_reflog_ent_fn fn, void *cb_data) 2185{ 2186 struct object_id ooid, noid; 2187 char *email_end, *message; 2188 timestamp_t timestamp; 2189 int tz; 2190 const char *p = sb->buf; 2191 2192 /* old SP new SP name <email> SP time TAB msg LF */ 2193 if (!sb->len || sb->buf[sb->len - 1] != '\n' || 2194 parse_oid_hex_algop(p, &ooid, &p, refs->base.repo->hash_algo) || *p++ != ' ' || 2195 parse_oid_hex_algop(p, &noid, &p, refs->base.repo->hash_algo) || *p++ != ' ' || 2196 !(email_end = strchr(p, '>')) || 2197 email_end[1] != ' ' || 2198 !(timestamp = parse_timestamp(email_end + 2, &message, 10)) || 2199 !message || message[0] != ' ' || 2200 (message[1] != '+' && message[1] != '-') || 2201 !isdigit(message[2]) || !isdigit(message[3]) || 2202 !isdigit(message[4]) || !isdigit(message[5])) 2203 return 0; /* corrupt? */ 2204 email_end[1] = '\0'; 2205 tz = strtol(message + 1, NULL, 10); 2206 if (message[6] != '\t') 2207 message += 6; 2208 else 2209 message += 7; 2210 return fn(refname, &ooid, &noid, p, timestamp, tz, message, cb_data); 2211} 2212 2213static char *find_beginning_of_line(char *bob, char *scan) 2214{ 2215 while (bob < scan && *(--scan) != '\n') 2216 ; /* keep scanning backwards */ 2217 /* 2218 * Return either beginning of the buffer, or LF at the end of 2219 * the previous line. 
	 */
	return scan;
}

/*
 * Iterate over the entries of refname's reflog from newest to
 * oldest, calling fn on each. The log is read backwards in
 * BUFSIZ-sized chunks; a line that straddles a chunk boundary is
 * accumulated in sb across reads before being parsed.
 */
static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					     const char *refname,
					     each_reflog_ent_fn fn,
					     void *cb_data)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "for_each_reflog_ent_reverse");
	struct strbuf sb = STRBUF_INIT;
	FILE *logfp;
	long pos;
	int ret = 0, at_tail = 1;

	files_reflog_path(refs, &sb, refname);
	logfp = fopen(sb.buf, "r");
	strbuf_release(&sb);
	if (!logfp)
		return -1;

	/* Jump to the end */
	if (fseek(logfp, 0, SEEK_END) < 0)
		ret = error("cannot seek back reflog for %s: %s",
			    refname, strerror(errno));
	pos = ftell(logfp);
	while (!ret && 0 < pos) {
		int cnt;
		size_t nread;
		char buf[BUFSIZ];
		char *endp, *scanp;

		/* Fill next block from the end */
		cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
		if (fseek(logfp, pos - cnt, SEEK_SET)) {
			ret = error("cannot seek back reflog for %s: %s",
				    refname, strerror(errno));
			break;
		}
		nread = fread(buf, cnt, 1, logfp);
		if (nread != 1) {
			ret = error("cannot read %d bytes from reflog for %s: %s",
				    cnt, refname, strerror(errno));
			break;
		}
		pos -= cnt;

		scanp = endp = buf + cnt;
		if (at_tail && scanp[-1] == '\n')
			/* Looking at the final LF at the end of the file */
			scanp--;
		at_tail = 0;

		while (buf < scanp) {
			/*
			 * terminating LF of the previous line, or the beginning
			 * of the buffer.
			 */
			char *bp;

			bp = find_beginning_of_line(buf, scanp);

			if (*bp == '\n') {
				/*
				 * The newline is the end of the previous line,
				 * so we know we have complete line starting
				 * at (bp + 1). Prefix it onto any prior data
				 * we collected for the line and process it.
				 */
				strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
				scanp = bp;
				endp = bp + 1;
				ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data);
				strbuf_reset(&sb);
				if (ret)
					break;
			} else if (!pos) {
				/*
				 * We are at the start of the buffer, and the
				 * start of the file; there is no previous
				 * line, and we have everything for this one.
				 * Process it, and we can end the loop.
				 */
				strbuf_splice(&sb, 0, 0, buf, endp - buf);
				ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data);
				strbuf_reset(&sb);
				break;
			}

			if (bp == buf) {
				/*
				 * We are at the start of the buffer, and there
				 * is more file to read backwards. Which means
				 * we are in the middle of a line. Note that we
				 * may get here even if *bp was a newline; that
				 * just means we are at the exact end of the
				 * previous line, rather than some spot in the
				 * middle.
				 *
				 * Save away what we have to be combined with
				 * the data from the next read.
				 */
				strbuf_splice(&sb, 0, 0, buf, endp - buf);
				break;
			}
		}

	}
	if (!ret && sb.len)
		BUG("reverse reflog parser had leftover data");

	fclose(logfp);
	strbuf_release(&sb);
	return ret;
}

/*
 * Iterate over refname's reflog entries from oldest to newest,
 * calling fn on each line.
 */
static int files_for_each_reflog_ent(struct ref_store *ref_store,
				     const char *refname,
				     each_reflog_ent_fn fn, void *cb_data)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "for_each_reflog_ent");
	FILE *logfp;
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;

	files_reflog_path(refs, &sb, refname);
	logfp = fopen(sb.buf, "r");
	strbuf_release(&sb);
	if (!logfp)
		return -1;

	while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
		ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data);
	fclose(logfp);
	strbuf_release(&sb);
	return ret;
}

/* Iterator over the reflog files found beneath a "logs" directory. */
struct files_reflog_iterator {
	struct ref_iterator base;
	struct ref_store *ref_store;
	struct dir_iterator *dir_iterator;
};

static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct files_reflog_iterator *iter =
		(struct files_reflog_iterator *)ref_iterator;
	struct dir_iterator *diter = iter->dir_iterator;
	int ok;

	while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
		/* Only regular files with well-formed ref names are reflogs. */
		if (!S_ISREG(diter->st.st_mode))
			continue;
		if (check_refname_format(diter->basename,
					 REFNAME_ALLOW_ONELEVEL))
			continue;

		iter->base.refname = diter->relative_path;
		return ITER_OK;
	}

	return ok;
}

static int files_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
				      const char *refname UNUSED,
				      unsigned int flags UNUSED)
{
	BUG("ref_iterator_seek() called for reflog_iterator");
}

static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
				      struct object_id *peeled UNUSED)
{
	BUG("ref_iterator_peel() called for reflog_iterator");
}

static void files_reflog_iterator_release(struct ref_iterator *ref_iterator)
{
	struct files_reflog_iterator *iter =
		(struct files_reflog_iterator *)ref_iterator;
	dir_iterator_free(iter->dir_iterator);
}

static struct ref_iterator_vtable files_reflog_iterator_vtable = {
	.advance = files_reflog_iterator_advance,
	.seek = files_reflog_iterator_seek,
	.peel = files_reflog_iterator_peel,
	.release = files_reflog_iterator_release,
};

/*
 * Return an iterator over the reflogs under <gitdir>/logs, or an
 * empty iterator if that directory cannot be iterated.
 */
static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
						  const char *gitdir)
{
	struct dir_iterator *diter;
	struct files_reflog_iterator *iter;
	struct ref_iterator *ref_iterator;
	struct strbuf sb = STRBUF_INIT;

	strbuf_addf(&sb, "%s/logs", gitdir);

	diter = dir_iterator_begin(sb.buf, DIR_ITERATOR_SORTED);
	if (!diter) {
		strbuf_release(&sb);
		return empty_ref_iterator_begin();
	}

	CALLOC_ARRAY(iter, 1);
	ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
	iter->dir_iterator = diter;
	iter->ref_store = ref_store;
	strbuf_release(&sb);

	return ref_iterator;
}

static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "reflog_iterator_begin");

	/*
	 * In a linked worktree, gitdir and the common dir differ, so
	 * merge the per-worktree reflogs with the shared ones.
	 */
	if (!strcmp(refs->base.gitdir, refs->gitcommondir)) {
		return reflog_iterator_begin(ref_store, refs->gitcommondir);
	} else {
		return merge_ref_iterator_begin(
			reflog_iterator_begin(ref_store, refs->base.gitdir),
			reflog_iterator_begin(ref_store, refs->gitcommondir),
			ref_iterator_select, refs);
	}
}

/*
 * If update is a direct update of head_ref (the reference pointed to
 * by HEAD), then add an extra
REF_LOG_ONLY update for HEAD. 2462 */ 2463static enum ref_transaction_error split_head_update(struct ref_update *update, 2464 struct ref_transaction *transaction, 2465 const char *head_ref, 2466 struct strbuf *err) 2467{ 2468 struct ref_update *new_update; 2469 2470 if ((update->flags & REF_LOG_ONLY) || 2471 (update->flags & REF_SKIP_CREATE_REFLOG) || 2472 (update->flags & REF_IS_PRUNING) || 2473 (update->flags & REF_UPDATE_VIA_HEAD)) 2474 return 0; 2475 2476 if (strcmp(update->refname, head_ref)) 2477 return 0; 2478 2479 /* 2480 * First make sure that HEAD is not already in the 2481 * transaction. This check is O(lg N) in the transaction 2482 * size, but it happens at most once per transaction. 2483 */ 2484 if (string_list_has_string(&transaction->refnames, "HEAD")) { 2485 /* An entry already existed */ 2486 strbuf_addf(err, 2487 "multiple updates for 'HEAD' (including one " 2488 "via its referent '%s') are not allowed", 2489 update->refname); 2490 return REF_TRANSACTION_ERROR_NAME_CONFLICT; 2491 } 2492 2493 new_update = ref_transaction_add_update( 2494 transaction, "HEAD", 2495 update->flags | REF_LOG_ONLY | REF_NO_DEREF | REF_LOG_VIA_SPLIT, 2496 &update->new_oid, &update->old_oid, 2497 NULL, NULL, update->committer_info, update->msg); 2498 new_update->parent_update = update; 2499 2500 /* 2501 * Add "HEAD". This insertion is O(N) in the transaction 2502 * size, but it happens at most once per transaction. 2503 * Add new_update->refname instead of a literal "HEAD". 2504 */ 2505 if (strcmp(new_update->refname, "HEAD")) 2506 BUG("%s unexpectedly not 'HEAD'", new_update->refname); 2507 2508 return 0; 2509} 2510 2511/* 2512 * update is for a symref that points at referent and doesn't have 2513 * REF_NO_DEREF set. 
Split it into two updates:
 * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set
 * - A new, separate update for the referent reference
 * Note that the new update will itself be subject to splitting when
 * the iteration gets to it.
 */
static enum ref_transaction_error split_symref_update(struct ref_update *update,
						      const char *referent,
						      struct ref_transaction *transaction,
						      struct strbuf *err)
{
	struct ref_update *new_update;
	unsigned int new_flags;

	/*
	 * First make sure that referent is not already in the
	 * transaction. This check is O(lg N) in the transaction
	 * size, but it happens at most once per symref in a
	 * transaction.
	 */
	if (string_list_has_string(&transaction->refnames, referent)) {
		/* An entry already exists */
		strbuf_addf(err,
			    "multiple updates for '%s' (including one "
			    "via symref '%s') are not allowed",
			    referent, update->refname);
		return REF_TRANSACTION_ERROR_NAME_CONFLICT;
	}

	new_flags = update->flags;
	if (!strcmp(update->refname, "HEAD")) {
		/*
		 * Record that the new update came via HEAD, so that
		 * when we process it, split_head_update() doesn't try
		 * to add another reflog update for HEAD. Note that
		 * this bit will be propagated if the new_update
		 * itself needs to be split.
		 */
		new_flags |= REF_UPDATE_VIA_HEAD;
	}

	/*
	 * OID and target arguments are mutually exclusive: pass the
	 * OIDs only when no symref target was requested.
	 */
	new_update = ref_transaction_add_update(
			transaction, referent, new_flags,
			update->new_target ? NULL : &update->new_oid,
			update->old_target ? NULL : &update->old_oid,
			update->new_target, update->old_target, NULL,
			update->msg);

	new_update->parent_update = update;

	/*
	 * Change the symbolic ref update to log only. Also, it
	 * doesn't need to check its old OID value, as that will be
	 * done when new_update is processed.
	 */
	update->flags |= REF_LOG_ONLY | REF_NO_DEREF;

	return 0;
}

/*
 * Check whether the REF_HAVE_OLD and old_oid values stored in update
 * are consistent with oid, which is the reference's current value. If
 * everything is OK, return 0; otherwise, write an error message to
 * err and return the appropriate REF_TRANSACTION_ERROR_* code.
 */
static enum ref_transaction_error check_old_oid(struct ref_update *update,
						struct object_id *oid,
						struct strbuf *referent,
						struct strbuf *err)
{
	/* Nothing to verify for log-only updates or updates with no old OID. */
	if (update->flags & REF_LOG_ONLY ||
	    !(update->flags & REF_HAVE_OLD))
		return 0;

	if (oideq(oid, &update->old_oid)) {
		/*
		 * Normally matching the expected old oid is enough. Either we
		 * found the ref at the expected state, or we are creating and
		 * expect the null oid (and likewise found nothing).
		 *
		 * But there is one exception for the null oid: if we found a
		 * symref pointing to nothing we'll also get the null oid. In
		 * regular recursive mode, that's good (we'll write to what the
		 * symref points to, which doesn't exist). But in no-deref
		 * mode, it means we'll clobber the symref, even though the
		 * caller asked for this to be a creation event. So flag
		 * that case to preserve the dangling symref.
		 */
		if ((update->flags & REF_NO_DEREF) && referent->len &&
		    is_null_oid(oid)) {
			strbuf_addf(err, "cannot lock ref '%s': "
				    "dangling symref already exists",
				    ref_update_original_update_refname(update));
			return REF_TRANSACTION_ERROR_CREATE_EXISTS;
		}
		return 0;
	}

	if (is_null_oid(&update->old_oid)) {
		/* Caller asked for creation, but the ref already exists. */
		strbuf_addf(err, "cannot lock ref '%s': "
			    "reference already exists",
			    ref_update_original_update_refname(update));
		return REF_TRANSACTION_ERROR_CREATE_EXISTS;
	} else if (is_null_oid(oid)) {
		/* Caller expected an existing value, but the ref is gone. */
		strbuf_addf(err, "cannot lock ref '%s': "
			    "reference is missing but expected %s",
			    ref_update_original_update_refname(update),
			    oid_to_hex(&update->old_oid));
		return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
	}

	strbuf_addf(err, "cannot lock ref '%s': is at %s but expected %s",
		    ref_update_original_update_refname(update), oid_to_hex(oid),
		    oid_to_hex(&update->old_oid));

	return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE;
}

/* Per-transaction state private to the files backend. */
struct files_transaction_backend_data {
	/* Companion transaction used to rewrite the packed-refs file. */
	struct ref_transaction *packed_transaction;
	/* Non-zero while we hold the packed-refs lock. */
	int packed_refs_locked;
	/* Maps refname to the `struct ref_lock` shared by its updates. */
	struct strmap ref_locks;
};

/*
 * Prepare for carrying out update:
 * - Lock the reference referred to by update.
 * - Read the reference under lock.
 * - Check that its old OID value (if specified) is correct, and in
 *   any case record it in update->lock->old_oid for later use when
 *   writing the reflog.
 * - If it is a symref update without REF_NO_DEREF, split it up into a
 *   REF_LOG_ONLY update of the symref and add a separate update for
 *   the referent to transaction.
 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
 *   update of HEAD.
 */
static enum ref_transaction_error lock_ref_for_update(struct files_ref_store *refs,
						      struct ref_update *update,
						      size_t update_idx,
						      struct ref_transaction *transaction,
						      const char *head_ref,
						      struct string_list *refnames_to_check,
						      struct strbuf *err)
{
	struct strbuf referent = STRBUF_INIT;
	int mustexist = ref_update_expects_existing_old_ref(update);
	struct files_transaction_backend_data *backend_data;
	enum ref_transaction_error ret = 0;
	struct ref_lock *lock;

	files_assert_main_repository(refs, "lock_ref_for_update");

	backend_data = transaction->backend_data;

	/* A new value of all-zeros means this update is a deletion. */
	if ((update->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(update))
		update->flags |= REF_DELETING;

	if (head_ref) {
		ret = split_head_update(update, transaction, head_ref, err);
		if (ret)
			goto out;
	}

	/*
	 * Reuse an existing lock if another update in this transaction
	 * already locked the same ref; otherwise take the raw lock and
	 * remember it for later updates.
	 */
	lock = strmap_get(&backend_data->ref_locks, update->refname);
	if (lock) {
		lock->count++;
	} else {
		ret = lock_raw_ref(refs, transaction, update_idx, mustexist,
				   refnames_to_check, &lock, &referent, err);
		if (ret) {
			char *reason;

			reason = strbuf_detach(err, NULL);
			strbuf_addf(err, "cannot lock ref '%s': %s",
				    ref_update_original_update_refname(update), reason);
			free(reason);
			goto out;
		}

		strmap_put(&backend_data->ref_locks, update->refname, lock);
	}

	update->backend_data = lock;

	if (update->flags & REF_LOG_VIA_SPLIT) {
		struct ref_lock *parent_lock;

		if (!update->parent_update)
			BUG("split update without a parent");

		parent_lock = update->parent_update->backend_data;

		/*
		 * Check that "HEAD" didn't racily change since we have looked
		 * it up. If it did we must refuse to write the reflog entry.
		 *
		 * Note that this does not catch all races: if "HEAD" was
		 * racily changed to point to one of the refs part of the
		 * transaction then we would miss writing the split reflog
		 * entry for "HEAD".
		 */
		if (!(update->type & REF_ISSYMREF) ||
		    strcmp(update->parent_update->refname, referent.buf)) {
			strbuf_addstr(err, "HEAD has been racily updated");
			ret = REF_TRANSACTION_ERROR_GENERIC;
			goto out;
		}

		if (update->flags & REF_HAVE_OLD) {
			oidcpy(&lock->old_oid, &update->old_oid);
		} else {
			oidcpy(&lock->old_oid, &parent_lock->old_oid);
		}
	} else if (update->type & REF_ISSYMREF) {
		if (update->flags & REF_NO_DEREF) {
			/*
			 * We won't be reading the referent as part of
			 * the transaction, so we have to read it here
			 * to record and possibly check old_oid:
			 */
			if (!refs_resolve_ref_unsafe(&refs->base,
						     referent.buf, 0,
						     &lock->old_oid, NULL)) {
				if (update->flags & REF_HAVE_OLD) {
					strbuf_addf(err, "cannot lock ref '%s': "
						    "error reading reference",
						    ref_update_original_update_refname(update));
					ret = REF_TRANSACTION_ERROR_GENERIC;
					goto out;
				}
			}

			if (update->old_target)
				ret = ref_update_check_old_target(referent.buf, update, err);
			else
				ret = check_old_oid(update, &lock->old_oid,
						    &referent, err);
			if (ret)
				goto out;
		} else {
			/*
			 * Create a new update for the reference this
			 * symref is pointing at. Also, we will record
			 * and verify old_oid for this update as part
			 * of processing the split-off update, so we
			 * don't have to do it here.
			 */
			ret = split_symref_update(update, referent.buf,
						  transaction, err);
			if (ret)
				goto out;
		}
	} else {
		struct ref_update *parent_update;

		/*
		 * Even if the ref is a regular ref, if `old_target` is set, we
		 * fail with an error.
		 */
		if (update->old_target) {
			strbuf_addf(err, _("cannot lock ref '%s': "
					   "expected symref with target '%s': "
					   "but is a regular ref"),
				    ref_update_original_update_refname(update),
				    update->old_target);
			ret = REF_TRANSACTION_ERROR_EXPECTED_SYMREF;
			goto out;
		} else {
			ret = check_old_oid(update, &lock->old_oid,
					    &referent, err);
			if (ret) {
				goto out;
			}
		}

		/*
		 * If this update is happening indirectly because of a
		 * symref update, record the old OID in the parent
		 * update:
		 */
		for (parent_update = update->parent_update;
		     parent_update;
		     parent_update = parent_update->parent_update) {
			struct ref_lock *parent_lock = parent_update->backend_data;
			oidcpy(&parent_lock->old_oid, &lock->old_oid);
		}
	}

	if (update->new_target && !(update->flags & REF_LOG_ONLY)) {
		if (create_symref_lock(lock, update->new_target, err)) {
			ret = REF_TRANSACTION_ERROR_GENERIC;
			goto out;
		}

		if (close_ref_gently(lock)) {
			strbuf_addf(err, "couldn't close '%s.lock'",
				    update->refname);
			ret = REF_TRANSACTION_ERROR_GENERIC;
			goto out;
		}

		/*
		 * Once we have created the symref lock, the commit
		 * phase of the transaction only needs to commit the lock.
		 */
		update->flags |= REF_NEEDS_COMMIT;
	} else if ((update->flags & REF_HAVE_NEW) &&
		   !(update->flags & REF_DELETING) &&
		   !(update->flags & REF_LOG_ONLY)) {
		if (!(update->type & REF_ISSYMREF) &&
		    oideq(&lock->old_oid, &update->new_oid)) {
			/*
			 * The reference already has the desired
			 * value, so we don't need to write it.
			 */
		} else {
			ret = write_ref_to_lockfile(
					refs, lock, &update->new_oid,
					update->flags & REF_SKIP_OID_VERIFICATION,
					err);
			if (ret) {
				char *write_err = strbuf_detach(err, NULL);

				/*
				 * The lock was freed upon failure of
				 * write_ref_to_lockfile():
				 */
				update->backend_data = NULL;
				strbuf_addf(err,
					    "cannot update ref '%s': %s",
					    update->refname, write_err);
				free(write_err);
				goto out;
			} else {
				update->flags |= REF_NEEDS_COMMIT;
			}
		}
	}
	if (!(update->flags & REF_NEEDS_COMMIT)) {
		/*
		 * We didn't call write_ref_to_lockfile(), so
		 * the lockfile is still open. Close it to
		 * free up the file descriptor:
		 */
		if (close_ref_gently(lock)) {
			strbuf_addf(err, "couldn't close '%s.lock'",
				    update->refname);
			ret = REF_TRANSACTION_ERROR_GENERIC;
			goto out;
		}
	}

out:
	strbuf_release(&referent);
	return ret;
}

/*
 * Unlock any references in `transaction` that are still locked, and
 * mark the transaction closed.
 */
static void files_transaction_cleanup(struct files_ref_store *refs,
				      struct ref_transaction *transaction)
{
	size_t i;
	struct files_transaction_backend_data *backend_data =
		transaction->backend_data;
	struct strbuf err = STRBUF_INIT;

	/* Release every per-update lock and tidy up empty ref directories. */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct ref_lock *lock = update->backend_data;

		if (lock) {
			unlock_ref(lock);
			try_remove_empty_parents(refs, update->refname,
						 REMOVE_EMPTY_PARENTS_REF);
			update->backend_data = NULL;
		}
	}

	if (backend_data) {
		/* Abort errors are reported but cannot change the outcome. */
		if (backend_data->packed_transaction &&
		    ref_transaction_abort(backend_data->packed_transaction, &err)) {
			error("error aborting transaction: %s", err.buf);
			strbuf_release(&err);
		}

		if (backend_data->packed_refs_locked)
			packed_refs_unlock(refs->packed_ref_store);

		strmap_clear(&backend_data->ref_locks, 0);

		free(backend_data);
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

static int files_transaction_prepare(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE,
			       "ref_transaction_prepare");
	size_t i;
	int ret = 0;
	struct string_list refnames_to_check = STRING_LIST_INIT_DUP;
	char *head_ref = NULL;
	int head_type;
	struct files_transaction_backend_data *backend_data;
	struct ref_transaction *packed_transaction = NULL;

	assert(err);

	/* Initial transactions are handled by the dedicated finish path. */
	if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL)
		goto cleanup;
	if (!transaction->nr)
		goto cleanup;

	CALLOC_ARRAY(backend_data, 1);
	strmap_init(&backend_data->ref_locks);
	transaction->backend_data = backend_data;

	/*
	 * Fail if any of the updates use REF_IS_PRUNING without REF_NO_DEREF.
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if ((update->flags & REF_IS_PRUNING) &&
		    !(update->flags & REF_NO_DEREF))
			BUG("REF_IS_PRUNING set without REF_NO_DEREF");
	}

	/*
	 * Special hack: If a branch is updated directly and HEAD
	 * points to it (may happen on the remote side of a push
	 * for example) then logically the HEAD reflog should be
	 * updated too.
	 *
	 * A generic solution would require reverse symref lookups,
	 * but finding all symrefs pointing to a given branch would be
	 * rather costly for this rare event (the direct update of a
	 * branch) to be worth it. So let's cheat and check with HEAD
	 * only, which should cover 99% of all usage scenarios (even
	 * 100% of the default ones).
	 *
	 * So if HEAD is a symbolic reference, then record the name of
	 * the reference that it points to. If we see an update of
	 * head_ref within the transaction, then split_head_update()
	 * arranges for the reflog of HEAD to be updated, too.
	 */
	head_ref = refs_resolve_refdup(ref_store, "HEAD",
				       RESOLVE_REF_NO_RECURSE,
				       NULL, &head_type);

	if (head_ref && !(head_type & REF_ISSYMREF)) {
		FREE_AND_NULL(head_ref);
	}

	/*
	 * Acquire all locks, verify old values if provided, check
	 * that new values are valid, and write new values to the
	 * lockfiles, ready to be activated. Only keep one lockfile
	 * open at a time to avoid running out of file descriptors.
	 * Note that lock_ref_for_update() might append more updates
	 * to the transaction.
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		ret = lock_ref_for_update(refs, update, i, transaction,
					  head_ref, &refnames_to_check,
					  err);
		if (ret) {
			/* Rejected updates may be tolerated instead of failing. */
			if (ref_transaction_maybe_set_rejected(transaction, i, ret)) {
				strbuf_reset(err);
				ret = 0;

				continue;
			}
			goto cleanup;
		}

		if (update->flags & REF_DELETING &&
		    !(update->flags & REF_LOG_ONLY) &&
		    !(update->flags & REF_IS_PRUNING)) {
			/*
			 * This reference has to be deleted from
			 * packed-refs if it exists there.
			 */
			if (!packed_transaction) {
				packed_transaction = ref_store_transaction_begin(
						refs->packed_ref_store,
						transaction->flags, err);
				if (!packed_transaction) {
					ret = REF_TRANSACTION_ERROR_GENERIC;
					goto cleanup;
				}

				backend_data->packed_transaction =
					packed_transaction;
			}

			ref_transaction_add_update(
					packed_transaction, update->refname,
					REF_HAVE_NEW | REF_NO_DEREF,
					&update->new_oid, NULL,
					NULL, NULL, NULL, NULL);
		}
	}

	/*
	 * Verify that none of the loose reference that we're about to write
	 * conflict with any existing packed references. Ideally, we'd do this
	 * check after the packed-refs are locked so that the file cannot
	 * change underneath our feet. But introducing such a lock now would
	 * probably do more harm than good as users rely on there not being a
	 * global lock with the "files" backend.
	 *
	 * Another alternative would be to do the check after the (optional)
	 * lock, but that would extend the time we spend in the globally-locked
	 * state.
	 *
	 * So instead, we accept the race for now.
	 */
	if (refs_verify_refnames_available(refs->packed_ref_store, &refnames_to_check,
					   &transaction->refnames, NULL, transaction,
					   0, err)) {
		ret = REF_TRANSACTION_ERROR_NAME_CONFLICT;
		goto cleanup;
	}

	if (packed_transaction) {
		if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
			ret = REF_TRANSACTION_ERROR_GENERIC;
			goto cleanup;
		}
		backend_data->packed_refs_locked = 1;

		if (is_packed_transaction_needed(refs->packed_ref_store,
						 packed_transaction)) {
			ret = ref_transaction_prepare(packed_transaction, err);
			/*
			 * A failure during the prepare step will abort
			 * itself, but not free. Do that now, and disconnect
			 * from the files_transaction so it does not try to
			 * abort us when we hit the cleanup code below.
			 */
			if (ret) {
				ref_transaction_free(packed_transaction);
				backend_data->packed_transaction = NULL;
			}
		} else {
			/*
			 * We can skip rewriting the `packed-refs`
			 * file. But we do need to leave it locked, so
			 * that somebody else doesn't pack a reference
			 * that we are trying to delete.
			 *
			 * We need to disconnect our transaction from
			 * backend_data, since the abort (whether successful or
			 * not) will free it.
3080 */ 3081 backend_data->packed_transaction = NULL; 3082 if (ref_transaction_abort(packed_transaction, err)) { 3083 ret = REF_TRANSACTION_ERROR_GENERIC; 3084 goto cleanup; 3085 } 3086 } 3087 } 3088 3089cleanup: 3090 free(head_ref); 3091 string_list_clear(&refnames_to_check, 1); 3092 3093 if (ret) 3094 files_transaction_cleanup(refs, transaction); 3095 else 3096 transaction->state = REF_TRANSACTION_PREPARED; 3097 3098 return ret; 3099} 3100 3101static int parse_and_write_reflog(struct files_ref_store *refs, 3102 struct ref_update *update, 3103 struct ref_lock *lock, 3104 struct strbuf *err) 3105{ 3106 struct object_id *old_oid = &lock->old_oid; 3107 3108 if (update->flags & REF_LOG_USE_PROVIDED_OIDS) { 3109 if (!(update->flags & REF_HAVE_OLD) || 3110 !(update->flags & REF_HAVE_NEW) || 3111 !(update->flags & REF_LOG_ONLY)) { 3112 strbuf_addf(err, _("trying to write reflog for '%s'" 3113 "with incomplete values"), update->refname); 3114 return REF_TRANSACTION_ERROR_GENERIC; 3115 } 3116 3117 old_oid = &update->old_oid; 3118 } 3119 3120 if (update->new_target) { 3121 /* 3122 * We want to get the resolved OID for the target, to ensure 3123 * that the correct value is added to the reflog. 3124 */ 3125 if (!refs_resolve_ref_unsafe(&refs->base, update->new_target, 3126 RESOLVE_REF_READING, 3127 &update->new_oid, NULL)) { 3128 /* 3129 * TODO: currently we skip creating reflogs for dangling 3130 * symref updates. It would be nice to capture this as 3131 * zero oid updates however. 
3132 */ 3133 return 0; 3134 } 3135 } 3136 3137 if (files_log_ref_write(refs, lock->ref_name, old_oid, 3138 &update->new_oid, update->committer_info, 3139 update->msg, update->flags, err)) { 3140 char *old_msg = strbuf_detach(err, NULL); 3141 3142 strbuf_addf(err, "cannot update the ref '%s': %s", 3143 lock->ref_name, old_msg); 3144 free(old_msg); 3145 unlock_ref(lock); 3146 update->backend_data = NULL; 3147 return -1; 3148 } 3149 3150 return 0; 3151} 3152 3153static int ref_present(const char *refname, const char *referent UNUSED, 3154 const struct object_id *oid UNUSED, 3155 int flags UNUSED, 3156 void *cb_data) 3157{ 3158 struct string_list *affected_refnames = cb_data; 3159 3160 return string_list_has_string(affected_refnames, refname); 3161} 3162 3163static int files_transaction_finish_initial(struct files_ref_store *refs, 3164 struct ref_transaction *transaction, 3165 struct strbuf *err) 3166{ 3167 size_t i; 3168 int ret = 0; 3169 struct string_list affected_refnames = STRING_LIST_INIT_NODUP; 3170 struct string_list refnames_to_check = STRING_LIST_INIT_NODUP; 3171 struct ref_transaction *packed_transaction = NULL; 3172 struct ref_transaction *loose_transaction = NULL; 3173 3174 assert(err); 3175 3176 if (transaction->state != REF_TRANSACTION_PREPARED) 3177 BUG("commit called for transaction that is not prepared"); 3178 3179 /* 3180 * It's really undefined to call this function in an active 3181 * repository or when there are existing references: we are 3182 * only locking and changing packed-refs, so (1) any 3183 * simultaneous processes might try to change a reference at 3184 * the same time we do, and (2) any existing loose versions of 3185 * the references that we are setting would have precedence 3186 * over our values. But some remote helpers create the remote 3187 * "HEAD" and "master" branches before calling this function, 3188 * so here we really only check that none of the references 3189 * that we are creating already exists. 
3190 */ 3191 if (refs_for_each_rawref(&refs->base, ref_present, 3192 &transaction->refnames)) 3193 BUG("initial ref transaction called with existing refs"); 3194 3195 packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, 3196 transaction->flags, err); 3197 if (!packed_transaction) { 3198 ret = REF_TRANSACTION_ERROR_GENERIC; 3199 goto cleanup; 3200 } 3201 3202 for (i = 0; i < transaction->nr; i++) { 3203 struct ref_update *update = transaction->updates[i]; 3204 3205 if (!(update->flags & REF_LOG_ONLY) && 3206 (update->flags & REF_HAVE_OLD) && 3207 !is_null_oid(&update->old_oid)) 3208 BUG("initial ref transaction with old_sha1 set"); 3209 3210 string_list_append(&refnames_to_check, update->refname); 3211 3212 /* 3213 * packed-refs don't support symbolic refs, root refs and reflogs, 3214 * so we have to queue these references via the loose transaction. 3215 */ 3216 if (update->new_target || 3217 is_root_ref(update->refname) || 3218 (update->flags & REF_LOG_ONLY)) { 3219 if (!loose_transaction) { 3220 loose_transaction = ref_store_transaction_begin(&refs->base, 0, err); 3221 if (!loose_transaction) { 3222 ret = REF_TRANSACTION_ERROR_GENERIC; 3223 goto cleanup; 3224 } 3225 } 3226 3227 if (update->flags & REF_LOG_ONLY) 3228 ref_transaction_add_update(loose_transaction, update->refname, 3229 update->flags, &update->new_oid, 3230 &update->old_oid, NULL, NULL, 3231 update->committer_info, update->msg); 3232 else 3233 ref_transaction_add_update(loose_transaction, update->refname, 3234 update->flags & ~REF_HAVE_OLD, 3235 update->new_target ? 
NULL : &update->new_oid, NULL, 3236 update->new_target, NULL, update->committer_info, 3237 NULL); 3238 } else { 3239 ref_transaction_add_update(packed_transaction, update->refname, 3240 update->flags & ~REF_HAVE_OLD, 3241 &update->new_oid, &update->old_oid, 3242 NULL, NULL, update->committer_info, NULL); 3243 } 3244 } 3245 3246 if (packed_refs_lock(refs->packed_ref_store, 0, err)) { 3247 ret = REF_TRANSACTION_ERROR_GENERIC; 3248 goto cleanup; 3249 } 3250 3251 if (refs_verify_refnames_available(&refs->base, &refnames_to_check, 3252 &affected_refnames, NULL, transaction, 3253 1, err)) { 3254 packed_refs_unlock(refs->packed_ref_store); 3255 ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; 3256 goto cleanup; 3257 } 3258 3259 if (ref_transaction_commit(packed_transaction, err)) { 3260 ret = REF_TRANSACTION_ERROR_GENERIC; 3261 goto cleanup; 3262 } 3263 packed_refs_unlock(refs->packed_ref_store); 3264 3265 if (loose_transaction) { 3266 if (ref_transaction_prepare(loose_transaction, err) || 3267 ref_transaction_commit(loose_transaction, err)) { 3268 ret = REF_TRANSACTION_ERROR_GENERIC; 3269 goto cleanup; 3270 } 3271 } 3272 3273cleanup: 3274 if (loose_transaction) 3275 ref_transaction_free(loose_transaction); 3276 if (packed_transaction) 3277 ref_transaction_free(packed_transaction); 3278 transaction->state = REF_TRANSACTION_CLOSED; 3279 string_list_clear(&affected_refnames, 0); 3280 string_list_clear(&refnames_to_check, 0); 3281 return ret; 3282} 3283 3284static int files_transaction_finish(struct ref_store *ref_store, 3285 struct ref_transaction *transaction, 3286 struct strbuf *err) 3287{ 3288 struct files_ref_store *refs = 3289 files_downcast(ref_store, 0, "ref_transaction_finish"); 3290 size_t i; 3291 int ret = 0; 3292 struct strbuf sb = STRBUF_INIT; 3293 struct files_transaction_backend_data *backend_data; 3294 struct ref_transaction *packed_transaction; 3295 3296 3297 assert(err); 3298 3299 if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL) 3300 return 
files_transaction_finish_initial(refs, transaction, err); 3301 if (!transaction->nr) { 3302 transaction->state = REF_TRANSACTION_CLOSED; 3303 return 0; 3304 } 3305 3306 backend_data = transaction->backend_data; 3307 packed_transaction = backend_data->packed_transaction; 3308 3309 /* Perform updates first so live commits remain referenced */ 3310 for (i = 0; i < transaction->nr; i++) { 3311 struct ref_update *update = transaction->updates[i]; 3312 struct ref_lock *lock = update->backend_data; 3313 3314 if (update->rejection_err) 3315 continue; 3316 3317 if (update->flags & REF_NEEDS_COMMIT || 3318 update->flags & REF_LOG_ONLY) { 3319 if (parse_and_write_reflog(refs, update, lock, err)) { 3320 ret = REF_TRANSACTION_ERROR_GENERIC; 3321 goto cleanup; 3322 } 3323 } 3324 3325 /* 3326 * We try creating a symlink, if that succeeds we continue to the 3327 * next update. If not, we try and create a regular symref. 3328 */ 3329 if (update->new_target && refs->prefer_symlink_refs) 3330 /* 3331 * By using the `NOT_CONSTANT()` trick, we can avoid 3332 * errors by `clang`'s `-Wunreachable` logic that would 3333 * report that the `continue` statement is not reachable 3334 * when `NO_SYMLINK_HEAD` is `#define`d. 3335 */ 3336 if (NOT_CONSTANT(!create_ref_symlink(lock, update->new_target))) 3337 continue; 3338 3339 if (update->flags & REF_NEEDS_COMMIT) { 3340 clear_loose_ref_cache(refs); 3341 if (commit_ref(lock)) { 3342 strbuf_addf(err, "couldn't set '%s'", lock->ref_name); 3343 unlock_ref(lock); 3344 update->backend_data = NULL; 3345 ret = REF_TRANSACTION_ERROR_GENERIC; 3346 goto cleanup; 3347 } 3348 } 3349 } 3350 3351 /* 3352 * Now that updates are safely completed, we can perform 3353 * deletes. 
First delete the reflogs of any references that 3354 * will be deleted, since (in the unexpected event of an 3355 * error) leaving a reference without a reflog is less bad 3356 * than leaving a reflog without a reference (the latter is a 3357 * mildly invalid repository state): 3358 */ 3359 for (i = 0; i < transaction->nr; i++) { 3360 struct ref_update *update = transaction->updates[i]; 3361 3362 if (update->rejection_err) 3363 continue; 3364 3365 if (update->flags & REF_DELETING && 3366 !(update->flags & REF_LOG_ONLY) && 3367 !(update->flags & REF_IS_PRUNING)) { 3368 strbuf_reset(&sb); 3369 files_reflog_path(refs, &sb, update->refname); 3370 if (!unlink_or_warn(sb.buf)) 3371 try_remove_empty_parents(refs, update->refname, 3372 REMOVE_EMPTY_PARENTS_REFLOG); 3373 } 3374 } 3375 3376 /* 3377 * Perform deletes now that updates are safely completed. 3378 * 3379 * First delete any packed versions of the references, while 3380 * retaining the packed-refs lock: 3381 */ 3382 if (packed_transaction) { 3383 ret = ref_transaction_commit(packed_transaction, err); 3384 ref_transaction_free(packed_transaction); 3385 packed_transaction = NULL; 3386 backend_data->packed_transaction = NULL; 3387 if (ret) 3388 goto cleanup; 3389 } 3390 3391 /* Now delete the loose versions of the references: */ 3392 for (i = 0; i < transaction->nr; i++) { 3393 struct ref_update *update = transaction->updates[i]; 3394 struct ref_lock *lock = update->backend_data; 3395 3396 if (update->rejection_err) 3397 continue; 3398 3399 if (update->flags & REF_DELETING && 3400 !(update->flags & REF_LOG_ONLY)) { 3401 update->flags |= REF_DELETED_RMDIR; 3402 if (!(update->type & REF_ISPACKED) || 3403 update->type & REF_ISSYMREF) { 3404 /* It is a loose reference. 
*/ 3405 strbuf_reset(&sb); 3406 files_ref_path(refs, &sb, lock->ref_name); 3407 if (unlink_or_msg(sb.buf, err)) { 3408 ret = REF_TRANSACTION_ERROR_GENERIC; 3409 goto cleanup; 3410 } 3411 } 3412 } 3413 } 3414 3415 clear_loose_ref_cache(refs); 3416 3417cleanup: 3418 files_transaction_cleanup(refs, transaction); 3419 3420 for (i = 0; i < transaction->nr; i++) { 3421 struct ref_update *update = transaction->updates[i]; 3422 3423 if (update->flags & REF_DELETED_RMDIR) { 3424 /* 3425 * The reference was deleted. Delete any 3426 * empty parent directories. (Note that this 3427 * can only work because we have already 3428 * removed the lockfile.) 3429 */ 3430 try_remove_empty_parents(refs, update->refname, 3431 REMOVE_EMPTY_PARENTS_REF); 3432 } 3433 } 3434 3435 strbuf_release(&sb); 3436 return ret; 3437} 3438 3439static int files_transaction_abort(struct ref_store *ref_store, 3440 struct ref_transaction *transaction, 3441 struct strbuf *err UNUSED) 3442{ 3443 struct files_ref_store *refs = 3444 files_downcast(ref_store, 0, "ref_transaction_abort"); 3445 3446 files_transaction_cleanup(refs, transaction); 3447 return 0; 3448} 3449 3450struct expire_reflog_cb { 3451 reflog_expiry_should_prune_fn *should_prune_fn; 3452 void *policy_cb; 3453 FILE *newlog; 3454 struct object_id last_kept_oid; 3455 unsigned int rewrite:1, 3456 dry_run:1; 3457}; 3458 3459static int expire_reflog_ent(const char *refname UNUSED, 3460 struct object_id *ooid, struct object_id *noid, 3461 const char *email, timestamp_t timestamp, int tz, 3462 const char *message, void *cb_data) 3463{ 3464 struct expire_reflog_cb *cb = cb_data; 3465 reflog_expiry_should_prune_fn *fn = cb->should_prune_fn; 3466 3467 if (cb->rewrite) 3468 ooid = &cb->last_kept_oid; 3469 3470 if (fn(ooid, noid, email, timestamp, tz, message, cb->policy_cb)) 3471 return 0; 3472 3473 if (cb->dry_run) 3474 return 0; /* --dry-run */ 3475 3476 fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s", oid_to_hex(ooid), 3477 oid_to_hex(noid), email, 
timestamp, tz, message); 3478 oidcpy(&cb->last_kept_oid, noid); 3479 3480 return 0; 3481} 3482 3483static int files_reflog_expire(struct ref_store *ref_store, 3484 const char *refname, 3485 unsigned int expire_flags, 3486 reflog_expiry_prepare_fn prepare_fn, 3487 reflog_expiry_should_prune_fn should_prune_fn, 3488 reflog_expiry_cleanup_fn cleanup_fn, 3489 void *policy_cb_data) 3490{ 3491 struct files_ref_store *refs = 3492 files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire"); 3493 struct lock_file reflog_lock = LOCK_INIT; 3494 struct expire_reflog_cb cb; 3495 struct ref_lock *lock; 3496 struct strbuf log_file_sb = STRBUF_INIT; 3497 char *log_file; 3498 int status = 0; 3499 struct strbuf err = STRBUF_INIT; 3500 const struct object_id *oid; 3501 3502 memset(&cb, 0, sizeof(cb)); 3503 cb.rewrite = !!(expire_flags & EXPIRE_REFLOGS_REWRITE); 3504 cb.dry_run = !!(expire_flags & EXPIRE_REFLOGS_DRY_RUN); 3505 cb.policy_cb = policy_cb_data; 3506 cb.should_prune_fn = should_prune_fn; 3507 3508 /* 3509 * The reflog file is locked by holding the lock on the 3510 * reference itself, plus we might need to update the 3511 * reference if --updateref was specified: 3512 */ 3513 lock = lock_ref_oid_basic(refs, refname, &err); 3514 if (!lock) { 3515 error("cannot lock ref '%s': %s", refname, err.buf); 3516 strbuf_release(&err); 3517 return -1; 3518 } 3519 oid = &lock->old_oid; 3520 3521 /* 3522 * When refs are deleted, their reflog is deleted before the 3523 * ref itself is deleted. This is because there is no separate 3524 * lock for reflog; instead we take a lock on the ref with 3525 * lock_ref_oid_basic(). 3526 * 3527 * If a race happens and the reflog doesn't exist after we've 3528 * acquired the lock that's OK. We've got nothing more to do; 3529 * We were asked to delete the reflog, but someone else 3530 * deleted it! The caller doesn't care that we deleted it, 3531 * just that it is deleted. So we can return successfully. 
3532 */ 3533 if (!refs_reflog_exists(ref_store, refname)) { 3534 unlock_ref(lock); 3535 return 0; 3536 } 3537 3538 files_reflog_path(refs, &log_file_sb, refname); 3539 log_file = strbuf_detach(&log_file_sb, NULL); 3540 if (!cb.dry_run) { 3541 /* 3542 * Even though holding $GIT_DIR/logs/$reflog.lock has 3543 * no locking implications, we use the lock_file 3544 * machinery here anyway because it does a lot of the 3545 * work we need, including cleaning up if the program 3546 * exits unexpectedly. 3547 */ 3548 if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) { 3549 struct strbuf err = STRBUF_INIT; 3550 unable_to_lock_message(log_file, errno, &err); 3551 error("%s", err.buf); 3552 strbuf_release(&err); 3553 goto failure; 3554 } 3555 cb.newlog = fdopen_lock_file(&reflog_lock, "w"); 3556 if (!cb.newlog) { 3557 error("cannot fdopen %s (%s)", 3558 get_lock_file_path(&reflog_lock), strerror(errno)); 3559 goto failure; 3560 } 3561 } 3562 3563 (*prepare_fn)(refname, oid, cb.policy_cb); 3564 refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb); 3565 (*cleanup_fn)(cb.policy_cb); 3566 3567 if (!cb.dry_run) { 3568 /* 3569 * It doesn't make sense to adjust a reference pointed 3570 * to by a symbolic ref based on expiring entries in 3571 * the symbolic reference's reflog. Nor can we update 3572 * a reference if there are no remaining reflog 3573 * entries. 
3574 */ 3575 int update = 0; 3576 3577 if ((expire_flags & EXPIRE_REFLOGS_UPDATE_REF) && 3578 !is_null_oid(&cb.last_kept_oid)) { 3579 int type; 3580 const char *ref; 3581 3582 ref = refs_resolve_ref_unsafe(&refs->base, refname, 3583 RESOLVE_REF_NO_RECURSE, 3584 NULL, &type); 3585 update = !!(ref && !(type & REF_ISSYMREF)); 3586 } 3587 3588 if (close_lock_file_gently(&reflog_lock)) { 3589 status |= error("couldn't write %s: %s", log_file, 3590 strerror(errno)); 3591 rollback_lock_file(&reflog_lock); 3592 } else if (update && 3593 (write_in_full(get_lock_file_fd(&lock->lk), 3594 oid_to_hex(&cb.last_kept_oid), refs->base.repo->hash_algo->hexsz) < 0 || 3595 write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 || 3596 close_ref_gently(lock) < 0)) { 3597 status |= error("couldn't write %s", 3598 get_lock_file_path(&lock->lk)); 3599 rollback_lock_file(&reflog_lock); 3600 } else if (commit_lock_file(&reflog_lock)) { 3601 status |= error("unable to write reflog '%s' (%s)", 3602 log_file, strerror(errno)); 3603 } else if (update && commit_ref(lock)) { 3604 status |= error("couldn't set %s", lock->ref_name); 3605 } 3606 } 3607 free(log_file); 3608 unlock_ref(lock); 3609 return status; 3610 3611 failure: 3612 rollback_lock_file(&reflog_lock); 3613 free(log_file); 3614 unlock_ref(lock); 3615 return -1; 3616} 3617 3618static int files_ref_store_create_on_disk(struct ref_store *ref_store, 3619 int flags, 3620 struct strbuf *err UNUSED) 3621{ 3622 struct files_ref_store *refs = 3623 files_downcast(ref_store, REF_STORE_WRITE, "create"); 3624 struct strbuf sb = STRBUF_INIT; 3625 3626 /* 3627 * We need to create a "refs" dir in any case so that older versions of 3628 * Git can tell that this is a repository. This serves two main purposes: 3629 * 3630 * - Clients will know to stop walking the parent-directory chain when 3631 * detecting the Git repository. Otherwise they may end up detecting 3632 * a Git repository in a parent directory instead. 
3633 * 3634 * - Instead of failing to detect a repository with unknown reference 3635 * format altogether, old clients will print an error saying that 3636 * they do not understand the reference format extension. 3637 */ 3638 strbuf_addf(&sb, "%s/refs", ref_store->gitdir); 3639 safe_create_dir(the_repository, sb.buf, 1); 3640 adjust_shared_perm(the_repository, sb.buf); 3641 3642 /* 3643 * There is no need to create directories for common refs when creating 3644 * a worktree ref store. 3645 */ 3646 if (!(flags & REF_STORE_CREATE_ON_DISK_IS_WORKTREE)) { 3647 /* 3648 * Create .git/refs/{heads,tags} 3649 */ 3650 strbuf_reset(&sb); 3651 files_ref_path(refs, &sb, "refs/heads"); 3652 safe_create_dir(the_repository, sb.buf, 1); 3653 3654 strbuf_reset(&sb); 3655 files_ref_path(refs, &sb, "refs/tags"); 3656 safe_create_dir(the_repository, sb.buf, 1); 3657 } 3658 3659 strbuf_release(&sb); 3660 return 0; 3661} 3662 3663struct remove_one_root_ref_data { 3664 const char *gitdir; 3665 struct strbuf *err; 3666}; 3667 3668static int remove_one_root_ref(const char *refname, 3669 void *cb_data) 3670{ 3671 struct remove_one_root_ref_data *data = cb_data; 3672 struct strbuf buf = STRBUF_INIT; 3673 int ret = 0; 3674 3675 strbuf_addf(&buf, "%s/%s", data->gitdir, refname); 3676 3677 ret = unlink(buf.buf); 3678 if (ret < 0) 3679 strbuf_addf(data->err, "could not delete %s: %s\n", 3680 refname, strerror(errno)); 3681 3682 strbuf_release(&buf); 3683 return ret; 3684} 3685 3686static int files_ref_store_remove_on_disk(struct ref_store *ref_store, 3687 struct strbuf *err) 3688{ 3689 struct files_ref_store *refs = 3690 files_downcast(ref_store, REF_STORE_WRITE, "remove"); 3691 struct remove_one_root_ref_data data = { 3692 .gitdir = refs->base.gitdir, 3693 .err = err, 3694 }; 3695 struct strbuf sb = STRBUF_INIT; 3696 int ret = 0; 3697 3698 strbuf_addf(&sb, "%s/refs", refs->base.gitdir); 3699 if (remove_dir_recursively(&sb, 0) < 0) { 3700 strbuf_addf(err, "could not delete refs: %s", 3701 
strerror(errno)); 3702 ret = -1; 3703 } 3704 strbuf_reset(&sb); 3705 3706 strbuf_addf(&sb, "%s/logs", refs->base.gitdir); 3707 if (remove_dir_recursively(&sb, 0) < 0) { 3708 strbuf_addf(err, "could not delete logs: %s", 3709 strerror(errno)); 3710 ret = -1; 3711 } 3712 strbuf_reset(&sb); 3713 3714 if (for_each_root_ref(refs, remove_one_root_ref, &data) < 0) 3715 ret = -1; 3716 3717 if (ref_store_remove_on_disk(refs->packed_ref_store, err) < 0) 3718 ret = -1; 3719 3720 strbuf_release(&sb); 3721 return ret; 3722} 3723 3724/* 3725 * For refs and reflogs, they share a unified interface when scanning 3726 * the whole directory. This function is used as the callback for each 3727 * regular file or symlink in the directory. 3728 */ 3729typedef int (*files_fsck_refs_fn)(struct ref_store *ref_store, 3730 struct fsck_options *o, 3731 const char *refname, 3732 struct dir_iterator *iter); 3733 3734static int files_fsck_symref_target(struct fsck_options *o, 3735 struct fsck_ref_report *report, 3736 struct strbuf *referent, 3737 unsigned int symbolic_link) 3738{ 3739 int is_referent_root; 3740 char orig_last_byte; 3741 size_t orig_len; 3742 int ret = 0; 3743 3744 orig_len = referent->len; 3745 orig_last_byte = referent->buf[orig_len - 1]; 3746 if (!symbolic_link) 3747 strbuf_rtrim(referent); 3748 3749 is_referent_root = is_root_ref(referent->buf); 3750 if (!is_referent_root && 3751 !starts_with(referent->buf, "refs/") && 3752 !starts_with(referent->buf, "worktrees/")) { 3753 ret = fsck_report_ref(o, report, 3754 FSCK_MSG_SYMREF_TARGET_IS_NOT_A_REF, 3755 "points to non-ref target '%s'", referent->buf); 3756 3757 } 3758 3759 if (!is_referent_root && check_refname_format(referent->buf, 0)) { 3760 ret = fsck_report_ref(o, report, 3761 FSCK_MSG_BAD_REFERENT_NAME, 3762 "points to invalid refname '%s'", referent->buf); 3763 goto out; 3764 } 3765 3766 if (symbolic_link) 3767 goto out; 3768 3769 if (referent->len == orig_len || 3770 (referent->len < orig_len && orig_last_byte != '\n')) { 
3771 ret = fsck_report_ref(o, report, 3772 FSCK_MSG_REF_MISSING_NEWLINE, 3773 "misses LF at the end"); 3774 } 3775 3776 if (referent->len != orig_len && referent->len != orig_len - 1) { 3777 ret = fsck_report_ref(o, report, 3778 FSCK_MSG_TRAILING_REF_CONTENT, 3779 "has trailing whitespaces or newlines"); 3780 } 3781 3782out: 3783 return ret; 3784} 3785 3786static int files_fsck_refs_content(struct ref_store *ref_store, 3787 struct fsck_options *o, 3788 const char *target_name, 3789 struct dir_iterator *iter) 3790{ 3791 struct strbuf ref_content = STRBUF_INIT; 3792 struct strbuf abs_gitdir = STRBUF_INIT; 3793 struct strbuf referent = STRBUF_INIT; 3794 struct fsck_ref_report report = { 0 }; 3795 const char *trailing = NULL; 3796 unsigned int type = 0; 3797 int failure_errno = 0; 3798 struct object_id oid; 3799 int ret = 0; 3800 3801 report.path = target_name; 3802 3803 if (S_ISLNK(iter->st.st_mode)) { 3804 const char *relative_referent_path = NULL; 3805 3806 ret = fsck_report_ref(o, &report, 3807 FSCK_MSG_SYMLINK_REF, 3808 "use deprecated symbolic link for symref"); 3809 3810 strbuf_add_absolute_path(&abs_gitdir, ref_store->repo->gitdir); 3811 strbuf_normalize_path(&abs_gitdir); 3812 if (!is_dir_sep(abs_gitdir.buf[abs_gitdir.len - 1])) 3813 strbuf_addch(&abs_gitdir, '/'); 3814 3815 strbuf_add_real_path(&ref_content, iter->path.buf); 3816 skip_prefix(ref_content.buf, abs_gitdir.buf, 3817 &relative_referent_path); 3818 3819 if (relative_referent_path) 3820 strbuf_addstr(&referent, relative_referent_path); 3821 else 3822 strbuf_addbuf(&referent, &ref_content); 3823 3824 ret |= files_fsck_symref_target(o, &report, &referent, 1); 3825 goto cleanup; 3826 } 3827 3828 if (strbuf_read_file(&ref_content, iter->path.buf, 0) < 0) { 3829 /* 3830 * Ref file could be removed by another concurrent process. We should 3831 * ignore this error and continue to the next ref. 
3832 */ 3833 if (errno == ENOENT) 3834 goto cleanup; 3835 3836 ret = error_errno(_("cannot read ref file '%s'"), iter->path.buf); 3837 goto cleanup; 3838 } 3839 3840 if (parse_loose_ref_contents(ref_store->repo->hash_algo, 3841 ref_content.buf, &oid, &referent, 3842 &type, &trailing, &failure_errno)) { 3843 strbuf_rtrim(&ref_content); 3844 ret = fsck_report_ref(o, &report, 3845 FSCK_MSG_BAD_REF_CONTENT, 3846 "%s", ref_content.buf); 3847 goto cleanup; 3848 } 3849 3850 if (!(type & REF_ISSYMREF)) { 3851 if (!*trailing) { 3852 ret = fsck_report_ref(o, &report, 3853 FSCK_MSG_REF_MISSING_NEWLINE, 3854 "misses LF at the end"); 3855 goto cleanup; 3856 } 3857 if (*trailing != '\n' || *(trailing + 1)) { 3858 ret = fsck_report_ref(o, &report, 3859 FSCK_MSG_TRAILING_REF_CONTENT, 3860 "has trailing garbage: '%s'", trailing); 3861 goto cleanup; 3862 } 3863 } else { 3864 ret = files_fsck_symref_target(o, &report, &referent, 0); 3865 goto cleanup; 3866 } 3867 3868cleanup: 3869 strbuf_release(&ref_content); 3870 strbuf_release(&referent); 3871 strbuf_release(&abs_gitdir); 3872 return ret; 3873} 3874 3875static int files_fsck_refs_name(struct ref_store *ref_store UNUSED, 3876 struct fsck_options *o, 3877 const char *refname, 3878 struct dir_iterator *iter) 3879{ 3880 struct strbuf sb = STRBUF_INIT; 3881 int ret = 0; 3882 3883 /* 3884 * Ignore the files ending with ".lock" as they may be lock files 3885 * However, do not allow bare ".lock" files. 3886 */ 3887 if (iter->basename[0] != '.' && ends_with(iter->basename, ".lock")) 3888 goto cleanup; 3889 3890 /* 3891 * This works right now because we never check the root refs. 
3892 */ 3893 if (check_refname_format(refname, 0)) { 3894 struct fsck_ref_report report = { 0 }; 3895 3896 report.path = refname; 3897 ret = fsck_report_ref(o, &report, 3898 FSCK_MSG_BAD_REF_NAME, 3899 "invalid refname format"); 3900 } 3901 3902cleanup: 3903 strbuf_release(&sb); 3904 return ret; 3905} 3906 3907static int files_fsck_refs_dir(struct ref_store *ref_store, 3908 struct fsck_options *o, 3909 const char *refs_check_dir, 3910 struct worktree *wt, 3911 files_fsck_refs_fn *fsck_refs_fn) 3912{ 3913 struct strbuf refname = STRBUF_INIT; 3914 struct strbuf sb = STRBUF_INIT; 3915 struct dir_iterator *iter; 3916 int iter_status; 3917 int ret = 0; 3918 3919 strbuf_addf(&sb, "%s/%s", ref_store->gitdir, refs_check_dir); 3920 3921 iter = dir_iterator_begin(sb.buf, 0); 3922 if (!iter) { 3923 if (errno == ENOENT && !is_main_worktree(wt)) 3924 goto out; 3925 3926 ret = error_errno(_("cannot open directory %s"), sb.buf); 3927 goto out; 3928 } 3929 3930 while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) { 3931 if (S_ISDIR(iter->st.st_mode)) { 3932 continue; 3933 } else if (S_ISREG(iter->st.st_mode) || 3934 S_ISLNK(iter->st.st_mode)) { 3935 strbuf_reset(&refname); 3936 3937 if (!is_main_worktree(wt)) 3938 strbuf_addf(&refname, "worktrees/%s/", wt->id); 3939 strbuf_addf(&refname, "%s/%s", refs_check_dir, 3940 iter->relative_path); 3941 3942 if (o->verbose) 3943 fprintf_ln(stderr, "Checking %s", refname.buf); 3944 3945 for (size_t i = 0; fsck_refs_fn[i]; i++) { 3946 if (fsck_refs_fn[i](ref_store, o, refname.buf, iter)) 3947 ret = -1; 3948 } 3949 } else { 3950 struct fsck_ref_report report = { .path = iter->basename }; 3951 if (fsck_report_ref(o, &report, 3952 FSCK_MSG_BAD_REF_FILETYPE, 3953 "unexpected file type")) 3954 ret = -1; 3955 } 3956 } 3957 3958 if (iter_status != ITER_DONE) 3959 ret = error(_("failed to iterate over '%s'"), sb.buf); 3960 3961out: 3962 dir_iterator_free(iter); 3963 strbuf_release(&sb); 3964 strbuf_release(&refname); 3965 return ret; 3966} 
3967 3968static int files_fsck_refs(struct ref_store *ref_store, 3969 struct fsck_options *o, 3970 struct worktree *wt) 3971{ 3972 files_fsck_refs_fn fsck_refs_fn[]= { 3973 files_fsck_refs_name, 3974 files_fsck_refs_content, 3975 NULL, 3976 }; 3977 3978 return files_fsck_refs_dir(ref_store, o, "refs", wt, fsck_refs_fn); 3979} 3980 3981static int files_fsck(struct ref_store *ref_store, 3982 struct fsck_options *o, 3983 struct worktree *wt) 3984{ 3985 struct files_ref_store *refs = 3986 files_downcast(ref_store, REF_STORE_READ, "fsck"); 3987 3988 return files_fsck_refs(ref_store, o, wt) | 3989 refs->packed_ref_store->be->fsck(refs->packed_ref_store, o, wt); 3990} 3991 3992struct ref_storage_be refs_be_files = { 3993 .name = "files", 3994 .init = files_ref_store_init, 3995 .release = files_ref_store_release, 3996 .create_on_disk = files_ref_store_create_on_disk, 3997 .remove_on_disk = files_ref_store_remove_on_disk, 3998 3999 .transaction_prepare = files_transaction_prepare, 4000 .transaction_finish = files_transaction_finish, 4001 .transaction_abort = files_transaction_abort, 4002 4003 .pack_refs = files_pack_refs, 4004 .optimize = files_optimize, 4005 .rename_ref = files_rename_ref, 4006 .copy_ref = files_copy_ref, 4007 4008 .iterator_begin = files_ref_iterator_begin, 4009 .read_raw_ref = files_read_raw_ref, 4010 .read_symbolic_ref = files_read_symbolic_ref, 4011 4012 .reflog_iterator_begin = files_reflog_iterator_begin, 4013 .for_each_reflog_ent = files_for_each_reflog_ent, 4014 .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse, 4015 .reflog_exists = files_reflog_exists, 4016 .create_reflog = files_create_reflog, 4017 .delete_reflog = files_delete_reflog, 4018 .reflog_expire = files_reflog_expire, 4019 4020 .fsck = files_fsck, 4021};