/*
 * unpack-trees.c: read trees into the index and update the working tree
 * (core of read-tree / checkout / merge tree unpacking).
 */
1#define USE_THE_REPOSITORY_VARIABLE 2#define DISABLE_SIGN_COMPARE_WARNINGS 3 4#include "git-compat-util.h" 5#include "advice.h" 6#include "strvec.h" 7#include "repository.h" 8#include "parse.h" 9#include "dir.h" 10#include "environment.h" 11#include "gettext.h" 12#include "hex.h" 13#include "name-hash.h" 14#include "tree.h" 15#include "tree-walk.h" 16#include "cache-tree.h" 17#include "unpack-trees.h" 18#include "progress.h" 19#include "refs.h" 20#include "attr.h" 21#include "read-cache.h" 22#include "split-index.h" 23#include "sparse-index.h" 24#include "submodule.h" 25#include "submodule-config.h" 26#include "symlinks.h" 27#include "trace2.h" 28#include "fsmonitor.h" 29#include "odb.h" 30#include "promisor-remote.h" 31#include "entry.h" 32#include "parallel-checkout.h" 33#include "setup.h" 34 35/* 36 * Error messages expected by scripts out of plumbing commands such as 37 * read-tree. Non-scripted Porcelain is not required to use these messages 38 * and in fact are encouraged to reword them to better suit their particular 39 * situation better. See how "git checkout" and "git merge" replaces 40 * them using setup_unpack_trees_porcelain(), for example. 41 */ 42static const char *unpack_plumbing_errors[NB_UNPACK_TREES_WARNING_TYPES] = { 43 /* ERROR_WOULD_OVERWRITE */ 44 "Entry '%s' would be overwritten by merge. Cannot merge.", 45 46 /* ERROR_NOT_UPTODATE_FILE */ 47 "Entry '%s' not uptodate. Cannot merge.", 48 49 /* ERROR_NOT_UPTODATE_DIR */ 50 "Updating '%s' would lose untracked files in it", 51 52 /* ERROR_CWD_IN_THE_WAY */ 53 "Refusing to remove '%s' since it is the current working directory.", 54 55 /* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */ 56 "Untracked working tree file '%s' would be overwritten by merge.", 57 58 /* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */ 59 "Untracked working tree file '%s' would be removed by merge.", 60 61 /* ERROR_BIND_OVERLAP */ 62 "Entry '%s' overlaps with '%s'. 
Cannot bind.", 63 64 /* ERROR_WOULD_LOSE_SUBMODULE */ 65 "Submodule '%s' cannot checkout new HEAD.", 66 67 /* NB_UNPACK_TREES_ERROR_TYPES; just a meta value */ 68 "", 69 70 /* WARNING_SPARSE_NOT_UPTODATE_FILE */ 71 "Path '%s' not uptodate; will not remove from working tree.", 72 73 /* WARNING_SPARSE_UNMERGED_FILE */ 74 "Path '%s' unmerged; will not remove from working tree.", 75 76 /* WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN */ 77 "Path '%s' already present; will not overwrite with sparse update.", 78}; 79 80#define ERRORMSG(o,type) \ 81 ( ((o) && (o)->internal.msgs[(type)]) \ 82 ? ((o)->internal.msgs[(type)]) \ 83 : (unpack_plumbing_errors[(type)]) ) 84 85static const char *super_prefixed(const char *path, const char *super_prefix) 86{ 87 /* 88 * It is necessary and sufficient to have two static buffers 89 * here, as the return value of this function is fed to 90 * error() using the unpack_*_errors[] templates we see above. 91 */ 92 static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT}; 93 static int super_prefix_len = -1; 94 static unsigned idx = ARRAY_SIZE(buf) - 1; 95 96 if (super_prefix_len < 0) { 97 if (!super_prefix) { 98 super_prefix_len = 0; 99 } else { 100 int i; 101 for (i = 0; i < ARRAY_SIZE(buf); i++) 102 strbuf_addstr(&buf[i], super_prefix); 103 super_prefix_len = buf[0].len; 104 } 105 } 106 107 if (!super_prefix_len) 108 return path; 109 110 if (++idx >= ARRAY_SIZE(buf)) 111 idx = 0; 112 113 strbuf_setlen(&buf[idx], super_prefix_len); 114 strbuf_addstr(&buf[idx], path); 115 116 return buf[idx].buf; 117} 118 119void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, 120 const char *cmd) 121{ 122 int i; 123 const char **msgs = opts->internal.msgs; 124 const char *msg; 125 126 strvec_init(&opts->internal.msgs_to_free); 127 128 if (!strcmp(cmd, "checkout")) 129 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 130 ? 
_("Your local changes to the following files would be overwritten by checkout:\n%%s" 131 "Please commit your changes or stash them before you switch branches.") 132 : _("Your local changes to the following files would be overwritten by checkout:\n%%s"); 133 else if (!strcmp(cmd, "merge")) 134 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 135 ? _("Your local changes to the following files would be overwritten by merge:\n%%s" 136 "Please commit your changes or stash them before you merge.") 137 : _("Your local changes to the following files would be overwritten by merge:\n%%s"); 138 else 139 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 140 ? _("Your local changes to the following files would be overwritten by %s:\n%%s" 141 "Please commit your changes or stash them before you %s.") 142 : _("Your local changes to the following files would be overwritten by %s:\n%%s"); 143 msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] = 144 strvec_pushf(&opts->internal.msgs_to_free, msg, cmd, cmd); 145 146 msgs[ERROR_NOT_UPTODATE_DIR] = 147 _("Updating the following directories would lose untracked files in them:\n%s"); 148 149 msgs[ERROR_CWD_IN_THE_WAY] = 150 _("Refusing to remove the current working directory:\n%s"); 151 152 if (!strcmp(cmd, "checkout")) 153 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 154 ? _("The following untracked working tree files would be removed by checkout:\n%%s" 155 "Please move or remove them before you switch branches.") 156 : _("The following untracked working tree files would be removed by checkout:\n%%s"); 157 else if (!strcmp(cmd, "merge")) 158 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 159 ? _("The following untracked working tree files would be removed by merge:\n%%s" 160 "Please move or remove them before you merge.") 161 : _("The following untracked working tree files would be removed by merge:\n%%s"); 162 else 163 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 164 ? 
_("The following untracked working tree files would be removed by %s:\n%%s" 165 "Please move or remove them before you %s.") 166 : _("The following untracked working tree files would be removed by %s:\n%%s"); 167 msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = 168 strvec_pushf(&opts->internal.msgs_to_free, msg, cmd, cmd); 169 170 if (!strcmp(cmd, "checkout")) 171 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 172 ? _("The following untracked working tree files would be overwritten by checkout:\n%%s" 173 "Please move or remove them before you switch branches.") 174 : _("The following untracked working tree files would be overwritten by checkout:\n%%s"); 175 else if (!strcmp(cmd, "merge")) 176 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 177 ? _("The following untracked working tree files would be overwritten by merge:\n%%s" 178 "Please move or remove them before you merge.") 179 : _("The following untracked working tree files would be overwritten by merge:\n%%s"); 180 else 181 msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) 182 ? _("The following untracked working tree files would be overwritten by %s:\n%%s" 183 "Please move or remove them before you %s.") 184 : _("The following untracked working tree files would be overwritten by %s:\n%%s"); 185 msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = 186 strvec_pushf(&opts->internal.msgs_to_free, msg, cmd, cmd); 187 188 /* 189 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we 190 * cannot easily display it as a list. 191 */ 192 msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. 
Cannot bind."); 193 194 msgs[ERROR_WOULD_LOSE_SUBMODULE] = 195 _("Cannot update submodule:\n%s"); 196 197 msgs[WARNING_SPARSE_NOT_UPTODATE_FILE] = 198 _("The following paths are not up to date and were left despite sparse patterns:\n%s"); 199 msgs[WARNING_SPARSE_UNMERGED_FILE] = 200 _("The following paths are unmerged and were left despite sparse patterns:\n%s"); 201 msgs[WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN] = 202 _("The following paths were already present and thus not updated despite sparse patterns:\n%s"); 203 204 opts->internal.show_all_errors = 1; 205 /* rejected paths may not have a static buffer */ 206 for (i = 0; i < ARRAY_SIZE(opts->internal.unpack_rejects); i++) 207 opts->internal.unpack_rejects[i].strdup_strings = 1; 208} 209 210void clear_unpack_trees_porcelain(struct unpack_trees_options *opts) 211{ 212 strvec_clear(&opts->internal.msgs_to_free); 213 memset(opts->internal.msgs, 0, sizeof(opts->internal.msgs)); 214 discard_index(&opts->internal.result); 215} 216 217static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce, 218 unsigned int set, unsigned int clear) 219{ 220 clear |= CE_HASHED; 221 222 if (set & CE_REMOVE) 223 set |= CE_WT_REMOVE; 224 225 ce->ce_flags = (ce->ce_flags & ~clear) | set; 226 return add_index_entry(&o->internal.result, ce, 227 ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); 228} 229 230static void add_entry(struct unpack_trees_options *o, 231 const struct cache_entry *ce, 232 unsigned int set, unsigned int clear) 233{ 234 do_add_entry(o, dup_cache_entry(ce, &o->internal.result), set, clear); 235} 236 237/* 238 * add error messages on path <path> 239 * corresponding to the type <e> with the message <msg> 240 * indicating if it should be display in porcelain or not 241 */ 242static int add_rejected_path(struct unpack_trees_options *o, 243 enum unpack_trees_error_types e, 244 const char *path) 245{ 246 if (o->quiet) 247 return -1; 248 249 if (!o->internal.show_all_errors) 250 return error(ERRORMSG(o, e), 
super_prefixed(path, 251 o->super_prefix)); 252 253 /* 254 * Otherwise, insert in a list for future display by 255 * display_(error|warning)_msgs() 256 */ 257 string_list_append(&o->internal.unpack_rejects[e], path); 258 return -1; 259} 260 261/* 262 * display all the error messages stored in a nice way 263 */ 264static void display_error_msgs(struct unpack_trees_options *o) 265{ 266 int e; 267 unsigned error_displayed = 0; 268 for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) { 269 struct string_list *rejects = &o->internal.unpack_rejects[e]; 270 271 if (rejects->nr > 0) { 272 int i; 273 struct strbuf path = STRBUF_INIT; 274 275 error_displayed = 1; 276 for (i = 0; i < rejects->nr; i++) 277 strbuf_addf(&path, "\t%s\n", rejects->items[i].string); 278 error(ERRORMSG(o, e), super_prefixed(path.buf, 279 o->super_prefix)); 280 strbuf_release(&path); 281 } 282 string_list_clear(rejects, 0); 283 } 284 if (error_displayed) 285 fprintf(stderr, _("Aborting\n")); 286} 287 288/* 289 * display all the warning messages stored in a nice way 290 */ 291static void display_warning_msgs(struct unpack_trees_options *o) 292{ 293 int e; 294 unsigned warning_displayed = 0; 295 for (e = NB_UNPACK_TREES_ERROR_TYPES + 1; 296 e < NB_UNPACK_TREES_WARNING_TYPES; e++) { 297 struct string_list *rejects = &o->internal.unpack_rejects[e]; 298 299 if (rejects->nr > 0) { 300 int i; 301 struct strbuf path = STRBUF_INIT; 302 303 warning_displayed = 1; 304 for (i = 0; i < rejects->nr; i++) 305 strbuf_addf(&path, "\t%s\n", rejects->items[i].string); 306 warning(ERRORMSG(o, e), super_prefixed(path.buf, 307 o->super_prefix)); 308 strbuf_release(&path); 309 } 310 string_list_clear(rejects, 0); 311 } 312 if (warning_displayed) 313 fprintf(stderr, _("After fixing the above paths, you may want to run `git sparse-checkout reapply`.\n")); 314} 315static int check_submodule_move_head(const struct cache_entry *ce, 316 const char *old_id, 317 const char *new_id, 318 struct unpack_trees_options *o) 319{ 320 
unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN; 321 const struct submodule *sub = submodule_from_ce(ce); 322 323 if (!sub) 324 return 0; 325 326 if (o->reset) 327 flags |= SUBMODULE_MOVE_HEAD_FORCE; 328 329 if (submodule_move_head(ce->name, o->super_prefix, old_id, new_id, 330 flags)) 331 return add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name); 332 return 0; 333} 334 335/* 336 * Perform the loading of the repository's gitmodules file. This function is 337 * used by 'check_update()' to perform loading of the gitmodules file in two 338 * different situations: 339 * (1) before removing entries from the working tree if the gitmodules file has 340 * been marked for removal. This situation is specified by 'state' == NULL. 341 * (2) before checking out entries to the working tree if the gitmodules file 342 * has been marked for update. This situation is specified by 'state' != NULL. 343 */ 344static void load_gitmodules_file(struct index_state *index, 345 struct checkout *state) 346{ 347 int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE)); 348 349 if (pos >= 0) { 350 struct cache_entry *ce = index->cache[pos]; 351 if (!state && ce->ce_flags & CE_WT_REMOVE) { 352 repo_read_gitmodules(the_repository, 0); 353 } else if (state && (ce->ce_flags & CE_UPDATE)) { 354 submodule_free(the_repository); 355 checkout_entry(ce, state, NULL, NULL); 356 repo_read_gitmodules(the_repository, 0); 357 } 358 } 359} 360 361static struct progress *get_progress(struct unpack_trees_options *o, 362 struct index_state *index) 363{ 364 unsigned cnt = 0, total = 0; 365 366 if (!o->update || !o->verbose_update) 367 return NULL; 368 369 for (; cnt < index->cache_nr; cnt++) { 370 const struct cache_entry *ce = index->cache[cnt]; 371 if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE)) 372 total++; 373 } 374 375 return start_delayed_progress(the_repository, 376 _("Updating files"), total); 377} 378 379static void setup_collided_checkout_detection(struct checkout *state, 380 struct 
index_state *index) 381{ 382 int i; 383 384 state->clone = 1; 385 for (i = 0; i < index->cache_nr; i++) 386 index->cache[i]->ce_flags &= ~CE_MATCHED; 387} 388 389static void report_collided_checkout(struct index_state *index) 390{ 391 struct string_list list = STRING_LIST_INIT_NODUP; 392 int i; 393 394 for (i = 0; i < index->cache_nr; i++) { 395 struct cache_entry *ce = index->cache[i]; 396 397 if (!(ce->ce_flags & CE_MATCHED)) 398 continue; 399 400 string_list_append(&list, ce->name); 401 ce->ce_flags &= ~CE_MATCHED; 402 } 403 404 list.cmp = fspathcmp; 405 string_list_sort(&list); 406 407 if (list.nr) { 408 warning(_("the following paths have collided (e.g. case-sensitive paths\n" 409 "on a case-insensitive filesystem) and only one from the same\n" 410 "colliding group is in the working tree:\n")); 411 412 for (i = 0; i < list.nr; i++) 413 fprintf(stderr, " '%s'\n", list.items[i].string); 414 } 415 416 string_list_clear(&list, 0); 417} 418 419static int must_checkout(const struct cache_entry *ce) 420{ 421 return ce->ce_flags & CE_UPDATE; 422} 423 424static int check_updates(struct unpack_trees_options *o, 425 struct index_state *index) 426{ 427 unsigned cnt = 0; 428 int errs = 0; 429 struct progress *progress; 430 struct checkout state = CHECKOUT_INIT; 431 int i, pc_workers, pc_threshold; 432 433 trace_performance_enter(); 434 state.super_prefix = o->super_prefix; 435 state.force = 1; 436 state.quiet = 1; 437 state.refresh_cache = 1; 438 state.istate = index; 439 clone_checkout_metadata(&state.meta, &o->meta, NULL); 440 441 if (!o->update || o->dry_run) { 442 remove_marked_cache_entries(index, 0); 443 trace_performance_leave("check_updates"); 444 return 0; 445 } 446 447 if (o->clone) 448 setup_collided_checkout_detection(&state, index); 449 450 progress = get_progress(o, index); 451 452 /* Start with clean cache to avoid using any possibly outdated info. 
*/ 453 invalidate_lstat_cache(); 454 455 git_attr_set_direction(GIT_ATTR_CHECKOUT); 456 457 if (should_update_submodules()) 458 load_gitmodules_file(index, NULL); 459 460 for (i = 0; i < index->cache_nr; i++) { 461 const struct cache_entry *ce = index->cache[i]; 462 463 if (ce->ce_flags & CE_WT_REMOVE) { 464 display_progress(progress, ++cnt); 465 unlink_entry(ce, o->super_prefix); 466 } 467 } 468 469 remove_marked_cache_entries(index, 0); 470 remove_scheduled_dirs(); 471 472 if (should_update_submodules()) 473 load_gitmodules_file(index, &state); 474 475 if (repo_has_promisor_remote(the_repository)) 476 /* 477 * Prefetch the objects that are to be checked out in the loop 478 * below. 479 */ 480 prefetch_cache_entries(index, must_checkout); 481 482 get_parallel_checkout_configs(&pc_workers, &pc_threshold); 483 484 enable_delayed_checkout(&state); 485 if (pc_workers > 1) 486 init_parallel_checkout(); 487 for (i = 0; i < index->cache_nr; i++) { 488 struct cache_entry *ce = index->cache[i]; 489 490 if (must_checkout(ce)) { 491 size_t last_pc_queue_size = pc_queue_size(); 492 493 if (ce->ce_flags & CE_WT_REMOVE) 494 BUG("both update and delete flags are set on %s", 495 ce->name); 496 ce->ce_flags &= ~CE_UPDATE; 497 errs |= checkout_entry(ce, &state, NULL, NULL); 498 499 if (last_pc_queue_size == pc_queue_size()) 500 display_progress(progress, ++cnt); 501 } 502 } 503 if (pc_workers > 1) 504 errs |= run_parallel_checkout(&state, pc_workers, pc_threshold, 505 progress, &cnt); 506 stop_progress(&progress); 507 errs |= finish_delayed_checkout(&state, o->verbose_update); 508 git_attr_set_direction(GIT_ATTR_CHECKIN); 509 510 if (o->clone) 511 report_collided_checkout(index); 512 513 trace_performance_leave("check_updates"); 514 return errs != 0; 515} 516 517static int verify_uptodate_sparse(const struct cache_entry *ce, 518 struct unpack_trees_options *o); 519static int verify_absent_sparse(const struct cache_entry *ce, 520 enum unpack_trees_error_types, 521 struct 
unpack_trees_options *o); 522 523static int apply_sparse_checkout(struct index_state *istate, 524 struct cache_entry *ce, 525 struct unpack_trees_options *o) 526{ 527 int was_skip_worktree = ce_skip_worktree(ce); 528 529 if (ce->ce_flags & CE_NEW_SKIP_WORKTREE) 530 ce->ce_flags |= CE_SKIP_WORKTREE; 531 else 532 ce->ce_flags &= ~CE_SKIP_WORKTREE; 533 if (was_skip_worktree != ce_skip_worktree(ce)) { 534 ce->ce_flags |= CE_UPDATE_IN_BASE; 535 mark_fsmonitor_invalid(istate, ce); 536 istate->cache_changed |= CE_ENTRY_CHANGED; 537 } 538 539 /* 540 * if (!was_skip_worktree && !ce_skip_worktree()) { 541 * This is perfectly normal. Move on; 542 * } 543 */ 544 545 /* 546 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout 547 * area as a result of ce_skip_worktree() shortcuts in 548 * verify_absent() and verify_uptodate(). 549 * Make sure they don't modify worktree if they are already 550 * outside checkout area 551 */ 552 if (was_skip_worktree && ce_skip_worktree(ce)) { 553 ce->ce_flags &= ~CE_UPDATE; 554 555 /* 556 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also 557 * on to get that file removed from both index and worktree. 558 * If that file is already outside worktree area, don't 559 * bother remove it. 
560 */ 561 if (ce->ce_flags & CE_REMOVE) 562 ce->ce_flags &= ~CE_WT_REMOVE; 563 } 564 565 if (!was_skip_worktree && ce_skip_worktree(ce)) { 566 /* 567 * If CE_UPDATE is set, verify_uptodate() must be called already 568 * also stat info may have lost after merged_entry() so calling 569 * verify_uptodate() again may fail 570 */ 571 if (!(ce->ce_flags & CE_UPDATE) && 572 verify_uptodate_sparse(ce, o)) { 573 ce->ce_flags &= ~CE_SKIP_WORKTREE; 574 return -1; 575 } 576 ce->ce_flags |= CE_WT_REMOVE; 577 ce->ce_flags &= ~CE_UPDATE; 578 } 579 if (was_skip_worktree && !ce_skip_worktree(ce)) { 580 if (verify_absent_sparse(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o)) 581 return -1; 582 ce->ce_flags |= CE_UPDATE; 583 } 584 return 0; 585} 586 587static int warn_conflicted_path(struct index_state *istate, 588 int i, 589 struct unpack_trees_options *o) 590{ 591 char *conflicting_path = istate->cache[i]->name; 592 int count = 0; 593 594 add_rejected_path(o, WARNING_SPARSE_UNMERGED_FILE, conflicting_path); 595 596 /* Find out how many higher stage entries are at same path */ 597 while ((++count) + i < istate->cache_nr && 598 !strcmp(conflicting_path, istate->cache[count + i]->name)) 599 ; /* do nothing */ 600 601 return count; 602} 603 604static inline int call_unpack_fn(const struct cache_entry * const *src, 605 struct unpack_trees_options *o) 606{ 607 int ret = o->fn(src, o); 608 if (ret > 0) 609 ret = 0; 610 return ret; 611} 612 613static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o) 614{ 615 ce->ce_flags |= CE_UNPACKED; 616 617 if (o->internal.cache_bottom < o->src_index->cache_nr && 618 o->src_index->cache[o->internal.cache_bottom] == ce) { 619 int bottom = o->internal.cache_bottom; 620 621 while (bottom < o->src_index->cache_nr && 622 o->src_index->cache[bottom]->ce_flags & CE_UNPACKED) 623 bottom++; 624 o->internal.cache_bottom = bottom; 625 } 626} 627 628static void mark_all_ce_unused(struct index_state *index) 629{ 630 int i; 631 for (i = 0; 
i < index->cache_nr; i++) 632 index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE); 633} 634 635static int locate_in_src_index(const struct cache_entry *ce, 636 struct unpack_trees_options *o) 637{ 638 struct index_state *index = o->src_index; 639 int len = ce_namelen(ce); 640 int pos = index_name_pos(index, ce->name, len); 641 if (pos < 0) 642 pos = -1 - pos; 643 return pos; 644} 645 646/* 647 * We call unpack_index_entry() with an unmerged cache entry 648 * only in diff-index, and it wants a single callback. Skip 649 * the other unmerged entry with the same name. 650 */ 651static void mark_ce_used_same_name(struct cache_entry *ce, 652 struct unpack_trees_options *o) 653{ 654 struct index_state *index = o->src_index; 655 int len = ce_namelen(ce); 656 int pos; 657 658 for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) { 659 struct cache_entry *next = index->cache[pos]; 660 if (len != ce_namelen(next) || 661 memcmp(ce->name, next->name, len)) 662 break; 663 mark_ce_used(next, o); 664 } 665} 666 667static struct cache_entry *next_cache_entry(struct unpack_trees_options *o) 668{ 669 const struct index_state *index = o->src_index; 670 int pos = o->internal.cache_bottom; 671 672 while (pos < index->cache_nr) { 673 struct cache_entry *ce = index->cache[pos]; 674 if (!(ce->ce_flags & CE_UNPACKED)) 675 return ce; 676 pos++; 677 } 678 return NULL; 679} 680 681static void add_same_unmerged(const struct cache_entry *ce, 682 struct unpack_trees_options *o) 683{ 684 struct index_state *index = o->src_index; 685 int len = ce_namelen(ce); 686 int pos = index_name_pos(index, ce->name, len); 687 688 if (0 <= pos) 689 die("programming error in a caller of mark_ce_used_same_name"); 690 for (pos = -pos - 1; pos < index->cache_nr; pos++) { 691 struct cache_entry *next = index->cache[pos]; 692 if (len != ce_namelen(next) || 693 memcmp(ce->name, next->name, len)) 694 break; 695 add_entry(o, next, 0, 0); 696 mark_ce_used(next, o); 697 } 698} 699 
700static int unpack_index_entry(struct cache_entry *ce, 701 struct unpack_trees_options *o) 702{ 703 const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; 704 int ret; 705 706 src[0] = ce; 707 708 mark_ce_used(ce, o); 709 if (ce_stage(ce)) { 710 if (o->skip_unmerged) { 711 add_entry(o, ce, 0, 0); 712 return 0; 713 } 714 } 715 ret = call_unpack_fn(src, o); 716 if (ce_stage(ce)) 717 mark_ce_used_same_name(ce, o); 718 return ret; 719} 720 721static int find_cache_pos(struct traverse_info *, const char *p, size_t len); 722 723static void restore_cache_bottom(struct traverse_info *info, int bottom) 724{ 725 struct unpack_trees_options *o = info->data; 726 727 if (o->diff_index_cached) 728 return; 729 o->internal.cache_bottom = bottom; 730} 731 732static int switch_cache_bottom(struct traverse_info *info) 733{ 734 struct unpack_trees_options *o = info->data; 735 int ret, pos; 736 737 if (o->diff_index_cached) 738 return 0; 739 ret = o->internal.cache_bottom; 740 pos = find_cache_pos(info->prev, info->name, info->namelen); 741 742 if (pos < -1) 743 o->internal.cache_bottom = -2 - pos; 744 else if (pos < 0) 745 o->internal.cache_bottom = o->src_index->cache_nr; 746 return ret; 747} 748 749static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k) 750{ 751 return !is_null_oid(&name_j->oid) && !is_null_oid(&name_k->oid) && oideq(&name_j->oid, &name_k->oid); 752} 753 754static int all_trees_same_as_cache_tree(int n, unsigned long dirmask, 755 struct name_entry *names, 756 struct traverse_info *info) 757{ 758 struct unpack_trees_options *o = info->data; 759 int i; 760 761 if (!o->merge || dirmask != ((1 << n) - 1)) 762 return 0; 763 764 for (i = 1; i < n; i++) 765 if (!are_same_oid(names, names + i)) 766 return 0; 767 768 return cache_tree_matches_traversal(o->src_index->cache_tree, names, info); 769} 770 771static int index_pos_by_traverse_info(struct name_entry *names, 772 struct traverse_info *info) 773{ 774 struct unpack_trees_options 
*o = info->data; 775 struct strbuf name = STRBUF_INIT; 776 int pos; 777 778 strbuf_make_traverse_path(&name, info, names->path, names->pathlen); 779 strbuf_addch(&name, '/'); 780 pos = index_name_pos(o->src_index, name.buf, name.len); 781 if (pos >= 0) { 782 if (!o->src_index->sparse_index || 783 !(o->src_index->cache[pos]->ce_flags & CE_SKIP_WORKTREE)) 784 BUG("This is a directory and should not exist in index"); 785 } else { 786 pos = -pos - 1; 787 } 788 if (pos >= o->src_index->cache_nr || 789 !starts_with(o->src_index->cache[pos]->name, name.buf) || 790 (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name.buf))) 791 BUG("pos %d doesn't point to the first entry of %s in index", 792 pos, name.buf); 793 strbuf_release(&name); 794 return pos; 795} 796 797/* 798 * Fast path if we detect that all trees are the same as cache-tree at this 799 * path. We'll walk these trees in an iterative loop using cache-tree/index 800 * instead of ODB since we already know what these trees contain. 801 */ 802static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names, 803 struct traverse_info *info) 804{ 805 struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; 806 struct unpack_trees_options *o = info->data; 807 struct cache_entry *tree_ce = NULL; 808 int ce_len = 0; 809 int i, d; 810 811 if (!o->merge) 812 BUG("We need cache-tree to do this optimization"); 813 if (nr_entries + pos > o->src_index->cache_nr) 814 return error(_("corrupted cache-tree has entries not present in index")); 815 816 /* 817 * Do what unpack_callback() and unpack_single_entry() normally 818 * do. But we walk all paths in an iterative loop instead. 819 * 820 * D/F conflicts and higher stage entries are not a concern 821 * because cache-tree would be invalidated and we would never 822 * get here in the first place. 
823 */ 824 for (i = 0; i < nr_entries; i++) { 825 int new_ce_len, len, rc; 826 827 src[0] = o->src_index->cache[pos + i]; 828 829 len = ce_namelen(src[0]); 830 new_ce_len = cache_entry_size(len); 831 832 if (new_ce_len > ce_len) { 833 new_ce_len <<= 1; 834 tree_ce = xrealloc(tree_ce, new_ce_len); 835 memset(tree_ce, 0, new_ce_len); 836 ce_len = new_ce_len; 837 838 tree_ce->ce_flags = create_ce_flags(0); 839 840 for (d = 1; d <= nr_names; d++) 841 src[d] = tree_ce; 842 } 843 844 tree_ce->ce_mode = src[0]->ce_mode; 845 tree_ce->ce_namelen = len; 846 oidcpy(&tree_ce->oid, &src[0]->oid); 847 memcpy(tree_ce->name, src[0]->name, len + 1); 848 849 rc = call_unpack_fn((const struct cache_entry * const *)src, o); 850 if (rc < 0) { 851 free(tree_ce); 852 return rc; 853 } 854 855 mark_ce_used(src[0], o); 856 } 857 free(tree_ce); 858 if (o->internal.debug_unpack) 859 printf("Unpacked %d entries from %s to %s using cache-tree\n", 860 nr_entries, 861 o->src_index->cache[pos]->name, 862 o->src_index->cache[pos + nr_entries - 1]->name); 863 return 0; 864} 865 866static int traverse_trees_recursive(int n, unsigned long dirmask, 867 unsigned long df_conflicts, 868 struct name_entry *names, 869 struct traverse_info *info) 870{ 871 struct unpack_trees_options *o = info->data; 872 int i, ret, bottom; 873 int nr_buf = 0; 874 struct tree_desc *t; 875 void **buf; 876 struct traverse_info newinfo; 877 struct name_entry *p; 878 int nr_entries; 879 880 nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info); 881 if (nr_entries > 0) { 882 int pos = index_pos_by_traverse_info(names, info); 883 884 if (!o->merge || df_conflicts) 885 BUG("Wrong condition to get here buddy"); 886 887 /* 888 * All entries up to 'pos' must have been processed 889 * (i.e. marked CE_UNPACKED) at this point. But to be safe, 890 * save and restore cache_bottom anyway to not miss 891 * unprocessed entries before 'pos'. 
892 */ 893 bottom = o->internal.cache_bottom; 894 ret = traverse_by_cache_tree(pos, nr_entries, n, info); 895 o->internal.cache_bottom = bottom; 896 return ret; 897 } 898 899 p = names; 900 while (!p->mode) 901 p++; 902 903 newinfo = *info; 904 newinfo.prev = info; 905 newinfo.pathspec = info->pathspec; 906 newinfo.name = p->path; 907 newinfo.namelen = p->pathlen; 908 newinfo.mode = p->mode; 909 newinfo.pathlen = st_add3(newinfo.pathlen, tree_entry_len(p), 1); 910 newinfo.df_conflicts |= df_conflicts; 911 912 ALLOC_ARRAY(t, n); 913 ALLOC_ARRAY(buf, n); 914 915 /* 916 * Fetch the tree from the ODB for each peer directory in the 917 * n commits. 918 * 919 * For 2- and 3-way traversals, we try to avoid hitting the 920 * ODB twice for the same OID. This should yield a nice speed 921 * up in checkouts and merges when the commits are similar. 922 * 923 * We don't bother doing the full O(n^2) search for larger n, 924 * because wider traversals don't happen that often and we 925 * avoid the search setup. 926 * 927 * When 2 peer OIDs are the same, we just copy the tree 928 * descriptor data. This implicitly borrows the buffer 929 * data from the earlier cell. 930 */ 931 for (i = 0; i < n; i++, dirmask >>= 1) { 932 if (i > 0 && are_same_oid(&names[i], &names[i - 1])) 933 t[i] = t[i - 1]; 934 else if (i > 1 && are_same_oid(&names[i], &names[i - 2])) 935 t[i] = t[i - 2]; 936 else { 937 const struct object_id *oid = NULL; 938 if (dirmask & 1) 939 oid = &names[i].oid; 940 buf[nr_buf++] = fill_tree_descriptor(the_repository, t + i, oid); 941 } 942 } 943 944 bottom = switch_cache_bottom(&newinfo); 945 ret = traverse_trees(o->src_index, n, t, &newinfo); 946 restore_cache_bottom(&newinfo, bottom); 947 948 for (i = 0; i < nr_buf; i++) 949 free(buf[i]); 950 free(buf); 951 free(t); 952 953 return ret; 954} 955 956/* 957 * Compare the traverse-path to the cache entry without actually 958 * having to generate the textual representation of the traverse 959 * path. 960 * 961 * NOTE! 
 * This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce,
				      const struct traverse_info *info,
				      const char *name, size_t namelen,
				      unsigned mode)
{
	int pathlen, ce_len;
	const char *ce_name;

	/* Recursively compare the leading directories first. */
	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     info->name, info->namelen,
						     info->mode);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	return df_name_compare(ce_name, ce_len, S_IFREG, name, namelen, mode);
}

/*
 * Compare cache entry 'ce' against the tree-walk name 'name' at 'info'.
 * Uses the precomputed info->traverse_path when available, otherwise
 * falls back to the piecewise comparison above.  A sparse-directory
 * entry is compared as a directory (S_IFDIR), anything else as a
 * regular file (S_IFREG).
 */
static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const char *name, size_t namelen,
			    unsigned mode)
{
	int pathlen, ce_len;
	const char *ce_name;
	int cmp;
	unsigned ce_mode;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so.  But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, name, namelen, mode);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	ce_mode = S_ISSPARSEDIR(ce->ce_mode) ? S_IFDIR : S_IFREG;
	return df_name_compare(ce_name, ce_len, ce_mode, name, namelen, mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n->path, n->pathlen, n->mode);
	if (cmp)
		return cmp;

	/*
	 * At this point, we know that we have a prefix match. If ce
	 * is a sparse directory, then allow an exact match. This only
	 * works when the input name is a directory, since ce->name
	 * ends in a directory separator.
	 */
	if (S_ISSPARSEDIR(ce->ce_mode) &&
	    ce->ce_namelen == traverse_path_len(info, tree_entry_len(n)) + 1)
		return 0;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, tree_entry_len(n));
}

/*
 * Return 1 if 'ce' lies within the directory the traversal is
 * currently descending into, 0 otherwise.
 */
static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev,
			     info->name, info->namelen, info->mode))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

/*
 * Build a cache_entry for tree entry 'n' at the current traverse path.
 * 'is_transient' selects a transient allocation (not owned by 'istate');
 * 'is_sparse_directory' appends a trailing '/' and marks the entry
 * CE_SKIP_WORKTREE (one extra byte is allocated for the '/').
 */
static struct cache_entry *create_ce_entry(const struct traverse_info *info,
					   const struct name_entry *n,
					   int stage,
					   struct index_state *istate,
					   int is_transient,
					   int is_sparse_directory)
{
	size_t len = traverse_path_len(info, tree_entry_len(n));
	size_t alloc_len = is_sparse_directory ? len + 1 : len;
	struct cache_entry *ce =
		is_transient ?
		make_empty_transient_cache_entry(alloc_len, NULL) :
		make_empty_cache_entry(istate, alloc_len);

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, &n->oid);
	/* len+1 because the cache_entry allocates space for NUL */
	make_traverse_path(ce->name, len + 1, info, n->path, n->pathlen);

	if (is_sparse_directory) {
		ce->name[len] = '/';
		ce->name[len + 1] = '\0';
		ce->ce_namelen++;
		ce->ce_flags |= CE_SKIP_WORKTREE;
	}

	return ce;
}

/*
 * Determine whether the path specified by 'p' should be unpacked as a new
 * sparse directory in a sparse index. A new sparse directory 'A/':
 * - must be outside the sparse cone.
 * - must not already be in the index (i.e., no index entry with name 'A/'
 *   exists).
 * - must not have any child entries in the index (i.e., no index entry
 *   'A/<something>' exists).
 * If 'p' meets the above requirements, return 1; otherwise, return 0.
 */
static int entry_is_new_sparse_dir(const struct traverse_info *info,
				   const struct name_entry *p)
{
	int res, pos;
	struct strbuf dirpath = STRBUF_INIT;
	struct unpack_trees_options *o = info->data;

	if (!S_ISDIR(p->mode))
		return 0;

	/*
	 * If the path is inside the sparse cone, it can't be a sparse directory.
	 */
	strbuf_add(&dirpath, info->traverse_path, info->pathlen);
	strbuf_add(&dirpath, p->path, p->pathlen);
	strbuf_addch(&dirpath, '/');
	if (path_in_cone_mode_sparse_checkout(dirpath.buf, o->src_index)) {
		res = 0;
		goto cleanup;
	}

	pos = index_name_pos_sparse(o->src_index, dirpath.buf, dirpath.len);
	if (pos >= 0) {
		/* Path is already in the index, not a new sparse dir */
		res = 0;
		goto cleanup;
	}

	/* Where would this sparse dir be inserted into the index?
	 */
	pos = -pos - 1;
	if (pos >= o->src_index->cache_nr) {
		/*
		 * Sparse dir would be inserted at the end of the index, so we
		 * know it has no child entries.
		 */
		res = 1;
		goto cleanup;
	}

	/*
	 * If the dir has child entries in the index, the first would be at the
	 * position the sparse directory would be inserted. If the entry at this
	 * position is inside the dir, not a new sparse dir.
	 */
	res = strncmp(o->src_index->cache[pos]->name, dirpath.buf, dirpath.len);

cleanup:
	strbuf_release(&dirpath);
	return res;
}

/*
 * Unpack the tree entries for a single path.  'src' holds the index
 * entry (when merging) in src[0], followed by one slot per tree; the
 * populated entries are either fed to the merge function (when
 * o->merge) or added directly to the result index.
 *
 * Note that traverse_by_cache_tree() duplicates some logic in this function
 * without actually calling it. If you change the logic here you may need to
 * check and change there as well.
 */
static int unpack_single_entry(int n, unsigned long mask,
			       unsigned long dirmask,
			       struct cache_entry **src,
			       const struct name_entry *names,
			       const struct traverse_info *info,
			       int *is_new_sparse_dir)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;
	const struct name_entry *p = names;

	*is_new_sparse_dir = 0;
	if (mask == dirmask && !src[0]) {
		/*
		 * If we're not in a sparse index, we can't unpack a directory
		 * without recursing into it, so we return.
		 */
		if (!o->src_index->sparse_index)
			return 0;

		/* Find first entry with a real name (we could use "mask" too) */
		while (!p->mode)
			p++;

		/*
		 * If the directory is completely missing from the index but
		 * would otherwise be a sparse directory, we should unpack it.
		 * If not, we'll return and continue recursively traversing the
		 * tree.
1193 */ 1194 *is_new_sparse_dir = entry_is_new_sparse_dir(info, p); 1195 if (!*is_new_sparse_dir) 1196 return 0; 1197 } 1198 1199 /* 1200 * When we are unpacking a sparse directory, then this isn't necessarily 1201 * a directory-file conflict. 1202 */ 1203 if (mask == dirmask && 1204 (*is_new_sparse_dir || (src[0] && S_ISSPARSEDIR(src[0]->ce_mode)))) 1205 conflicts = 0; 1206 1207 /* 1208 * Ok, we've filled in up to any potential index entry in src[0], 1209 * now do the rest. 1210 */ 1211 for (i = 0; i < n; i++) { 1212 int stage; 1213 unsigned int bit = 1ul << i; 1214 if (conflicts & bit) { 1215 src[i + o->merge] = o->df_conflict_entry; 1216 continue; 1217 } 1218 if (!(mask & bit)) 1219 continue; 1220 if (!o->merge) 1221 stage = 0; 1222 else if (i + 1 < o->head_idx) 1223 stage = 1; 1224 else if (i + 1 > o->head_idx) 1225 stage = 3; 1226 else 1227 stage = 2; 1228 1229 /* 1230 * If the merge bit is set, then the cache entries are 1231 * discarded in the following block. In this case, 1232 * construct "transient" cache_entries, as they are 1233 * not stored in the index. otherwise construct the 1234 * cache entry from the index aware logic. 
		 */
		src[i + o->merge] = create_ce_entry(info, names + i, stage,
						    &o->internal.result,
						    o->merge, bit & dirmask);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		/* The transient entries were only needed for the merge fn. */
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				discard_cache_entry(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

/*
 * Abort the unpack: discard the partially-built result index and, unless
 * suppressed via o->quiet or o->exiting_early, report 'message' (when
 * given) through error().  Always returns a negative value.
 */
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->internal.result);
	if (!o->quiet && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * The tree traversal is looking at name p.  If we have a matching entry,
 * return it.  If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 */
static int find_cache_pos(struct traverse_info *info,
			  const char *p, size_t p_len)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;

	for (pos = o->internal.cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->internal.cache_bottom)
				++o->internal.cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.  A directory hit is encoded as
		 * -2 - pos so the caller can recover 'pos' from it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

/*
 * Given a sparse directory entry 'ce', compare ce->name to
 * info->traverse_path + p->path + '/' if info->traverse_path
 * is non-empty.
 *
 * Compare ce->name to p->path + '/' otherwise. Note that
 * ce->name must end in a trailing '/' because it is a sparse
 * directory entry.
 */
static int sparse_dir_matches_path(const struct cache_entry *ce,
				   struct traverse_info *info,
				   const struct name_entry *p)
{
	assert(S_ISSPARSEDIR(ce->ce_mode));
	assert(ce->name[ce->ce_namelen - 1] == '/');

	if (info->pathlen)
		return ce->ce_namelen == info->pathlen + p->pathlen + 1 &&
			ce->name[info->pathlen - 1] == '/' &&
			!strncmp(ce->name, info->traverse_path, info->pathlen) &&
			!strncmp(ce->name + info->pathlen, p->path, p->pathlen);
	return ce->ce_namelen == p->pathlen + 1 &&
		!strncmp(ce->name, p->path, p->pathlen);
}

/*
 * Find the index entry (if any) matching the tree-walk name 'p',
 * including a sparse-directory entry "path/" that find_cache_pos()
 * can only report via its negative encoding.
 */
static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	const char *path;
	int pos = find_cache_pos(info, p->path, p->pathlen);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];

	/*
	 * Check for a sparse-directory entry named "path/".
	 * Due to the input p->path not having a trailing
	 * slash, the negative 'pos' value overshoots the
	 * expected position, hence "-2" instead of "-1".
	 */
	pos = -pos - 2;

	if (pos < 0 || pos >= o->src_index->cache_nr)
		return NULL;

	/*
	 * Due to lexicographic sorting and sparse directory
	 * entries ending with a trailing slash, our path as a
	 * sparse directory (e.g "subdir/") and our path as a
	 * file (e.g. "subdir") might be separated by other
	 * paths (e.g. "subdir-").
	 */
	while (pos >= 0) {
		struct cache_entry *ce = o->src_index->cache[pos];

		/* Stop once the entry no longer lies under "path/". */
		if (!skip_prefix(ce->name, info->traverse_path, &path) ||
		    strncmp(path, p->path, p->pathlen) ||
		    path[p->pathlen] != '/')
			return NULL;

		if (S_ISSPARSEDIR(ce->ce_mode) &&
		    sparse_dir_matches_path(ce, info, p))
			return ce;

		pos--;
	}

	return NULL;
}

/* Print the current traverse path; helper for debug_unpack_callback(). */
static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name)
			putchar('/');
	}
	printf("%s", info->name);
}

/* Print a single tree entry; helper for debug_unpack_callback(). */
static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

/* Dump the state of one unpack_callback() invocation for debugging. */
static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

/*
 * Returns true if and only if the given cache_entry is a
 * sparse-directory entry that matches the given name_entry
 * from the tree walk at the given traverse_info.
1449 */ 1450static int is_sparse_directory_entry(struct cache_entry *ce, 1451 const struct name_entry *name, 1452 struct traverse_info *info) 1453{ 1454 if (!ce || !name || !S_ISSPARSEDIR(ce->ce_mode)) 1455 return 0; 1456 1457 return sparse_dir_matches_path(ce, info, name); 1458} 1459 1460static int unpack_sparse_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info) 1461{ 1462 struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; 1463 struct unpack_trees_options *o = info->data; 1464 int ret, is_new_sparse_dir; 1465 1466 assert(o->merge); 1467 1468 /* 1469 * Unlike in 'unpack_callback', where src[0] is derived from the index when 1470 * merging, src[0] is a transient cache entry derived from the first tree 1471 * provided. Create the temporary entry as if it came from a non-sparse index. 1472 */ 1473 if (!is_null_oid(&names[0].oid)) { 1474 src[0] = create_ce_entry(info, &names[0], 0, 1475 &o->internal.result, 1, 1476 dirmask & (1ul << 0)); 1477 src[0]->ce_flags |= (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE); 1478 } 1479 1480 /* 1481 * 'unpack_single_entry' assumes that src[0] is derived directly from 1482 * the index, rather than from an entry in 'names'. This is *not* true when 1483 * merging a sparse directory, in which case names[0] is the "index" source 1484 * entry. To match the expectations of 'unpack_single_entry', shift past the 1485 * "index" tree (i.e., names[0]) and adjust 'names', 'n', 'mask', and 1486 * 'dirmask' accordingly. 1487 */ 1488 ret = unpack_single_entry(n - 1, mask >> 1, dirmask >> 1, src, names + 1, info, &is_new_sparse_dir); 1489 1490 if (src[0]) 1491 discard_cache_entry(src[0]); 1492 1493 return ret >= 0 ? mask : -1; 1494} 1495 1496/* 1497 * Note that traverse_by_cache_tree() duplicates some logic in this function 1498 * without actually calling it. If you change the logic here you may need to 1499 * check and change there as well. 
 */
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;
	int is_new_sparse_dir;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->internal.debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				/* Index entry sorts before the tree: unpack it first. */
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_single_entry(n, mask, dirmask, src, names, info, &is_new_sparse_dir))
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.
			 * diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				/*
				 * Only increment the cache_bottom if the
				 * directory isn't a sparse directory index
				 * entry (if it is, it was already incremented)
				 * in 'mark_ce_used()'
				 */
				if (!src[0] || !S_ISSPARSEDIR(src[0]->ce_mode))
					o->internal.cache_bottom += matches;
				return mask;
			}
		}

		if (!is_sparse_directory_entry(src[0], p, info) &&
		    !is_new_sparse_dir &&
		    traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0) {
			return -1;
		}

		return mask;
	}

	return mask;
}

static int clear_ce_flags_1(struct index_state *istate,
			    struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct pattern_list *pl,
			    enum pattern_match_result default_match,
			    int progress_nr);

/*
 * Whole directory matching.  Returns how many leading cache entries
 * were fully handled as a unit; 0 tells the caller to fall back to
 * per-entry traversal.
 */
static int clear_ce_flags_dir(struct index_state *istate,
			      struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct pattern_list *pl,
			      enum pattern_match_result default_match,
			      int progress_nr)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int rc;
	enum pattern_match_result ret, orig_ret;
	orig_ret = path_matches_pattern_list(prefix->buf, prefix->len,
					     basename, &dtype, pl, istate);

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (orig_ret == UNDECIDED)
		ret = default_match;
	else
		ret = orig_ret;

	/* Find the first entry that no longer shares this prefix. */
	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	if (pl->use_cone_patterns && orig_ret == MATCHED_RECURSIVE) {
		struct cache_entry **ce = cache;
		rc = cache_end - cache;

		while (ce < cache_end) {
			(*ce)->ce_flags &= ~clear_mask;
			ce++;
		}
	} else if (pl->use_cone_patterns && orig_ret == NOT_MATCHED) {
		rc = cache_end - cache;
	} else {
		rc = clear_ce_flags_1(istate, cache, cache_end - cache,
				      prefix,
				      select_mask, clear_mask,
				      pl, ret,
				      progress_nr);
	}

	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}

/*
 * Traverse the index, find every entry that matches according to
 * 'pl'. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct index_state *istate,
			    struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct pattern_list *pl,
			    enum pattern_match_result default_match,
			    int progress_nr)
{
	struct cache_entry **cache_end = nr ? cache + nr : cache;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while(cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype;
		enum pattern_match_result ret;

		display_progress(istate->progress, progress_nr);

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			progress_nr++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(istate, cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       pl, default_match,
						       progress_nr);

			/* clear_c_f_dir eats a whole dir already?
			 */
			if (processed) {
				cache += processed;
				progress_nr += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = clear_ce_flags_1(istate, cache, cache_end - cache,
						     prefix,
						     select_mask, clear_mask, pl,
						     default_match, progress_nr);

			cache += processed;
			progress_nr += processed;

			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = path_matches_pattern_list(ce->name,
						ce_namelen(ce),
						name, &dtype, pl, istate);
		if (ret == UNDECIDED)
			ret = default_match;
		if (ret == MATCHED || ret == MATCHED_RECURSIVE)
			ce->ce_flags &= ~clear_mask;
		cache++;
		progress_nr++;
	}

	display_progress(istate->progress, progress_nr);
	return nr - (cache_end - cache);
}

/*
 * Clear 'clear_mask' on every index entry matching 'pl' (optionally
 * restricted to entries that have 'select_mask' set), showing progress
 * when requested.  Returns the number of entries traversed.
 */
static int clear_ce_flags(struct index_state *istate,
			  int select_mask, int clear_mask,
			  struct pattern_list *pl,
			  int show_progress)
{
	static struct strbuf prefix = STRBUF_INIT;
	char label[100];
	int rval;

	strbuf_reset(&prefix);
	if (show_progress)
		istate->progress = start_delayed_progress(
			the_repository,
			_("Updating index flags"),
			istate->cache_nr);

	xsnprintf(label, sizeof(label), "clear_ce_flags(0x%08lx,0x%08lx)",
		  (unsigned long)select_mask, (unsigned long)clear_mask);
	trace2_region_enter("unpack_trees", label, the_repository);
	rval = clear_ce_flags_1(istate,
				istate->cache,
				istate->cache_nr,
				&prefix,
				select_mask, clear_mask,
				pl, 0, 0);
	trace2_region_leave("unpack_trees", label, the_repository);

	stop_progress(&istate->progress);
	return rval;
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct pattern_list *pl,
				   struct index_state *istate,
				   int select_flag, int skip_wt_flag,
				   int show_progress)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(istate, select_flag, skip_wt_flag, pl, show_progress);
}

/*
 * Load the repository's sparse-checkout patterns into 'pl'; on failure
 * disable sparse checkout for this operation instead of erroring out.
 */
static void populate_from_existing_patterns(struct unpack_trees_options *o,
					    struct pattern_list *pl)
{
	if (get_sparse_checkout_patterns(pl) < 0)
		o->skip_sparse_checkout = 1;
	else
		o->internal.pl = pl;
}

/*
 * Expand a sparse index when the given prefix points at (or into) a
 * sparse directory entry.
 */
static void update_sparsity_for_prefix(const char *prefix,
				       struct index_state *istate)
{
	int prefix_len = strlen(prefix);
	struct strbuf ce_prefix = STRBUF_INIT;

	if (!istate->sparse_index)
		return;

	while (prefix_len > 0 && prefix[prefix_len - 1] == '/')
		prefix_len--;

	if (prefix_len <= 0)
		BUG("Invalid prefix passed to update_sparsity_for_prefix");

	strbuf_grow(&ce_prefix, prefix_len + 1);
	strbuf_add(&ce_prefix, prefix, prefix_len);
	strbuf_addch(&ce_prefix, '/');

	/*
	 * If the prefix points to a sparse directory or a path inside a sparse
	 * directory, the index should be expanded. This is accomplished in one
	 * of two ways:
	 * - if the prefix is inside a sparse directory, it will be expanded by
	 *   the 'ensure_full_index(...)' call in 'index_name_pos(...)'.
	 * - if the prefix matches an existing sparse directory entry,
	 *   'index_name_pos(...)' will return its index position, triggering
	 *   the 'ensure_full_index(...)' below.
	 */
	if (!path_in_cone_mode_sparse_checkout(ce_prefix.buf, istate) &&
	    index_name_pos(istate, ce_prefix.buf, ce_prefix.len) >= 0)
		ensure_full_index(istate);

	strbuf_release(&ce_prefix);
}

static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	struct repository *repo = the_repository;
	int i, ret;
	static struct cache_entry *dfc;
	struct pattern_list pl;
	int free_pattern_list = 0;
	struct dir_struct dir = DIR_INIT;

	/* Sanity checks on caller-supplied options. */
	if (o->reset == UNPACK_RESET_INVALID)
		BUG("o->reset had a value of 1; should be UNPACK_TREES_*_UNTRACKED");

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
	if (o->internal.dir)
		BUG("o->internal.dir is for internal use only");
	if (o->internal.pl)
		BUG("o->internal.pl is for internal use only");
	if (o->df_conflict_entry)
		BUG("o->df_conflict_entry is an output only field");

	trace_performance_enter();
	trace2_region_enter("unpack_trees", "unpack_trees", the_repository);

	prepare_repo_settings(repo);
	if (repo->settings.command_requires_full_index) {
		ensure_full_index(o->src_index);
		if (o->dst_index)
			ensure_full_index(o->dst_index);
	}

	if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED &&
	    o->preserve_ignored)
		BUG("UNPACK_RESET_OVERWRITE_UNTRACKED incompatible with preserved ignored files");

	if (!o->preserve_ignored) {
		o->internal.dir = &dir;
		o->internal.dir->flags |= DIR_SHOW_IGNORED;
		setup_standard_excludes(o->internal.dir);
	}

	if (o->prefix)
		update_sparsity_for_prefix(o->prefix, o->src_index);

	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		memset(&pl, 0, sizeof(pl));
		free_pattern_list = 1;
		populate_from_existing_patterns(o, &pl);
	}

	/* The result index starts out mirroring src_index's metadata. */
	index_state_init(&o->internal.result, o->src_index->repo);
	o->internal.result.initialized = 1;
	o->internal.result.timestamp.sec = o->src_index->timestamp.sec;
	o->internal.result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->internal.result.version = o->src_index->version;
	if (!o->src_index->split_index) {
		o->internal.result.split_index = NULL;
	} else if (o->src_index == o->dst_index) {
		/*
		 * o->dst_index (and thus o->src_index) will be discarded
		 * and overwritten with o->internal.result at the end of
		 * this function, so just use src_index's split_index to
		 * avoid having to create a new one.
		 */
		o->internal.result.split_index = o->src_index->split_index;
		if (o->src_index->cache_changed & SPLIT_INDEX_ORDERED)
			o->internal.result.cache_changed |= SPLIT_INDEX_ORDERED;
		o->internal.result.split_index->refcount++;
	} else {
		o->internal.result.split_index =
			init_split_index(&o->internal.result);
	}
	oidcpy(&o->internal.result.oid, &o->src_index->oid);
	o->internal.merge_size = len;
	mark_all_ce_unused(o->src_index);

	o->internal.result.fsmonitor_last_update =
		xstrdup_or_null(o->src_index->fsmonitor_last_update);
	o->internal.result.fsmonitor_has_run_once = o->src_index->fsmonitor_has_run_once;

	if (!o->src_index->initialized &&
	    !repo->settings.command_requires_full_index &&
	    is_sparse_index_allowed(&o->internal.result, 0))
		o->internal.result.sparse_index = 1;

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->internal.pl, o->src_index, 0,
				       CE_NEW_SKIP_WORKTREE, o->verbose_update);

	/* Shared all-zero directory/file conflict entry, allocated once. */
	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->internal.show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		trace_performance_enter();
		trace2_region_enter("unpack_trees", "traverse_trees", the_repository);
		ret = traverse_trees(o->src_index, len, t, &info);
		trace2_region_leave("unpack_trees", "traverse_trees", the_repository);
		trace_performance_leave("traverse_trees");
		if (ret < 0)
			goto return_failed;
	}

	/*
	 * Any left-over entries in the index?  Feed entries the tree
	 * traversal never consumed to the merge function.
	 */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->internal.nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->internal.pl, &o->internal.result,
				       CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE,
				       o->verbose_update);

		ret = 0;
		for (i = 0; i < o->internal.result.cache_nr; i++) {
			struct cache_entry *ce = o->internal.result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have had
			 * correct CE_NEW_SKIP_WORKTREE
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o))
				ret = 1;

			if (apply_sparse_checkout(&o->internal.result, ce, o))
				ret = 1;
		}
		if (ret == 1) {
			/*
			 * Inability to sparsify or de-sparsify individual
			 * paths is not an error, but just a warning.
			 */
			if (o->internal.show_all_errors)
				display_warning_msgs(o);
			ret = 0;
		}
	}

	ret = check_updates(o, &o->internal.result) ? (-2) : 0;
	if (o->dst_index) {
		move_index_extensions(&o->internal.result, o->src_index);
		if (!ret) {
			if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0) &&
			    cache_tree_verify(the_repository,
					      &o->internal.result) < 0) {
				ret = -1;
				goto done;
			}

			if (!o->skip_cache_tree_update &&
			    !cache_tree_fully_valid(o->internal.result.cache_tree))
				cache_tree_update(&o->internal.result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}

		o->internal.result.updated_workdir = 1;
		discard_index(o->dst_index);
		*o->dst_index = o->internal.result;
		memset(&o->internal.result, 0, sizeof(o->internal.result));
	} else {
		discard_index(&o->internal.result);
	}
	o->src_index = NULL;

done:
	/* Common cleanup for both the success and failure paths. */
	if (free_pattern_list)
		clear_pattern_list(&pl);
	if (o->internal.dir) {
		dir_clear(o->internal.dir);
		o->internal.dir = NULL;
	}
	trace2_region_leave("unpack_trees", "unpack_trees", the_repository);
	trace_performance_leave("unpack_trees");
	return ret;

return_failed:
	if (o->internal.show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/*
 * Update SKIP_WORKTREE bits according to sparsity patterns, and update
working directory to match.
 *
 * CE_NEW_SKIP_WORKTREE is used internally.
 */
enum update_sparsity_result update_sparsity(struct unpack_trees_options *o,
					    struct pattern_list *pl)
{
	enum update_sparsity_result ret = UPDATE_SPARSITY_SUCCESS;
	int i;
	unsigned old_show_all_errors;
	int free_pattern_list = 0;

	/* Warnings should always be collected and shown for this operation. */
	old_show_all_errors = o->internal.show_all_errors;
	o->internal.show_all_errors = 1;
	index_state_init(&o->internal.result, o->src_index->repo);

	/* Sanity checks */
	if (!o->update || o->index_only || o->skip_sparse_checkout)
		BUG("update_sparsity() is for reflecting sparsity patterns in working directory");
	if (o->src_index != o->dst_index || o->fn)
		BUG("update_sparsity() called wrong");

	trace_performance_enter();

	/* If we weren't given patterns, use the recorded ones */
	if (!pl) {
		free_pattern_list = 1;
		pl = xcalloc(1, sizeof(*pl));
		populate_from_existing_patterns(o, pl);
	}
	o->internal.pl = pl;

	/* Expand sparse directories as needed */
	expand_index(o->src_index, o->internal.pl);

	/* Set NEW_SKIP_WORKTREE on existing entries. */
	mark_all_ce_unused(o->src_index);
	mark_new_skip_worktree(o->internal.pl, o->src_index, 0,
			       CE_NEW_SKIP_WORKTREE, o->verbose_update);

	/* Then loop over entries and update/remove as needed */
	ret = UPDATE_SPARSITY_SUCCESS;
	for (i = 0; i < o->src_index->cache_nr; i++) {
		struct cache_entry *ce = o->src_index->cache[i];


		if (ce_stage(ce)) {
			/* -1 because for loop will increment by 1 */
			i += warn_conflicted_path(o->src_index, i, o) - 1;
			ret = UPDATE_SPARSITY_WARNINGS;
			continue;
		}

		if (apply_sparse_checkout(o->src_index, ce, o))
			ret = UPDATE_SPARSITY_WARNINGS;
	}

	if (check_updates(o, o->src_index))
		ret = UPDATE_SPARSITY_WORKTREE_UPDATE_FAILURES;

	display_warning_msgs(o);
	o->internal.show_all_errors = old_show_all_errors;
	if (free_pattern_list) {
		clear_pattern_list(pl);
		free(pl);
		o->internal.pl = NULL;
	}
	trace_performance_leave("update_sparsity");
	return ret;
}

/* Here come the merge functions */

/*
 * Reject the merge: record an ERROR_WOULD_OVERWRITE message for 'ce'
 * and return add_rejected_path()'s result.
 */
static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

/*
 * Two entries are "the same" when both are present, neither carries
 * CE_CONFLICTED, and both mode and OID match; two absent (NULL)
 * entries also count as the same.
 */
static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       oideq(&a->oid, &b->oid);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again
	 * if this entry is truly up-to-date because this file may be
	 * overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);

		if (submodule_from_ce(ce)) {
			int r = check_submodule_move_head(ce,
				"HEAD", oid_to_hex(&ce->oid), o);
			if (r)
				return add_rejected_path(o, error_type, ce->name);
			return 0;
		}

		if (!changed)
			return 0;
		/*
		 * Historic default policy was to allow submodule to be out
		 * of sync wrt the superproject index. If the submodule was
		 * not considered interesting above, we don't care here.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;

		/*
		 * lstat() succeeded, so errno may hold a stale value;
		 * clear it so the ENOENT check below cannot misfire and
		 * we correctly report the path as not up to date.
		 */
		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return add_rejected_path(o, error_type, ce->name);
}

/*
 * Like verify_uptodate_1() with ERROR_NOT_UPTODATE_FILE, but entries
 * that were skip-worktree and will remain skip-worktree under the new
 * sparsity patterns are exempt from the check.
 */
int verify_uptodate(const struct cache_entry *ce,
		    struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout &&
	    (ce->ce_flags & CE_SKIP_WORKTREE) &&
	    (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

/*
 * Sparse-checkout variant: reports WARNING_SPARSE_NOT_UPTODATE_FILE
 * instead of a hard error.
 */
static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, WARNING_SPARSE_NOT_UPTODATE_FILE);
}

/*
 * TODO: We should actually invalidate o->internal.result, not src_index [1].
 * But since cache tree and untracked cache both are not copied to
 * o->internal.result until unpacking is complete, we invalidate them on
 * src_index instead with the assumption that they will be copied to
 * dst_index at the end.
2290 * 2291 * [1] src_index->cache_tree is also used in unpack_callback() so if 2292 * we invalidate o->internal.result, we need to update it to use 2293 * o->internal.result.cache_tree as well. 2294 */ 2295static void invalidate_ce_path(const struct cache_entry *ce, 2296 struct unpack_trees_options *o) 2297{ 2298 if (!ce) 2299 return; 2300 cache_tree_invalidate_path(o->src_index, ce->name); 2301 untracked_cache_invalidate_path(o->src_index, ce->name, 1); 2302} 2303 2304/* 2305 * Check that checking out ce->sha1 in subdir ce->name is not 2306 * going to overwrite any working files. 2307 */ 2308static int verify_clean_submodule(const char *old_sha1, 2309 const struct cache_entry *ce, 2310 struct unpack_trees_options *o) 2311{ 2312 if (!submodule_from_ce(ce)) 2313 return 0; 2314 2315 return check_submodule_move_head(ce, old_sha1, 2316 oid_to_hex(&ce->oid), o); 2317} 2318 2319static int verify_clean_subdirectory(const struct cache_entry *ce, 2320 struct unpack_trees_options *o) 2321{ 2322 /* 2323 * we are about to extract "ce->name"; we would not want to lose 2324 * anything in the existing directory there. 2325 */ 2326 int namelen; 2327 int i; 2328 struct dir_struct d; 2329 char *pathbuf; 2330 int cnt = 0; 2331 2332 if (S_ISGITLINK(ce->ce_mode)) { 2333 struct object_id oid; 2334 int sub_head = repo_resolve_gitlink_ref(the_repository, ce->name, 2335 "HEAD", &oid); 2336 /* 2337 * If we are not going to update the submodule, then 2338 * we don't care. 2339 */ 2340 if (!sub_head && oideq(&oid, &ce->oid)) 2341 return 0; 2342 return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid), 2343 ce, o); 2344 } 2345 2346 /* 2347 * First let's make sure we do not have a local modification 2348 * in that directory. 
2349 */ 2350 namelen = ce_namelen(ce); 2351 for (i = locate_in_src_index(ce, o); 2352 i < o->src_index->cache_nr; 2353 i++) { 2354 struct cache_entry *ce2 = o->src_index->cache[i]; 2355 int len = ce_namelen(ce2); 2356 if (len < namelen || 2357 strncmp(ce->name, ce2->name, namelen) || 2358 ce2->name[namelen] != '/') 2359 break; 2360 /* 2361 * ce2->name is an entry in the subdirectory to be 2362 * removed. 2363 */ 2364 if (!ce_stage(ce2)) { 2365 if (verify_uptodate(ce2, o)) 2366 return -1; 2367 add_entry(o, ce2, CE_REMOVE, 0); 2368 invalidate_ce_path(ce, o); 2369 mark_ce_used(ce2, o); 2370 } 2371 cnt++; 2372 } 2373 2374 /* Do not lose a locally present file that is not ignored. */ 2375 pathbuf = xstrfmt("%.*s/", namelen, ce->name); 2376 2377 memset(&d, 0, sizeof(d)); 2378 if (o->internal.dir) 2379 setup_standard_excludes(&d); 2380 i = read_directory(&d, o->src_index, pathbuf, namelen+1, NULL); 2381 dir_clear(&d); 2382 free(pathbuf); 2383 if (i) 2384 return add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name); 2385 2386 /* Do not lose startup_info->original_cwd */ 2387 if (startup_info->original_cwd && 2388 !strcmp(startup_info->original_cwd, ce->name)) 2389 return add_rejected_path(o, ERROR_CWD_IN_THE_WAY, ce->name); 2390 2391 return cnt; 2392} 2393 2394/* 2395 * This gets called when there was no index entry for the tree entry 'dst', 2396 * but we found a file in the working tree that 'lstat()' said was fine, 2397 * and we're on a case-insensitive filesystem. 2398 * 2399 * See if we can find a case-insensitive match in the index that also 2400 * matches the stat information, and assume it's that other file! 
2401 */ 2402static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st) 2403{ 2404 const struct cache_entry *src; 2405 2406 src = index_file_exists(o->src_index, name, len, 1); 2407 return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE); 2408} 2409 2410enum absent_checking_type { 2411 COMPLETELY_ABSENT, 2412 ABSENT_ANY_DIRECTORY 2413}; 2414 2415static int check_ok_to_remove(const char *name, int len, int dtype, 2416 const struct cache_entry *ce, struct stat *st, 2417 enum unpack_trees_error_types error_type, 2418 enum absent_checking_type absent_type, 2419 struct unpack_trees_options *o) 2420{ 2421 const struct cache_entry *result; 2422 2423 /* 2424 * It may be that the 'lstat()' succeeded even though 2425 * target 'ce' was absent, because there is an old 2426 * entry that is different only in case.. 2427 * 2428 * Ignore that lstat() if it matches. 2429 */ 2430 if (ignore_case && icase_exists(o, name, len, st)) 2431 return 0; 2432 2433 if (o->internal.dir && 2434 is_excluded(o->internal.dir, o->src_index, name, &dtype)) 2435 /* 2436 * ce->name is explicitly excluded, so it is Ok to 2437 * overwrite it. 2438 */ 2439 return 0; 2440 if (S_ISDIR(st->st_mode)) { 2441 /* 2442 * We are checking out path "foo" and 2443 * found "foo/." in the working tree. 2444 * This is tricky -- if we have modified 2445 * files that are in "foo/" we would lose 2446 * them. 2447 */ 2448 if (verify_clean_subdirectory(ce, o) < 0) 2449 return -1; 2450 return 0; 2451 } 2452 2453 /* If we only care about directories, then we can remove */ 2454 if (absent_type == ABSENT_ANY_DIRECTORY) 2455 return 0; 2456 2457 /* 2458 * The previous round may already have decided to 2459 * delete this path, which is in a subdirectory that 2460 * is being replaced with a blob. 
2461 */ 2462 result = index_file_exists(&o->internal.result, name, len, 0); 2463 if (result) { 2464 if (result->ce_flags & CE_REMOVE) 2465 return 0; 2466 } 2467 2468 return add_rejected_path(o, error_type, name); 2469} 2470 2471/* 2472 * We do not want to remove or overwrite a working tree file that 2473 * is not tracked, unless it is ignored. 2474 */ 2475static int verify_absent_1(const struct cache_entry *ce, 2476 enum unpack_trees_error_types error_type, 2477 enum absent_checking_type absent_type, 2478 struct unpack_trees_options *o) 2479{ 2480 int len; 2481 struct stat st; 2482 2483 if (o->index_only || !o->update) 2484 return 0; 2485 2486 if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED) { 2487 /* Avoid nuking startup_info->original_cwd... */ 2488 if (startup_info->original_cwd && 2489 !strcmp(startup_info->original_cwd, ce->name)) 2490 return add_rejected_path(o, ERROR_CWD_IN_THE_WAY, 2491 ce->name); 2492 /* ...but nuke anything else. */ 2493 return 0; 2494 } 2495 2496 len = check_leading_path(ce->name, ce_namelen(ce), 0); 2497 if (!len) 2498 return 0; 2499 else if (len > 0) { 2500 char *path; 2501 int ret; 2502 2503 path = xmemdupz(ce->name, len); 2504 if (lstat(path, &st)) 2505 ret = error_errno("cannot stat '%s'", path); 2506 else { 2507 if (submodule_from_ce(ce)) 2508 ret = check_submodule_move_head(ce, 2509 oid_to_hex(&ce->oid), 2510 NULL, o); 2511 else 2512 ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL, 2513 &st, error_type, 2514 absent_type, o); 2515 } 2516 free(path); 2517 return ret; 2518 } else if (lstat(ce->name, &st)) { 2519 if (errno != ENOENT) 2520 return error_errno("cannot stat '%s'", ce->name); 2521 return 0; 2522 } else { 2523 if (submodule_from_ce(ce)) 2524 return check_submodule_move_head(ce, oid_to_hex(&ce->oid), 2525 NULL, o); 2526 2527 return check_ok_to_remove(ce->name, ce_namelen(ce), 2528 ce_to_dtype(ce), ce, &st, 2529 error_type, absent_type, o); 2530 } 2531} 2532 2533static int verify_absent(const struct cache_entry *ce, 
2534 enum unpack_trees_error_types error_type, 2535 struct unpack_trees_options *o) 2536{ 2537 if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) 2538 return 0; 2539 return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o); 2540} 2541 2542static int verify_absent_if_directory(const struct cache_entry *ce, 2543 enum unpack_trees_error_types error_type, 2544 struct unpack_trees_options *o) 2545{ 2546 if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) 2547 return 0; 2548 return verify_absent_1(ce, error_type, ABSENT_ANY_DIRECTORY, o); 2549} 2550 2551static int verify_absent_sparse(const struct cache_entry *ce, 2552 enum unpack_trees_error_types error_type, 2553 struct unpack_trees_options *o) 2554{ 2555 return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o); 2556} 2557 2558static int merged_entry(const struct cache_entry *ce, 2559 const struct cache_entry *old, 2560 struct unpack_trees_options *o) 2561{ 2562 int update = CE_UPDATE; 2563 struct cache_entry *merge = dup_cache_entry(ce, &o->internal.result); 2564 2565 if (!old) { 2566 /* 2567 * New index entries. In sparse checkout, the following 2568 * verify_absent() will be delayed until after 2569 * traverse_trees() finishes in unpack_trees(), then: 2570 * 2571 * - CE_NEW_SKIP_WORKTREE will be computed correctly 2572 * - verify_absent() be called again, this time with 2573 * correct CE_NEW_SKIP_WORKTREE 2574 * 2575 * verify_absent() call here does nothing in sparse 2576 * checkout (i.e. 
o->skip_sparse_checkout == 0) 2577 */ 2578 update |= CE_ADDED; 2579 merge->ce_flags |= CE_NEW_SKIP_WORKTREE; 2580 2581 if (verify_absent(merge, 2582 ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) { 2583 discard_cache_entry(merge); 2584 return -1; 2585 } 2586 invalidate_ce_path(merge, o); 2587 2588 if (submodule_from_ce(ce) && file_exists(ce->name)) { 2589 int ret = check_submodule_move_head(ce, NULL, 2590 oid_to_hex(&ce->oid), 2591 o); 2592 if (ret) 2593 return ret; 2594 } 2595 2596 } else if (!(old->ce_flags & CE_CONFLICTED)) { 2597 /* 2598 * See if we can re-use the old CE directly? 2599 * That way we get the uptodate stat info. 2600 * 2601 * This also removes the UPDATE flag on a match; otherwise 2602 * we will end up overwriting local changes in the work tree. 2603 */ 2604 if (same(old, merge)) { 2605 copy_cache_entry(merge, old); 2606 update = 0; 2607 } else { 2608 if (verify_uptodate(old, o)) { 2609 discard_cache_entry(merge); 2610 return -1; 2611 } 2612 /* Migrate old flags over */ 2613 update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE); 2614 invalidate_ce_path(old, o); 2615 } 2616 2617 if (submodule_from_ce(ce) && file_exists(ce->name)) { 2618 int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid), 2619 oid_to_hex(&ce->oid), 2620 o); 2621 if (ret) 2622 return ret; 2623 } 2624 } else { 2625 /* 2626 * Previously unmerged entry left as an existence 2627 * marker by read_index_unmerged(); 2628 */ 2629 if (verify_absent_if_directory(merge, 2630 ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) { 2631 discard_cache_entry(merge); 2632 return -1; 2633 } 2634 2635 invalidate_ce_path(old, o); 2636 } 2637 2638 if (do_add_entry(o, merge, update, CE_STAGEMASK) < 0) 2639 return -1; 2640 return 1; 2641} 2642 2643static int merged_sparse_dir(const struct cache_entry * const *src, int n, 2644 struct unpack_trees_options *o) 2645{ 2646 struct tree_desc t[MAX_UNPACK_TREES + 1]; 2647 void * tree_bufs[MAX_UNPACK_TREES + 1]; 2648 struct traverse_info info; 2649 
int i, ret; 2650 2651 /* 2652 * Create the tree traversal information for traversing into *only* the 2653 * sparse directory. 2654 */ 2655 setup_traverse_info(&info, src[0]->name); 2656 info.fn = unpack_sparse_callback; 2657 info.data = o; 2658 info.show_all_errors = o->internal.show_all_errors; 2659 info.pathspec = o->pathspec; 2660 2661 /* Get the tree descriptors of the sparse directory in each of the merging trees */ 2662 for (i = 0; i < n; i++) 2663 tree_bufs[i] = fill_tree_descriptor(o->src_index->repo, &t[i], 2664 src[i] && !is_null_oid(&src[i]->oid) ? &src[i]->oid : NULL); 2665 2666 ret = traverse_trees(o->src_index, n, t, &info); 2667 2668 for (i = 0; i < n; i++) 2669 free(tree_bufs[i]); 2670 2671 return ret; 2672} 2673 2674static int deleted_entry(const struct cache_entry *ce, 2675 const struct cache_entry *old, 2676 struct unpack_trees_options *o) 2677{ 2678 /* Did it exist in the index? */ 2679 if (!old) { 2680 if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o)) 2681 return -1; 2682 return 0; 2683 } else if (verify_absent_if_directory(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o)) { 2684 return -1; 2685 } 2686 2687 if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o)) 2688 return -1; 2689 add_entry(o, ce, CE_REMOVE, 0); 2690 invalidate_ce_path(ce, o); 2691 return 1; 2692} 2693 2694static int keep_entry(const struct cache_entry *ce, 2695 struct unpack_trees_options *o) 2696{ 2697 add_entry(o, ce, 0, 0); 2698 if (ce_stage(ce)) 2699 invalidate_ce_path(ce, o); 2700 return 1; 2701} 2702 2703#if DBRT_DEBUG 2704static void show_stage_entry(FILE *o, 2705 const char *label, const struct cache_entry *ce) 2706{ 2707 if (!ce) 2708 fprintf(o, "%s (missing)\n", label); 2709 else 2710 fprintf(o, "%s%06o %s %d\t%s\n", 2711 label, 2712 ce->ce_mode, 2713 oid_to_hex(&ce->oid), 2714 ce_stage(ce), 2715 ce->name); 2716} 2717#endif 2718 2719int threeway_merge(const struct cache_entry * const *stages, 2720 struct unpack_trees_options *o) 2721{ 2722 const 
struct cache_entry *index; 2723 const struct cache_entry *head; 2724 const struct cache_entry *remote = stages[o->head_idx + 1]; 2725 int count; 2726 int head_match = 0; 2727 int remote_match = 0; 2728 2729 int df_conflict_head = 0; 2730 int df_conflict_remote = 0; 2731 2732 int any_anc_missing = 0; 2733 int no_anc_exists = 1; 2734 int i; 2735 2736 for (i = 1; i < o->head_idx; i++) { 2737 if (!stages[i] || stages[i] == o->df_conflict_entry) 2738 any_anc_missing = 1; 2739 else 2740 no_anc_exists = 0; 2741 } 2742 2743 index = stages[0]; 2744 head = stages[o->head_idx]; 2745 2746 if (head == o->df_conflict_entry) { 2747 df_conflict_head = 1; 2748 head = NULL; 2749 } 2750 2751 if (remote == o->df_conflict_entry) { 2752 df_conflict_remote = 1; 2753 remote = NULL; 2754 } 2755 2756 /* 2757 * First, if there's a #16 situation, note that to prevent #13 2758 * and #14. 2759 */ 2760 if (!same(remote, head)) { 2761 for (i = 1; i < o->head_idx; i++) { 2762 if (same(stages[i], head)) { 2763 head_match = i; 2764 } 2765 if (same(stages[i], remote)) { 2766 remote_match = i; 2767 } 2768 } 2769 } 2770 2771 /* 2772 * We start with cases where the index is allowed to match 2773 * something other than the head: #14(ALT) and #2ALT, where it 2774 * is permitted to match the result instead. 2775 */ 2776 /* #14, #14ALT, #2ALT */ 2777 if (remote && !df_conflict_head && head_match && !remote_match) { 2778 if (index && !same(index, remote) && !same(index, head)) { 2779 if (S_ISSPARSEDIR(index->ce_mode)) 2780 return merged_sparse_dir(stages, 4, o); 2781 else 2782 return reject_merge(index, o); 2783 } 2784 return merged_entry(remote, index, o); 2785 } 2786 /* 2787 * If we have an entry in the index cache, then we want to 2788 * make sure that it matches head. 
2789 */ 2790 if (index && !same(index, head)) { 2791 if (S_ISSPARSEDIR(index->ce_mode)) 2792 return merged_sparse_dir(stages, 4, o); 2793 else 2794 return reject_merge(index, o); 2795 } 2796 2797 if (head) { 2798 /* #5ALT, #15 */ 2799 if (same(head, remote)) 2800 return merged_entry(head, index, o); 2801 /* #13, #3ALT */ 2802 if (!df_conflict_remote && remote_match && !head_match) 2803 return merged_entry(head, index, o); 2804 } 2805 2806 /* #1 */ 2807 if (!head && !remote && any_anc_missing) 2808 return 0; 2809 2810 /* 2811 * Under the "aggressive" rule, we resolve mostly trivial 2812 * cases that we historically had git-merge-one-file resolve. 2813 */ 2814 if (o->aggressive) { 2815 int head_deleted = !head; 2816 int remote_deleted = !remote; 2817 const struct cache_entry *ce = NULL; 2818 2819 if (index) 2820 ce = index; 2821 else if (head) 2822 ce = head; 2823 else if (remote) 2824 ce = remote; 2825 else { 2826 for (i = 1; i < o->head_idx; i++) { 2827 if (stages[i] && stages[i] != o->df_conflict_entry) { 2828 ce = stages[i]; 2829 break; 2830 } 2831 } 2832 } 2833 2834 /* 2835 * Deleted in both. 2836 * Deleted in one and unchanged in the other. 2837 */ 2838 if ((head_deleted && remote_deleted) || 2839 (head_deleted && remote && remote_match) || 2840 (remote_deleted && head && head_match)) { 2841 if (index) 2842 return deleted_entry(index, index, o); 2843 if (ce && !head_deleted) { 2844 if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o)) 2845 return -1; 2846 } 2847 return 0; 2848 } 2849 /* 2850 * Added in both, identically. 2851 */ 2852 if (no_anc_exists && head && remote && same(head, remote)) 2853 return merged_entry(head, index, o); 2854 2855 } 2856 2857 /* Handle "no merge" cases (see t/t1000-read-tree-m-3way.sh) */ 2858 if (index) { 2859 /* 2860 * If we've reached the "no merge" cases and we're merging 2861 * a sparse directory, we may have an "edit/edit" conflict that 2862 * can be resolved by individually merging directory contents. 
2863 */ 2864 if (S_ISSPARSEDIR(index->ce_mode)) 2865 return merged_sparse_dir(stages, 4, o); 2866 2867 /* 2868 * If we're not merging a sparse directory, ensure the index is 2869 * up-to-date to avoid files getting overwritten with conflict 2870 * resolution files 2871 */ 2872 if (verify_uptodate(index, o)) 2873 return -1; 2874 } 2875 2876 o->internal.nontrivial_merge = 1; 2877 2878 /* #2, #3, #4, #6, #7, #9, #10, #11. */ 2879 count = 0; 2880 if (!head_match || !remote_match) { 2881 for (i = 1; i < o->head_idx; i++) { 2882 if (stages[i] && stages[i] != o->df_conflict_entry) { 2883 keep_entry(stages[i], o); 2884 count++; 2885 break; 2886 } 2887 } 2888 } 2889#if DBRT_DEBUG 2890 else { 2891 fprintf(stderr, "read-tree: warning #16 detected\n"); 2892 show_stage_entry(stderr, "head ", stages[head_match]); 2893 show_stage_entry(stderr, "remote ", stages[remote_match]); 2894 } 2895#endif 2896 if (head) { count += keep_entry(head, o); } 2897 if (remote) { count += keep_entry(remote, o); } 2898 return count; 2899} 2900 2901/* 2902 * Two-way merge. 2903 * 2904 * The rule is to "carry forward" what is in the index without losing 2905 * information across a "fast-forward", favoring a successful merge 2906 * over a merge failure when it makes sense. For details of the 2907 * "carry forward" rule, please see <Documentation/git-read-tree.adoc>. 
2908 * 2909 */ 2910int twoway_merge(const struct cache_entry * const *src, 2911 struct unpack_trees_options *o) 2912{ 2913 const struct cache_entry *current = src[0]; 2914 const struct cache_entry *oldtree = src[1]; 2915 const struct cache_entry *newtree = src[2]; 2916 2917 if (o->internal.merge_size != 2) 2918 return error("Cannot do a twoway merge of %d trees", 2919 o->internal.merge_size); 2920 2921 if (oldtree == o->df_conflict_entry) 2922 oldtree = NULL; 2923 if (newtree == o->df_conflict_entry) 2924 newtree = NULL; 2925 2926 if (current) { 2927 if (current->ce_flags & CE_CONFLICTED) { 2928 if (same(oldtree, newtree) || o->reset) { 2929 if (!newtree) 2930 return deleted_entry(current, current, o); 2931 else 2932 return merged_entry(newtree, current, o); 2933 } 2934 return reject_merge(current, o); 2935 } else if ((!oldtree && !newtree) || /* 4 and 5 */ 2936 (!oldtree && newtree && 2937 same(current, newtree)) || /* 6 and 7 */ 2938 (oldtree && newtree && 2939 same(oldtree, newtree)) || /* 14 and 15 */ 2940 (oldtree && newtree && 2941 !same(oldtree, newtree) && /* 18 and 19 */ 2942 same(current, newtree))) { 2943 return keep_entry(current, o); 2944 } else if (oldtree && !newtree && same(current, oldtree)) { 2945 /* 10 or 11 */ 2946 return deleted_entry(oldtree, current, o); 2947 } else if (oldtree && newtree && 2948 same(current, oldtree) && !same(current, newtree)) { 2949 /* 20 or 21 */ 2950 return merged_entry(newtree, current, o); 2951 } else if (current && !oldtree && newtree && 2952 S_ISSPARSEDIR(current->ce_mode) != S_ISSPARSEDIR(newtree->ce_mode) && 2953 ce_stage(current) == 0) { 2954 /* 2955 * This case is a directory/file conflict across the sparse-index 2956 * boundary. When we are changing from one path to another via 2957 * 'git checkout', then we want to replace one entry with another 2958 * via merged_entry(). If there are staged changes, then we should 2959 * reject the merge instead. 
2960 */ 2961 return merged_entry(newtree, current, o); 2962 } else if (S_ISSPARSEDIR(current->ce_mode)) { 2963 /* 2964 * The sparse directories differ, but we don't know whether that's 2965 * because of two different files in the directory being modified 2966 * (can be trivially merged) or if there is a real file conflict. 2967 * Merge the sparse directory by OID to compare file-by-file. 2968 */ 2969 return merged_sparse_dir(src, 3, o); 2970 } else 2971 return reject_merge(current, o); 2972 } 2973 else if (newtree) { 2974 if (oldtree && !o->initial_checkout) { 2975 /* 2976 * deletion of the path was staged; 2977 */ 2978 if (same(oldtree, newtree)) 2979 return 1; 2980 return reject_merge(oldtree, o); 2981 } 2982 return merged_entry(newtree, current, o); 2983 } 2984 return deleted_entry(oldtree, current, o); 2985} 2986 2987/* 2988 * Bind merge. 2989 * 2990 * Keep the index entries at stage0, collapse stage1 but make sure 2991 * stage0 does not have anything there. 2992 */ 2993int bind_merge(const struct cache_entry * const *src, 2994 struct unpack_trees_options *o) 2995{ 2996 const struct cache_entry *old = src[0]; 2997 const struct cache_entry *a = src[1]; 2998 2999 if (o->internal.merge_size != 1) 3000 return error("Cannot do a bind merge of %d trees", 3001 o->internal.merge_size); 3002 if (a && old) 3003 return o->quiet ? -1 : 3004 error(ERRORMSG(o, ERROR_BIND_OVERLAP), 3005 super_prefixed(a->name, o->super_prefix), 3006 super_prefixed(old->name, o->super_prefix)); 3007 if (!a) 3008 return keep_entry(old, o); 3009 else 3010 return merged_entry(a, NULL, o); 3011} 3012 3013/* 3014 * One-way merge. 
3015 * 3016 * The rule is: 3017 * - take the stat information from stage0, take the data from stage1 3018 */ 3019int oneway_merge(const struct cache_entry * const *src, 3020 struct unpack_trees_options *o) 3021{ 3022 const struct cache_entry *old = src[0]; 3023 const struct cache_entry *a = src[1]; 3024 3025 if (o->internal.merge_size != 1) 3026 return error("Cannot do a oneway merge of %d trees", 3027 o->internal.merge_size); 3028 3029 if (!a || a == o->df_conflict_entry) 3030 return deleted_entry(old, old, o); 3031 3032 if (old && same(old, a)) { 3033 int update = 0; 3034 if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old) && 3035 !(old->ce_flags & CE_FSMONITOR_VALID)) { 3036 struct stat st; 3037 if (lstat(old->name, &st) || 3038 ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE)) 3039 update |= CE_UPDATE; 3040 } 3041 if (o->update && S_ISGITLINK(old->ce_mode) && 3042 should_update_submodules() && !verify_uptodate(old, o)) 3043 update |= CE_UPDATE; 3044 add_entry(o, old, update, CE_STAGEMASK); 3045 return 0; 3046 } 3047 return merged_entry(a, old, o); 3048} 3049 3050/* 3051 * Merge worktree and untracked entries in a stash entry. 3052 * 3053 * Ignore all index entries. Collapse remaining trees but make sure that they 3054 * don't have any conflicting files. 3055 */ 3056int stash_worktree_untracked_merge(const struct cache_entry * const *src, 3057 struct unpack_trees_options *o) 3058{ 3059 const struct cache_entry *worktree = src[1]; 3060 const struct cache_entry *untracked = src[2]; 3061 3062 if (o->internal.merge_size != 2) 3063 BUG("invalid merge_size: %d", o->internal.merge_size); 3064 3065 if (worktree && untracked) 3066 return error(_("worktree and untracked commit have duplicate entries: %s"), 3067 super_prefixed(worktree->name, o->super_prefix)); 3068 3069 return merged_entry(worktree ? worktree : untracked, NULL, o); 3070}