Git fork: parallel-checkout.c (branch reftables-rust)
#define USE_THE_REPOSITORY_VARIABLE
#define DISABLE_SIGN_COMPARE_WARNINGS

#include "git-compat-util.h"
#include "config.h"
#include "entry.h"
#include "gettext.h"
#include "hash.h"
#include "hex.h"
#include "parallel-checkout.h"
#include "pkt-line.h"
#include "progress.h"
#include "read-cache-ll.h"
#include "run-command.h"
#include "sigchain.h"
#include "streaming.h"
#include "symlinks.h"
#include "thread-utils.h"
#include "trace2.h"

struct pc_worker {
	struct child_process cp;
	size_t next_item_to_complete, nr_items_to_complete;
};

struct parallel_checkout {
	enum pc_status status;
	struct parallel_checkout_item *items; /* The parallel checkout queue. */
	size_t nr, alloc;
	struct progress *progress;
	unsigned int *progress_cnt;
};

static struct parallel_checkout parallel_checkout;

enum pc_status parallel_checkout_status(void)
{
	return parallel_checkout.status;
}

static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;

void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
	char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

	if (env_workers && *env_workers) {
		if (strtol_i(env_workers, 10, num_workers)) {
			die(_("invalid value for '%s': '%s'"),
			    "GIT_TEST_CHECKOUT_WORKERS", env_workers);
		}
		if (*num_workers < 1)
			*num_workers = online_cpus();

		*threshold = 0;
		return;
	}

	if (repo_config_get_int(the_repository, "checkout.workers", num_workers))
		*num_workers = DEFAULT_NUM_WORKERS;
	else if (*num_workers < 1)
		*num_workers = online_cpus();

	if (repo_config_get_int(the_repository, "checkout.thresholdForParallelism", threshold))
		*threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}

void init_parallel_checkout(void)
{
	if (parallel_checkout.status != PC_UNINITIALIZED)
		BUG("parallel checkout already initialized");

	parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}

static void finish_parallel_checkout(void)
{
	if (parallel_checkout.status == PC_UNINITIALIZED)
		BUG("cannot finish parallel checkout: not initialized yet");

	free(parallel_checkout.items);
	memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}

static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
					     const struct conv_attrs *ca)
{
	enum conv_attrs_classification c;
	size_t packed_item_size;

	/*
	 * Symlinks cannot be checked out in parallel as, in case of path
	 * collision, they could racily replace leading directories of other
	 * entries being checked out. Submodules are checked out in child
	 * processes, which have their own parallel checkout queues.
	 */
	if (!S_ISREG(ce->ce_mode))
		return 0;

	packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
		(ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

	/*
	 * The amount of data we send to the workers per checkout item is
	 * typically small (75~300B). So unless we find an insanely huge path
	 * of 64KB, we should never reach the 65KB limit of one pkt-line. If
	 * that does happen, we let the sequential code handle the item.
	 */
	if (packed_item_size > LARGE_PACKET_DATA_MAX)
		return 0;

	c = classify_conv_attrs(ca);
	switch (c) {
	case CA_CLASS_INCORE:
		return 1;

	case CA_CLASS_INCORE_FILTER:
		/*
		 * It would be safe to allow concurrent instances of
		 * single-file smudge filters, like rot13, but we should not
		 * assume that all filters are parallel-process safe. So we
		 * don't allow this.
		 */
		return 0;

	case CA_CLASS_INCORE_PROCESS:
		/*
		 * The parallel queue and the delayed queue are not compatible,
		 * so they must be kept completely separated. And we can't tell
		 * if a long-running process will delay its response without
		 * actually asking it to perform the filtering. Therefore, this
		 * type of filter is not allowed in parallel checkout.
		 *
		 * Furthermore, there should only be one instance of the
		 * long-running process filter as we don't know how it is
		 * managing its own concurrency. So, spreading the entries that
		 * require such a filter among the parallel workers would
		 * require a lot more inter-process communication. We would
		 * probably have to designate a single process to interact with
		 * the filter and send all the necessary data to it, for each
		 * entry.
		 */
		return 0;

	case CA_CLASS_STREAMABLE:
		return 1;

	default:
		BUG("unsupported conv_attrs classification '%d'", c);
	}
}
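
/*
 * A minimal sketch of the intended calling sequence, for illustration only.
 * The real callers live elsewhere in Git (e.g. unpack-trees.c and entry.c),
 * and `istate`, `ca`, and `nr_checkouts` below are assumed context:
 *
 *	init_parallel_checkout();
 *	// for each cache_entry `ce` being checked out:
 *	convert_attrs(istate, &ca, ce->name);
 *	if (enqueue_checkout(ce, &ca, &nr_checkouts))
 *		checkout_entry_ca(ce, &ca, state, NULL, &nr_checkouts);
 *	// once all entries were enqueued:
 *	run_parallel_checkout(state, num_workers, threshold, progress, &cnt);
 */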

int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
		     int *checkout_counter)
{
	struct parallel_checkout_item *pc_item;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
	    !is_eligible_for_parallel_checkout(ce, ca))
		return -1;

	ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
		   parallel_checkout.alloc);

	pc_item = &parallel_checkout.items[parallel_checkout.nr];
	pc_item->ce = ce;
	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
	pc_item->status = PC_ITEM_PENDING;
	pc_item->id = parallel_checkout.nr;
	pc_item->checkout_counter = checkout_counter;
	parallel_checkout.nr++;

	return 0;
}

size_t pc_queue_size(void)
{
	return parallel_checkout.nr;
}

static void advance_progress_meter(void)
{
	if (parallel_checkout.progress) {
		(*parallel_checkout.progress_cnt)++;
		display_progress(parallel_checkout.progress,
				 *parallel_checkout.progress_cnt);
	}
}

static int handle_results(struct checkout *state)
{
	int ret = 0;
	size_t i;
	int have_pending = 0;

	/*
	 * We first update the successfully written entries with the collected
	 * stat() data, so that they can be found by mark_colliding_entries(),
	 * in the next loop, when necessary.
	 */
	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		if (pc_item->status == PC_ITEM_WRITTEN)
			update_ce_after_write(state, pc_item->ce, &pc_item->st);
	}

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

		switch(pc_item->status) {
		case PC_ITEM_WRITTEN:
			if (pc_item->checkout_counter)
				(*pc_item->checkout_counter)++;
			break;
		case PC_ITEM_COLLIDED:
			/*
			 * The entry could not be checked out due to a path
			 * collision with another entry. Since there can only
			 * be one entry of each colliding group on the disk, we
			 * could skip trying to check out this one and move on.
			 * However, this would leave the unwritten entries with
			 * null stat() fields on the index, which could
			 * potentially slow down subsequent operations that
			 * require refreshing it: git would not be able to
			 * trust st_size and would have to go to the filesystem
			 * to see if the contents match (see ie_modified()).
			 *
			 * Instead, let's pay the overhead only once, now, and
			 * call checkout_entry_ca() again for this file, to
			 * have its stat() data stored in the index. This also
			 * has the benefit of adding this entry and its
			 * colliding pair to the collision report message.
			 * Additionally, this overwriting behavior is consistent
			 * with what the sequential checkout does, so it doesn't
			 * add any extra overhead.
			 */
			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
						 state, NULL,
						 pc_item->checkout_counter);
			advance_progress_meter();
			break;
		case PC_ITEM_PENDING:
			have_pending = 1;
			/* fall through */
		case PC_ITEM_FAILED:
			ret = -1;
			break;
		default:
			BUG("unknown checkout item status in parallel checkout");
		}
	}

	if (have_pending)
		error("parallel checkout finished with pending entries");

	return ret;
}
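
/*
 * For reference, the item status transitions resolved above:
 *
 *	PC_ITEM_PENDING -> PC_ITEM_WRITTEN   (success; stat() data collected)
 *	                -> PC_ITEM_COLLIDED  (path collision; retried
 *	                                      sequentially by handle_results())
 *	                -> PC_ITEM_FAILED    (error already reported)
 *
 * An item that is still PC_ITEM_PENDING at this point means a worker died
 * before finishing its batch, which handle_results() flags as an error.
 */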

static int reset_fd(int fd, const char *path)
{
	if (lseek(fd, 0, SEEK_SET) != 0)
		return error_errno("failed to rewind descriptor of '%s'", path);
	if (ftruncate(fd, 0))
		return error_errno("failed to truncate file '%s'", path);
	return 0;
}

static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
			       const char *path)
{
	int ret;
	struct stream_filter *filter;
	struct strbuf buf = STRBUF_INIT;
	char *blob;
	size_t size;
	ssize_t wrote;

	/* Sanity check */
	ASSERT(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

	filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
	if (filter) {
		if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
			/* On error, reset fd to try writing without streaming */
			if (reset_fd(fd, path))
				return -1;
		} else {
			return 0;
		}
	}

	blob = read_blob_entry(pc_item->ce, &size);
	if (!blob)
		return error("cannot read object %s '%s'",
			     oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

	/*
	 * checkout metadata is used to give context for external process
	 * filters. Files requiring such filters are not eligible for parallel
	 * checkout, so pass NULL. Note: if that changes, the metadata must also
	 * be passed from the main process to the workers.
	 */
	ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
					 blob, size, &buf, NULL);

	if (ret) {
		size_t newsize;
		free(blob);
		blob = strbuf_detach(&buf, &newsize);
		size = newsize;
	}

	wrote = write_in_full(fd, blob, size);
	free(blob);
	if (wrote < 0)
		return error("unable to write file '%s'", path);

	return 0;
}

static int close_and_clear(int *fd)
{
	int ret = 0;

	if (*fd >= 0) {
		ret = close(*fd);
		*fd = -1;
	}

	return ret;
}
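
/*
 * Note on the mode computation below: as in the sequential checkout, only
 * the executable bit of ce_mode is honored. E.g. a mode-100755 entry is
 * created with 0777 and a mode-100644 entry with 0666, both further
 * restricted by the process umask applied by open().
 */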

void write_pc_item(struct parallel_checkout_item *pc_item,
		   struct checkout *state)
{
	unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
	int fd = -1, fstat_done = 0;
	struct strbuf path = STRBUF_INIT;
	const char *dir_sep;

	strbuf_add(&path, state->base_dir, state->base_dir_len);
	strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

	dir_sep = find_last_dir_sep(path.buf);

	/*
	 * The leading dirs should have been already created by now. But, in
	 * case of path collisions, one of the dirs could have been replaced by
	 * a symlink (checked out after we enqueued this entry for parallel
	 * checkout). Thus, we must check the leading dirs again.
	 */
	if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
					   state->base_dir_len)) {
		pc_item->status = PC_ITEM_COLLIDED;
		trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
		goto out;
	}

	fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

	if (fd < 0) {
		if (errno == EEXIST || errno == EISDIR) {
			/*
			 * Errors which probably represent a path collision.
			 * Suppress the error message and mark the item to be
			 * retried later, sequentially. ENOTDIR and ENOENT are
			 * also interesting, but the above has_dirs_only_path()
			 * call should have already caught these cases.
			 */
			pc_item->status = PC_ITEM_COLLIDED;
			trace2_data_string("pcheckout", NULL,
					   "collision/basename", path.buf);
		} else {
			error_errno("failed to open file '%s'", path.buf);
			pc_item->status = PC_ITEM_FAILED;
		}
		goto out;
	}

	if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
		/* Error was already reported. */
		pc_item->status = PC_ITEM_FAILED;
		close_and_clear(&fd);
		unlink(path.buf);
		goto out;
	}

	fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

	if (close_and_clear(&fd)) {
		error_errno("unable to close file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
		error_errno("unable to stat just-written file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	pc_item->status = PC_ITEM_WRITTEN;

out:
	strbuf_release(&path);
}

static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
	size_t len_data;
	char *data, *variant;
	struct pc_item_fixed_portion *fixed_portion;
	const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
	size_t name_len = pc_item->ce->ce_namelen;
	size_t working_tree_encoding_len = working_tree_encoding ?
		strlen(working_tree_encoding) : 0;

	/*
	 * Any changes in the calculation of the message size must also be made
	 * in is_eligible_for_parallel_checkout().
	 */
	len_data = sizeof(struct pc_item_fixed_portion) + name_len +
		working_tree_encoding_len;

	data = xmalloc(len_data);

	fixed_portion = (struct pc_item_fixed_portion *)data;
	fixed_portion->id = pc_item->id;
	fixed_portion->ce_mode = pc_item->ce->ce_mode;
	fixed_portion->crlf_action = pc_item->ca.crlf_action;
	fixed_portion->ident = pc_item->ca.ident;
	fixed_portion->name_len = name_len;
	fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
	oidcpy(&fixed_portion->oid, &pc_item->ce->oid);

	variant = data + sizeof(*fixed_portion);
	if (working_tree_encoding_len) {
		memcpy(variant, working_tree_encoding, working_tree_encoding_len);
		variant += working_tree_encoding_len;
	}
	memcpy(variant, pc_item->ce->name, name_len);

	packet_write(fd, data, len_data);

	free(data);
}

static void send_batch(int fd, size_t start, size_t nr)
{
	size_t i;
	sigchain_push(SIGPIPE, SIG_IGN);
	for (i = 0; i < nr; i++)
		send_one_item(fd, &parallel_checkout.items[start + i]);
	packet_flush(fd);
	sigchain_pop(SIGPIPE);
}
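
/*
 * For reference, the payload assembled by send_one_item() above and sent as
 * a single pkt-line:
 *
 *	| struct pc_item_fixed_portion | working_tree_encoding | ce->name |
 *
 * The working_tree_encoding block is present only when
 * working_tree_encoding_len is non-zero. Neither string is NUL-terminated;
 * their lengths travel in the fixed portion.
 */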

static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
	struct pc_worker *workers;
	int i, workers_with_one_extra_item;
	size_t base_batch_size, batch_beginning = 0;

	ALLOC_ARRAY(workers, num_workers);

	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;

		child_process_init(cp);
		cp->git_cmd = 1;
		cp->in = -1;
		cp->out = -1;
		cp->clean_on_exit = 1;
		strvec_push(&cp->args, "checkout--worker");
		if (state->base_dir_len)
			strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
		if (start_command(cp))
			die("failed to spawn checkout worker");
	}

	base_batch_size = parallel_checkout.nr / num_workers;
	workers_with_one_extra_item = parallel_checkout.nr % num_workers;

	for (i = 0; i < num_workers; i++) {
		struct pc_worker *worker = &workers[i];
		size_t batch_size = base_batch_size;

		/* distribute the extra work evenly */
		if (i < workers_with_one_extra_item)
			batch_size++;

		send_batch(worker->cp.in, batch_beginning, batch_size);
		worker->next_item_to_complete = batch_beginning;
		worker->nr_items_to_complete = batch_size;

		batch_beginning += batch_size;
	}

	return workers;
}
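
/*
 * A worked example of the batch distribution above: with
 * parallel_checkout.nr == 10 and num_workers == 4, base_batch_size is 2 and
 * the 10 % 4 == 2 leftover items go to the first two workers, yielding
 * batches of 3, 3, 2, and 2 items, i.e. queue ranges [0,2], [3,5], [6,7],
 * and [8,9].
 */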

static void finish_workers(struct pc_worker *workers, int num_workers)
{
	int i;

	/*
	 * Close pipes before calling finish_command() to let the workers
	 * exit asynchronously and avoid spending extra time on wait().
	 */
	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;
		if (cp->in >= 0)
			close(cp->in);
		if (cp->out >= 0)
			close(cp->out);
	}

	for (i = 0; i < num_workers; i++) {
		int rc = finish_command(&workers[i].cp);
		if (rc > 128) {
			/*
			 * For a normal non-zero exit, the worker should have
			 * already printed something useful to stderr. But a
			 * death by signal should be mentioned to the user.
			 */
			error("checkout worker %d died of signal %d", i, rc - 128);
		}
	}

	free(workers);
}

static inline void assert_pc_item_result_size(int got, int exp)
{
	if (got != exp)
		BUG("wrong result size from checkout worker (got %dB, exp %dB)",
		    got, exp);
}

static void parse_and_save_result(const char *buffer, int len,
				  struct pc_worker *worker)
{
	struct pc_item_result *res;
	struct parallel_checkout_item *pc_item;
	struct stat *st = NULL;

	if (len < PC_ITEM_RESULT_BASE_SIZE)
		BUG("too short result from checkout worker (got %dB, exp >=%dB)",
		    len, (int)PC_ITEM_RESULT_BASE_SIZE);

	res = (struct pc_item_result *)buffer;

	/*
	 * Worker should send either the full result struct on success, or
	 * just the base (i.e. no stat data), otherwise.
	 */
	if (res->status == PC_ITEM_WRITTEN) {
		assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
		st = &res->st;
	} else {
		assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
	}

	if (!worker->nr_items_to_complete)
		BUG("received result from supposedly finished checkout worker");
	if (res->id != worker->next_item_to_complete)
		BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
		    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

	worker->next_item_to_complete++;
	worker->nr_items_to_complete--;

	pc_item = &parallel_checkout.items[res->id];
	pc_item->status = res->status;
	if (st)
		pc_item->st = *st;

	if (res->status != PC_ITEM_COLLIDED)
		advance_progress_meter();
}

static void gather_results_from_workers(struct pc_worker *workers,
					int num_workers)
{
	int i, active_workers = num_workers;
	struct pollfd *pfds;

	CALLOC_ARRAY(pfds, num_workers);
	for (i = 0; i < num_workers; i++) {
		pfds[i].fd = workers[i].cp.out;
		pfds[i].events = POLLIN;
	}

	while (active_workers) {
		int nr = poll(pfds, num_workers, -1);

		if (nr < 0) {
			if (errno == EINTR)
				continue;
			die_errno("failed to poll checkout workers");
		}

		for (i = 0; i < num_workers && nr > 0; i++) {
			struct pc_worker *worker = &workers[i];
			struct pollfd *pfd = &pfds[i];

			if (!pfd->revents)
				continue;

			if (pfd->revents & POLLIN) {
				int len = packet_read(pfd->fd, packet_buffer,
						      sizeof(packet_buffer), 0);

				if (len < 0) {
					BUG("packet_read() returned negative value");
				} else if (!len) {
					pfd->fd = -1;
					active_workers--;
				} else {
					parse_and_save_result(packet_buffer,
							      len, worker);
				}
			} else if (pfd->revents & POLLHUP) {
				pfd->fd = -1;
				active_workers--;
			} else if (pfd->revents & (POLLNVAL | POLLERR)) {
				die("error polling from checkout worker");
			}

			nr--;
		}
	}

	free(pfds);
}

static void write_items_sequentially(struct checkout *state)
{
	size_t i;

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		write_pc_item(pc_item, state);
		if (pc_item->status != PC_ITEM_COLLIDED)
			advance_progress_meter();
	}
}
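
/*
 * Putting the config knobs together (a hypothetical invocation, for
 * illustration):
 *
 *	git -c checkout.workers=0 -c checkout.thresholdForParallelism=100 \
 *		checkout ...
 *
 * resolves to one worker per online CPU, but run_parallel_checkout() below
 * still takes the sequential path unless at least 100 eligible entries were
 * enqueued.
 */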

int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
			  struct progress *progress, unsigned int *progress_cnt)
{
	int ret;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
		BUG("cannot run parallel checkout: uninitialized or already running");

	parallel_checkout.status = PC_RUNNING;
	parallel_checkout.progress = progress;
	parallel_checkout.progress_cnt = progress_cnt;

	if (parallel_checkout.nr < num_workers)
		num_workers = parallel_checkout.nr;

	if (num_workers <= 1 || parallel_checkout.nr < threshold) {
		write_items_sequentially(state);
	} else {
		struct pc_worker *workers = setup_workers(state, num_workers);
		gather_results_from_workers(workers, num_workers);
		finish_workers(workers, num_workers);
	}

	ret = handle_results(state);

	finish_parallel_checkout();
	return ret;
}
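
/*
 * Protocol summary (main process <-> checkout--worker), as implemented by
 * send_batch() and gather_results_from_workers() above:
 *
 *	main:   pkt-line(item) pkt-line(item) ... flush-pkt
 *	worker: pkt-line(pc_item_result) ...      flush-pkt
 *
 * Each worker answers its batch in the order the items were sent (enforced
 * by parse_and_save_result()), attaches stat() data only to PC_ITEM_WRITTEN
 * results, and signals completion with a flush packet.
 */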