/* git fsmonitor--daemon: built-in filesystem-monitor daemon (fork of upstream Git) */
1#define USE_THE_REPOSITORY_VARIABLE
2#define DISABLE_SIGN_COMPARE_WARNINGS
3
4#include "builtin.h"
5#include "abspath.h"
6#include "config.h"
7#include "dir.h"
8#include "environment.h"
9#include "gettext.h"
10#include "parse-options.h"
11#include "fsmonitor-ll.h"
12#include "fsmonitor-ipc.h"
13#include "fsmonitor-settings.h"
14#include "compat/fsmonitor/fsm-health.h"
15#include "compat/fsmonitor/fsm-listen.h"
16#include "fsmonitor--daemon.h"
17
18#include "simple-ipc.h"
19#include "khash.h"
20#include "run-command.h"
21#include "trace.h"
22#include "trace2.h"
23
/* Usage synopses printed for `git fsmonitor--daemon -h` / bad arguments. */
static const char * const builtin_fsmonitor__daemon_usage[] = {
	N_("git fsmonitor--daemon start [<options>]"),
	N_("git fsmonitor--daemon run [<options>]"),
	"git fsmonitor--daemon stop",
	"git fsmonitor--daemon status",
	NULL
};
31
#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
/*
 * Global state loaded from config.
 */
/* Number of Simple IPC worker threads used to service client requests. */
#define FSMONITOR__IPC_THREADS "fsmonitor.ipcthreads"
static int fsmonitor__ipc_threads = 8;

/* Startup timeout in seconds (consumed by code outside this chunk). */
#define FSMONITOR__START_TIMEOUT "fsmonitor.starttimeout"
static int fsmonitor__start_timeout_sec = 60;

/* Bool-or-int: whether startup announces itself (used outside this chunk). */
#define FSMONITOR__ANNOUNCE_STARTUP "fsmonitor.announcestartup"
static int fsmonitor__announce_startup = 0;
44
45static int fsmonitor_config(const char *var, const char *value,
46 const struct config_context *ctx, void *cb)
47{
48 if (!strcmp(var, FSMONITOR__IPC_THREADS)) {
49 int i = git_config_int(var, value, ctx->kvi);
50 if (i < 1)
51 return error(_("value of '%s' out of range: %d"),
52 FSMONITOR__IPC_THREADS, i);
53 fsmonitor__ipc_threads = i;
54 return 0;
55 }
56
57 if (!strcmp(var, FSMONITOR__START_TIMEOUT)) {
58 int i = git_config_int(var, value, ctx->kvi);
59 if (i < 0)
60 return error(_("value of '%s' out of range: %d"),
61 FSMONITOR__START_TIMEOUT, i);
62 fsmonitor__start_timeout_sec = i;
63 return 0;
64 }
65
66 if (!strcmp(var, FSMONITOR__ANNOUNCE_STARTUP)) {
67 int is_bool;
68 int i = git_config_bool_or_int(var, value, ctx->kvi, &is_bool);
69 if (i < 0)
70 return error(_("value of '%s' not bool or int: %d"),
71 var, i);
72 fsmonitor__announce_startup = i;
73 return 0;
74 }
75
76 return git_default_config(var, value, ctx, cb);
77}
78
79/*
80 * Acting as a CLIENT.
81 *
82 * Send a "quit" command to the `git-fsmonitor--daemon` (if running)
83 * and wait for it to shutdown.
84 */
85static int do_as_client__send_stop(void)
86{
87 struct strbuf answer = STRBUF_INIT;
88 int ret;
89
90 ret = fsmonitor_ipc__send_command("quit", &answer);
91
92 /* The quit command does not return any response data. */
93 strbuf_release(&answer);
94
95 if (ret)
96 return ret;
97
98 trace2_region_enter("fsm_client", "polling-for-daemon-exit", NULL);
99 while (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
100 sleep_millisec(50);
101 trace2_region_leave("fsm_client", "polling-for-daemon-exit", NULL);
102
103 return 0;
104}
105
106static int do_as_client__status(void)
107{
108 enum ipc_active_state state = fsmonitor_ipc__get_state();
109
110 switch (state) {
111 case IPC_STATE__LISTENING:
112 printf(_("fsmonitor-daemon is watching '%s'\n"),
113 the_repository->worktree);
114 return 0;
115
116 default:
117 printf(_("fsmonitor-daemon is not watching '%s'\n"),
118 the_repository->worktree);
119 return 1;
120 }
121}
122
/* Outcome of a client thread's wait on its cookie file. */
enum fsmonitor_cookie_item_result {
	FCIR_ERROR = -1, /* could not create cookie file ? */
	FCIR_INIT,       /* created; still waiting for the listener to see it */
	FCIR_SEEN,       /* listener thread observed the cookie event */
	FCIR_ABORT,      /* daemon lost sync; waiter must give up */
};

/* One pending cookie wait, hashed by cookie filename. */
struct fsmonitor_cookie_item {
	struct hashmap_entry entry;
	char *name;      /* owned; "<pid>-<seq>" cookie filename */
	enum fsmonitor_cookie_item_result result;
};
135
136static int cookies_cmp(const void *data UNUSED,
137 const struct hashmap_entry *he1,
138 const struct hashmap_entry *he2, const void *keydata)
139{
140 const struct fsmonitor_cookie_item *a =
141 container_of(he1, const struct fsmonitor_cookie_item, entry);
142 const struct fsmonitor_cookie_item *b =
143 container_of(he2, const struct fsmonitor_cookie_item, entry);
144
145 return strcmp(a->name, keydata ? keydata : b->name);
146}
147
/*
 * Create a unique cookie file inside the watched area and block this
 * client thread (releasing state->main_lock while waiting on the
 * condition variable) until the listener thread reports seeing the
 * corresponding filesystem event, or until we are aborted.
 *
 * Returns the final fsmonitor_cookie_item_result for this cookie.
 */
static enum fsmonitor_cookie_item_result with_lock__wait_for_cookie(
	struct fsmonitor_daemon_state *state)
{
	/* assert current thread holding state->main_lock */

	int fd;
	struct fsmonitor_cookie_item *cookie;
	struct strbuf cookie_pathname = STRBUF_INIT;
	struct strbuf cookie_filename = STRBUF_INIT;
	enum fsmonitor_cookie_item_result result;
	int my_cookie_seq;

	CALLOC_ARRAY(cookie, 1);

	/* per-process sequence number makes each cookie name unique */
	my_cookie_seq = state->cookie_seq++;

	strbuf_addf(&cookie_filename, "%i-%i", getpid(), my_cookie_seq);

	strbuf_addbuf(&cookie_pathname, &state->path_cookie_prefix);
	strbuf_addbuf(&cookie_pathname, &cookie_filename);

	/* ownership of the filename buffer moves to the cookie item */
	cookie->name = strbuf_detach(&cookie_filename, NULL);
	cookie->result = FCIR_INIT;
	hashmap_entry_init(&cookie->entry, strhash(cookie->name));

	hashmap_add(&state->cookies, &cookie->entry);

	trace_printf_key(&trace_fsmonitor, "cookie-wait: '%s' '%s'",
			 cookie->name, cookie_pathname.buf);

	/*
	 * Create the cookie file on disk and then wait for a notification
	 * that the listener thread has seen it.
	 */
	fd = open(cookie_pathname.buf, O_WRONLY | O_CREAT | O_EXCL, 0600);
	if (fd < 0) {
		error_errno(_("could not create fsmonitor cookie '%s'"),
			    cookie->name);

		cookie->result = FCIR_ERROR;
		goto done;
	}

	/*
	 * Technically, close() and unlink() can fail, but we don't
	 * care here.  We only created the file to trigger a watch
	 * event from the FS so that we know when we're up to date.
	 */
	close(fd);
	unlink(cookie_pathname.buf);

	/*
	 * Technically, this is an infinite wait (well, unless another
	 * thread sends us an abort).  I'd like to change this to
	 * use `pthread_cond_timedwait()` and return an error/timeout
	 * and let the caller do the trivial response thing, but we
	 * don't have that routine in our thread-utils.
	 *
	 * After extensive beta testing I'm not really worried about
	 * this.  Also note that the above open() and unlink() calls
	 * will cause at least two FS events on that path, so the odds
	 * of getting stuck are pretty slim.
	 */
	while (cookie->result == FCIR_INIT)
		pthread_cond_wait(&state->cookies_cond,
				  &state->main_lock);

done:
	hashmap_remove(&state->cookies, &cookie->entry, NULL);

	result = cookie->result;

	free(cookie->name);
	free(cookie);
	strbuf_release(&cookie_pathname);
	/* cookie_filename was detached above, so it needs no release */

	return result;
}
226
227/*
228 * Mark these cookies as _SEEN and wake up the corresponding client threads.
229 */
230static void with_lock__mark_cookies_seen(struct fsmonitor_daemon_state *state,
231 const struct string_list *cookie_names)
232{
233 /* assert current thread holding state->main_lock */
234
235 int k;
236 int nr_seen = 0;
237
238 for (k = 0; k < cookie_names->nr; k++) {
239 struct fsmonitor_cookie_item key;
240 struct fsmonitor_cookie_item *cookie;
241
242 key.name = cookie_names->items[k].string;
243 hashmap_entry_init(&key.entry, strhash(key.name));
244
245 cookie = hashmap_get_entry(&state->cookies, &key, entry, NULL);
246 if (cookie) {
247 trace_printf_key(&trace_fsmonitor, "cookie-seen: '%s'",
248 cookie->name);
249 cookie->result = FCIR_SEEN;
250 nr_seen++;
251 }
252 }
253
254 if (nr_seen)
255 pthread_cond_broadcast(&state->cookies_cond);
256}
257
258/*
259 * Set _ABORT on all pending cookies and wake up all client threads.
260 */
261static void with_lock__abort_all_cookies(struct fsmonitor_daemon_state *state)
262{
263 /* assert current thread holding state->main_lock */
264
265 struct hashmap_iter iter;
266 struct fsmonitor_cookie_item *cookie;
267 int nr_aborted = 0;
268
269 hashmap_for_each_entry(&state->cookies, &iter, cookie, entry) {
270 trace_printf_key(&trace_fsmonitor, "cookie-abort: '%s'",
271 cookie->name);
272 cookie->result = FCIR_ABORT;
273 nr_aborted++;
274 }
275
276 if (nr_aborted)
277 pthread_cond_broadcast(&state->cookies_cond);
278}
279
280/*
281 * Requests to and from a FSMonitor Protocol V2 provider use an opaque
282 * "token" as a virtual timestamp. Clients can request a summary of all
283 * created/deleted/modified files relative to a token. In the response,
284 * clients receive a new token for the next (relative) request.
285 *
286 *
287 * Token Format
288 * ============
289 *
290 * The contents of the token are private and provider-specific.
291 *
292 * For the built-in fsmonitor--daemon, we define a token as follows:
293 *
294 * "builtin" ":" <token_id> ":" <sequence_nr>
295 *
296 * The "builtin" prefix is used as a namespace to avoid conflicts
297 * with other providers (such as Watchman).
298 *
299 * The <token_id> is an arbitrary OPAQUE string, such as a GUID,
300 * UUID, or {timestamp,pid}. It is used to group all filesystem
301 * events that happened while the daemon was monitoring (and in-sync
302 * with the filesystem).
303 *
304 * Unlike FSMonitor Protocol V1, it is not defined as a timestamp
305 * and does not define less-than/greater-than relationships.
306 * (There are too many race conditions to rely on file system
307 * event timestamps.)
308 *
309 * The <sequence_nr> is a simple integer incremented whenever the
310 * daemon needs to make its state public. For example, if 1000 file
311 * system events come in, but no clients have requested the data,
312 * the daemon can continue to accumulate file changes in the same
313 * bin and does not need to advance the sequence number. However,
314 * as soon as a client does arrive, the daemon needs to start a new
315 * bin and increment the sequence number.
316 *
317 * The sequence number serves as the boundary between 2 sets
318 * of bins -- the older ones that the client has already seen
319 * and the newer ones that it hasn't.
320 *
321 * When a new <token_id> is created, the <sequence_nr> is reset to
322 * zero.
323 *
324 *
325 * About Token Ids
326 * ===============
327 *
328 * A new token_id is created:
329 *
330 * [1] each time the daemon is started.
331 *
332 * [2] any time that the daemon must re-sync with the filesystem
333 * (such as when the kernel drops or we miss events on a very
334 * active volume).
335 *
336 * [3] in response to a client "flush" command (for dropped event
337 * testing).
338 *
339 * When a new token_id is created, the daemon is free to discard all
340 * cached filesystem events associated with any previous token_ids.
341 * Events associated with a non-current token_id will never be sent
342 * to a client. A token_id change implicitly means that the daemon
 * has a gap in its event history.
344 *
345 * Therefore, clients that present a token with a stale (non-current)
346 * token_id will always be given a trivial response.
347 */
/* One token series: an opaque id plus the list of event batches. */
struct fsmonitor_token_data {
	struct strbuf token_id;                  /* opaque <token_id> string */
	struct fsmonitor_batch *batch_head;      /* newest batch (prepend side) */
	struct fsmonitor_batch *batch_tail;      /* oldest batch still retained */
	uint64_t client_ref_count;               /* readers walking the list without the lock */
};

/* One bin of filesystem events, identified by <sequence_nr>. */
struct fsmonitor_batch {
	struct fsmonitor_batch *next;            /* older batch */
	uint64_t batch_seq_nr;
	const char **interned_paths;             /* array owned; strings interned elsewhere */
	size_t nr, alloc;
	time_t pinned_time;                      /* nonzero once sent to a client (read-only) */
};
362
/*
 * Allocate a new token series with a fresh (unguessable) <token_id>
 * and a single empty batch at <seq_nr> zero.
 *
 * Under GIT_TEST_FSMONITOR_TOKEN the id is a predictable counter so
 * tests can exercise token-mismatch paths.
 */
static struct fsmonitor_token_data *fsmonitor_new_token_data(void)
{
	static int test_env_value = -1;
	static uint64_t flush_count = 0;
	struct fsmonitor_token_data *token;
	struct fsmonitor_batch *batch;

	CALLOC_ARRAY(token, 1);
	batch = fsmonitor_batch__new();

	strbuf_init(&token->token_id, 0);
	token->batch_head = batch;
	token->batch_tail = batch;
	token->client_ref_count = 0;

	if (test_env_value < 0)
		test_env_value = git_env_bool("GIT_TEST_FSMONITOR_TOKEN", 0);

	if (!test_env_value) {
		struct timeval tv;
		struct tm tm;
		time_t secs;

		/* id = <flush-count>.<pid>.<UTC timestamp>: unique per daemon run */
		gettimeofday(&tv, NULL);
		secs = tv.tv_sec;
		gmtime_r(&secs, &tm);

		strbuf_addf(&token->token_id,
			    "%"PRIu64".%d.%4d%02d%02dT%02d%02d%02d.%06ldZ",
			    flush_count++,
			    getpid(),
			    tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec,
			    (long)tv.tv_usec);
	} else {
		strbuf_addf(&token->token_id, "test_%08x", test_env_value++);
	}

	/*
	 * We created a new <token_id> and are starting a new series
	 * of tokens with a zero <seq_nr>.
	 *
	 * Since clients cannot guess our new (non test) <token_id>
	 * they will always receive a trivial response (because of the
	 * mismatch on the <token_id>).  The trivial response will
	 * tell them our new <token_id> so that subsequent requests
	 * will be relative to our new series.  (And when sending that
	 * response, we pin the current head of the batch list.)
	 *
	 * Even if the client correctly guesses the <token_id>, their
	 * request of "builtin:<token_id>:0" asks for all changes MORE
	 * RECENT than batch/bin 0.
	 *
	 * This implies that it is a waste to accumulate paths in the
	 * initial batch/bin (because they will never be transmitted).
	 *
	 * So the daemon could be running for days and watching the
	 * file system, but doesn't need to actually accumulate any
	 * paths UNTIL we need to set a reference point for a later
	 * relative request.
	 *
	 * However, it is very useful for testing to always have a
	 * reference point set.  Pin batch 0 to force early file system
	 * events to accumulate.
	 */
	if (test_env_value)
		batch->pinned_time = time(NULL);

	return token;
}
433
/* Allocate an empty, zero-initialized event batch. */
struct fsmonitor_batch *fsmonitor_batch__new(void)
{
	struct fsmonitor_batch *b;

	CALLOC_ARRAY(b, 1);
	return b;
}
442
443void fsmonitor_batch__free_list(struct fsmonitor_batch *batch)
444{
445 while (batch) {
446 struct fsmonitor_batch *next = batch->next;
447
448 /*
449 * The actual strings within the array of this batch
450 * are interned, so we don't own them. We only own
451 * the array.
452 */
453 free(batch->interned_paths);
454 free(batch);
455
456 batch = next;
457 }
458}
459
460void fsmonitor_batch__add_path(struct fsmonitor_batch *batch,
461 const char *path)
462{
463 const char *interned_path = strintern(path);
464
465 trace_printf_key(&trace_fsmonitor, "event: %s", interned_path);
466
467 ALLOC_GROW(batch->interned_paths, batch->nr + 1, batch->alloc);
468 batch->interned_paths[batch->nr++] = interned_path;
469}
470
471static void fsmonitor_batch__combine(struct fsmonitor_batch *batch_dest,
472 const struct fsmonitor_batch *batch_src)
473{
474 size_t k;
475
476 ALLOC_GROW(batch_dest->interned_paths,
477 batch_dest->nr + batch_src->nr + 1,
478 batch_dest->alloc);
479
480 for (k = 0; k < batch_src->nr; k++)
481 batch_dest->interned_paths[batch_dest->nr++] =
482 batch_src->interned_paths[k];
483}
484
485/*
486 * To keep the batch list from growing unbounded in response to filesystem
487 * activity, we try to truncate old batches from the end of the list as
488 * they become irrelevant.
489 *
490 * We assume that the .git/index will be updated with the most recent token
491 * any time the index is updated. And future commands will only ask for
492 * recent changes *since* that new token. So as tokens advance into the
493 * future, older batch items will never be requested/needed. So we can
494 * truncate them without loss of functionality.
495 *
496 * However, multiple commands may be talking to the daemon concurrently
497 * or perform a slow command, so a little "token skew" is possible.
498 * Therefore, we want this to be a little bit lazy and have a generous
499 * delay.
500 *
501 * The current reader thread walked backwards in time from `token->batch_head`
502 * back to `batch_marker` somewhere in the middle of the batch list.
503 *
504 * Let's walk backwards in time from that marker an arbitrary delay
505 * and truncate the list there. Note that these timestamps are completely
506 * artificial (based on when we pinned the batch item) and not on any
507 * filesystem activity.
508 *
509 * Return the obsolete portion of the list after we have removed it from
510 * the official list so that the caller can free it after leaving the lock.
511 */
512#define MY_TIME_DELAY_SECONDS (5 * 60) /* seconds */
513
514static struct fsmonitor_batch *with_lock__truncate_old_batches(
515 struct fsmonitor_daemon_state *state,
516 const struct fsmonitor_batch *batch_marker)
517{
518 /* assert current thread holding state->main_lock */
519
520 const struct fsmonitor_batch *batch;
521 struct fsmonitor_batch *remainder;
522
523 if (!batch_marker)
524 return NULL;
525
526 trace_printf_key(&trace_fsmonitor, "Truncate: mark (%"PRIu64",%"PRIu64")",
527 batch_marker->batch_seq_nr,
528 (uint64_t)batch_marker->pinned_time);
529
530 for (batch = batch_marker; batch; batch = batch->next) {
531 time_t t;
532
533 if (!batch->pinned_time) /* an overflow batch */
534 continue;
535
536 t = batch->pinned_time + MY_TIME_DELAY_SECONDS;
537 if (t > batch_marker->pinned_time) /* too close to marker */
538 continue;
539
540 goto truncate_past_here;
541 }
542
543 return NULL;
544
545truncate_past_here:
546 state->current_token_data->batch_tail = (struct fsmonitor_batch *)batch;
547
548 remainder = ((struct fsmonitor_batch *)batch)->next;
549 ((struct fsmonitor_batch *)batch)->next = NULL;
550
551 return remainder;
552}
553
554static void fsmonitor_free_token_data(struct fsmonitor_token_data *token)
555{
556 if (!token)
557 return;
558
559 assert(token->client_ref_count == 0);
560
561 strbuf_release(&token->token_id);
562
563 fsmonitor_batch__free_list(token->batch_head);
564
565 free(token);
566}
567
568/*
569 * Flush all of our cached data about the filesystem. Call this if we
570 * lose sync with the filesystem and miss some notification events.
571 *
572 * [1] If we are missing events, then we no longer have a complete
573 * history of the directory (relative to our current start token).
574 * We should create a new token and start fresh (as if we just
575 * booted up).
576 *
577 * [2] Some of those lost events may have been for cookie files. We
 *     should assume the worst and abort them rather than letting them starve.
579 *
580 * If there are no concurrent threads reading the current token data
581 * series, we can free it now. Otherwise, let the last reader free
582 * it.
583 *
584 * Either way, the old token data series is no longer associated with
585 * our state data.
586 */
static void with_lock__do_force_resync(struct fsmonitor_daemon_state *state)
{
	/* assert current thread holding state->main_lock */

	struct fsmonitor_token_data *free_me = NULL;
	struct fsmonitor_token_data *new_one = NULL;

	new_one = fsmonitor_new_token_data();

	/* only free the old series now if no reader thread still holds it */
	if (state->current_token_data->client_ref_count == 0)
		free_me = state->current_token_data;
	state->current_token_data = new_one;

	fsmonitor_free_token_data(free_me);

	/* any in-flight cookie waits can no longer be satisfied */
	with_lock__abort_all_cookies(state);
}
604
/* Public wrapper: take the main lock and force a token resync. */
void fsmonitor_force_resync(struct fsmonitor_daemon_state *state)
{
	pthread_mutex_lock(&state->main_lock);
	with_lock__do_force_resync(state);
	pthread_mutex_unlock(&state->main_lock);
}
611
612/*
613 * Format an opaque token string to send to the client.
614 */
615static void with_lock__format_response_token(
616 struct strbuf *response_token,
617 const struct strbuf *response_token_id,
618 const struct fsmonitor_batch *batch)
619{
620 /* assert current thread holding state->main_lock */
621
622 strbuf_reset(response_token);
623 strbuf_addf(response_token, "builtin:%s:%"PRIu64,
624 response_token_id->buf, batch->batch_seq_nr);
625}
626
627/*
628 * Parse an opaque token from the client.
629 * Returns -1 on error.
630 */
static int fsmonitor_parse_client_token(const char *buf_token,
					struct strbuf *requested_token_id,
					uint64_t *seq_nr)
{
	const char *p;
	char *p_end;

	strbuf_reset(requested_token_id);
	*seq_nr = 0;

	if (!skip_prefix(buf_token, "builtin:", &p))
		return -1;

	/* copy the opaque <token_id> up to the ':' separator */
	while (*p && *p != ':')
		strbuf_addch(requested_token_id, *p++);
	if (!*p++)
		return -1;

	/*
	 * <sequence_nr> must be a complete, non-empty number.
	 * (Previously "builtin:<id>:" with no digits was silently
	 * accepted as seq_nr 0 because strtoumax() consuming zero
	 * characters was not rejected.)
	 */
	*seq_nr = (uint64_t)strtoumax(p, &p_end, 10);
	if (p_end == p || *p_end)
		return -1;

	return 0;
}
655
656KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)
657
/*
 * Core of the IPC request handler (called from handle_client() on an
 * IPC worker thread): parse the client's command/token, synchronize
 * with the filesystem via a cookie file, and stream back either a
 * trivial response ("/") or the de-duplicated list of changed paths
 * newer than the client's token.
 *
 * Returns 0, or SIMPLE_IPC_QUIT to shut down the thread pool.
 */
static int do_handle_client(struct fsmonitor_daemon_state *state,
			    const char *command,
			    ipc_server_reply_cb *reply,
			    struct ipc_server_reply_data *reply_data)
{
	struct fsmonitor_token_data *token_data = NULL;
	struct strbuf response_token = STRBUF_INIT;
	struct strbuf requested_token_id = STRBUF_INIT;
	struct strbuf payload = STRBUF_INIT;
	uint64_t requested_oldest_seq_nr = 0;
	uint64_t total_response_len = 0;
	const char *p;
	const struct fsmonitor_batch *batch_head;
	const struct fsmonitor_batch *batch;
	struct fsmonitor_batch *remainder = NULL;
	intmax_t count = 0, duplicates = 0;
	kh_str_t *shown;
	int hash_ret;
	int do_trivial = 0;
	int do_flush = 0;
	int do_cookie = 0;
	enum fsmonitor_cookie_item_result cookie_result;

	/*
	 * We expect `command` to be of the form:
	 *
	 * <command> := quit NUL
	 *            | flush NUL
	 *            | <V1-time-since-epoch-ns> NUL
	 *            | <V2-opaque-fsmonitor-token> NUL
	 */

	if (!strcmp(command, "quit")) {
		/*
		 * A client has requested over the socket/pipe that the
		 * daemon shutdown.
		 *
		 * Tell the IPC thread pool to shutdown (which completes
		 * the await in the main thread (which can stop the
		 * fsmonitor listener thread)).
		 *
		 * There is no reply to the client.
		 */
		return SIMPLE_IPC_QUIT;

	} else if (!strcmp(command, "flush")) {
		/*
		 * Flush all of our cached data and generate a new token
		 * just like if we lost sync with the filesystem.
		 *
		 * Then send a trivial response using the new token.
		 */
		do_flush = 1;
		do_trivial = 1;

	} else if (!skip_prefix(command, "builtin:", &p)) {
		/* assume V1 timestamp or garbage */

		char *p_end;

		strtoumax(command, &p_end, 10);
		trace_printf_key(&trace_fsmonitor,
				 ((*p_end) ?
				  "fsmonitor: invalid command line '%s'" :
				  "fsmonitor: unsupported V1 protocol '%s'"),
				 command);
		do_trivial = 1;
		do_cookie = 1;

	} else {
		/* We have "builtin:*" */
		if (fsmonitor_parse_client_token(command, &requested_token_id,
						 &requested_oldest_seq_nr)) {
			trace_printf_key(&trace_fsmonitor,
					 "fsmonitor: invalid V2 protocol token '%s'",
					 command);
			do_trivial = 1;
			do_cookie = 1;

		} else {
			/*
			 * We have a V2 valid token:
			 *     "builtin:<token_id>:<seq_nr>"
			 */
			do_cookie = 1;
		}
	}

	pthread_mutex_lock(&state->main_lock);

	if (!state->current_token_data)
		BUG("fsmonitor state does not have a current token");

	/*
	 * Write a cookie file inside the directory being watched in
	 * an effort to flush out existing filesystem events that we
	 * actually care about.  Suspend this client thread until we
	 * see the filesystem events for this cookie file.
	 *
	 * Creating the cookie lets us guarantee that our FS listener
	 * thread has drained the kernel queue and we are caught up
	 * with the kernel.
	 *
	 * If we cannot create the cookie (or otherwise guarantee that
	 * we are caught up), we send a trivial response.  We have to
	 * assume that there might be some very, very recent activity
	 * on the FS still in flight.
	 */
	if (do_cookie) {
		cookie_result = with_lock__wait_for_cookie(state);
		if (cookie_result != FCIR_SEEN) {
			error(_("fsmonitor: cookie_result '%d' != SEEN"),
			      cookie_result);
			do_trivial = 1;
		}
	}

	if (do_flush)
		with_lock__do_force_resync(state);

	/*
	 * We mark the current head of the batch list as "pinned" so
	 * that the listener thread will treat this item as read-only
	 * (and prevent any more paths from being added to it) from
	 * now on.
	 */
	token_data = state->current_token_data;
	batch_head = token_data->batch_head;
	((struct fsmonitor_batch *)batch_head)->pinned_time = time(NULL);

	/*
	 * FSMonitor Protocol V2 requires that we send a response header
	 * with a "new current token" and then all of the paths that changed
	 * since the "requested token".  We send the seq_nr of the just-pinned
	 * head batch so that future requests from a client will be relative
	 * to it.
	 */
	with_lock__format_response_token(&response_token,
					 &token_data->token_id, batch_head);

	/* the +1 includes the trailing NUL in the reply */
	reply(reply_data, response_token.buf, response_token.len + 1);
	total_response_len += response_token.len + 1;

	trace2_data_string("fsmonitor", the_repository, "response/token",
			   response_token.buf);
	trace_printf_key(&trace_fsmonitor, "response token: %s",
			 response_token.buf);

	if (!do_trivial) {
		if (strcmp(requested_token_id.buf, token_data->token_id.buf)) {
			/*
			 * The client last spoke to a different daemon
			 * instance -OR- the daemon had to resync with
			 * the filesystem (and lost events), so reject.
			 */
			trace2_data_string("fsmonitor", the_repository,
					   "response/token", "different");
			do_trivial = 1;

		} else if (requested_oldest_seq_nr <
			   token_data->batch_tail->batch_seq_nr) {
			/*
			 * The client wants older events than we have for
			 * this token_id.  This means that the end of our
			 * batch list was truncated and we cannot give the
			 * client a complete snapshot relative to their
			 * request.
			 */
			trace_printf_key(&trace_fsmonitor,
					 "client requested truncated data");
			do_trivial = 1;
		}
	}

	if (do_trivial) {
		pthread_mutex_unlock(&state->main_lock);

		/* trivial response: "/" tells the client to do a full scan */
		reply(reply_data, "/", 2);

		trace2_data_intmax("fsmonitor", the_repository,
				   "response/trivial", 1);

		goto cleanup;
	}

	/*
	 * We're going to hold onto a pointer to the current
	 * token-data while we walk the list of batches of files.
	 * During this time, we will NOT be under the lock.
	 * So we ref-count it.
	 *
	 * This allows the listener thread to continue prepending
	 * new batches of items to the token-data (which we'll ignore).
	 *
	 * AND it allows the listener thread to do a token-reset
	 * (and install a new `current_token_data`).
	 */
	token_data->client_ref_count++;

	pthread_mutex_unlock(&state->main_lock);

	/*
	 * The client request is relative to the token that they sent,
	 * so walk the batch list backwards from the current head back
	 * to the batch (sequence number) they named.
	 *
	 * We use khash to de-dup the list of pathnames.
	 *
	 * NEEDSWORK: each batch contains a list of interned strings,
	 * so we only need to do pointer comparisons here to build the
	 * hash table.  Currently, we're still comparing the string
	 * values.
	 */
	shown = kh_init_str();
	for (batch = batch_head;
	     batch && batch->batch_seq_nr > requested_oldest_seq_nr;
	     batch = batch->next) {
		size_t k;

		for (k = 0; k < batch->nr; k++) {
			const char *s = batch->interned_paths[k];
			size_t s_len;

			if (kh_get_str(shown, s) != kh_end(shown))
				duplicates++;
			else {
				kh_put_str(shown, s, &hash_ret);

				trace_printf_key(&trace_fsmonitor,
						 "send[%"PRIuMAX"]: %s",
						 count, s);

				/* Each path gets written with a trailing NUL */
				s_len = strlen(s) + 1;

				/* flush the payload buffer before it exceeds the packet limit */
				if (payload.len + s_len >=
				    LARGE_PACKET_DATA_MAX) {
					reply(reply_data, payload.buf,
					      payload.len);
					total_response_len += payload.len;
					strbuf_reset(&payload);
				}

				strbuf_add(&payload, s, s_len);
				count++;
			}
		}
	}

	if (payload.len) {
		reply(reply_data, payload.buf, payload.len);
		total_response_len += payload.len;
	}

	kh_release_str(shown);

	pthread_mutex_lock(&state->main_lock);

	if (token_data->client_ref_count > 0)
		token_data->client_ref_count--;

	if (token_data->client_ref_count == 0) {
		if (token_data != state->current_token_data) {
			/*
			 * The listener thread did a token-reset while we were
			 * walking the batch list.  Therefore, this token is
			 * stale and can be discarded completely.  If we are
			 * the last reader thread using this token, we own
			 * that work.
			 */
			fsmonitor_free_token_data(token_data);
		} else if (batch) {
			/*
			 * We are holding the lock and are the only
			 * reader of the ref-counted portion of the
			 * list, so we get the honor of seeing if the
			 * list can be truncated to save memory.
			 *
			 * The main loop did not walk to the end of the
			 * list, so this batch is the first item in the
			 * batch-list that is older than the requested
			 * end-point sequence number.  See if the tail
			 * end of the list is obsolete.
			 */
			remainder = with_lock__truncate_old_batches(state,
								    batch);
		}
	}

	pthread_mutex_unlock(&state->main_lock);

	/* free the truncated tail outside the lock */
	if (remainder)
		fsmonitor_batch__free_list(remainder);

	trace2_data_intmax("fsmonitor", the_repository, "response/length", total_response_len);
	trace2_data_intmax("fsmonitor", the_repository, "response/count/files", count);
	trace2_data_intmax("fsmonitor", the_repository, "response/count/duplicates", duplicates);

cleanup:
	strbuf_release(&response_token);
	strbuf_release(&requested_token_id);
	strbuf_release(&payload);

	return 0;
}
963
/* IPC thread-pool callback; type-checked against the expected signature. */
static ipc_server_application_cb handle_client;

/*
 * Entry point for each IPC request: validate the message, wrap the
 * real work (do_handle_client) in a trace2 region, and return its
 * result to the IPC layer.
 */
static int handle_client(void *data,
			 const char *command, size_t command_len,
			 ipc_server_reply_cb *reply,
			 struct ipc_server_reply_data *reply_data)
{
	struct fsmonitor_daemon_state *state = data;
	int result;

	/*
	 * The Simple IPC API now supports {char*, len} arguments, but
	 * FSMonitor always uses proper null-terminated strings, so
	 * we can ignore the command_len argument. (Trust, but verify.)
	 */
	if (command_len != strlen(command))
		BUG("FSMonitor assumes text messages");

	trace_printf_key(&trace_fsmonitor, "requested token: %s", command);

	trace2_region_enter("fsmonitor", "handle_client", the_repository);
	trace2_data_string("fsmonitor", the_repository, "request", command);

	result = do_handle_client(state, command, reply, reply_data);

	trace2_region_leave("fsmonitor", "handle_client", the_repository);

	return result;
}
993
994#define FSMONITOR_DIR "fsmonitor--daemon"
995#define FSMONITOR_COOKIE_DIR "cookies"
996#define FSMONITOR_COOKIE_PREFIX (FSMONITOR_DIR "/" FSMONITOR_COOKIE_DIR "/")
997
998enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
999 const char *rel)
1000{
1001 if (fspathncmp(rel, ".git", 4))
1002 return IS_WORKDIR_PATH;
1003 rel += 4;
1004
1005 if (!*rel)
1006 return IS_DOT_GIT;
1007 if (*rel != '/')
1008 return IS_WORKDIR_PATH; /* e.g. .gitignore */
1009 rel++;
1010
1011 if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
1012 strlen(FSMONITOR_COOKIE_PREFIX)))
1013 return IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX;
1014
1015 return IS_INSIDE_DOT_GIT;
1016}
1017
1018enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
1019 const char *rel)
1020{
1021 if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
1022 strlen(FSMONITOR_COOKIE_PREFIX)))
1023 return IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX;
1024
1025 return IS_INSIDE_GITDIR;
1026}
1027
1028static enum fsmonitor_path_type try_classify_workdir_abs_path(
1029 struct fsmonitor_daemon_state *state,
1030 const char *path)
1031{
1032 const char *rel;
1033
1034 if (fspathncmp(path, state->path_worktree_watch.buf,
1035 state->path_worktree_watch.len))
1036 return IS_OUTSIDE_CONE;
1037
1038 rel = path + state->path_worktree_watch.len;
1039
1040 if (!*rel)
1041 return IS_WORKDIR_PATH; /* it is the root dir exactly */
1042 if (*rel != '/')
1043 return IS_OUTSIDE_CONE;
1044 rel++;
1045
1046 return fsmonitor_classify_path_workdir_relative(rel);
1047}
1048
/*
 * Classify an absolute path against both watch roots: first the
 * worktree, then (when the gitdir is watched separately) the gitdir.
 */
enum fsmonitor_path_type fsmonitor_classify_path_absolute(
	struct fsmonitor_daemon_state *state,
	const char *path)
{
	const char *rel;
	enum fsmonitor_path_type t;

	t = try_classify_workdir_abs_path(state, path);
	/* with a single watch, the worktree result is authoritative */
	if (state->nr_paths_watching == 1)
		return t;
	if (t != IS_OUTSIDE_CONE)
		return t;

	/* second watch: check against the (separate) gitdir root */
	if (fspathncmp(path, state->path_gitdir_watch.buf,
		       state->path_gitdir_watch.len))
		return IS_OUTSIDE_CONE;

	rel = path + state->path_gitdir_watch.len;

	if (!*rel)
		return IS_GITDIR; /* it is the <gitdir> exactly */
	if (*rel != '/')
		return IS_OUTSIDE_CONE;
	rel++;

	return fsmonitor_classify_path_gitdir_relative(rel);
}
1076
1077/*
1078 * We try to combine small batches at the front of the batch-list to avoid
1079 * having a long list. This hopefully makes it a little easier when we want
1080 * to truncate and maintain the list. However, we don't want the paths array
1081 * to just keep growing and growing with realloc, so we insert an arbitrary
1082 * limit.
1083 */
1084#define MY_COMBINE_LIMIT (1024)
1085
/*
 * Publish a batch of filesystem events and/or mark cookie files as
 * seen, under `state->main_lock`.
 *
 * Takes ownership of `batch` (when non-NULL): depending on the state
 * of the current head batch, the new batch is either prepended to the
 * token's batch list, folded into the head batch, or freed outright.
 * `cookie_names` is only read; any named cookies are marked as seen.
 */
void fsmonitor_publish(struct fsmonitor_daemon_state *state,
		       struct fsmonitor_batch *batch,
		       const struct string_list *cookie_names)
{
	if (!batch && !cookie_names->nr)
		return;

	pthread_mutex_lock(&state->main_lock);

	if (batch) {
		struct fsmonitor_batch *head;

		head = state->current_token_data->batch_head;
		if (!head) {
			BUG("token does not have batch");
		} else if (head->pinned_time) {
			/*
			 * We cannot alter the current batch list
			 * because:
			 *
			 * [a] it is being transmitted to at least one
			 * client and the handle_client() thread has a
			 * ref-count, but not a lock on the batch list
			 * starting with this item.
			 *
			 * [b] it has been transmitted in the past to
			 * at least one client such that future
			 * requests are relative to this head batch.
			 *
			 * So, we can only prepend a new batch onto
			 * the front of the list.
			 */
			batch->batch_seq_nr = head->batch_seq_nr + 1;
			batch->next = head;
			state->current_token_data->batch_head = batch;
		} else if (!head->batch_seq_nr) {
			/*
			 * Batch 0 is unpinned.  See the note in
			 * `fsmonitor_new_token_data()` about why we
			 * don't need to accumulate these paths.
			 */
			fsmonitor_batch__free_list(batch);
		} else if (head->nr + batch->nr > MY_COMBINE_LIMIT) {
			/*
			 * The head batch in the list has never been
			 * transmitted to a client, but folding the
			 * contents of the new batch onto it would
			 * exceed our arbitrary limit, so just prepend
			 * the new batch onto the list.
			 */
			batch->batch_seq_nr = head->batch_seq_nr + 1;
			batch->next = head;
			state->current_token_data->batch_head = batch;
		} else {
			/*
			 * We are free to add the paths in the given
			 * batch onto the end of the current head batch.
			 */
			fsmonitor_batch__combine(head, batch);
			fsmonitor_batch__free_list(batch);
		}
	}

	if (cookie_names->nr)
		with_lock__mark_cookies_seen(state, cookie_names);

	pthread_mutex_unlock(&state->main_lock);
}
1154
1155static void *fsm_health__thread_proc(void *_state)
1156{
1157 struct fsmonitor_daemon_state *state = _state;
1158
1159 trace2_thread_start("fsm-health");
1160
1161 fsm_health__loop(state);
1162
1163 trace2_thread_exit();
1164 return NULL;
1165}
1166
1167static void *fsm_listen__thread_proc(void *_state)
1168{
1169 struct fsmonitor_daemon_state *state = _state;
1170
1171 trace2_thread_start("fsm-listen");
1172
1173 trace_printf_key(&trace_fsmonitor, "Watching: worktree '%s'",
1174 state->path_worktree_watch.buf);
1175 if (state->nr_paths_watching > 1)
1176 trace_printf_key(&trace_fsmonitor, "Watching: gitdir '%s'",
1177 state->path_gitdir_watch.buf);
1178
1179 fsm_listen__loop(state);
1180
1181 pthread_mutex_lock(&state->main_lock);
1182 if (state->current_token_data &&
1183 state->current_token_data->client_ref_count == 0)
1184 fsmonitor_free_token_data(state->current_token_data);
1185 state->current_token_data = NULL;
1186 pthread_mutex_unlock(&state->main_lock);
1187
1188 trace2_thread_exit();
1189 return NULL;
1190}
1191
/*
 * Start the IPC thread pool, the filesystem listener thread, and the
 * health thread; then block until the IPC server shuts down and join
 * the helper threads.  Returns 0 on success, otherwise the first
 * error code recorded (local error, listener error, or health error,
 * in that priority order).
 */
static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
{
	struct ipc_server_opts ipc_opts = {
		.nr_threads = fsmonitor__ipc_threads,

		/*
		 * We know that there are no other active threads yet,
		 * so we can let the IPC layer temporarily chdir() if
		 * it needs to when creating the server side of the
		 * Unix domain socket.
		 */
		.uds_disallow_chdir = 0
	};
	int health_started = 0;
	int listener_started = 0;
	int err = 0;

	/*
	 * Start the IPC thread pool before the we've started the file
	 * system event listener thread so that we have the IPC handle
	 * before we need it.
	 */
	if (ipc_server_init_async(&state->ipc_server_data,
				  state->path_ipc.buf, &ipc_opts,
				  handle_client, state))
		return error_errno(
			_("could not start IPC thread pool on '%s'"),
			state->path_ipc.buf);

	/*
	 * Start the fsmonitor listener thread to collect filesystem
	 * events.
	 */
	if (pthread_create(&state->listener_thread, NULL,
			   fsm_listen__thread_proc, state)) {
		ipc_server_stop_async(state->ipc_server_data);
		err = error(_("could not start fsmonitor listener thread"));
		goto cleanup;
	}
	listener_started = 1;

	/*
	 * Start the health thread to watch over our process.
	 */
	if (pthread_create(&state->health_thread, NULL,
			   fsm_health__thread_proc, state)) {
		ipc_server_stop_async(state->ipc_server_data);
		err = error(_("could not start fsmonitor health thread"));
		goto cleanup;
	}
	health_started = 1;

	/*
	 * The daemon is now fully functional in background threads.
	 * Our primary thread should now just wait while the threads
	 * do all the work.
	 */
cleanup:
	/*
	 * Wait for the IPC thread pool to shutdown (whether by client
	 * request, from filesystem activity, or an error).
	 */
	ipc_server_await(state->ipc_server_data);

	/*
	 * The fsmonitor listener thread may have received a shutdown
	 * event from the IPC thread pool, but it doesn't hurt to tell
	 * it again.  And wait for it to shutdown.
	 */
	if (listener_started) {
		fsm_listen__stop_async(state);
		pthread_join(state->listener_thread, NULL);
	}

	if (health_started) {
		fsm_health__stop_async(state);
		pthread_join(state->health_thread, NULL);
	}

	if (err)
		return err;
	if (state->listen_error_code)
		return state->listen_error_code;
	if (state->health_error_code)
		return state->health_error_code;
	return 0;
}
1279
/*
 * Build the complete daemon state (watch paths, cookie directory
 * prefix, IPC path), construct the platform-specific listener and
 * health backends, cd out of the worktree, and run the daemon until
 * shutdown.  Returns 0 on success or a non-zero error code; all
 * resources are released on the way out.
 */
static int fsmonitor_run_daemon(void)
{
	struct fsmonitor_daemon_state state;
	const char *home;
	int err;

	memset(&state, 0, sizeof(state));

	hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
	pthread_mutex_init(&state.main_lock, NULL);
	pthread_cond_init(&state.cookies_cond, NULL);
	state.listen_error_code = 0;
	state.health_error_code = 0;
	state.current_token_data = fsmonitor_new_token_data();

	/* Prepare to (recursively) watch the <worktree-root> directory. */
	strbuf_init(&state.path_worktree_watch, 0);
	strbuf_addstr(&state.path_worktree_watch,
		      absolute_path(repo_get_work_tree(the_repository)));
	state.nr_paths_watching = 1;

	strbuf_init(&state.alias.alias, 0);
	strbuf_init(&state.alias.points_to, 0);
	if ((err = fsmonitor__get_alias(state.path_worktree_watch.buf, &state.alias)))
		goto done;

	/*
	 * We create and delete cookie files somewhere inside the .git
	 * directory to help us keep sync with the file system.  If
	 * ".git" is not a directory, then <gitdir> is not inside the
	 * cone of <worktree-root>, so set up a second watch to watch
	 * the <gitdir> so that we get events for the cookie files.
	 */
	strbuf_init(&state.path_gitdir_watch, 0);
	strbuf_addbuf(&state.path_gitdir_watch, &state.path_worktree_watch);
	strbuf_addstr(&state.path_gitdir_watch, "/.git");
	if (!is_directory(state.path_gitdir_watch.buf)) {
		strbuf_reset(&state.path_gitdir_watch);
		strbuf_addstr(&state.path_gitdir_watch,
			      absolute_path(repo_get_git_dir(the_repository)));
		strbuf_strip_suffix(&state.path_gitdir_watch, "/.");
		state.nr_paths_watching = 2;
	}

	/*
	 * We will write filesystem syncing cookie files into
	 * <gitdir>/<fsmonitor-dir>/<cookie-dir>/<pid>-<seq>.
	 *
	 * The extra layers of subdirectories here keep us from
	 * changing the mtime on ".git/" or ".git/foo/" when we create
	 * or delete cookie files.
	 *
	 * There have been problems with some IDEs that do a
	 * non-recursive watch of the ".git/" directory and run a
	 * series of commands any time something happens.
	 *
	 * For example, if we place our cookie files directly in
	 * ".git/" or ".git/foo/" then a `git status` (or similar
	 * command) from the IDE will cause a cookie file to be
	 * created in one of those dirs.  This causes the mtime of
	 * those dirs to change.  This triggers the IDE's watch
	 * notification.  This triggers the IDE to run those commands
	 * again.  And the process repeats and the machine never goes
	 * idle.
	 *
	 * Adding the extra layers of subdirectories prevents the
	 * mtime of ".git/" and ".git/foo" from changing when a
	 * cookie file is created.
	 */
	strbuf_init(&state.path_cookie_prefix, 0);
	strbuf_addbuf(&state.path_cookie_prefix, &state.path_gitdir_watch);

	/*
	 * NOTE(review): mkdir() return values are ignored here —
	 * presumably EEXIST is expected on restart and any real
	 * failure surfaces later when a cookie file cannot be
	 * created; confirm that intent before changing this.
	 */
	strbuf_addch(&state.path_cookie_prefix, '/');
	strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_DIR);
	mkdir(state.path_cookie_prefix.buf, 0777);

	strbuf_addch(&state.path_cookie_prefix, '/');
	strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_COOKIE_DIR);
	mkdir(state.path_cookie_prefix.buf, 0777);

	strbuf_addch(&state.path_cookie_prefix, '/');

	/*
	 * We create a named-pipe or unix domain socket inside of the
	 * ".git" directory.  (Well, on Windows, we base our named
	 * pipe in the NPFS on the absolute path of the git
	 * directory.)
	 */
	strbuf_init(&state.path_ipc, 0);
	strbuf_addstr(&state.path_ipc,
		absolute_path(fsmonitor_ipc__get_path(the_repository)));

	/*
	 * Confirm that we can create platform-specific resources for the
	 * filesystem listener before we bother starting all the threads.
	 */
	if (fsm_listen__ctor(&state)) {
		err = error(_("could not initialize listener thread"));
		goto done;
	}

	if (fsm_health__ctor(&state)) {
		err = error(_("could not initialize health thread"));
		goto done;
	}

	/*
	 * CD out of the worktree root directory.
	 *
	 * The common Git startup mechanism causes our CWD to be the
	 * root of the worktree.  On Windows, this causes our process
	 * to hold a locked handle on the CWD.  This prevents the
	 * worktree from being moved or deleted while the daemon is
	 * running.
	 *
	 * We assume that our FS and IPC listener threads have either
	 * opened all of the handles that they need or will do
	 * everything using absolute paths.
	 */
	home = getenv("HOME");
	if (home && *home && chdir(home))
		die_errno(_("could not cd home '%s'"), home);

	err = fsmonitor_run_daemon_1(&state);

done:
	pthread_cond_destroy(&state.cookies_cond);
	pthread_mutex_destroy(&state.main_lock);
	fsm_listen__dtor(&state);
	fsm_health__dtor(&state);

	ipc_server_free(state.ipc_server_data);

	strbuf_release(&state.path_worktree_watch);
	strbuf_release(&state.path_gitdir_watch);
	strbuf_release(&state.path_cookie_prefix);
	strbuf_release(&state.path_ipc);
	strbuf_release(&state.alias.alias);
	strbuf_release(&state.alias.points_to);

	return err;
}
1422
1423static int try_to_run_foreground_daemon(int detach_console MAYBE_UNUSED)
1424{
1425 /*
1426 * Technically, we don't need to probe for an existing daemon
1427 * process, since we could just call `fsmonitor_run_daemon()`
1428 * and let it fail if the pipe/socket is busy.
1429 *
1430 * However, this method gives us a nicer error message for a
1431 * common error case.
1432 */
1433 if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
1434 die(_("fsmonitor--daemon is already running '%s'"),
1435 the_repository->worktree);
1436
1437 if (fsmonitor__announce_startup) {
1438 fprintf(stderr, _("running fsmonitor-daemon in '%s'\n"),
1439 the_repository->worktree);
1440 fflush(stderr);
1441 }
1442
1443#ifdef GIT_WINDOWS_NATIVE
1444 if (detach_console)
1445 FreeConsole();
1446#endif
1447
1448 return !!fsmonitor_run_daemon();
1449}
1450
1451static start_bg_wait_cb bg_wait_cb;
1452
1453static int bg_wait_cb(const struct child_process *cp UNUSED,
1454 void *cb_data UNUSED)
1455{
1456 enum ipc_active_state s = fsmonitor_ipc__get_state();
1457
1458 switch (s) {
1459 case IPC_STATE__LISTENING:
1460 /* child is "ready" */
1461 return 0;
1462
1463 case IPC_STATE__NOT_LISTENING:
1464 case IPC_STATE__PATH_NOT_FOUND:
1465 /* give child more time */
1466 return 1;
1467
1468 default:
1469 case IPC_STATE__INVALID_PATH:
1470 case IPC_STATE__OTHER_ERROR:
1471 /* all the time in world won't help */
1472 return -1;
1473 }
1474}
1475
1476static int try_to_start_background_daemon(void)
1477{
1478 struct child_process cp = CHILD_PROCESS_INIT;
1479 enum start_bg_result sbgr;
1480
1481 /*
1482 * Before we try to create a background daemon process, see
1483 * if a daemon process is already listening. This makes it
1484 * easier for us to report an already-listening error to the
1485 * console, since our spawn/daemon can only report the success
1486 * of creating the background process (and not whether it
1487 * immediately exited).
1488 */
1489 if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
1490 die(_("fsmonitor--daemon is already running '%s'"),
1491 the_repository->worktree);
1492
1493 if (fsmonitor__announce_startup) {
1494 fprintf(stderr, _("starting fsmonitor-daemon in '%s'\n"),
1495 the_repository->worktree);
1496 fflush(stderr);
1497 }
1498
1499 cp.git_cmd = 1;
1500
1501 strvec_push(&cp.args, "fsmonitor--daemon");
1502 strvec_push(&cp.args, "run");
1503 strvec_push(&cp.args, "--detach");
1504 strvec_pushf(&cp.args, "--ipc-threads=%d", fsmonitor__ipc_threads);
1505
1506 cp.no_stdin = 1;
1507 cp.no_stdout = 1;
1508 cp.no_stderr = 1;
1509
1510 sbgr = start_bg_command(&cp, bg_wait_cb, NULL,
1511 fsmonitor__start_timeout_sec);
1512
1513 switch (sbgr) {
1514 case SBGR_READY:
1515 return 0;
1516
1517 default:
1518 case SBGR_ERROR:
1519 case SBGR_CB_ERROR:
1520 return error(_("daemon failed to start"));
1521
1522 case SBGR_TIMEOUT:
1523 return error(_("daemon not online yet"));
1524
1525 case SBGR_DIED:
1526 return error(_("daemon terminated"));
1527 }
1528}
1529
1530int cmd_fsmonitor__daemon(int argc,
1531 const char **argv,
1532 const char *prefix,
1533 struct repository *repo UNUSED)
1534{
1535 const char *subcmd;
1536 enum fsmonitor_reason reason;
1537 int detach_console = 0;
1538
1539 struct option options[] = {
1540 OPT_BOOL(0, "detach", &detach_console, N_("detach from console")),
1541 OPT_INTEGER(0, "ipc-threads",
1542 &fsmonitor__ipc_threads,
1543 N_("use <n> ipc worker threads")),
1544 OPT_INTEGER(0, "start-timeout",
1545 &fsmonitor__start_timeout_sec,
1546 N_("max seconds to wait for background daemon startup")),
1547
1548 OPT_END()
1549 };
1550
1551 repo_config(the_repository, fsmonitor_config, NULL);
1552
1553 argc = parse_options(argc, argv, prefix, options,
1554 builtin_fsmonitor__daemon_usage, 0);
1555 if (argc != 1)
1556 usage_with_options(builtin_fsmonitor__daemon_usage, options);
1557 subcmd = argv[0];
1558
1559 if (fsmonitor__ipc_threads < 1)
1560 die(_("invalid 'ipc-threads' value (%d)"),
1561 fsmonitor__ipc_threads);
1562
1563 prepare_repo_settings(the_repository);
1564 /*
1565 * If the repo is fsmonitor-compatible, explicitly set IPC-mode
1566 * (without bothering to load the `core.fsmonitor` config settings).
1567 *
1568 * If the repo is not compatible, the repo-settings will be set to
1569 * incompatible rather than IPC, so we can use one of the __get
1570 * routines to detect the discrepancy.
1571 */
1572 fsm_settings__set_ipc(the_repository);
1573
1574 reason = fsm_settings__get_reason(the_repository);
1575 if (reason > FSMONITOR_REASON_OK)
1576 die("%s",
1577 fsm_settings__get_incompatible_msg(the_repository,
1578 reason));
1579
1580 if (!strcmp(subcmd, "start"))
1581 return !!try_to_start_background_daemon();
1582
1583 if (!strcmp(subcmd, "run"))
1584 return !!try_to_run_foreground_daemon(detach_console);
1585
1586 if (!strcmp(subcmd, "stop"))
1587 return !!do_as_client__send_stop();
1588
1589 if (!strcmp(subcmd, "status"))
1590 return !!do_as_client__status();
1591
1592 die(_("Unhandled subcommand '%s'"), subcmd);
1593}
1594
1595#else
1596int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED)
1597{
1598 struct option options[] = {
1599 OPT_END()
1600 };
1601
1602 show_usage_with_options_if_asked(argc, argv,
1603 builtin_fsmonitor__daemon_usage, options);
1604
1605 die(_("fsmonitor--daemon not supported on this platform"));
1606}
1607#endif