Fork of Git: in-memory reference cache implementation (refs/ref-cache.c).
1#include "../git-compat-util.h"
2#include "../hash.h"
3#include "../refs.h"
4#include "../repository.h"
5#include "refs-internal.h"
6#include "ref-cache.h"
7#include "../iterator.h"
8
9void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
10{
11 ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
12 dir->entries[dir->nr++] = entry;
13 /* optimize for the case that entries are added in order */
14 if (dir->nr == 1 ||
15 (dir->nr == dir->sorted + 1 &&
16 strcmp(dir->entries[dir->nr - 2]->name,
17 dir->entries[dir->nr - 1]->name) < 0))
18 dir->sorted = dir->nr;
19}
20
21struct ref_dir *get_ref_dir(struct ref_entry *entry)
22{
23 struct ref_dir *dir;
24 assert(entry->flag & REF_DIR);
25 dir = &entry->u.subdir;
26 if (entry->flag & REF_INCOMPLETE) {
27 if (!dir->cache->fill_ref_dir)
28 BUG("incomplete ref_store without fill_ref_dir function");
29
30 dir->cache->fill_ref_dir(dir->cache->ref_store, dir, entry->name);
31 entry->flag &= ~REF_INCOMPLETE;
32 }
33 return dir;
34}
35
36struct ref_entry *create_ref_entry(const char *refname,
37 const char *referent,
38 const struct object_id *oid, int flag)
39{
40 struct ref_entry *ref;
41
42 FLEX_ALLOC_STR(ref, name, refname);
43 oidcpy(&ref->u.value.oid, oid);
44 ref->flag = flag;
45 ref->u.value.referent = xstrdup_or_null(referent);
46
47 return ref;
48}
49
50struct ref_cache *create_ref_cache(struct ref_store *refs,
51 fill_ref_dir_fn *fill_ref_dir)
52{
53 struct ref_cache *ret = xcalloc(1, sizeof(*ret));
54
55 ret->ref_store = refs;
56 ret->fill_ref_dir = fill_ref_dir;
57 ret->root = create_dir_entry(ret, "", 0);
58 return ret;
59}
60
61static void clear_ref_dir(struct ref_dir *dir);
62
63static void free_ref_entry(struct ref_entry *entry)
64{
65 if (entry->flag & REF_DIR) {
66 /*
67 * Do not use get_ref_dir() here, as that might
68 * trigger the reading of loose refs.
69 */
70 clear_ref_dir(&entry->u.subdir);
71 } else {
72 free(entry->u.value.referent);
73 }
74 free(entry);
75}
76
77void free_ref_cache(struct ref_cache *cache)
78{
79 if (!cache)
80 return;
81 free_ref_entry(cache->root);
82 free(cache);
83}
84
85/*
86 * Clear and free all entries in dir, recursively.
87 */
88static void clear_ref_dir(struct ref_dir *dir)
89{
90 int i;
91 for (i = 0; i < dir->nr; i++)
92 free_ref_entry(dir->entries[i]);
93 FREE_AND_NULL(dir->entries);
94 dir->sorted = dir->nr = dir->alloc = 0;
95}
96
97struct ref_entry *create_dir_entry(struct ref_cache *cache,
98 const char *dirname, size_t len)
99{
100 struct ref_entry *direntry;
101
102 FLEX_ALLOC_MEM(direntry, name, dirname, len);
103 direntry->u.subdir.cache = cache;
104 direntry->flag = REF_DIR | REF_INCOMPLETE;
105 return direntry;
106}
107
108static int ref_entry_cmp(const void *a, const void *b)
109{
110 struct ref_entry *one = *(struct ref_entry **)a;
111 struct ref_entry *two = *(struct ref_entry **)b;
112 return strcmp(one->name, two->name);
113}
114
115static void sort_ref_dir(struct ref_dir *dir);
116
/*
 * A length-delimited (not necessarily NUL-terminated) view into a
 * string; used as the bsearch() key when looking up a dir entry by a
 * prefix of a refname (see ref_entry_cmp_sslice()).
 */
struct string_slice {
	size_t len;        /* number of significant bytes in str */
	const char *str;   /* start of the slice; not owned */
};
121
122static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
123{
124 const struct string_slice *key = key_;
125 const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
126 int cmp = strncmp(key->str, ent->name, key->len);
127 if (cmp)
128 return cmp;
129 return '\0' - (unsigned char)ent->name[key->len];
130}
131
132int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
133{
134 struct ref_entry **r;
135 struct string_slice key;
136
137 if (refname == NULL || !dir->nr)
138 return -1;
139
140 sort_ref_dir(dir);
141 key.len = len;
142 key.str = refname;
143 r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
144 ref_entry_cmp_sslice);
145
146 if (!r)
147 return -1;
148
149 return r - dir->entries;
150}
151
152/*
153 * Search for a directory entry directly within dir (without
154 * recursing). Sort dir if necessary. subdirname must be a directory
155 * name (i.e., end in '/'). Returns NULL if the desired
156 * directory cannot be found. dir must already be complete.
157 */
158static struct ref_dir *search_for_subdir(struct ref_dir *dir,
159 const char *subdirname, size_t len)
160{
161 int entry_index = search_ref_dir(dir, subdirname, len);
162 struct ref_entry *entry;
163
164 if (entry_index == -1)
165 return NULL;
166
167 entry = dir->entries[entry_index];
168 return get_ref_dir(entry);
169}
170
/*
 * If refname is a reference name, find the ref_dir within the dir
 * tree that should hold refname. If refname is a directory name
 * (i.e., it ends in '/'), then return that ref_dir itself. dir must
 * represent the top-level directory and must already be complete.
 * Sort ref_dirs and recurse into subdirectories as necessary. Will
 * return NULL if the desired directory cannot be found.
 */
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
					   const char *refname)
{
	const char *slash = strchr(refname, '/');

	while (slash) {
		/* dirname includes the trailing '/'. */
		size_t dirnamelen = slash - refname + 1;

		dir = search_for_subdir(dir, refname, dirnamelen);
		if (!dir)
			return NULL;

		slash = strchr(slash + 1, '/');
	}

	return dir;
}
196
197/*
198 * Emit a warning and return true iff ref1 and ref2 have the same name
199 * and the same oid. Die if they have the same name but different
200 * oids.
201 */
202static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
203{
204 if (strcmp(ref1->name, ref2->name))
205 return 0;
206
207 /* Duplicate name; make sure that they don't conflict: */
208
209 if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
210 /* This is impossible by construction */
211 die("Reference directory conflict: %s", ref1->name);
212
213 if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid))
214 die("Duplicated ref, and SHA1s don't match: %s", ref1->name);
215
216 warning("Duplicated ref: %s", ref1->name);
217 return 1;
218}
219
220/*
221 * Sort the entries in dir non-recursively (if they are not already
222 * sorted) and remove any duplicate entries.
223 */
224static void sort_ref_dir(struct ref_dir *dir)
225{
226 int i, j;
227 struct ref_entry *last = NULL;
228
229 /*
230 * This check also prevents passing a zero-length array to qsort(),
231 * which is a problem on some platforms.
232 */
233 if (dir->sorted == dir->nr)
234 return;
235
236 QSORT(dir->entries, dir->nr, ref_entry_cmp);
237
238 /* Remove any duplicates: */
239 for (i = 0, j = 0; j < dir->nr; j++) {
240 struct ref_entry *entry = dir->entries[j];
241 if (last && is_dup_ref(last, entry))
242 free_ref_entry(entry);
243 else
244 last = dir->entries[i++] = entry;
245 }
246 dir->sorted = dir->nr = i;
247}
248
/*
 * How a directory's name relates to a prefix being matched; used to
 * decide whether to descend into a subdirectory during iteration or
 * priming (see overlaps_prefix()).
 */
enum prefix_state {
	/* All refs within the directory would match prefix: */
	PREFIX_CONTAINS_DIR,

	/* Some, but not all, refs within the directory might match prefix: */
	PREFIX_WITHIN_DIR,

	/* No refs within the directory could possibly match prefix: */
	PREFIX_EXCLUDES_DIR
};
259
260/*
261 * Return a `prefix_state` constant describing the relationship
262 * between the directory with the specified `dirname` and `prefix`.
263 */
264static enum prefix_state overlaps_prefix(const char *dirname,
265 const char *prefix)
266{
267 while (*prefix && *dirname == *prefix) {
268 dirname++;
269 prefix++;
270 }
271 if (!*prefix)
272 return PREFIX_CONTAINS_DIR;
273 else if (!*dirname)
274 return PREFIX_WITHIN_DIR;
275 else
276 return PREFIX_EXCLUDES_DIR;
277}
278
279/*
280 * Load all of the refs from `dir` (recursively) that could possibly
281 * contain references matching `prefix` into our in-memory cache. If
282 * `prefix` is NULL, prime unconditionally.
283 */
284static void prime_ref_dir(struct ref_dir *dir, const char *prefix)
285{
286 /*
287 * The hard work of loading loose refs is done by get_ref_dir(), so we
288 * just need to recurse through all of the sub-directories. We do not
289 * even need to care about sorting, as traversal order does not matter
290 * to us.
291 */
292 int i;
293 for (i = 0; i < dir->nr; i++) {
294 struct ref_entry *entry = dir->entries[i];
295 if (!(entry->flag & REF_DIR)) {
296 /* Not a directory; no need to recurse. */
297 } else if (!prefix) {
298 /* Recurse in any case: */
299 prime_ref_dir(get_ref_dir(entry), NULL);
300 } else {
301 switch (overlaps_prefix(entry->name, prefix)) {
302 case PREFIX_CONTAINS_DIR:
303 /*
304 * Recurse, and from here down we
305 * don't have to check the prefix
306 * anymore:
307 */
308 prime_ref_dir(get_ref_dir(entry), NULL);
309 break;
310 case PREFIX_WITHIN_DIR:
311 prime_ref_dir(get_ref_dir(entry), prefix);
312 break;
313 case PREFIX_EXCLUDES_DIR:
314 /* No need to prime this directory. */
315 break;
316 }
317 }
318 }
319}
320
/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/*
	 * How this directory relates to the iterator's prefix. If
	 * PREFIX_WITHIN_DIR, each entry must still be checked against
	 * the prefix during iteration (see cache_ref_iterator_advance()).
	 */
	enum prefix_state prefix_state;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};
342
/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * Only include references with this prefix in the iteration.
	 * The prefix is matched textually, without regard for path
	 * component boundaries. NULL means no prefix filtering.
	 */
	char *prefix;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessarily the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;

	/* Repository used when peeling objects (cache_ref_iterator_peel()). */
	struct repository *repo;
	/* The ref cache being iterated over. */
	struct ref_cache *cache;

	/* If true, eagerly load (prime) directories before iterating. */
	int prime_dir;
};
380
/*
 * Advance the iterator to the next non-directory reference entry,
 * doing a depth-first walk of the ref_dir hierarchy via the `levels`
 * stack. On success, base.refname/referent/oid/flags describe the
 * new current entry and ITER_OK is returned; ITER_DONE is returned
 * when the iteration is exhausted.
 */
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	if (!iter->levels_nr)
		return ITER_DONE;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;
		enum prefix_state entry_prefix_state;

		/* index == -1 means this level has not been started yet. */
		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ITER_DONE;

			continue;
		}

		entry = dir->entries[level->index];

		if (level->prefix_state == PREFIX_WITHIN_DIR) {
			entry_prefix_state = overlaps_prefix(entry->name, iter->prefix);
			/*
			 * Skip entries that cannot match the prefix, and
			 * leaf entries that only partially overlap it (a
			 * leaf must match the entire prefix to be emitted).
			 */
			if (entry_prefix_state == PREFIX_EXCLUDES_DIR ||
			    (entry_prefix_state == PREFIX_WITHIN_DIR && !(entry->flag & REF_DIR)))
				continue;
		} else {
			/* Prefix already fully matched (or absent); inherit. */
			entry_prefix_state = level->prefix_state;
		}

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->prefix_state = entry_prefix_state;
			level->index = -1;
		} else {
			/* A leaf ref: expose it via the base iterator. */
			iter->base.refname = entry->name;
			iter->base.referent = entry->u.value.referent;
			iter->base.oid = &entry->u.value.oid;
			iter->base.flags = entry->flag;
			return ITER_OK;
		}
	}
}
436
437static int cache_ref_iterator_set_prefix(struct cache_ref_iterator *iter,
438 const char *prefix)
439{
440 struct cache_ref_iterator_level *level;
441 struct ref_dir *dir;
442
443 dir = get_ref_dir(iter->cache->root);
444 if (prefix && *prefix)
445 dir = find_containing_dir(dir, prefix);
446 if (!dir) {
447 iter->levels_nr = 0;
448 return 0;
449 }
450
451 if (iter->prime_dir)
452 prime_ref_dir(dir, prefix);
453 iter->levels_nr = 1;
454 level = &iter->levels[0];
455 level->index = -1;
456 level->dir = dir;
457
458 if (prefix && *prefix) {
459 free(iter->prefix);
460 iter->prefix = xstrdup(prefix);
461 level->prefix_state = PREFIX_WITHIN_DIR;
462 } else {
463 FREE_AND_NULL(iter->prefix);
464 level->prefix_state = PREFIX_CONTAINS_DIR;
465 }
466
467 return 0;
468}
469
470static int cache_ref_iterator_seek(struct ref_iterator *ref_iterator,
471 const char *refname, unsigned int flags)
472{
473 struct cache_ref_iterator *iter =
474 (struct cache_ref_iterator *)ref_iterator;
475
476 if (flags & REF_ITERATOR_SEEK_SET_PREFIX) {
477 return cache_ref_iterator_set_prefix(iter, refname);
478 } else if (refname && *refname) {
479 struct cache_ref_iterator_level *level;
480 const char *slash = refname;
481 struct ref_dir *dir;
482
483 dir = get_ref_dir(iter->cache->root);
484
485 if (iter->prime_dir)
486 prime_ref_dir(dir, refname);
487
488 iter->levels_nr = 1;
489 level = &iter->levels[0];
490 level->index = -1;
491 level->dir = dir;
492
493 /* Unset any previously set prefix */
494 FREE_AND_NULL(iter->prefix);
495
496 /*
497 * Breakdown the provided seek path and assign the correct
498 * indexing to each level as needed.
499 */
500 do {
501 int idx;
502 size_t len;
503 int cmp = 0;
504
505 sort_ref_dir(dir);
506
507 slash = strchr(slash, '/');
508 len = slash ? (size_t)(slash - refname) : strlen(refname);
509
510 for (idx = 0; idx < dir->nr; idx++) {
511 cmp = strncmp(refname, dir->entries[idx]->name, len);
512 if (cmp <= 0)
513 break;
514 }
515 /* don't overflow the index */
516 idx = idx >= dir->nr ? dir->nr - 1 : idx;
517
518 if (slash)
519 slash = slash + 1;
520
521 level->index = idx;
522 if (dir->entries[idx]->flag & REF_DIR) {
523 /* push down a level */
524 dir = get_ref_dir(dir->entries[idx]);
525
526 ALLOC_GROW(iter->levels, iter->levels_nr + 1,
527 iter->levels_alloc);
528 level = &iter->levels[iter->levels_nr++];
529 level->dir = dir;
530 level->index = -1;
531 level->prefix_state = PREFIX_CONTAINS_DIR;
532 } else {
533 /* reduce the index so the leaf node is iterated over */
534 if (cmp <= 0 && !slash)
535 level->index = idx - 1;
536 /*
537 * while the seek path may not be exhausted, our
538 * match is exhausted at a leaf node.
539 */
540 break;
541 }
542 } while (slash && dir->nr);
543 }
544
545 return 0;
546}
547
548static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
549 struct object_id *peeled)
550{
551 struct cache_ref_iterator *iter =
552 (struct cache_ref_iterator *)ref_iterator;
553 return peel_object(iter->repo, ref_iterator->oid, peeled) ? -1 : 0;
554}
555
556static void cache_ref_iterator_release(struct ref_iterator *ref_iterator)
557{
558 struct cache_ref_iterator *iter =
559 (struct cache_ref_iterator *)ref_iterator;
560 free(iter->prefix);
561 free(iter->levels);
562}
563
/* Virtual method table for iterators over the in-memory ref cache. */
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
	.advance = cache_ref_iterator_advance,
	.seek = cache_ref_iterator_seek,
	.peel = cache_ref_iterator_peel,
	.release = cache_ref_iterator_release,
};
570
571struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
572 const char *prefix,
573 struct repository *repo,
574 int prime_dir)
575{
576 struct cache_ref_iterator *iter;
577 struct ref_iterator *ref_iterator;
578
579 CALLOC_ARRAY(iter, 1);
580 ref_iterator = &iter->base;
581 base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
582 ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
583
584 iter->repo = repo;
585 iter->cache = cache;
586 iter->prime_dir = prime_dir;
587
588 if (cache_ref_iterator_seek(&iter->base, prefix,
589 REF_ITERATOR_SEEK_SET_PREFIX) < 0) {
590 ref_iterator_free(&iter->base);
591 return NULL;
592 }
593
594 return ref_iterator;
595}