Forked from Git's refs/iterator.c.
1/*
2 * Generic reference iterator infrastructure. See refs-internal.h for
3 * documentation about the design and use of reference iterators.
4 */
5
6#define DISABLE_SIGN_COMPARE_WARNINGS
7
8#include "git-compat-util.h"
9#include "refs.h"
10#include "refs/refs-internal.h"
11#include "iterator.h"
12
13int ref_iterator_advance(struct ref_iterator *ref_iterator)
14{
15 return ref_iterator->vtable->advance(ref_iterator);
16}
17
18int ref_iterator_seek(struct ref_iterator *ref_iterator, const char *refname,
19 unsigned int flags)
20{
21 return ref_iterator->vtable->seek(ref_iterator, refname, flags);
22}
23
24int ref_iterator_peel(struct ref_iterator *ref_iterator,
25 struct object_id *peeled)
26{
27 return ref_iterator->vtable->peel(ref_iterator, peeled);
28}
29
30void ref_iterator_free(struct ref_iterator *ref_iterator)
31{
32 if (ref_iterator) {
33 ref_iterator->vtable->release(ref_iterator);
34 /* Help make use-after-free bugs fail quickly: */
35 ref_iterator->vtable = NULL;
36 free(ref_iterator);
37 }
38}
39
40void base_ref_iterator_init(struct ref_iterator *iter,
41 struct ref_iterator_vtable *vtable)
42{
43 iter->vtable = vtable;
44 iter->refname = NULL;
45 iter->referent = NULL;
46 iter->oid = NULL;
47 iter->flags = 0;
48}
49
/*
 * An iterator over no references: its advance() immediately reports
 * ITER_DONE.  Created via empty_ref_iterator_begin().
 */
struct empty_ref_iterator {
	struct ref_iterator base;
};
53
/* An empty iterator has no entries: always report end of iteration. */
static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator UNUSED)
{
	return ITER_DONE;
}
58
/* Seeking within an empty iterator is a successful no-op. */
static int empty_ref_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
				   const char *refname UNUSED,
				   unsigned int flags UNUSED)
{
	return 0;
}
65
/*
 * advance() never returns ITER_OK for an empty iterator, so there is
 * never a current entry to peel; reaching this function is a bug.
 */
static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
				   struct object_id *peeled UNUSED)
{
	BUG("peel called for empty iterator");
}
71
/* An empty iterator holds no resources beyond its own allocation. */
static void empty_ref_iterator_release(struct ref_iterator *ref_iterator UNUSED)
{
}
75
/*
 * Note: is_empty_ref_iterator() identifies empty iterators by comparing
 * against this vtable's address, so it must remain the single instance.
 */
static struct ref_iterator_vtable empty_ref_iterator_vtable = {
	.advance = empty_ref_iterator_advance,
	.seek = empty_ref_iterator_seek,
	.peel = empty_ref_iterator_peel,
	.release = empty_ref_iterator_release,
};
82
83struct ref_iterator *empty_ref_iterator_begin(void)
84{
85 struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
86 struct ref_iterator *ref_iterator = &iter->base;
87
88 base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
89 return ref_iterator;
90}
91
92int is_empty_ref_iterator(struct ref_iterator *ref_iterator)
93{
94 return ref_iterator->vtable == &empty_ref_iterator_vtable;
95}
96
/*
 * An iterator that merges the entries of two sub-iterators, letting a
 * select callback decide, entry by entry, which side to yield or skip.
 */
struct merge_ref_iterator {
	struct ref_iterator base;

	/*
	 * The live sub-iterators (set to NULL once exhausted or on
	 * error) and the owning pointers, which are used to release
	 * them and to restore them when seeking.
	 */
	struct ref_iterator *iter0, *iter0_owned;
	struct ref_iterator *iter1, *iter1_owned;

	/* Decides which sub-iterator's current entry to yield or skip. */
	ref_iterator_select_fn *select;
	void *cb_data;

	/*
	 * A pointer to iter0 or iter1 (whichever is supplying the
	 * current value), or NULL if advance has not yet been called.
	 */
	struct ref_iterator **current;
};
112
113enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree,
114 struct ref_iterator *iter_common,
115 void *cb_data UNUSED)
116{
117 if (iter_worktree && !iter_common) {
118 /*
119 * Return the worktree ref if there are no more common refs.
120 */
121 return ITER_SELECT_0;
122 } else if (iter_common) {
123 /*
124 * In case we have pending worktree and common refs we need to
125 * yield them based on their lexicographical order. Worktree
126 * refs that have the same name as common refs shadow the
127 * latter.
128 */
129 if (iter_worktree) {
130 int cmp = strcmp(iter_worktree->refname,
131 iter_common->refname);
132 if (cmp < 0)
133 return ITER_SELECT_0;
134 else if (!cmp)
135 return ITER_SELECT_0_SKIP_1;
136 }
137
138 /*
139 * We now know that the lexicographically-next ref is a common
140 * ref. When the common ref is a shared one we return it.
141 */
142 if (parse_worktree_ref(iter_common->refname, NULL, NULL,
143 NULL) == REF_WORKTREE_SHARED)
144 return ITER_SELECT_1;
145
146 /*
147 * Otherwise, if the common ref is a per-worktree ref we skip
148 * it because it would belong to the main worktree, not ours.
149 */
150 return ITER_SKIP_1;
151 } else {
152 return ITER_DONE;
153 }
154}
155
/*
 * Advance the merge iterator: move whichever sub-iterator supplied the
 * previous entry (or both, on the very first call), then repeatedly ask
 * iter->select which sub-iterator's entry to yield, skip, or whether to
 * stop.  A sub-iterator that stops returning ITER_OK is replaced by
 * NULL so the select function can see that it is exhausted.
 */
static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ok;

	if (!iter->current) {
		/* Initialize: advance both iterators to their first entries */
		if ((ok = ref_iterator_advance(iter->iter0)) != ITER_OK) {
			iter->iter0 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
		if ((ok = ref_iterator_advance(iter->iter1)) != ITER_OK) {
			iter->iter1 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	} else {
		/*
		 * Advance the current iterator past the just-used
		 * entry:
		 */
		if ((ok = ref_iterator_advance(*iter->current)) != ITER_OK) {
			*iter->current = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	}

	/* Loop until we find an entry that we can yield. */
	while (1) {
		struct ref_iterator **secondary;
		enum iterator_selection selection =
			iter->select(iter->iter0, iter->iter1, iter->cb_data);

		if (selection == ITER_SELECT_DONE) {
			return ITER_DONE;
		} else if (selection == ITER_SELECT_ERROR) {
			return ITER_ERROR;
		}

		/* The low bit of the selection picks current vs. secondary. */
		if ((selection & ITER_CURRENT_SELECTION_MASK) == 0) {
			iter->current = &iter->iter0;
			secondary = &iter->iter1;
		} else {
			iter->current = &iter->iter1;
			secondary = &iter->iter0;
		}

		/* Optionally consume (without yielding) the other side's entry. */
		if (selection & ITER_SKIP_SECONDARY) {
			if ((ok = ref_iterator_advance(*secondary)) != ITER_OK) {
				*secondary = NULL;
				if (ok == ITER_ERROR)
					goto error;
			}
		}

		if (selection & ITER_YIELD_CURRENT) {
			/* Expose the selected entry through the base iterator. */
			iter->base.referent = (*iter->current)->referent;
			iter->base.refname = (*iter->current)->refname;
			iter->base.oid = (*iter->current)->oid;
			iter->base.flags = (*iter->current)->flags;
			return ITER_OK;
		}
	}

error:
	return ITER_ERROR;
}
226
227static int merge_ref_iterator_seek(struct ref_iterator *ref_iterator,
228 const char *refname, unsigned int flags)
229{
230 struct merge_ref_iterator *iter =
231 (struct merge_ref_iterator *)ref_iterator;
232 int ret;
233
234 iter->current = NULL;
235 iter->iter0 = iter->iter0_owned;
236 iter->iter1 = iter->iter1_owned;
237
238 ret = ref_iterator_seek(iter->iter0, refname, flags);
239 if (ret < 0)
240 return ret;
241
242 ret = ref_iterator_seek(iter->iter1, refname, flags);
243 if (ret < 0)
244 return ret;
245
246 return 0;
247}
248
249static int merge_ref_iterator_peel(struct ref_iterator *ref_iterator,
250 struct object_id *peeled)
251{
252 struct merge_ref_iterator *iter =
253 (struct merge_ref_iterator *)ref_iterator;
254
255 if (!iter->current) {
256 BUG("peel called before advance for merge iterator");
257 }
258 return ref_iterator_peel(*iter->current, peeled);
259}
260
/*
 * Free both owned sub-iterators.  ref_iterator_free() tolerates NULL,
 * so this is safe even if a side was never set.
 */
static void merge_ref_iterator_release(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	ref_iterator_free(iter->iter0_owned);
	ref_iterator_free(iter->iter1_owned);
}
268
/* Dispatch table for merge iterators. */
static struct ref_iterator_vtable merge_ref_iterator_vtable = {
	.advance = merge_ref_iterator_advance,
	.seek = merge_ref_iterator_seek,
	.peel = merge_ref_iterator_peel,
	.release = merge_ref_iterator_release,
};
275
276struct ref_iterator *merge_ref_iterator_begin(
277 struct ref_iterator *iter0, struct ref_iterator *iter1,
278 ref_iterator_select_fn *select, void *cb_data)
279{
280 struct merge_ref_iterator *iter = xcalloc(1, sizeof(*iter));
281 struct ref_iterator *ref_iterator = &iter->base;
282
283 /*
284 * We can't do the same kind of is_empty_ref_iterator()-style
285 * optimization here as overlay_ref_iterator_begin() does,
286 * because we don't know the semantics of the select function.
287 * It might, for example, implement "intersect" by passing
288 * references through only if they exist in both iterators.
289 */
290
291 base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
292 iter->iter0 = iter->iter0_owned = iter0;
293 iter->iter1 = iter->iter1_owned = iter1;
294 iter->select = select;
295 iter->cb_data = cb_data;
296 iter->current = NULL;
297 return ref_iterator;
298}
299
300/*
301 * A ref_iterator_select_fn that overlays the items from front on top
302 * of those from back (like loose refs over packed refs). See
303 * overlay_ref_iterator_begin().
304 */
305static enum iterator_selection overlay_iterator_select(
306 struct ref_iterator *front, struct ref_iterator *back,
307 void *cb_data UNUSED)
308{
309 int cmp;
310
311 if (!back)
312 return front ? ITER_SELECT_0 : ITER_SELECT_DONE;
313 else if (!front)
314 return ITER_SELECT_1;
315
316 cmp = strcmp(front->refname, back->refname);
317
318 if (cmp < 0)
319 return ITER_SELECT_0;
320 else if (cmp > 0)
321 return ITER_SELECT_1;
322 else
323 return ITER_SELECT_0_SKIP_1;
324}
325
326struct ref_iterator *overlay_ref_iterator_begin(
327 struct ref_iterator *front, struct ref_iterator *back)
328{
329 /*
330 * Optimization: if one of the iterators is empty, return the
331 * other one rather than incurring the overhead of wrapping
332 * them.
333 */
334 if (is_empty_ref_iterator(front)) {
335 ref_iterator_free(front);
336 return back;
337 } else if (is_empty_ref_iterator(back)) {
338 ref_iterator_free(back);
339 return front;
340 }
341
342 return merge_ref_iterator_begin(front, back, overlay_iterator_select, NULL);
343}
344
/*
 * An iterator that filters a wrapped iterator down to refnames within
 * a given prefix, optionally trimming that prefix off yielded names.
 */
struct prefix_ref_iterator {
	struct ref_iterator base;

	/* The wrapped iterator (owned; freed on release). */
	struct ref_iterator *iter0;
	/* Only refnames within this prefix are yielded (owned copy). */
	char *prefix;
	/* Number of leading characters to strip from yielded refnames. */
	int trim;
};
352
/* Return -1, 0, 1 if refname is before, inside, or after the prefix. */
static int compare_prefix(const char *refname, const char *prefix)
{
	for (; *prefix; refname++, prefix++) {
		unsigned char r = *refname;
		unsigned char p = *prefix;

		if (r != p)
			return r < p ? -1 : +1;
	}

	/* The whole prefix matched, so refname lies inside it. */
	return 0;
}
366
/*
 * Yield only those entries of iter0 whose refname falls within
 * iter->prefix, trimming iter->trim leading characters off each
 * yielded refname.  Because iter0 is ordered, iteration stops at the
 * first refname sorting after the prefix.
 */
static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		int cmp = compare_prefix(iter->iter0->refname, iter->prefix);
		/* Still before the prefix range: keep scanning. */
		if (cmp < 0)
			continue;
		/*
		 * As the source iterator is ordered, we
		 * can stop the iteration as soon as we see a
		 * refname that comes after the prefix:
		 */
		if (cmp > 0)
			return ITER_DONE;

		if (iter->trim) {
			/*
			 * It is nonsense to trim off characters that
			 * you haven't already checked for via a
			 * prefix check, whether via this
			 * `prefix_ref_iterator` or upstream in
			 * `iter0`). So if there wouldn't be at least
			 * one character left in the refname after
			 * trimming, report it as a bug:
			 */
			if (strlen(iter->iter0->refname) <= iter->trim)
				BUG("attempt to trim too many characters");
			iter->base.refname = iter->iter0->refname + iter->trim;
		} else {
			iter->base.refname = iter->iter0->refname;
		}

		/* Pass the wrapped entry's metadata through unchanged. */
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/* Propagate ITER_DONE/ITER_ERROR from the wrapped iterator. */
	return ok;
}
409
410static int prefix_ref_iterator_seek(struct ref_iterator *ref_iterator,
411 const char *refname, unsigned int flags)
412{
413 struct prefix_ref_iterator *iter =
414 (struct prefix_ref_iterator *)ref_iterator;
415
416 if (flags & REF_ITERATOR_SEEK_SET_PREFIX) {
417 free(iter->prefix);
418 iter->prefix = xstrdup_or_null(refname);
419 }
420 return ref_iterator_seek(iter->iter0, refname, flags);
421}
422
423static int prefix_ref_iterator_peel(struct ref_iterator *ref_iterator,
424 struct object_id *peeled)
425{
426 struct prefix_ref_iterator *iter =
427 (struct prefix_ref_iterator *)ref_iterator;
428
429 return ref_iterator_peel(iter->iter0, peeled);
430}
431
/* Free the wrapped iterator and the owned copy of the prefix string. */
static void prefix_ref_iterator_release(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	ref_iterator_free(iter->iter0);
	free(iter->prefix);
}
439
/* Dispatch table for prefix iterators. */
static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
	.advance = prefix_ref_iterator_advance,
	.seek = prefix_ref_iterator_seek,
	.peel = prefix_ref_iterator_peel,
	.release = prefix_ref_iterator_release,
};
446
447struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
448 const char *prefix,
449 int trim)
450{
451 struct prefix_ref_iterator *iter;
452 struct ref_iterator *ref_iterator;
453
454 if (!*prefix && !trim)
455 return iter0; /* optimization: no need to wrap iterator */
456
457 CALLOC_ARRAY(iter, 1);
458 ref_iterator = &iter->base;
459
460 base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);
461
462 iter->iter0 = iter0;
463 iter->prefix = xstrdup(prefix);
464 iter->trim = trim;
465
466 return ref_iterator;
467}
468
/*
 * The iterator currently being driven by do_for_each_ref_iterator(),
 * or NULL when no such iteration is in progress.
 */
struct ref_iterator *current_ref_iter = NULL;
470
471int do_for_each_ref_iterator(struct ref_iterator *iter,
472 each_ref_fn fn, void *cb_data)
473{
474 int retval = 0, ok;
475 struct ref_iterator *old_ref_iter = current_ref_iter;
476
477 current_ref_iter = iter;
478 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
479 retval = fn(iter->refname, iter->referent, iter->oid, iter->flags, cb_data);
480 if (retval)
481 goto out;
482 }
483
484out:
485 current_ref_iter = old_ref_iter;
486 if (ok == ITER_ERROR)
487 retval = -1;
488 ref_iterator_free(iter);
489 return retval;
490}