/*
 * alloc.c - specialized allocator for internal objects
 *
 * Copyright (C) 2006 Linus Torvalds
 *
 * The standard malloc/free wastes too much space for objects, partly because
 * it maintains all the allocation infrastructure, but even more because it ends
 * up with maximal alignment because it doesn't know what the object alignment
 * for the new allocation is.
 */
#include "git-compat-util.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "repository.h"
#include "tag.h"
#include "alloc.h"

/* Number of nodes carved out of each slab allocated by alloc_node(). */
#define BLOCKING 1024

/*
 * Large enough to hold any of the parsed object types; used to size the
 * nodes handed out by alloc_object_node() so an object can later be
 * "promoted" in place to any concrete type.
 */
union any_object {
	struct object object;
	struct blob blob;
	struct tree tree;
	struct commit commit;
	struct tag tag;
};

struct alloc_state {
	int nr;    /* number of nodes left in current allocation */
	void *p;   /* first free node in current allocation */

	/* bookkeeping of allocations */
	void **slabs;
	int slab_nr, slab_alloc;
};

/*
 * Allocate a zero-initialized allocator state.  All counters start at 0,
 * so the first alloc_node() call allocates the first slab.
 * Pair with alloc_state_free_and_null().
 */
struct alloc_state *alloc_state_alloc(void)
{
	return xcalloc(1, sizeof(struct alloc_state));
}

/*
 * Release every slab tracked by the allocator, the slab bookkeeping
 * array, and the state itself; the caller's pointer is set to NULL.
 * This frees *all* nodes ever handed out by alloc_node() on this state
 * at once — individual nodes are never freed.  A NULL state is a no-op.
 */
void alloc_state_free_and_null(struct alloc_state **s_)
{
	struct alloc_state *s = *s_;

	if (!s)
		return;

	while (s->slab_nr > 0) {
		s->slab_nr--;
		free(s->slabs[s->slab_nr]);
	}

	FREE_AND_NULL(s->slabs);
	FREE_AND_NULL(*s_);
}

/*
 * Hand out one zeroed node of node_size bytes from the current slab,
 * starting a fresh BLOCKING-node slab when the current one is exhausted.
 * Each new slab is recorded in s->slabs so alloc_state_free_and_null()
 * can release it.  Callers must always pass the same node_size for a
 * given state — the per-type wrappers below guarantee that.
 */
static inline void *alloc_node(struct alloc_state *s, size_t node_size)
{
	void *ret;

	if (!s->nr) {
		s->nr = BLOCKING;
		s->p = xmalloc(BLOCKING * node_size);

		ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc);
		s->slabs[s->slab_nr++] = s->p;
	}
	s->nr--;
	ret = s->p;
	s->p = (char *)s->p + node_size;
	/* nodes are returned zeroed; only the type field is set by callers */
	memset(ret, 0, node_size);

	return ret;
}

/* Allocate a zeroed blob node with object.type preset to OBJ_BLOB. */
void *alloc_blob_node(struct repository *r)
{
	struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob));
	b->object.type = OBJ_BLOB;
	return b;
}

/* Allocate a zeroed tree node with object.type preset to OBJ_TREE. */
void *alloc_tree_node(struct repository *r)
{
	struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree));
	t->object.type = OBJ_TREE;
	return t;
}

/* Allocate a zeroed tag node with object.type preset to OBJ_TAG. */
void *alloc_tag_node(struct repository *r)
{
	struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag));
	t->object.type = OBJ_TAG;
	return t;
}

/*
 * Allocate a node sized for union any_object (not just struct object),
 * with type OBJ_NONE: the object's real type is not yet known, and the
 * extra room lets it be converted in place once it is.
 */
void *alloc_object_node(struct repository *r)
{
	struct object *obj = alloc_node(r->parsed_objects->object_state, sizeof(union any_object));
	obj->type = OBJ_NONE;
	return obj;
}

/*
 * The returned count is to be used as an index into commit slabs,
 * that are *NOT* maintained per repository, and that is why a single
 * global counter is used.
 */
static unsigned int alloc_commit_index(void)
{
	static unsigned int parsed_commits_count;
	return parsed_commits_count++;
}

/*
 * Initialize a commit node: set its type and assign it the next
 * process-global commit-slab index.  Split out from alloc_commit_node()
 * so commits allocated elsewhere can be initialized the same way.
 */
void init_commit_node(struct commit *c)
{
	c->object.type = OBJ_COMMIT;
	c->index = alloc_commit_index();
}

/* Allocate a zeroed commit node, typed and indexed via init_commit_node(). */
void *alloc_commit_node(struct repository *r)
{
	struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit));
	init_commit_node(c);
	return c;
}