/* Git fork — trace2 global/per-thread counter implementation. */
1#include "git-compat-util.h"
2#include "trace2/tr2_tgt.h"
3#include "trace2/tr2_tls.h"
4#include "trace2/tr2_ctr.h"
5
6/*
7 * A global counter block to aggregate values from the partial sums
8 * from each thread.
9 */
10static struct tr2_counter_block final_counter_block; /* access under tr2tls_mutex */
11
12/*
13 * Define metadata for each global counter.
14 *
15 * This array must match the "enum trace2_counter_id" and the values
16 * in "struct tr2_counter_block.counter[*]".
17 */
/*
 * Per-counter metadata: the category/name pair emitted with the value,
 * plus whether per-thread (in addition to final aggregate) events are
 * wanted.  Indexed by "enum trace2_counter_id"; keep the designated
 * initializers in sync with that enum and with
 * "struct tr2_counter_block.counter[*]".
 */
static struct tr2_counter_metadata tr2_counter_metadata[TRACE2_NUMBER_OF_COUNTERS] = {
	[TRACE2_COUNTER_ID_TEST1] = {
		.category = "test",
		.name = "test1",
		.want_per_thread_events = 0,
	},
	[TRACE2_COUNTER_ID_TEST2] = {
		.category = "test",
		.name = "test2",
		.want_per_thread_events = 1,
	},
	[TRACE2_COUNTER_ID_PACKED_REFS_JUMPS] = {
		.category = "packed-refs",
		.name = "jumps_made",
		.want_per_thread_events = 0,
	},
	[TRACE2_COUNTER_ID_REFTABLE_RESEEKS] = {
		.category = "reftable",
		.name = "reseeks_made",
		.want_per_thread_events = 0,
	},
	[TRACE2_COUNTER_ID_FSYNC_WRITEOUT_ONLY] = {
		.category = "fsync",
		.name = "writeout-only",
		.want_per_thread_events = 0,
	},
	[TRACE2_COUNTER_ID_FSYNC_HARDWARE_FLUSH] = {
		.category = "fsync",
		.name = "hardware-flush",
		.want_per_thread_events = 0,
	},

	/* Add additional metadata before here. */
};
52
53void tr2_counter_increment(enum trace2_counter_id cid, uint64_t value)
54{
55 struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
56 struct tr2_counter *c = &ctx->counter_block.counter[cid];
57
58 c->value += value;
59
60 ctx->used_any_counter = 1;
61 if (tr2_counter_metadata[cid].want_per_thread_events)
62 ctx->used_any_per_thread_counter = 1;
63}
64
65void tr2_update_final_counters(void)
66{
67 struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
68 enum trace2_counter_id cid;
69
70 if (!ctx->used_any_counter)
71 return;
72
73 /*
74 * Access `final_counter_block` requires holding `tr2tls_mutex`.
75 * We assume that our caller is holding the lock.
76 */
77
78 for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++) {
79 struct tr2_counter *c_final = &final_counter_block.counter[cid];
80 const struct tr2_counter *c = &ctx->counter_block.counter[cid];
81
82 c_final->value += c->value;
83 }
84}
85
86void tr2_emit_per_thread_counters(tr2_tgt_evt_counter_t *fn_apply)
87{
88 struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
89 enum trace2_counter_id cid;
90
91 if (!ctx->used_any_per_thread_counter)
92 return;
93
94 /*
95 * For each counter, if the counter wants per-thread events
96 * and this thread used it (the value is non-zero), emit it.
97 */
98 for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++)
99 if (tr2_counter_metadata[cid].want_per_thread_events &&
100 ctx->counter_block.counter[cid].value)
101 fn_apply(&tr2_counter_metadata[cid],
102 &ctx->counter_block.counter[cid],
103 0);
104}
105
106void tr2_emit_final_counters(tr2_tgt_evt_counter_t *fn_apply)
107{
108 enum trace2_counter_id cid;
109
110 /*
111 * Access `final_counter_block` requires holding `tr2tls_mutex`.
112 * We assume that our caller is holding the lock.
113 */
114
115 for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++)
116 if (final_counter_block.counter[cid].value)
117 fn_apply(&tr2_counter_metadata[cid],
118 &final_counter_block.counter[cid],
119 1);
120}