logfire client for zig

remove unused legacy module

the logfire-legacy module (direct OTLP/JSON implementation) was never
used by any downstream project - both prefect-server and leaflet-search
use the otel-backed logfire module.

deleted:
- src/exporter.zig, root.zig, attribute.zig, span.zig, log.zig, metrics.zig
- examples/basic.zig
- docs/otel-adoption-plan.md

~75KB of dead code removed.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

+5 -2765
+1 -1
README.md
··· 144 144 145 145 ```bash 146 146 zig build test # run tests 147 - zig build example # run examples/basic.zig 147 + zig build example # run example 148 148 ``` 149 149 150 150 ## Status
+4 -37
build.zig
··· 17 17 .optimize = optimize, 18 18 }); 19 19 20 - // main module: otel-backed wrapper (recommended) 20 + // logfire module (otel-backed) 21 21 const logfire_mod = b.addModule("logfire", .{ 22 22 .root_source_file = b.path("src/otel_wrapper.zig"), 23 23 .target = target, ··· 28 28 }, 29 29 }); 30 30 31 - // legacy module: direct OTLP/JSON implementation (deprecated) 32 - const legacy_mod = b.addModule("logfire-legacy", .{ 33 - .root_source_file = b.path("src/root.zig"), 34 - .target = target, 35 - .optimize = optimize, 36 - .imports = &.{ 37 - .{ .name = "otel-api", .module = otel_dep.module("otel-api") }, 38 - .{ .name = "otel-sdk", .module = otel_dep.module("otel-sdk") }, 39 - .{ .name = "otel-exporters", .module = otel_dep.module("otel-exporters") }, 40 - .{ .name = "otel-semconv", .module = otel_dep.module("otel-semconv") }, 41 - }, 42 - }); 43 - 44 - // tests (run against main module) 31 + // tests 45 32 const tests = b.addTest(.{ 46 33 .root_module = b.createModule(.{ 47 34 .root_source_file = b.path("src/otel_wrapper.zig"), ··· 54 41 }), 55 42 }); 56 43 const run_tests = b.addRunArtifact(tests); 57 - 58 - // legacy tests 59 - const legacy_tests = b.addTest(.{ .root_module = legacy_mod }); 60 - const run_legacy_tests = b.addRunArtifact(legacy_tests); 61 44 62 45 const test_step = b.step("test", "run unit tests"); 63 46 test_step.dependOn(&run_tests.step); 64 - test_step.dependOn(&run_legacy_tests.step); 65 47 66 - // example executable (uses new otel-backed module) 48 + // example executable 67 49 const example = b.addExecutable(.{ 68 50 .name = "example", 69 51 .root_module = b.createModule(.{ ··· 75 57 }); 76 58 77 59 const run_example = b.addRunArtifact(example); 78 - const example_step = b.step("example", "run the otel-based example"); 60 + const example_step = b.step("example", "run example"); 79 61 example_step.dependOn(&run_example.step); 80 - 81 - // legacy example (for testing old API during transition) 82 - const legacy_example = 
b.addExecutable(.{ 83 - .name = "legacy-example", 84 - .root_module = b.createModule(.{ 85 - .root_source_file = b.path("examples/basic.zig"), 86 - .target = target, 87 - .optimize = optimize, 88 - .imports = &.{.{ .name = "logfire", .module = legacy_mod }}, 89 - }), 90 - }); 91 - 92 - const run_legacy_example = b.addRunArtifact(legacy_example); 93 - const legacy_example_step = b.step("legacy-example", "run the legacy example"); 94 - legacy_example_step.dependOn(&run_legacy_example.step); 95 62 96 63 // validation script for otel-zig OTLP export (not in CI, requires LOGFIRE_TOKEN) 97 64 const validate_otel = b.addExecutable(.{
-249
docs/otel-adoption-plan.md
··· 1 - # otel-zig adoption plan 2 - 3 - ## status: in progress (2025-01-25) 4 - 5 - this document explores how logfire-zig should adopt [otel-zig](https://github.com/ibd1279/otel-zig) to become a proper OpenTelemetry-based observability library. 6 - 7 - ## background 8 - 9 - ### current state of logfire-zig 10 - 11 - logfire-zig is currently a standalone OTLP exporter with: 12 - - simple span/log API 13 - - OTLP HTTP/JSON export to Logfire 14 - - token-based configuration (Logfire-specific) 15 - - background flush thread 16 - 17 - it does NOT have: 18 - - full OTel API/SDK compliance 19 - - semantic conventions 20 - - context propagation (W3C traceparent) 21 - - metrics support (partial - types exist but incomplete) 22 - - pipeline architecture 23 - 24 - ### python logfire architecture 25 - 26 - python logfire wraps opentelemetry-python: 27 - 28 - ``` 29 - opentelemetry-python (API/SDK) 30 - +-- opentelemetry-instrumentation-* (ecosystem plugins) 31 - +-- logfire (wrapper + Logfire-specific config) 32 - ``` 33 - 34 - this means: 35 - - instrumentation plugins work for ALL OTel users 36 - - logfire users get ecosystem plugins for free 37 - - logfire adds value through config, dashboards, hosted backend 38 - 39 - ### otel-zig 40 - 41 - [otel-zig](https://github.com/ibd1279/otel-zig) by ibd1279 is a comprehensive OTel implementation. 42 - 43 - **zig version:** 0.15.1 (compatible with logfire-zig's 0.15.0 requirement) 44 - 45 - ## findings 46 - 47 - ### logfire-zig implementation analysis 48 - 49 - **architecture:** 6 modules (root, config, span, log, attribute, exporter, metrics) with a central `Logfire` singleton. 
50 - 51 - **core types:** 52 - - `Logfire` - singleton managing all observability, background flush thread 53 - - `Span.Data` - immutable span with trace_id, span_id, parent, timestamps, 32 max attributes 54 - - `LogRecord` - severity, message, trace correlation, attributes 55 - - `Attribute` - tagged union with internal string storage (512 bytes) for copy safety 56 - - `MetricData` - union of sum/gauge/histogram/exponential_histogram 57 - 58 - **threading model:** 59 - - thread-local trace context (`threadlocal var tl_trace_id`, `tl_current_span_id`) 60 - - background flush thread (configurable interval) 61 - - mutex-protected pending lists 62 - - atomic span ID counter 63 - 64 - **OTLP export:** 65 - - HTTP/JSON only (no protobuf, no gRPC) 66 - - per-endpoint batching (/v1/traces, /v1/logs, /v1/metrics) 67 - - uses `std.json.Stringify` for streaming JSON generation 68 - - special handling for hex trace IDs, nanosecond timestamps 69 - 70 - **limitations vs full OTel:** 71 - 1. no trace links or span events 72 - 2. no span status (always UNSET) 73 - 3. no resource detection 74 - 4. no W3C traceparent propagation 75 - 5. metrics attributes ignored 76 - 6. no sampling 77 - 7. no pipeline architecture (processor -> exporter) 78 - 8. 32 attribute limit, 512 byte string limit 79 - 9. no semantic conventions 80 - 81 - **strengths:** 82 - - simple, focused API 83 - - memory-safe attribute handling (internal string storage) 84 - - working background export 85 - - Logfire-specific token handling 86 - 87 - ### otel-zig implementation analysis 88 - 89 - **architecture:** proper OTel structure with api/, sdk/, exporters/, semconv/ separation. 
90 - 91 - **core types:** 92 - - `Span`, `Tracer`, `TracerProvider` - union-tagged for noop/bridge polymorphism 93 - - `LogRecord`, `Logger`, `LoggerProvider` - same pattern 94 - - `Meter`, `MeterProvider`, `Counter`, `Gauge`, `Histogram` - full metrics 95 - - `SpanContext` - trace_id, span_id, trace_flags, trace_state, is_remote 96 - 97 - **pipeline architecture:** 98 - ``` 99 - Provider -> Processor -> Exporter 100 - ``` 101 - - traces: TracerProvider -> SpanProcessor -> SpanExporter 102 - - logs: LoggerProvider -> LogRecordProcessor -> LogRecordExporter 103 - - metrics: MeterProvider -> Reader -> MetricExporter 104 - - fluent pipeline builder pattern at compile time 105 - 106 - **OTLP export:** 107 - - protobuf-based (using zig-protobuf dependency) 108 - - HTTP transport (gRPC/JSON configurable) 109 - - proper conversion from Zig types to OTLP messages 110 - 111 - **semantic conventions:** 112 - - comprehensive coverage (v1.24.0) 113 - - http, database, rpc, messaging, exception, network, resource 114 - - standard constant values 115 - 116 - **W3C context propagation:** 117 - - traceparent header injection/extraction 118 - - TextMapCarrier abstraction 119 - - marks remote contexts properly 120 - 121 - **maturity: ~70-75% production-ready** 122 - 123 - known gaps (per TODO.md): 124 - - trace state parsing incomplete (potential leak) 125 - - thread-local context storage not implemented 126 - - histogram configurations incomplete 127 - - parent sampling decorator incomplete 128 - 129 - **strengths:** 130 - - proper API/SDK separation 131 - - all three signals (traces, logs, metrics) 132 - - thread-safe implementations 133 - - no-op implementations for safety 134 - - std.log bridge 135 - - comprehensive tests 136 - 137 - ## comparison 138 - 139 - | aspect | logfire-zig | otel-zig | 140 - |--------|-------------|----------| 141 - | zig version | 0.15.0 | 0.15.1 | 142 - | spec compliance | minimal | ~80% | 143 - | API/SDK separation | no | yes | 144 - | semantic 
conventions | none | comprehensive | 145 - | context propagation | threadlocal only | W3C traceparent | 146 - | OTLP format | HTTP/JSON | HTTP/protobuf | 147 - | pipeline architecture | no | yes | 148 - | metrics support | partial | full | 149 - | std.log bridge | no | yes | 150 - | spans | working | working | 151 - | logs | working | working | 152 - | maturity | simple/focused | comprehensive | 153 - | Logfire-specific | yes (token, endpoint) | no | 154 - 155 - ## recommendation 156 - 157 - **adopt otel-zig as a dependency** rather than reimplementing. 158 - 159 - ### rationale 160 - 161 - 1. **otel-zig is already 70-75% production-ready** - reimplementing would take months 162 - 2. **proper architecture** - API/SDK separation enables future extensibility 163 - 3. **semantic conventions** - we get http, database, messaging, etc. for free 164 - 4. **W3C propagation** - distributed tracing across services 165 - 5. **ecosystem potential** - instrumentation plugins can target otel-zig API 166 - 6. **same zig version** - no compatibility concerns 167 - 168 - ### what logfire-zig becomes 169 - 170 - 1. **thin wrapper around otel-zig** 171 - 2. **Logfire-specific configuration** (token parsing, endpoint selection) 172 - 3. **Logfire OTLP exporter** (configured otel-zig exporter pointing to Logfire) 173 - 4. 
**convenience APIs** (optional simpler interface for common cases) 174 - 175 - ### migration path 176 - 177 - **phase 1: add otel-zig dependency** 178 - - add to build.zig.zon 179 - - verify builds and tests pass 180 - - verify OTLP export works with Logfire endpoint 181 - 182 - **phase 2: refactor internals** 183 - - replace `Span.Data` with otel-zig spans 184 - - replace `LogRecord` with otel-zig logs 185 - - replace `MetricData` with otel-zig metrics 186 - - keep `Config` for Logfire-specific settings 187 - - use otel-zig pipeline architecture 188 - 189 - **phase 3: update public API** 190 - - option A: keep simple API, wrap otel internally 191 - ```zig 192 - // current API preserved 193 - const span = logfire.span("http.request", .{ .method = "GET" }); 194 - defer span.end(); 195 - ``` 196 - - option B: expose otel API with convenience wrapper 197 - ```zig 198 - // full otel access when needed 199 - var tracer = logfire.getTracer(); 200 - var span = tracer.startSpan("http.request", .{...}, ctx); 201 - ``` 202 - 203 - **phase 4: deprecate redundant code** 204 - - remove custom OTLP JSON serialization (use otel-zig protobuf) 205 - - remove custom span/log types 206 - - keep only Logfire-specific additions 207 - 208 - ### risks and mitigations 209 - 210 - | risk | mitigation | 211 - |------|------------| 212 - | otel-zig protobuf may not work with Logfire | Logfire accepts OTLP protobuf; test early | 213 - | otel-zig API may change | pin to specific commit/version | 214 - | maintenance burden | contribute fixes upstream | 215 - | otel-zig abandoned | fork if necessary (MIT licensed) | 216 - 217 - ### open questions 218 - 219 - 1. **should we contribute instrumentation plugins to otel-zig?** 220 - - http server instrumentation 221 - - database instrumentation 222 - - or separate zig-otel-contrib repo? 223 - 224 - 2. 
**how much of current logfire-zig API to preserve?** 225 - - simpler API is nice for quick usage 226 - - but full otel API enables advanced use cases 227 - 228 - 3. **should we help complete otel-zig's gaps?** 229 - - thread-local context storage 230 - - trace state handling 231 - - histogram configurations 232 - 233 - ## next steps 234 - 235 - 1. [x] add otel-zig as dependency in build.zig.zon 236 - 2. [x] test OTLP export to Logfire endpoint (validated with `zig build validate-otel`) 237 - 3. [x] prototype wrapper API (`src/otel_wrapper.zig`, exposed as `logfire-otel` module) 238 - 4. [ ] benchmark performance vs current implementation 239 - 5. [ ] complete wrapper API (logs, metrics) 240 - 6. [ ] create migration PR 241 - 242 - ## appendix: key otel-zig files to understand 243 - 244 - - `src/api/trace/span.zig` - span interface 245 - - `src/sdk/trace/root.zig` - tracer provider setup 246 - - `src/exporters/otlp/root.zig` - OTLP exporter config 247 - - `src/semconv/http.zig` - HTTP semantic conventions 248 - - `src/api/trace/w3c_propagator.zig` - context propagation 249 - - `examples/simple_trace_sdk.zig` - usage example
-91
examples/basic.zig
··· 1 - //! basic logfire example 2 - //! 3 - //! demonstrates spans, logging, metrics, and export to logfire. 4 - //! 5 - //! run with: 6 - //! LOGFIRE_TOKEN=your_token zig build example 7 - //! 8 - //! or without token for console-only output: 9 - //! zig build example 10 - 11 - const std = @import("std"); 12 - const logfire = @import("logfire"); 13 - 14 - pub fn main() !void { 15 - // configure logfire 16 - // will send to logfire if LOGFIRE_TOKEN is set, otherwise just prints 17 - const lf = try logfire.configure(.{ 18 - .service_name = "logfire-zig-example", 19 - .service_version = "0.1.0", 20 - .environment = "development", 21 - }); 22 - defer lf.shutdown(); 23 - 24 - // simple logging 25 - logfire.info("application started", .{}); 26 - logfire.debug("debug message with value: {d}", .{42}); 27 - 28 - // span with timing and string attributes 29 - { 30 - const s = logfire.span("example.work", .{ 31 - .iteration = @as(i64, 1), 32 - .query = "prefect python", 33 - .tag = "python", 34 - }); 35 - defer s.end(); 36 - 37 - // simulate some work 38 - std.posix.nanosleep(0, 10 * std.time.ns_per_ms); 39 - 40 - logfire.info("work completed", .{}); 41 - } 42 - 43 - // span with dynamic string (simulating arena-allocated query) 44 - { 45 - var buf: [64]u8 = undefined; 46 - const query = std.fmt.bufPrint(&buf, "search query {d}", .{42}) catch "fallback"; 47 - 48 - const s = logfire.span("example.dynamic_string", .{ 49 - .query = query, 50 - }); 51 - defer s.end(); 52 - 53 - std.posix.nanosleep(0, 5 * std.time.ns_per_ms); 54 - } 55 - 56 - // span with special characters that might come from URL params 57 - { 58 - const s = logfire.span("example.special_chars", .{ 59 - .query = "hello \"world\" & <test>", 60 - .unicode = "café résumé naïve", 61 - .control = "tab\there\nnewline", 62 - }); 63 - defer s.end(); 64 - } 65 - 66 - // nested spans 67 - { 68 - const outer = logfire.span("example.outer", .{}); 69 - defer outer.end(); 70 - 71 - { 72 - const inner = 
logfire.span("example.inner", .{ 73 - .depth = @as(i64, 1), 74 - }); 75 - defer inner.end(); 76 - 77 - std.posix.nanosleep(0, 5 * std.time.ns_per_ms); 78 - } 79 - } 80 - 81 - // metrics 82 - logfire.counter("requests.total", 1); 83 - logfire.counter("requests.total", 1); 84 - logfire.gaugeInt("connections.active", 42); 85 - logfire.gaugeDouble("cpu.usage", 0.75); 86 - 87 - // flush to ensure all data is sent 88 - try lf.flush(); 89 - 90 - logfire.info("example complete", .{}); 91 - }
-210
src/attribute.zig
··· 1 - //! OTLP attribute types 2 - //! 3 - //! converts zig values to OTLP-compatible attribute format. 4 - //! string values are copied into internal storage for memory safety. 5 - //! 6 - //! NOTE: string values are stored in _string_storage, NOT as a slice in Value. 7 - //! this is intentional - zig's struct copy semantics would leave dangling pointers 8 - //! if we stored a slice pointing to _string_storage. instead, we store the length 9 - //! and reconstruct the slice via getString() when needed. 10 - 11 - const std = @import("std"); 12 - 13 - pub const Attribute = struct { 14 - key: []const u8, 15 - value: Value, 16 - /// internal storage for copied strings 17 - _string_storage: [max_string_len]u8 = undefined, 18 - _string_len: usize = 0, 19 - 20 - pub const max_string_len = 512; 21 - 22 - /// value types - NOTE: string payload is in _string_storage, not here 23 - /// this ensures correct behavior when Attribute is copied 24 - pub const Value = union(enum) { 25 - string, // string data is in _string_storage[0.._string_len] 26 - int: i64, 27 - float: f64, 28 - bool_val: bool, 29 - }; 30 - 31 - /// get string value - reconstructs slice from internal storage 32 - /// returns null if value is not a string 33 - pub fn getString(self: *const Attribute) ?[]const u8 { 34 - return switch (self.value) { 35 - .string => self._string_storage[0..self._string_len], 36 - else => null, 37 - }; 38 - } 39 - 40 - /// copy a string into internal storage 41 - fn copyString(self: *Attribute, str: []const u8) void { 42 - const len = @min(str.len, max_string_len); 43 - @memcpy(self._string_storage[0..len], str[0..len]); 44 - self._string_len = len; 45 - } 46 - 47 - /// convert a comptime struct to attributes array 48 - /// returns number of attributes written 49 - /// string values are copied for memory safety 50 - pub fn fromStruct(attrs: anytype, out: []Attribute) usize { 51 - const T = @TypeOf(attrs); 52 - const info = @typeInfo(T); 53 - 54 - if (info != .@"struct") return 0; 
55 - 56 - const fields = info.@"struct".fields; 57 - var count: usize = 0; 58 - 59 - inline for (fields) |field| { 60 - if (count >= out.len) break; 61 - 62 - const field_value = @field(attrs, field.name); 63 - if (toValueWithCopy(&out[count], field.name, field_value)) { 64 - count += 1; 65 - } 66 - } 67 - 68 - return count; 69 - } 70 - 71 - /// convert value and copy strings into attribute's internal storage 72 - fn toValueWithCopy(attr: *Attribute, key: []const u8, value: anytype) bool { 73 - const T = @TypeOf(value); 74 - const info = @typeInfo(T); 75 - 76 - switch (info) { 77 - .int, .comptime_int => { 78 - attr.* = .{ .key = key, .value = .{ .int = @intCast(value) } }; 79 - return true; 80 - }, 81 - .float, .comptime_float => { 82 - attr.* = .{ .key = key, .value = .{ .float = @floatCast(value) } }; 83 - return true; 84 - }, 85 - .bool => { 86 - attr.* = .{ .key = key, .value = .{ .bool_val = value } }; 87 - return true; 88 - }, 89 - .pointer => |ptr| { 90 - if (ptr.size == .slice and ptr.child == u8) { 91 - // copy string into internal storage, tag value as string 92 - attr.* = .{ .key = key, .value = .string }; 93 - attr.copyString(value); 94 - return true; 95 - } 96 - if (ptr.size == .one) { 97 - const child_info = @typeInfo(ptr.child); 98 - if (child_info == .array and child_info.array.child == u8) { 99 - attr.* = .{ .key = key, .value = .string }; 100 - attr.copyString(value); 101 - return true; 102 - } 103 - } 104 - return false; 105 - }, 106 - .array => |arr| { 107 - if (arr.child == u8) { 108 - attr.* = .{ .key = key, .value = .string }; 109 - attr.copyString(&value); 110 - return true; 111 - } 112 - return false; 113 - }, 114 - .optional => { 115 - if (value) |v| { 116 - return toValueWithCopy(attr, key, v); 117 - } 118 - return false; 119 - }, 120 - else => return false, 121 - } 122 - } 123 - 124 - /// write attribute as OTLP JSON 125 - pub fn writeJson(self: *const Attribute, w: anytype) !void { 126 - try w.writeAll("{\"key\":"); 127 - try 
writeJsonString(w, self.key); 128 - try w.writeAll(",\"value\":{"); 129 - 130 - switch (self.value) { 131 - .string => { 132 - try w.writeAll("\"stringValue\":"); 133 - try writeJsonString(w, self.getString().?); 134 - }, 135 - .int => |i| { 136 - try w.print("\"intValue\":\"{d}\"", .{i}); 137 - }, 138 - .float => |f| { 139 - try w.print("\"doubleValue\":{d}", .{f}); 140 - }, 141 - .bool_val => |b| { 142 - try w.print("\"boolValue\":{}", .{b}); 143 - }, 144 - } 145 - 146 - try w.writeAll("}}"); 147 - } 148 - }; 149 - 150 - fn writeJsonString(w: anytype, s: []const u8) !void { 151 - try w.writeByte('"'); 152 - for (s) |c| { 153 - switch (c) { 154 - '"' => try w.writeAll("\\\""), 155 - '\\' => try w.writeAll("\\\\"), 156 - '\n' => try w.writeAll("\\n"), 157 - '\r' => try w.writeAll("\\r"), 158 - '\t' => try w.writeAll("\\t"), 159 - 0x00...0x08, 0x0b, 0x0c, 0x0e...0x1f => try w.print("\\u00{x:0>2}", .{c}), 160 - else => try w.writeByte(c), 161 - } 162 - } 163 - try w.writeByte('"'); 164 - } 165 - 166 - // tests 167 - 168 - test "fromStruct basic" { 169 - var attrs: [8]Attribute = undefined; 170 - const count = Attribute.fromStruct(.{ 171 - .name = "test", 172 - .count = @as(i64, 42), 173 - .enabled = true, 174 - }, &attrs); 175 - 176 - try std.testing.expectEqual(@as(usize, 3), count); 177 - try std.testing.expectEqualStrings("name", attrs[0].key); 178 - try std.testing.expectEqualStrings("test", attrs[0].getString().?); 179 - try std.testing.expectEqual(@as(i64, 42), attrs[1].value.int); 180 - try std.testing.expectEqual(true, attrs[2].value.bool_val); 181 - } 182 - 183 - test "writeJson string" { 184 - var buf: [256]u8 = undefined; 185 - var fbs = std.io.fixedBufferStream(&buf); 186 - // create attr with string value via fromStruct 187 - var attrs: [1]Attribute = undefined; 188 - _ = Attribute.fromStruct(.{ .foo = "bar" }, &attrs); 189 - try attrs[0].writeJson(fbs.writer()); 190 - try 
std.testing.expectEqualStrings("{\"key\":\"foo\",\"value\":{\"stringValue\":\"bar\"}}", fbs.getWritten()); 191 - } 192 - 193 - test "copy safety - string survives struct copy" { 194 - // this test verifies the fix for dangling pointer after struct copy 195 - var original: [1]Attribute = undefined; 196 - _ = Attribute.fromStruct(.{ .query = "hello world" }, &original); 197 - 198 - // copy the struct (this is what span.end() does internally) 199 - const copy = original; 200 - 201 - // the copy should have valid string data 202 - try std.testing.expectEqualStrings("query", copy[0].key); 203 - try std.testing.expectEqualStrings("hello world", copy[0].getString().?); 204 - 205 - // verify the copy's getString returns data from the copy's _string_storage 206 - // not from original (which would be a dangling pointer bug) 207 - const copy_str_ptr = copy[0].getString().?.ptr; 208 - const copy_storage_ptr = &copy[0]._string_storage; 209 - try std.testing.expect(copy_str_ptr == copy_storage_ptr); 210 - }
-891
src/exporter.zig
··· 1 - //! OTLP HTTP/JSON exporter 2 - //! 3 - //! sends spans, logs, and metrics to logfire (or any OTLP-compatible backend) 4 - //! uses HTTP with JSON encoding for simplicity. 5 - //! 6 - //! endpoints: 7 - //! - /v1/traces - span data 8 - //! - /v1/logs - log records 9 - //! - /v1/metrics - metric data 10 - //! 11 - //! see: https://opentelemetry.io/docs/specs/otlp/ 12 - 13 - const std = @import("std"); 14 - const json = std.json; 15 - const Config = @import("config.zig").Config; 16 - const Span = @import("span.zig").Span; 17 - const LogRecord = @import("log.zig").LogRecord; 18 - const Attribute = @import("attribute.zig").Attribute; 19 - const metrics_mod = @import("metrics.zig"); 20 - const MetricData = metrics_mod.MetricData; 21 - 22 - pub const Exporter = struct { 23 - allocator: std.mem.Allocator, 24 - config: Config, 25 - http_client: std.http.Client, 26 - 27 - pub fn init(allocator: std.mem.Allocator, config: Config) Exporter { 28 - return .{ 29 - .allocator = allocator, 30 - .config = config, 31 - .http_client = .{ .allocator = allocator }, 32 - }; 33 - } 34 - 35 - pub fn deinit(self: *Exporter) void { 36 - self.http_client.deinit(); 37 - } 38 - 39 - /// send spans and logs to OTLP endpoints 40 - pub fn send( 41 - self: *Exporter, 42 - spans: []const Span.Data, 43 - logs: []const LogRecord, 44 - ) !void { 45 - try self.sendAll(spans, logs, &.{}); 46 - } 47 - 48 - /// send spans, logs, and metrics to OTLP endpoints 49 - pub fn sendAll( 50 - self: *Exporter, 51 - spans: []const Span.Data, 52 - logs: []const LogRecord, 53 - metrics: []const MetricData, 54 - ) !void { 55 - if (!self.config.shouldSend()) { 56 - self.printToConsole(spans, logs, metrics); 57 - return; 58 - } 59 - 60 - const base_url = self.config.base_url orelse return error.NoBaseUrl; 61 - 62 - if (spans.len > 0) { 63 - const traces_json = try self.buildTracesJson(spans); 64 - defer self.allocator.free(traces_json); 65 - try self.sendToEndpoint(base_url, "/v1/traces", traces_json); 66 - } 67 
- 68 - if (logs.len > 0) { 69 - const logs_json = try self.buildLogsJson(logs); 70 - defer self.allocator.free(logs_json); 71 - try self.sendToEndpoint(base_url, "/v1/logs", logs_json); 72 - } 73 - 74 - if (metrics.len > 0) { 75 - const metrics_json = try self.buildMetricsJson(metrics); 76 - defer self.allocator.free(metrics_json); 77 - try self.sendToEndpoint(base_url, "/v1/metrics", metrics_json); 78 - } 79 - } 80 - 81 - fn sendToEndpoint(self: *Exporter, base_url: []const u8, path: []const u8, body: []const u8) !void { 82 - var url_buf: [1024]u8 = undefined; 83 - const url = std.fmt.bufPrint(&url_buf, "{s}{s}", .{ base_url, path }) catch return error.UrlTooLong; 84 - 85 - var aw: std.Io.Writer.Allocating = .init(self.allocator); 86 - defer aw.deinit(); 87 - 88 - var auth_buf: [2048]u8 = undefined; 89 - var auth_header: ?[]const u8 = null; 90 - if (self.config.token) |token| { 91 - auth_header = std.fmt.bufPrint(&auth_buf, "Bearer {s}", .{token}) catch null; 92 - } 93 - 94 - var headers: std.http.Client.Request.Headers = .{ 95 - .content_type = .{ .override = "application/json" }, 96 - .accept_encoding = .{ .override = "identity" }, 97 - .user_agent = .{ .override = "logfire-zig/0.1.0" }, 98 - }; 99 - if (auth_header) |auth| { 100 - headers.authorization = .{ .override = auth }; 101 - } 102 - 103 - const result = self.http_client.fetch(.{ 104 - .location = .{ .url = url }, 105 - .response_writer = &aw.writer, 106 - .method = .POST, 107 - .payload = body, 108 - .headers = headers, 109 - }) catch return error.RequestFailed; 110 - 111 - if (result.status != .ok and result.status != .accepted) { 112 - std.log.warn("logfire: export to {s} failed ({s}): {s}", .{ path, @tagName(result.status), aw.toArrayList().items }); 113 - return error.SendFailed; 114 - } 115 - 116 - // OTLP partial_success response indicates rejected data points 117 - const response = aw.toArrayList().items; 118 - if (response.len > 0) { 119 - std.log.debug("logfire: {s} response: {s}", .{ path, 
response }); 120 - } 121 - } 122 - 123 - fn buildTracesJson(self: *Exporter, spans: []const Span.Data) ![]u8 { 124 - var output: std.Io.Writer.Allocating = .init(self.allocator); 125 - errdefer output.deinit(); 126 - var jw: json.Stringify = .{ .writer = &output.writer }; 127 - 128 - try jw.beginObject(); 129 - try jw.objectField("resourceSpans"); 130 - try jw.beginArray(); 131 - try jw.beginObject(); 132 - 133 - // resource 134 - try jw.objectField("resource"); 135 - try jw.beginObject(); 136 - try jw.objectField("attributes"); 137 - try self.writeResourceAttributesArray(&jw); 138 - try jw.endObject(); 139 - 140 - // scopeSpans 141 - try jw.objectField("scopeSpans"); 142 - try jw.beginArray(); 143 - try jw.beginObject(); 144 - try jw.objectField("scope"); 145 - try jw.beginObject(); 146 - try jw.objectField("name"); 147 - try jw.write("logfire-zig"); 148 - try jw.endObject(); 149 - 150 - try jw.objectField("spans"); 151 - try jw.beginArray(); 152 - for (spans) |s| { 153 - try self.writeSpanObject(&jw, s); 154 - } 155 - try jw.endArray(); 156 - 157 - try jw.endObject(); 158 - try jw.endArray(); 159 - 160 - try jw.endObject(); 161 - try jw.endArray(); 162 - try jw.endObject(); 163 - 164 - return output.toOwnedSlice(); 165 - } 166 - 167 - fn buildLogsJson(self: *Exporter, logs: []const LogRecord) ![]u8 { 168 - var output: std.Io.Writer.Allocating = .init(self.allocator); 169 - errdefer output.deinit(); 170 - var jw: json.Stringify = .{ .writer = &output.writer }; 171 - 172 - try jw.beginObject(); 173 - try jw.objectField("resourceLogs"); 174 - try jw.beginArray(); 175 - try jw.beginObject(); 176 - 177 - // resource 178 - try jw.objectField("resource"); 179 - try jw.beginObject(); 180 - try jw.objectField("attributes"); 181 - try self.writeResourceAttributesArray(&jw); 182 - try jw.endObject(); 183 - 184 - // scopeLogs 185 - try jw.objectField("scopeLogs"); 186 - try jw.beginArray(); 187 - try jw.beginObject(); 188 - try jw.objectField("scope"); 189 - try 
jw.beginObject(); 190 - try jw.objectField("name"); 191 - try jw.write("logfire-zig"); 192 - try jw.endObject(); 193 - 194 - try jw.objectField("logRecords"); 195 - try jw.beginArray(); 196 - for (logs) |log| { 197 - try self.writeLogObject(&jw, log); 198 - } 199 - try jw.endArray(); 200 - 201 - try jw.endObject(); 202 - try jw.endArray(); 203 - 204 - try jw.endObject(); 205 - try jw.endArray(); 206 - try jw.endObject(); 207 - 208 - return output.toOwnedSlice(); 209 - } 210 - 211 - pub fn buildMetricsJson(self: *Exporter, metrics: []const MetricData) ![]u8 { 212 - var output: std.Io.Writer.Allocating = .init(self.allocator); 213 - errdefer output.deinit(); 214 - var jw: json.Stringify = .{ .writer = &output.writer }; 215 - 216 - try jw.beginObject(); 217 - try jw.objectField("resourceMetrics"); 218 - try jw.beginArray(); 219 - try jw.beginObject(); 220 - 221 - // resource 222 - try jw.objectField("resource"); 223 - try jw.beginObject(); 224 - try jw.objectField("attributes"); 225 - try self.writeResourceAttributesArray(&jw); 226 - try jw.endObject(); 227 - 228 - // scopeMetrics 229 - try jw.objectField("scopeMetrics"); 230 - try jw.beginArray(); 231 - try jw.beginObject(); 232 - try jw.objectField("scope"); 233 - try jw.beginObject(); 234 - try jw.objectField("name"); 235 - try jw.write("logfire-zig"); 236 - try jw.endObject(); 237 - 238 - try jw.objectField("metrics"); 239 - try jw.beginArray(); 240 - for (metrics) |metric| { 241 - try writeMetricObject(&jw, metric); 242 - } 243 - try jw.endArray(); 244 - 245 - try jw.endObject(); 246 - try jw.endArray(); 247 - 248 - try jw.endObject(); 249 - try jw.endArray(); 250 - try jw.endObject(); 251 - 252 - return output.toOwnedSlice(); 253 - } 254 - 255 - fn writeResourceAttributesArray(self: *Exporter, jw: *json.Stringify) !void { 256 - try jw.beginArray(); 257 - if (self.config.service_name) |name| { 258 - try writeAttributeObject(jw, "service.name", .{ .string = name }); 259 - } 260 - if (self.config.service_version) 
|version| { 261 - try writeAttributeObject(jw, "service.version", .{ .string = version }); 262 - } 263 - if (self.config.environment) |env| { 264 - try writeAttributeObject(jw, "deployment.environment.name", .{ .string = env }); 265 - } 266 - try jw.endArray(); 267 - } 268 - 269 - fn writeSpanObject(self: *Exporter, jw: *json.Stringify, s: Span.Data) !void { 270 - _ = self; 271 - try jw.beginObject(); 272 - 273 - try jw.objectField("traceId"); 274 - try writeHexString(jw, &s.trace_id); 275 - 276 - try jw.objectField("spanId"); 277 - try writeHexString(jw, &s.span_id); 278 - 279 - if (s.parent_span_id) |parent_id| { 280 - try jw.objectField("parentSpanId"); 281 - try writeHexString(jw, &parent_id); 282 - } 283 - 284 - try jw.objectField("name"); 285 - try jw.write(s.name); 286 - 287 - try jw.objectField("kind"); 288 - try jw.write(@as(i64, 1)); 289 - 290 - try jw.objectField("startTimeUnixNano"); 291 - try writeNsString(jw, s.start_time_ns); 292 - 293 - try jw.objectField("endTimeUnixNano"); 294 - try writeNsString(jw, s.end_time_ns); 295 - 296 - try jw.objectField("attributes"); 297 - try jw.beginArray(); 298 - for (s.attributes[0..s.attribute_count]) |*attr| { 299 - try writeAttributeFromAttr(jw, attr); 300 - } 301 - try jw.endArray(); 302 - 303 - try jw.objectField("status"); 304 - try jw.beginObject(); 305 - try jw.objectField("code"); 306 - try jw.write(@as(i64, 0)); 307 - try jw.endObject(); 308 - 309 - try jw.endObject(); 310 - } 311 - 312 - fn writeLogObject(self: *Exporter, jw: *json.Stringify, log: LogRecord) !void { 313 - _ = self; 314 - try jw.beginObject(); 315 - 316 - try jw.objectField("timeUnixNano"); 317 - try writeNsString(jw, log.timestamp_ns); 318 - 319 - try jw.objectField("severityNumber"); 320 - try jw.write(@as(i64, log.level.severity())); 321 - 322 - try jw.objectField("severityText"); 323 - try jw.write(log.level.name()); 324 - 325 - try jw.objectField("body"); 326 - try jw.beginObject(); 327 - try jw.objectField("stringValue"); 328 - try 
jw.write(log.message); 329 - try jw.endObject(); 330 - 331 - if (log.trace_id) |tid| { 332 - try jw.objectField("traceId"); 333 - try writeHexString(jw, &tid); 334 - } 335 - 336 - try jw.objectField("attributes"); 337 - try jw.beginArray(); 338 - for (log.attributes[0..log.attribute_count]) |*attr| { 339 - try writeAttributeFromAttr(jw, attr); 340 - } 341 - try jw.endArray(); 342 - 343 - try jw.endObject(); 344 - } 345 - 346 - fn printToConsole(self: *Exporter, spans: []const Span.Data, logs: []const LogRecord, metrics: []const MetricData) void { 347 - const console = self.config.console orelse return; 348 - if (!console.enabled) return; 349 - 350 - for (spans) |s| { 351 - const duration_ms = @as(f64, @floatFromInt(s.end_time_ns - s.start_time_ns)) / 1_000_000.0; 352 - std.debug.print("[span] {s} ({d:.2}ms)\n", .{ s.name, duration_ms }); 353 - } 354 - 355 - for (logs) |log| { 356 - if (@intFromEnum(log.level) < @intFromEnum(console.min_level)) continue; 357 - std.debug.print("[{s}] {s}\n", .{ log.level.name(), log.message }); 358 - } 359 - 360 - for (metrics) |metric| { 361 - std.debug.print("[metric] {s}\n", .{metric.name}); 362 - } 363 - } 364 - }; 365 - 366 - // helper functions 367 - 368 - fn writeMetricObject(jw: *json.Stringify, metric: MetricData) !void { 369 - try jw.beginObject(); 370 - 371 - try jw.objectField("name"); 372 - try jw.write(metric.name); 373 - 374 - if (metric.description.len > 0) { 375 - try jw.objectField("description"); 376 - try jw.write(metric.description); 377 - } 378 - 379 - if (metric.unit.len > 0) { 380 - try jw.objectField("unit"); 381 - try jw.write(metric.unit); 382 - } 383 - 384 - switch (metric.data) { 385 - .sum => |sum| { 386 - try jw.objectField("sum"); 387 - try jw.beginObject(); 388 - try jw.objectField("aggregationTemporality"); 389 - try jw.write(@as(i64, @intFromEnum(sum.temporality))); 390 - try jw.objectField("isMonotonic"); 391 - try jw.write(sum.is_monotonic); 392 - try jw.objectField("dataPoints"); 393 - try 
jw.beginArray(); 394 - for (sum.data_points) |dp| { 395 - try writeNumberDataPointObject(jw, dp); 396 - } 397 - try jw.endArray(); 398 - try jw.endObject(); 399 - }, 400 - .gauge => |gauge| { 401 - try jw.objectField("gauge"); 402 - try jw.beginObject(); 403 - try jw.objectField("dataPoints"); 404 - try jw.beginArray(); 405 - for (gauge.data_points) |dp| { 406 - try writeNumberDataPointObject(jw, dp); 407 - } 408 - try jw.endArray(); 409 - try jw.endObject(); 410 - }, 411 - .histogram => |hist| { 412 - try jw.objectField("histogram"); 413 - try jw.beginObject(); 414 - try jw.objectField("aggregationTemporality"); 415 - try jw.write(@as(i64, @intFromEnum(hist.temporality))); 416 - try jw.objectField("dataPoints"); 417 - try jw.beginArray(); 418 - for (hist.data_points) |dp| { 419 - try writeHistogramDataPointObject(jw, dp); 420 - } 421 - try jw.endArray(); 422 - try jw.endObject(); 423 - }, 424 - .exponential_histogram => |exp_hist| { 425 - try jw.objectField("exponentialHistogram"); 426 - try jw.beginObject(); 427 - try jw.objectField("aggregationTemporality"); 428 - try jw.write(@as(i64, @intFromEnum(exp_hist.temporality))); 429 - try jw.objectField("dataPoints"); 430 - try jw.beginArray(); 431 - for (exp_hist.data_points) |dp| { 432 - try writeExponentialHistogramDataPointObject(jw, dp); 433 - } 434 - try jw.endArray(); 435 - try jw.endObject(); 436 - }, 437 - } 438 - 439 - try jw.endObject(); 440 - } 441 - 442 - fn writeNumberDataPointObject(jw: *json.Stringify, dp: metrics_mod.NumberDataPoint) !void { 443 - try jw.beginObject(); 444 - 445 - // attributes - always include even if empty 446 - try jw.objectField("attributes"); 447 - try jw.beginArray(); 448 - for (dp.attributes) |*attr| { 449 - try writeAttributeFromAttr(jw, attr); 450 - } 451 - try jw.endArray(); 452 - 453 - try jw.objectField("startTimeUnixNano"); 454 - try writeNsString(jw, dp.start_time_ns); 455 - 456 - try jw.objectField("timeUnixNano"); 457 - try writeNsString(jw, dp.time_ns); 458 - 459 - 
switch (dp.value) { 460 - .int => |v| { 461 - try jw.objectField("asInt"); 462 - try jw.write(v); // asInt is a number, not a string 463 - }, 464 - .double => |v| { 465 - try jw.objectField("asDouble"); 466 - try jw.write(v); 467 - }, 468 - } 469 - 470 - try jw.endObject(); 471 - } 472 - 473 - fn writeHistogramDataPointObject(jw: *json.Stringify, dp: metrics_mod.HistogramDataPoint) !void { 474 - try jw.beginObject(); 475 - 476 - try jw.objectField("startTimeUnixNano"); 477 - try writeNsString(jw, dp.start_time_ns); 478 - 479 - try jw.objectField("timeUnixNano"); 480 - try writeNsString(jw, dp.time_ns); 481 - 482 - try jw.objectField("count"); 483 - try jw.write(dp.count); // number, not string 484 - 485 - try jw.objectField("sum"); 486 - try jw.write(dp.sum); 487 - 488 - try jw.objectField("bucketCounts"); 489 - try jw.beginArray(); 490 - for (dp.bucket_counts) |c| { 491 - try jw.write(c); // numbers, not strings 492 - } 493 - try jw.endArray(); 494 - 495 - try jw.objectField("explicitBounds"); 496 - try jw.beginArray(); 497 - for (dp.explicit_bounds) |b| { 498 - try jw.write(b); 499 - } 500 - try jw.endArray(); 501 - 502 - try jw.objectField("min"); 503 - try jw.write(dp.min); 504 - 505 - try jw.objectField("max"); 506 - try jw.write(dp.max); 507 - 508 - if (dp.attributes.len > 0) { 509 - try jw.objectField("attributes"); 510 - try jw.beginArray(); 511 - for (dp.attributes) |*attr| { 512 - try writeAttributeFromAttr(jw, attr); 513 - } 514 - try jw.endArray(); 515 - } 516 - 517 - try jw.endObject(); 518 - } 519 - 520 - fn writeExponentialHistogramDataPointObject(jw: *json.Stringify, dp: metrics_mod.ExponentialHistogramDataPoint) !void { 521 - try jw.beginObject(); 522 - 523 - try jw.objectField("startTimeUnixNano"); 524 - try writeNsString(jw, dp.start_time_ns); 525 - 526 - try jw.objectField("timeUnixNano"); 527 - try writeNsString(jw, dp.time_ns); 528 - 529 - try jw.objectField("count"); 530 - try jw.write(dp.count); // number, not string 531 - 532 - try 
jw.objectField("sum"); 533 - try jw.write(dp.sum); 534 - 535 - try jw.objectField("scale"); 536 - try jw.write(@as(i64, dp.scale)); 537 - 538 - try jw.objectField("zeroCount"); 539 - try jw.write(dp.zero_count); // number, not string 540 - 541 - try jw.objectField("positive"); 542 - try jw.beginObject(); 543 - try jw.objectField("offset"); 544 - try jw.write(@as(i64, dp.positive_offset)); 545 - try jw.objectField("bucketCounts"); 546 - try jw.beginArray(); 547 - for (dp.positive_bucket_counts) |c| { 548 - try jw.write(c); // numbers, not strings 549 - } 550 - try jw.endArray(); 551 - try jw.endObject(); 552 - 553 - try jw.objectField("min"); 554 - try jw.write(dp.min); 555 - 556 - try jw.objectField("max"); 557 - try jw.write(dp.max); 558 - 559 - if (dp.attributes.len > 0) { 560 - try jw.objectField("attributes"); 561 - try jw.beginArray(); 562 - for (dp.attributes) |*attr| { 563 - try writeAttributeFromAttr(jw, attr); 564 - } 565 - try jw.endArray(); 566 - } 567 - 568 - try jw.endObject(); 569 - } 570 - 571 - const AttributeValue = union(enum) { 572 - string: []const u8, 573 - int: i64, 574 - float: f64, 575 - bool_val: bool, 576 - }; 577 - 578 - fn writeAttributeObject(jw: *json.Stringify, key: []const u8, value: AttributeValue) !void { 579 - try jw.beginObject(); 580 - try jw.objectField("key"); 581 - try jw.write(key); 582 - try jw.objectField("value"); 583 - try jw.beginObject(); 584 - switch (value) { 585 - .string => |s| { 586 - try jw.objectField("stringValue"); 587 - try writeStringValue(jw, s); 588 - }, 589 - .int => |i| { 590 - try jw.objectField("intValue"); 591 - try writeIntString(jw, i); 592 - }, 593 - .float => |f| { 594 - try jw.objectField("doubleValue"); 595 - try jw.write(f); 596 - }, 597 - .bool_val => |b| { 598 - try jw.objectField("boolValue"); 599 - try jw.write(b); 600 - }, 601 - } 602 - try jw.endObject(); 603 - try jw.endObject(); 604 - } 605 - 606 - fn writeAttributeFromAttr(jw: *json.Stringify, attr: *const Attribute) !void { 607 - try 
jw.beginObject(); 608 - try jw.objectField("key"); 609 - try jw.write(attr.key); 610 - try jw.objectField("value"); 611 - try jw.beginObject(); 612 - switch (attr.value) { 613 - .string => { 614 - try jw.objectField("stringValue"); 615 - try writeStringValue(jw, attr.getString().?); 616 - }, 617 - .int => |i| { 618 - try jw.objectField("intValue"); 619 - try writeIntString(jw, i); 620 - }, 621 - .float => |f| { 622 - try jw.objectField("doubleValue"); 623 - try jw.write(f); 624 - }, 625 - .bool_val => |b| { 626 - try jw.objectField("boolValue"); 627 - try jw.write(b); 628 - }, 629 - } 630 - try jw.endObject(); 631 - try jw.endObject(); 632 - } 633 - 634 - fn writeHexString(jw: *json.Stringify, bytes: []const u8) !void { 635 - var buf: [64]u8 = undefined; 636 - var i: usize = 0; 637 - for (bytes) |b| { 638 - _ = std.fmt.bufPrint(buf[i .. i + 2], "{x:0>2}", .{b}) catch unreachable; 639 - i += 2; 640 - } 641 - try jw.write(buf[0..i]); 642 - } 643 - 644 - fn writeNsString(jw: *json.Stringify, ns: i128) !void { 645 - var buf: [32]u8 = undefined; 646 - const s = std.fmt.bufPrint(&buf, "{d}", .{ns}) catch unreachable; 647 - try jw.write(s); 648 - } 649 - 650 - fn writeIntString(jw: *json.Stringify, val: i64) !void { 651 - var buf: [24]u8 = undefined; 652 - const s = std.fmt.bufPrint(&buf, "{d}", .{val}) catch unreachable; 653 - try jw.write(s); 654 - } 655 - 656 - /// write a string value with proper JSON escaping 657 - /// uses raw mode to maintain Stringify state while writing manually 658 - /// escapes control chars and non-ASCII bytes to ensure valid JSON 659 - fn writeStringValue(jw: *json.Stringify, s: []const u8) !void { 660 - try jw.beginWriteRaw(); 661 - try jw.writer.writeByte('"'); 662 - for (s) |c| { 663 - switch (c) { 664 - // characters that need escaping in JSON 665 - '"' => try jw.writer.writeAll("\\\""), 666 - '\\' => try jw.writer.writeAll("\\\\"), 667 - '\n' => try jw.writer.writeAll("\\n"), 668 - '\r' => try jw.writer.writeAll("\\r"), 669 - '\t' => try 
jw.writer.writeAll("\\t"), 670 - // control characters (0x00-0x1f except \n \r \t, plus DEL 0x7f) 671 - 0x00...0x08, 0x0b, 0x0c, 0x0e...0x1f, 0x7f => { 672 - var buf: [6]u8 = undefined; 673 - _ = std.fmt.bufPrint(&buf, "\\u00{x:0>2}", .{c}) catch unreachable; 674 - try jw.writer.writeAll(&buf); 675 - }, 676 - // non-ASCII bytes (0x80-0xFF) - escape to avoid invalid UTF-8 issues 677 - 0x80...0xff => { 678 - var buf: [6]u8 = undefined; 679 - _ = std.fmt.bufPrint(&buf, "\\u00{x:0>2}", .{c}) catch unreachable; 680 - try jw.writer.writeAll(&buf); 681 - }, 682 - // printable ASCII (0x20-0x7e except " and \) - safe to pass through 683 - else => try jw.writer.writeByte(c), 684 - } 685 - } 686 - try jw.writer.writeByte('"'); 687 - jw.endWriteRaw(); 688 - } 689 - 690 - // tests 691 - 692 - test "buildMetricsJson" { 693 - const allocator = std.testing.allocator; 694 - var config = Config{ 695 - .service_name = "test-service", 696 - .send_to_logfire = .no, 697 - }; 698 - config = config.resolve(); 699 - 700 - var exporter = Exporter.init(allocator, config); 701 - defer exporter.deinit(); 702 - 703 - var dp = metrics_mod.NumberDataPoint{ 704 - .start_time_ns = 1000000000000000000, 705 - .time_ns = 1000000000000000000, 706 - .value = .{ .int = 42 }, 707 - }; 708 - 709 - const metrics = [_]MetricData{ 710 - .{ 711 - .name = "test.counter", 712 - .description = "A test counter", 713 - .unit = "1", 714 - .data = .{ 715 - .sum = .{ 716 - .data_points = @as(*const [1]metrics_mod.NumberDataPoint, &dp), 717 - .temporality = .delta, 718 - .is_monotonic = true, 719 - }, 720 - }, 721 - }, 722 - }; 723 - 724 - const json_out = try exporter.buildMetricsJson(&metrics); 725 - defer allocator.free(json_out); 726 - 727 - // verify structure 728 - try std.testing.expect(std.mem.indexOf(u8, json_out, "test.counter") != null); 729 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\"aggregationTemporality\":1") != null); // delta=1 730 - try std.testing.expect(std.mem.indexOf(u8, json_out, 
"\"isMonotonic\":true") != null); 731 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\"asInt\":42") != null); 732 - } 733 - 734 - test "buildTracesJson" { 735 - const allocator = std.testing.allocator; 736 - var config = Config{ 737 - .service_name = "test-service", 738 - .send_to_logfire = .no, 739 - }; 740 - config = config.resolve(); 741 - 742 - var exporter = Exporter.init(allocator, config); 743 - defer exporter.deinit(); 744 - 745 - const spans = [_]Span.Data{ 746 - .{ 747 - .name = "test.span", 748 - .trace_id = [_]u8{0x01} ** 16, 749 - .span_id = [_]u8{0x02} ** 8, 750 - .start_time_ns = 1000, 751 - .end_time_ns = 2000, 752 - }, 753 - }; 754 - 755 - const json_out = try exporter.buildTracesJson(&spans); 756 - defer allocator.free(json_out); 757 - 758 - try std.testing.expect(std.mem.indexOf(u8, json_out, "test.span") != null); 759 - try std.testing.expect(std.mem.indexOf(u8, json_out, "test-service") != null); 760 - } 761 - 762 - test "buildTracesJson with string attributes" { 763 - const allocator = std.testing.allocator; 764 - var config = Config{ 765 - .service_name = "test-service", 766 - .send_to_logfire = .no, 767 - }; 768 - config = config.resolve(); 769 - 770 - var exporter = Exporter.init(allocator, config); 771 - defer exporter.deinit(); 772 - 773 - // create span with string attributes 774 - var span_data = Span.Data{ 775 - .name = "test.span", 776 - .trace_id = [_]u8{0x01} ** 16, 777 - .span_id = [_]u8{0x02} ** 8, 778 - .start_time_ns = 1000, 779 - .end_time_ns = 2000, 780 - }; 781 - 782 - // add string attributes manually 783 - span_data.attribute_count = Attribute.fromStruct(.{ 784 - .query = "hello world", 785 - .tag = "python", 786 - }, &span_data.attributes); 787 - 788 - const spans = [_]Span.Data{span_data}; 789 - const json_out = try exporter.buildTracesJson(&spans); 790 - defer allocator.free(json_out); 791 - 792 - 793 - // verify attributes are present 794 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\"query\"") != 
null); 795 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\"hello world\"") != null); 796 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\"tag\"") != null); 797 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\"python\"") != null); 798 - 799 - // parse with std.json to verify it's valid JSON 800 - const parsed = std.json.parseFromSlice(std.json.Value, allocator, json_out, .{}) catch |err| { 801 - std.debug.print("JSON parse error: {}\n", .{err}); 802 - return err; 803 - }; 804 - defer parsed.deinit(); 805 - } 806 - 807 - test "buildTracesJson with special characters" { 808 - const allocator = std.testing.allocator; 809 - var config = Config{ 810 - .service_name = "test-service", 811 - .send_to_logfire = .no, 812 - }; 813 - config = config.resolve(); 814 - 815 - var exporter = Exporter.init(allocator, config); 816 - defer exporter.deinit(); 817 - 818 - var span_data = Span.Data{ 819 - .name = "test.span", 820 - .trace_id = [_]u8{0x01} ** 16, 821 - .span_id = [_]u8{0x02} ** 8, 822 - .start_time_ns = 1000, 823 - .end_time_ns = 2000, 824 - }; 825 - 826 - // test various special characters 827 - span_data.attribute_count = Attribute.fromStruct(.{ 828 - .query = "hello \"world\" & <test>", 829 - .unicode = "café résumé", 830 - .control = "tab\there", 831 - .backslash = "path\\to\\file", 832 - .newline = "line1\nline2", 833 - }, &span_data.attributes); 834 - 835 - const spans = [_]Span.Data{span_data}; 836 - const json_out = try exporter.buildTracesJson(&spans); 837 - defer allocator.free(json_out); 838 - 839 - 840 - // verify escapes are present 841 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\\\"world\\\"") != null); // escaped quotes 842 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\\\\to\\\\") != null); // escaped backslashes 843 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\\t") != null); // escaped tab 844 - try std.testing.expect(std.mem.indexOf(u8, json_out, "\\n") != null); // escaped newline 845 - 846 - 
// verify it parses as valid JSON 847 - const parsed = std.json.parseFromSlice(std.json.Value, allocator, json_out, .{}) catch |err| { 848 - std.debug.print("JSON parse error: {}\n", .{err}); 849 - return err; 850 - }; 851 - defer parsed.deinit(); 852 - } 853 - 854 - test "buildTracesJson with raw bytes" { 855 - const allocator = std.testing.allocator; 856 - var config = Config{ 857 - .service_name = "test-service", 858 - .send_to_logfire = .no, 859 - }; 860 - config = config.resolve(); 861 - 862 - var exporter = Exporter.init(allocator, config); 863 - defer exporter.deinit(); 864 - 865 - var span_data = Span.Data{ 866 - .name = "test.span", 867 - .trace_id = [_]u8{0x01} ** 16, 868 - .span_id = [_]u8{0x02} ** 8, 869 - .start_time_ns = 1000, 870 - .end_time_ns = 2000, 871 - }; 872 - 873 - // test raw bytes that might come from malformed URL decoding 874 - // these include invalid UTF-8 sequences 875 - span_data.attribute_count = Attribute.fromStruct(.{ 876 - .query = "test\x80\x81\x82value", // invalid UTF-8 bytes 877 - .ctrl = "null\x00byte", // null byte 878 - }, &span_data.attributes); 879 - 880 - const spans = [_]Span.Data{span_data}; 881 - const json_out = try exporter.buildTracesJson(&spans); 882 - defer allocator.free(json_out); 883 - 884 - 885 - // now that we escape non-ASCII, this should parse successfully 886 - const parsed = std.json.parseFromSlice(std.json.Value, allocator, json_out, .{}) catch |err| { 887 - std.debug.print("JSON parse error: {}\n", .{err}); 888 - return err; 889 - }; 890 - defer parsed.deinit(); 891 - }
-44
src/log.zig
··· 1 - //! structured logging 2 - //! 3 - //! logs are exported as OTLP log records with severity levels. 4 - 5 - const std = @import("std"); 6 - pub const Level = @import("config.zig").Config.Level; 7 - const Attribute = @import("attribute.zig").Attribute; 8 - 9 - pub const LogRecord = struct { 10 - timestamp_ns: i128, 11 - level: Level, 12 - message: []const u8, 13 - trace_id: ?[16]u8, 14 - attributes: [max_attributes]Attribute = undefined, 15 - attribute_count: usize = 0, 16 - 17 - pub const max_attributes = 32; 18 - 19 - pub fn init( 20 - trace_id: ?[16]u8, 21 - level: Level, 22 - message: []const u8, 23 - attrs: anytype, 24 - ) LogRecord { 25 - var record = LogRecord{ 26 - .timestamp_ns = std.time.nanoTimestamp(), 27 - .level = level, 28 - .message = message, 29 - .trace_id = trace_id, 30 - }; 31 - 32 - record.attribute_count = Attribute.fromStruct(attrs, &record.attributes); 33 - 34 - return record; 35 - } 36 - }; 37 - 38 - // tests 39 - 40 - test "LogRecord init" { 41 - const record = LogRecord.init(null, .info, "test message", .{}); 42 - try std.testing.expectEqual(Level.info, record.level); 43 - try std.testing.expectEqualStrings("test message", record.message); 44 - }
-615
src/metrics.zig
··· 1 - //! metrics for observability 2 - //! 3 - //! provides counter, gauge, up-down counter, and histogram instruments 4 - //! matching the OpenTelemetry metrics specification. 5 - //! 6 - //! ## usage 7 - //! 8 - //! ```zig 9 - //! var counter = logfire.u64_counter("requests.total", .{ 10 - //! .description = "total HTTP requests", 11 - //! .unit = "1", 12 - //! }); 13 - //! counter.add(1, &.{.{ .key = "method", .value = .{ .string = "GET" } }}); 14 - //! ``` 15 - 16 - const std = @import("std"); 17 - const Attribute = @import("attribute.zig").Attribute; 18 - 19 - // ============================================================================ 20 - // instrument options 21 - // ============================================================================ 22 - 23 - pub const InstrumentOptions = struct { 24 - description: []const u8 = "", 25 - unit: []const u8 = "", 26 - }; 27 - 28 - pub const HistogramOptions = struct { 29 - description: []const u8 = "", 30 - unit: []const u8 = "", 31 - /// explicit bucket boundaries (defaults to OpenTelemetry default boundaries) 32 - boundaries: ?[]const f64 = null, 33 - }; 34 - 35 - pub const ExponentialHistogramOptions = struct { 36 - description: []const u8 = "", 37 - unit: []const u8 = "", 38 - /// scale factor for exponential buckets (higher = more precision) 39 - scale: i8 = 20, 40 - }; 41 - 42 - // ============================================================================ 43 - // counter - monotonically increasing value 44 - // ============================================================================ 45 - 46 - pub fn Counter(comptime T: type) type { 47 - return struct { 48 - const Self = @This(); 49 - 50 - name: []const u8, 51 - description: []const u8, 52 - unit: []const u8, 53 - value: if (T == u64) std.atomic.Value(u64) else if (T == f64) std.atomic.Value(u64) else @compileError("Counter supports u64 and f64"), 54 - 55 - pub fn init(name: []const u8, opts: InstrumentOptions) Self { 56 - return .{ 57 - .name = name, 
58 - .description = opts.description, 59 - .unit = opts.unit, 60 - .value = @TypeOf(@as(Self, undefined).value).init(0), 61 - }; 62 - } 63 - 64 - pub fn add(self: *Self, delta: T, attributes: []const Attribute) void { 65 - _ = attributes; // TODO: attribute aggregation 66 - if (T == f64) { 67 - // store f64 as bits 68 - const bits: u64 = @bitCast(delta); 69 - const old_bits = self.value.load(.monotonic); 70 - const old_val: f64 = @bitCast(old_bits); 71 - const new_bits: u64 = @bitCast(old_val + delta); 72 - // simple add for now (not atomic for f64, but close enough for metrics) 73 - self.value.store(new_bits, .monotonic); 74 - _ = bits; 75 - } else { 76 - _ = self.value.fetchAdd(delta, .monotonic); 77 - } 78 - } 79 - 80 - pub fn get(self: *const Self) T { 81 - const raw = self.value.load(.monotonic); 82 - if (T == f64) { 83 - return @bitCast(raw); 84 - } 85 - return raw; 86 - } 87 - }; 88 - } 89 - 90 - // ============================================================================ 91 - // gauge - instantaneous value 92 - // ============================================================================ 93 - 94 - pub fn Gauge(comptime T: type) type { 95 - return struct { 96 - const Self = @This(); 97 - 98 - name: []const u8, 99 - description: []const u8, 100 - unit: []const u8, 101 - value: if (T == i64) std.atomic.Value(i64) else if (T == u64) std.atomic.Value(u64) else if (T == f64) std.atomic.Value(u64) else @compileError("Gauge supports i64, u64, and f64"), 102 - 103 - pub fn init(name: []const u8, opts: InstrumentOptions) Self { 104 - return .{ 105 - .name = name, 106 - .description = opts.description, 107 - .unit = opts.unit, 108 - .value = @TypeOf(@as(Self, undefined).value).init(0), 109 - }; 110 - } 111 - 112 - pub fn record(self: *Self, value: T, attributes: []const Attribute) void { 113 - _ = attributes; 114 - if (T == f64) { 115 - self.value.store(@bitCast(value), .monotonic); 116 - } else { 117 - self.value.store(value, .monotonic); 118 - } 119 - } 120 - 
121 - pub fn get(self: *const Self) T { 122 - const raw = self.value.load(.monotonic); 123 - if (T == f64) { 124 - return @bitCast(raw); 125 - } 126 - return raw; 127 - } 128 - }; 129 - } 130 - 131 - // ============================================================================ 132 - // up-down counter - bidirectional counter 133 - // ============================================================================ 134 - 135 - pub fn UpDownCounter(comptime T: type) type { 136 - return struct { 137 - const Self = @This(); 138 - 139 - name: []const u8, 140 - description: []const u8, 141 - unit: []const u8, 142 - value: if (T == i64) std.atomic.Value(i64) else if (T == f64) std.atomic.Value(u64) else @compileError("UpDownCounter supports i64 and f64"), 143 - 144 - pub fn init(name: []const u8, opts: InstrumentOptions) Self { 145 - return .{ 146 - .name = name, 147 - .description = opts.description, 148 - .unit = opts.unit, 149 - .value = @TypeOf(@as(Self, undefined).value).init(0), 150 - }; 151 - } 152 - 153 - pub fn add(self: *Self, delta: T, attributes: []const Attribute) void { 154 - _ = attributes; 155 - if (T == f64) { 156 - const old_bits = self.value.load(.monotonic); 157 - const old_val: f64 = @bitCast(old_bits); 158 - const new_bits: u64 = @bitCast(old_val + delta); 159 - self.value.store(new_bits, .monotonic); 160 - } else { 161 - _ = self.value.fetchAdd(delta, .monotonic); 162 - } 163 - } 164 - 165 - pub fn get(self: *const Self) T { 166 - const raw = self.value.load(.monotonic); 167 - if (T == f64) { 168 - return @bitCast(raw); 169 - } 170 - return raw; 171 - } 172 - }; 173 - } 174 - 175 - // ============================================================================ 176 - // histogram - value distribution with explicit buckets 177 - // ============================================================================ 178 - 179 - pub fn Histogram(comptime T: type) type { 180 - return struct { 181 - const Self = @This(); 182 - 183 - name: []const u8, 184 - 
description: []const u8, 185 - unit: []const u8, 186 - boundaries: []const f64, 187 - counts: []std.atomic.Value(u64), 188 - sum: std.atomic.Value(i64), 189 - count: std.atomic.Value(u64), 190 - min: std.atomic.Value(u64), 191 - max: std.atomic.Value(u64), 192 - allocator: std.mem.Allocator, 193 - 194 - /// OpenTelemetry default bucket boundaries 195 - pub const default_boundaries = [_]f64{ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000 }; 196 - 197 - pub fn init(allocator: std.mem.Allocator, name: []const u8, opts: HistogramOptions) !Self { 198 - const boundaries = opts.boundaries orelse &default_boundaries; 199 - const counts = try allocator.alloc(std.atomic.Value(u64), boundaries.len + 1); 200 - for (counts) |*c| { 201 - c.* = std.atomic.Value(u64).init(0); 202 - } 203 - 204 - return .{ 205 - .name = name, 206 - .description = opts.description, 207 - .unit = opts.unit, 208 - .boundaries = boundaries, 209 - .counts = counts, 210 - .sum = std.atomic.Value(i64).init(0), 211 - .count = std.atomic.Value(u64).init(0), 212 - .min = std.atomic.Value(u64).init(std.math.maxInt(u64)), 213 - .max = std.atomic.Value(u64).init(0), 214 - .allocator = allocator, 215 - }; 216 - } 217 - 218 - pub fn deinit(self: *Self) void { 219 - self.allocator.free(self.counts); 220 - } 221 - 222 - pub fn record(self: *Self, value: T, attributes: []const Attribute) void { 223 - _ = attributes; 224 - const val_f64: f64 = if (T == f64) value else @floatFromInt(value); 225 - 226 - // find bucket 227 - var bucket: usize = self.boundaries.len; 228 - for (self.boundaries, 0..) 
|bound, i| { 229 - if (val_f64 < bound) { 230 - bucket = i; 231 - break; 232 - } 233 - } 234 - 235 - _ = self.counts[bucket].fetchAdd(1, .monotonic); 236 - _ = self.sum.fetchAdd(@intFromFloat(val_f64), .monotonic); 237 - _ = self.count.fetchAdd(1, .monotonic); 238 - 239 - // update min/max 240 - const val_bits: u64 = @bitCast(val_f64); 241 - while (true) { 242 - const old_min = self.min.load(.monotonic); 243 - const old_min_f64: f64 = @bitCast(old_min); 244 - if (val_f64 >= old_min_f64) break; 245 - if (self.min.cmpxchgWeak(old_min, val_bits, .monotonic, .monotonic) == null) break; 246 - } 247 - while (true) { 248 - const old_max = self.max.load(.monotonic); 249 - const old_max_f64: f64 = @bitCast(old_max); 250 - if (val_f64 <= old_max_f64) break; 251 - if (self.max.cmpxchgWeak(old_max, val_bits, .monotonic, .monotonic) == null) break; 252 - } 253 - } 254 - 255 - pub fn getCount(self: *const Self) u64 { 256 - return self.count.load(.monotonic); 257 - } 258 - 259 - pub fn getSum(self: *const Self) f64 { 260 - return @floatFromInt(self.sum.load(.monotonic)); 261 - } 262 - }; 263 - } 264 - 265 - // ============================================================================ 266 - // exponential histogram - Base2 exponential bucket histogram 267 - // ============================================================================ 268 - 269 - pub fn ExponentialHistogram(comptime T: type) type { 270 - return struct { 271 - const Self = @This(); 272 - 273 - name: []const u8, 274 - description: []const u8, 275 - unit: []const u8, 276 - scale: i8, 277 - sum: std.atomic.Value(i64), 278 - count: std.atomic.Value(u64), 279 - zero_count: std.atomic.Value(u64), 280 - min: std.atomic.Value(u64), 281 - max: std.atomic.Value(u64), 282 - // positive bucket counts (simplified - fixed size) 283 - positive_counts: [256]std.atomic.Value(u64), 284 - positive_offset: i32, 285 - 286 - pub fn init(name: []const u8, opts: ExponentialHistogramOptions) Self { 287 - var self = Self{ 288 - .name = 
name, 289 - .description = opts.description, 290 - .unit = opts.unit, 291 - .scale = opts.scale, 292 - .sum = std.atomic.Value(i64).init(0), 293 - .count = std.atomic.Value(u64).init(0), 294 - .zero_count = std.atomic.Value(u64).init(0), 295 - .min = std.atomic.Value(u64).init(std.math.maxInt(u64)), 296 - .max = std.atomic.Value(u64).init(0), 297 - .positive_counts = undefined, 298 - .positive_offset = 0, 299 - }; 300 - for (&self.positive_counts) |*c| { 301 - c.* = std.atomic.Value(u64).init(0); 302 - } 303 - return self; 304 - } 305 - 306 - pub fn record(self: *Self, value: T, attributes: []const Attribute) void { 307 - _ = attributes; 308 - const val_f64: f64 = if (T == f64) value else @floatFromInt(value); 309 - 310 - if (val_f64 == 0) { 311 - _ = self.zero_count.fetchAdd(1, .monotonic); 312 - } else if (val_f64 > 0) { 313 - // compute bucket index using base2 exponential 314 - const scale_factor = std.math.pow(f64, 2, @as(f64, @floatFromInt(self.scale))); 315 - const bucket_idx: i32 = @intFromFloat(@ceil(@log2(val_f64) * scale_factor)); 316 - const adjusted_idx: usize = @intCast(@max(0, @min(255, bucket_idx - self.positive_offset))); 317 - _ = self.positive_counts[adjusted_idx].fetchAdd(1, .monotonic); 318 - } 319 - 320 - _ = self.sum.fetchAdd(@intFromFloat(val_f64), .monotonic); 321 - _ = self.count.fetchAdd(1, .monotonic); 322 - 323 - // update min/max 324 - const val_bits: u64 = @bitCast(val_f64); 325 - while (true) { 326 - const old_min = self.min.load(.monotonic); 327 - const old_min_f64: f64 = @bitCast(old_min); 328 - if (val_f64 >= old_min_f64) break; 329 - if (self.min.cmpxchgWeak(old_min, val_bits, .monotonic, .monotonic) == null) break; 330 - } 331 - while (true) { 332 - const old_max = self.max.load(.monotonic); 333 - const old_max_f64: f64 = @bitCast(old_max); 334 - if (val_f64 <= old_max_f64) break; 335 - if (self.max.cmpxchgWeak(old_max, val_bits, .monotonic, .monotonic) == null) break; 336 - } 337 - } 338 - 339 - pub fn getCount(self: *const 
Self) u64 { 340 - return self.count.load(.monotonic); 341 - } 342 - 343 - pub fn getSum(self: *const Self) f64 { 344 - return @floatFromInt(self.sum.load(.monotonic)); 345 - } 346 - }; 347 - } 348 - 349 - // ============================================================================ 350 - // observable instruments (callback-based) 351 - // ============================================================================ 352 - 353 - pub fn ObservableCallback(comptime T: type) type { 354 - return *const fn (observer: *Observer(T)) void; 355 - } 356 - 357 - pub fn Observer(comptime T: type) type { 358 - return struct { 359 - const Self = @This(); 360 - 361 - value: T = 0, 362 - attributes: [32]Attribute = undefined, 363 - attribute_count: usize = 0, 364 - 365 - pub fn observe(self: *Self, value: T, attributes: []const Attribute) void { 366 - self.value = value; 367 - self.attribute_count = @min(attributes.len, 32); 368 - @memcpy(self.attributes[0..self.attribute_count], attributes[0..self.attribute_count]); 369 - } 370 - }; 371 - } 372 - 373 - pub fn ObservableCounter(comptime T: type) type { 374 - return struct { 375 - const Self = @This(); 376 - 377 - name: []const u8, 378 - description: []const u8, 379 - unit: []const u8, 380 - callback: ObservableCallback(T), 381 - 382 - pub fn init(name: []const u8, opts: InstrumentOptions, callback: ObservableCallback(T)) Self { 383 - return .{ 384 - .name = name, 385 - .description = opts.description, 386 - .unit = opts.unit, 387 - .callback = callback, 388 - }; 389 - } 390 - 391 - pub fn observe(self: *const Self) Observer(T) { 392 - var observer = Observer(T){}; 393 - self.callback(&observer); 394 - return observer; 395 - } 396 - }; 397 - } 398 - 399 - pub fn ObservableGauge(comptime T: type) type { 400 - return struct { 401 - const Self = @This(); 402 - 403 - name: []const u8, 404 - description: []const u8, 405 - unit: []const u8, 406 - callback: ObservableCallback(T), 407 - 408 - pub fn init(name: []const u8, opts: 
InstrumentOptions, callback: ObservableCallback(T)) Self { 409 - return .{ 410 - .name = name, 411 - .description = opts.description, 412 - .unit = opts.unit, 413 - .callback = callback, 414 - }; 415 - } 416 - 417 - pub fn observe(self: *const Self) Observer(T) { 418 - var observer = Observer(T){}; 419 - self.callback(&observer); 420 - return observer; 421 - } 422 - }; 423 - } 424 - 425 - pub fn ObservableUpDownCounter(comptime T: type) type { 426 - return struct { 427 - const Self = @This(); 428 - 429 - name: []const u8, 430 - description: []const u8, 431 - unit: []const u8, 432 - callback: ObservableCallback(T), 433 - 434 - pub fn init(name: []const u8, opts: InstrumentOptions, callback: ObservableCallback(T)) Self { 435 - return .{ 436 - .name = name, 437 - .description = opts.description, 438 - .unit = opts.unit, 439 - .callback = callback, 440 - }; 441 - } 442 - 443 - pub fn observe(self: *const Self) Observer(T) { 444 - var observer = Observer(T){}; 445 - self.callback(&observer); 446 - return observer; 447 - } 448 - }; 449 - } 450 - 451 - // ============================================================================ 452 - // convenience type aliases matching Rust API 453 - // ============================================================================ 454 - 455 - pub const U64Counter = Counter(u64); 456 - pub const F64Counter = Counter(f64); 457 - 458 - pub const I64Gauge = Gauge(i64); 459 - pub const U64Gauge = Gauge(u64); 460 - pub const F64Gauge = Gauge(f64); 461 - 462 - pub const I64UpDownCounter = UpDownCounter(i64); 463 - pub const F64UpDownCounter = UpDownCounter(f64); 464 - 465 - pub const U64Histogram = Histogram(u64); 466 - pub const F64Histogram = Histogram(f64); 467 - 468 - pub const U64ExponentialHistogram = ExponentialHistogram(u64); 469 - pub const F64ExponentialHistogram = ExponentialHistogram(f64); 470 - 471 - pub const U64ObservableCounter = ObservableCounter(u64); 472 - pub const F64ObservableCounter = ObservableCounter(f64); 473 - 474 - 
pub const I64ObservableGauge = ObservableGauge(i64); 475 - pub const U64ObservableGauge = ObservableGauge(u64); 476 - pub const F64ObservableGauge = ObservableGauge(f64); 477 - 478 - pub const I64ObservableUpDownCounter = ObservableUpDownCounter(i64); 479 - pub const F64ObservableUpDownCounter = ObservableUpDownCounter(f64); 480 - 481 - // ============================================================================ 482 - // metric data for export 483 - // ============================================================================ 484 - 485 - /// aggregation temporality for metric export 486 - pub const AggregationTemporality = enum(u8) { 487 - unspecified = 0, 488 - delta = 1, 489 - cumulative = 2, 490 - }; 491 - 492 - /// data point for sum/gauge metrics 493 - pub const NumberDataPoint = struct { 494 - start_time_ns: i128, 495 - time_ns: i128, 496 - value: union(enum) { 497 - int: i64, 498 - double: f64, 499 - }, 500 - attributes: []const Attribute = &.{}, 501 - }; 502 - 503 - /// data point for histogram metrics 504 - pub const HistogramDataPoint = struct { 505 - start_time_ns: i128, 506 - time_ns: i128, 507 - count: u64, 508 - sum: f64, 509 - bucket_counts: []const u64, 510 - explicit_bounds: []const f64, 511 - min: f64, 512 - max: f64, 513 - attributes: []const Attribute = &.{}, 514 - }; 515 - 516 - /// data point for exponential histogram metrics 517 - pub const ExponentialHistogramDataPoint = struct { 518 - start_time_ns: i128, 519 - time_ns: i128, 520 - count: u64, 521 - sum: f64, 522 - scale: i8, 523 - zero_count: u64, 524 - positive_offset: i32, 525 - positive_bucket_counts: []const u64, 526 - min: f64, 527 - max: f64, 528 - attributes: []const Attribute = &.{}, 529 - }; 530 - 531 - /// metric data for export 532 - pub const MetricData = struct { 533 - name: []const u8, 534 - description: []const u8 = "", 535 - unit: []const u8 = "", 536 - data: union(enum) { 537 - sum: struct { 538 - data_points: []const NumberDataPoint, 539 - temporality: 
AggregationTemporality = .cumulative, 540 - is_monotonic: bool = true, 541 - }, 542 - gauge: struct { 543 - data_points: []const NumberDataPoint, 544 - }, 545 - histogram: struct { 546 - data_points: []const HistogramDataPoint, 547 - temporality: AggregationTemporality = .cumulative, 548 - }, 549 - exponential_histogram: struct { 550 - data_points: []const ExponentialHistogramDataPoint, 551 - temporality: AggregationTemporality = .cumulative, 552 - }, 553 - }, 554 - }; 555 - 556 - // ============================================================================ 557 - // tests 558 - // ============================================================================ 559 - 560 - test "u64 counter" { 561 - var counter = U64Counter.init("test.counter", .{ .description = "test counter" }); 562 - counter.add(5, &.{}); 563 - counter.add(3, &.{}); 564 - try std.testing.expectEqual(@as(u64, 8), counter.get()); 565 - } 566 - 567 - test "i64 gauge" { 568 - var gauge = I64Gauge.init("test.gauge", .{}); 569 - gauge.record(42, &.{}); 570 - try std.testing.expectEqual(@as(i64, 42), gauge.get()); 571 - gauge.record(-10, &.{}); 572 - try std.testing.expectEqual(@as(i64, -10), gauge.get()); 573 - } 574 - 575 - test "i64 up down counter" { 576 - var counter = I64UpDownCounter.init("test.updown", .{}); 577 - counter.add(5, &.{}); 578 - counter.add(-3, &.{}); 579 - try std.testing.expectEqual(@as(i64, 2), counter.get()); 580 - } 581 - 582 - test "f64 histogram" { 583 - var histogram = try F64Histogram.init(std.testing.allocator, "test.histogram", .{ 584 - .boundaries = &[_]f64{ 10, 50, 100 }, 585 - }); 586 - defer histogram.deinit(); 587 - 588 - histogram.record(5.0, &.{}); // bucket 0 589 - histogram.record(25.0, &.{}); // bucket 1 590 - histogram.record(75.0, &.{}); // bucket 2 591 - histogram.record(200.0, &.{}); // bucket 3 (overflow) 592 - 593 - try std.testing.expectEqual(@as(u64, 4), histogram.getCount()); 594 - } 595 - 596 - test "exponential histogram" { 597 - var histogram = 
U64ExponentialHistogram.init("test.exp_histogram", .{ .scale = 10 }); 598 - histogram.record(1, &.{}); 599 - histogram.record(10, &.{}); 600 - histogram.record(100, &.{}); 601 - 602 - try std.testing.expectEqual(@as(u64, 3), histogram.getCount()); 603 - } 604 - 605 - test "observable gauge" { 606 - const callback = struct { 607 - fn cb(observer: *Observer(i64)) void { 608 - observer.observe(42, &.{}); 609 - } 610 - }.cb; 611 - 612 - const gauge = I64ObservableGauge.init("test.observable", .{}, callback); 613 - const result = gauge.observe(); 614 - try std.testing.expectEqual(@as(i64, 42), result.value); 615 - }
-536
src/root.zig
··· 1 - //! logfire-zig: zig SDK for pydantic logfire 2 - //! 3 - //! a lightweight observability client that sends traces, logs, and metrics 4 - //! to logfire (or any OTLP-compatible backend) via HTTP/JSON. 5 - //! 6 - //! ## quick start 7 - //! 8 - //! ```zig 9 - //! const logfire = @import("logfire"); 10 - //! 11 - //! pub fn main() !void { 12 - //! var lf = try logfire.configure(.{ 13 - //! .service_name = "my-service", 14 - //! }); 15 - //! defer lf.shutdown(); 16 - //! 17 - //! logfire.info("hello from zig", .{}); 18 - //! 19 - //! const s = logfire.span("do_work", .{ .item_count = 42 }); 20 - //! defer s.end(); 21 - //! // ... do work 22 - //! } 23 - //! ``` 24 - 25 - const std = @import("std"); 26 - 27 - pub const Config = @import("config.zig").Config; 28 - pub const Span = @import("span.zig").Span; 29 - pub const Exporter = @import("exporter.zig").Exporter; 30 - pub const Attribute = @import("attribute.zig").Attribute; 31 - 32 - const metrics_mod = @import("metrics.zig"); 33 - pub const Counter = metrics_mod.Counter; 34 - pub const Gauge = metrics_mod.Gauge; 35 - pub const UpDownCounter = metrics_mod.UpDownCounter; 36 - pub const Histogram = metrics_mod.Histogram; 37 - pub const ExponentialHistogram = metrics_mod.ExponentialHistogram; 38 - pub const MetricData = metrics_mod.MetricData; 39 - pub const NumberDataPoint = metrics_mod.NumberDataPoint; 40 - pub const HistogramDataPoint = metrics_mod.HistogramDataPoint; 41 - pub const ExponentialHistogramDataPoint = metrics_mod.ExponentialHistogramDataPoint; 42 - pub const InstrumentOptions = metrics_mod.InstrumentOptions; 43 - pub const HistogramOptions = metrics_mod.HistogramOptions; 44 - pub const AggregationTemporality = metrics_mod.AggregationTemporality; 45 - 46 - const log_mod = @import("log.zig"); 47 - pub const Level = log_mod.Level; 48 - pub const LogRecord = log_mod.LogRecord; 49 - 50 - /// global logfire instance (set after configure()) 51 - var global_instance: ?*Logfire = null; 52 - var 
global_mutex: std.Thread.Mutex = .{}; 53 - 54 - /// thread-local trace context - each thread gets its own trace 55 - threadlocal var tl_trace_id: ?[16]u8 = null; 56 - threadlocal var tl_current_span_id: ?[8]u8 = null; 57 - threadlocal var tl_active_span_count: u32 = 0; 58 - 59 - pub const Logfire = struct { 60 - allocator: std.mem.Allocator, 61 - config: Config, 62 - exporter: Exporter, 63 - 64 - /// pending spans/logs/metrics waiting to be exported 65 - pending_spans: std.ArrayList(Span.Data), 66 - pending_logs: std.ArrayList(LogRecord), 67 - pending_metrics: std.ArrayList(MetricData), 68 - pending_mutex: std.Thread.Mutex, 69 - 70 - /// allocated data points (freed on flush) 71 - allocated_data_points: std.ArrayList(*NumberDataPoint), 72 - 73 - /// span ID counter (global, just needs to be unique) 74 - span_id_counter: std.atomic.Value(u64), 75 - 76 - /// background flush thread 77 - flush_thread: ?std.Thread, 78 - running: std.atomic.Value(bool), 79 - 80 - pub fn init(allocator: std.mem.Allocator, config: Config) !*Logfire { 81 - const resolved = config.resolve(); 82 - 83 - const self = try allocator.create(Logfire); 84 - self.* = .{ 85 - .allocator = allocator, 86 - .config = resolved, 87 - .exporter = Exporter.init(allocator, resolved), 88 - .pending_spans = .{}, 89 - .pending_logs = .{}, 90 - .pending_metrics = .{}, 91 - .pending_mutex = .{}, 92 - .allocated_data_points = .{}, 93 - .span_id_counter = std.atomic.Value(u64).init(1), 94 - .flush_thread = null, 95 - .running = std.atomic.Value(bool).init(true), 96 - }; 97 - 98 - // start background flush thread 99 - if (resolved.batch_timeout_ms > 0) { 100 - self.flush_thread = std.Thread.spawn(.{}, flushLoop, .{self}) catch null; 101 - } 102 - 103 - return self; 104 - } 105 - 106 - fn flushLoop(self: *Logfire) void { 107 - const interval_ns = self.config.batch_timeout_ms * std.time.ns_per_ms; 108 - while (self.running.load(.acquire)) { 109 - std.Thread.sleep(interval_ns); 110 - if (!self.running.load(.acquire)) 
break; 111 - self.flush() catch |e| { 112 - std.log.warn("logfire: background flush failed: {}", .{e}); 113 - }; 114 - } 115 - } 116 - 117 - pub fn deinit(self: *Logfire) void { 118 - // free any remaining allocated data points 119 - for (self.allocated_data_points.items) |dp| { 120 - self.allocator.destroy(dp); 121 - } 122 - self.allocated_data_points.deinit(self.allocator); 123 - self.pending_spans.deinit(self.allocator); 124 - self.pending_logs.deinit(self.allocator); 125 - self.pending_metrics.deinit(self.allocator); 126 - self.exporter.deinit(); 127 - self.allocator.destroy(self); 128 - } 129 - 130 - pub fn shutdown(self: *Logfire) void { 131 - // stop flush thread 132 - self.running.store(false, .release); 133 - if (self.flush_thread) |t| { 134 - t.join(); 135 - } 136 - 137 - // final flush 138 - self.flush() catch |e| { 139 - std.log.warn("logfire: flush failed during shutdown: {}", .{e}); 140 - }; 141 - 142 - global_mutex.lock(); 143 - defer global_mutex.unlock(); 144 - if (global_instance == self) { 145 - global_instance = null; 146 - } 147 - 148 - self.deinit(); 149 - } 150 - 151 - pub fn flush(self: *Logfire) !void { 152 - self.pending_mutex.lock(); 153 - defer self.pending_mutex.unlock(); 154 - 155 - if (self.pending_spans.items.len > 0 or self.pending_logs.items.len > 0 or self.pending_metrics.items.len > 0) { 156 - try self.exporter.sendAll( 157 - self.pending_spans.items, 158 - self.pending_logs.items, 159 - self.pending_metrics.items, 160 - ); 161 - self.pending_spans.clearRetainingCapacity(); 162 - self.pending_logs.clearRetainingCapacity(); 163 - self.pending_metrics.clearRetainingCapacity(); 164 - 165 - // free allocated data points 166 - for (self.allocated_data_points.items) |dp| { 167 - self.allocator.destroy(dp); 168 - } 169 - self.allocated_data_points.clearRetainingCapacity(); 170 - } 171 - } 172 - 173 - pub fn createSpan(self: *Logfire, name: []const u8, attributes: anytype) Span { 174 - // generate new trace ID if no active spans on this 
thread 175 - if (tl_active_span_count == 0) { 176 - tl_trace_id = generateTraceId(); 177 - } 178 - tl_active_span_count += 1; 179 - 180 - // capture parent span ID before creating new span 181 - const parent_span_id = tl_current_span_id; 182 - const span_id = self.span_id_counter.fetchAdd(1, .monotonic); 183 - 184 - // encode span ID 185 - var span_id_bytes: [8]u8 = undefined; 186 - std.mem.writeInt(u64, &span_id_bytes, span_id, .big); 187 - 188 - // update current span ID for this thread 189 - tl_current_span_id = span_id_bytes; 190 - 191 - return Span.init(self, name, span_id_bytes, tl_trace_id, parent_span_id, attributes); 192 - } 193 - 194 - /// called when a span ends to restore parent context 195 - pub fn spanEnded(self: *Logfire, parent_span_id: ?[8]u8) void { 196 - _ = self; 197 - if (tl_active_span_count > 0) { 198 - tl_active_span_count -= 1; 199 - } 200 - // restore parent as current span 201 - tl_current_span_id = parent_span_id; 202 - } 203 - 204 - /// start a new trace (generates new trace ID for this thread) 205 - pub fn newTrace(self: *Logfire) void { 206 - _ = self; 207 - tl_trace_id = generateTraceId(); 208 - } 209 - 210 - /// get the current thread's trace ID 211 - pub fn currentTraceId() ?[16]u8 { 212 - return tl_trace_id; 213 - } 214 - 215 - pub fn recordLog(self: *Logfire, level: Level, message: []const u8, attributes: anytype) void { 216 - const record = LogRecord.init( 217 - tl_trace_id, 218 - level, 219 - message, 220 - attributes, 221 - ); 222 - 223 - self.pending_mutex.lock(); 224 - defer self.pending_mutex.unlock(); 225 - self.pending_logs.append(self.allocator, record) catch { 226 - std.log.warn("logfire: failed to record log", .{}); 227 - }; 228 - } 229 - 230 - pub fn recordSpanEnd(self: *Logfire, data: Span.Data) void { 231 - self.pending_mutex.lock(); 232 - defer self.pending_mutex.unlock(); 233 - self.pending_spans.append(self.allocator, data) catch { 234 - std.log.warn("logfire: failed to record span", .{}); 235 - }; 236 - } 237 - 
238 - pub fn recordMetric(self: *Logfire, data: MetricData) void { 239 - self.pending_mutex.lock(); 240 - defer self.pending_mutex.unlock(); 241 - self.pending_metrics.append(self.allocator, data) catch { 242 - std.log.warn("logfire: failed to record metric", .{}); 243 - }; 244 - } 245 - 246 - /// record a counter value (monotonic sum, delta temporality) 247 - pub fn recordCounter(self: *Logfire, name: []const u8, value: i64, opts: InstrumentOptions) void { 248 - const now = std.time.nanoTimestamp(); 249 - const dp = self.allocator.create(NumberDataPoint) catch return; 250 - dp.* = .{ 251 - .start_time_ns = now, 252 - .time_ns = now, 253 - .value = .{ .int = value }, 254 - }; 255 - 256 - self.pending_mutex.lock(); 257 - defer self.pending_mutex.unlock(); 258 - 259 - // track allocation for cleanup 260 - self.allocated_data_points.append(self.allocator, dp) catch { 261 - self.allocator.destroy(dp); 262 - return; 263 - }; 264 - 265 - self.pending_metrics.append(self.allocator, .{ 266 - .name = name, 267 - .description = opts.description, 268 - .unit = opts.unit, 269 - .data = .{ 270 - .sum = .{ 271 - .data_points = @as(*const [1]NumberDataPoint, dp), 272 - .temporality = .delta, // delta since we send individual increments 273 - .is_monotonic = true, 274 - }, 275 - }, 276 - }) catch {}; 277 - } 278 - 279 - /// record a gauge value (instantaneous) 280 - pub fn recordGaugeInt(self: *Logfire, name: []const u8, value: i64, opts: InstrumentOptions) void { 281 - const now = std.time.nanoTimestamp(); 282 - const dp = self.allocator.create(NumberDataPoint) catch return; 283 - dp.* = .{ 284 - .start_time_ns = now, 285 - .time_ns = now, 286 - .value = .{ .int = value }, 287 - }; 288 - 289 - self.pending_mutex.lock(); 290 - defer self.pending_mutex.unlock(); 291 - 292 - // track allocation for cleanup 293 - self.allocated_data_points.append(self.allocator, dp) catch { 294 - self.allocator.destroy(dp); 295 - return; 296 - }; 297 - 298 - 
self.pending_metrics.append(self.allocator, .{ 299 - .name = name, 300 - .description = opts.description, 301 - .unit = opts.unit, 302 - .data = .{ 303 - .gauge = .{ 304 - .data_points = @as(*const [1]NumberDataPoint, dp), 305 - }, 306 - }, 307 - }) catch {}; 308 - } 309 - 310 - /// record a gauge value (instantaneous, f64) 311 - pub fn recordGaugeDouble(self: *Logfire, name: []const u8, value: f64, opts: InstrumentOptions) void { 312 - const now = std.time.nanoTimestamp(); 313 - const dp = self.allocator.create(NumberDataPoint) catch return; 314 - dp.* = .{ 315 - .start_time_ns = now, 316 - .time_ns = now, 317 - .value = .{ .double = value }, 318 - }; 319 - 320 - self.pending_mutex.lock(); 321 - defer self.pending_mutex.unlock(); 322 - 323 - // track allocation for cleanup 324 - self.allocated_data_points.append(self.allocator, dp) catch { 325 - self.allocator.destroy(dp); 326 - return; 327 - }; 328 - 329 - self.pending_metrics.append(self.allocator, .{ 330 - .name = name, 331 - .description = opts.description, 332 - .unit = opts.unit, 333 - .data = .{ 334 - .gauge = .{ 335 - .data_points = @as(*const [1]NumberDataPoint, dp), 336 - }, 337 - }, 338 - }) catch {}; 339 - } 340 - 341 - fn generateTraceId() [16]u8 { 342 - var id: [16]u8 = undefined; 343 - std.crypto.random.bytes(&id); 344 - return id; 345 - } 346 - }; 347 - 348 - /// configure logfire with the given options 349 - pub fn configure(options: Config) !*Logfire { 350 - const allocator = std.heap.page_allocator; 351 - const instance = try Logfire.init(allocator, options); 352 - 353 - global_mutex.lock(); 354 - defer global_mutex.unlock(); 355 - global_instance = instance; 356 - 357 - return instance; 358 - } 359 - 360 - /// get the global logfire instance 361 - pub fn getInstance() ?*Logfire { 362 - global_mutex.lock(); 363 - defer global_mutex.unlock(); 364 - return global_instance; 365 - } 366 - 367 - // convenience functions that use the global instance 368 - 369 - pub fn span(name: []const u8, 
attributes: anytype) Span { 370 - if (getInstance()) |lf| { 371 - return lf.createSpan(name, attributes); 372 - } 373 - return Span.noop(); 374 - } 375 - 376 - /// start a new trace (generates new trace ID for subsequent spans) 377 - pub fn newTrace() void { 378 - if (getInstance()) |lf| { 379 - lf.newTrace(); 380 - } 381 - } 382 - 383 - pub fn trace(comptime fmt: []const u8, args: anytype) void { 384 - logWithLevel(.trace, fmt, args); 385 - } 386 - 387 - pub fn debug(comptime fmt: []const u8, args: anytype) void { 388 - logWithLevel(.debug, fmt, args); 389 - } 390 - 391 - pub fn info(comptime fmt: []const u8, args: anytype) void { 392 - logWithLevel(.info, fmt, args); 393 - } 394 - 395 - pub fn warn(comptime fmt: []const u8, args: anytype) void { 396 - logWithLevel(.warn, fmt, args); 397 - } 398 - 399 - pub fn err(comptime fmt: []const u8, args: anytype) void { 400 - logWithLevel(.err, fmt, args); 401 - } 402 - 403 - fn logWithLevel(level: Level, comptime fmt: []const u8, args: anytype) void { 404 - if (getInstance()) |lf| { 405 - var buf: [4096]u8 = undefined; 406 - const message = std.fmt.bufPrint(&buf, fmt, args) catch fmt; 407 - // dupe the message since buf is on the stack 408 - const owned_message = lf.allocator.dupe(u8, message) catch return; 409 - lf.recordLog(level, owned_message, .{}); 410 - } 411 - } 412 - 413 - // metric convenience functions 414 - 415 - pub fn counter(name: []const u8, value: i64) void { 416 - if (getInstance()) |lf| { 417 - lf.recordCounter(name, value, .{}); 418 - } 419 - } 420 - 421 - pub fn counterWithOpts(name: []const u8, value: i64, opts: InstrumentOptions) void { 422 - if (getInstance()) |lf| { 423 - lf.recordCounter(name, value, opts); 424 - } 425 - } 426 - 427 - pub fn gaugeInt(name: []const u8, value: i64) void { 428 - if (getInstance()) |lf| { 429 - lf.recordGaugeInt(name, value, .{}); 430 - } 431 - } 432 - 433 - pub fn gaugeDouble(name: []const u8, value: f64) void { 434 - if (getInstance()) |lf| { 435 - 
lf.recordGaugeDouble(name, value, .{}); 436 - } 437 - } 438 - 439 - pub fn metric(data: MetricData) void { 440 - if (getInstance()) |lf| { 441 - lf.recordMetric(data); 442 - } 443 - } 444 - 445 - // tests 446 - 447 - test "basic configuration" { 448 - const lf = try configure(.{ 449 - .service_name = "test-service", 450 - .send_to_logfire = .no, 451 - }); 452 - defer lf.shutdown(); 453 - 454 - try std.testing.expect(getInstance() != null); 455 - } 456 - 457 - test "span creation" { 458 - const lf = try configure(.{ 459 - .service_name = "test-service", 460 - .send_to_logfire = .no, 461 - }); 462 - defer lf.shutdown(); 463 - 464 - const s = span("test.operation", .{}); 465 - defer s.end(); 466 - 467 - try std.testing.expect(s.active); 468 - } 469 - 470 - test "logging" { 471 - const lf = try configure(.{ 472 - .service_name = "test-service", 473 - .send_to_logfire = .no, 474 - }); 475 - defer lf.shutdown(); 476 - 477 - info("test message with {d}", .{42}); 478 - 479 - try std.testing.expectEqual(@as(usize, 1), lf.pending_logs.items.len); 480 - } 481 - 482 - test "metrics recording" { 483 - const lf = try configure(.{ 484 - .service_name = "test-service", 485 - .send_to_logfire = .no, 486 - }); 487 - defer lf.shutdown(); 488 - 489 - counter("requests.total", 1); 490 - gaugeInt("connections.active", 42); 491 - 492 - try std.testing.expectEqual(@as(usize, 2), lf.pending_metrics.items.len); 493 - } 494 - 495 - test "parent-child span linking" { 496 - const lf = try configure(.{ 497 - .service_name = "test-service", 498 - .send_to_logfire = .no, 499 - }); 500 - defer lf.shutdown(); 501 - 502 - // create parent span 503 - const parent = span("parent.operation", .{}); 504 - const parent_span_id = parent.data.span_id; 505 - const parent_trace_id = parent.data.trace_id; 506 - 507 - // parent should have no parent 508 - try std.testing.expect(parent.data.parent_span_id == null); 509 - 510 - // create child span while parent is active 511 - const child = 
span("child.operation", .{}); 512 - 513 - // child should have parent's span_id as parent_span_id 514 - try std.testing.expect(child.data.parent_span_id != null); 515 - try std.testing.expectEqualSlices(u8, &parent_span_id, &child.data.parent_span_id.?); 516 - 517 - // child should share parent's trace_id 518 - try std.testing.expectEqualSlices(u8, &parent_trace_id, &child.data.trace_id); 519 - 520 - // end spans (child first due to defer order) 521 - child.end(); 522 - parent.end(); 523 - 524 - // should have recorded 2 spans 525 - try std.testing.expectEqual(@as(usize, 2), lf.pending_spans.items.len); 526 - } 527 - 528 - // re-export tests from submodules 529 - test { 530 - _ = @import("config.zig"); 531 - _ = @import("exporter.zig"); 532 - _ = @import("span.zig"); 533 - _ = @import("log.zig"); 534 - _ = @import("attribute.zig"); 535 - _ = @import("metrics.zig"); 536 - }
-91
src/span.zig
··· 1 - //! span tracking for distributed tracing 2 - //! 3 - //! spans measure the duration of operations. 4 - //! use defer to ensure spans are ended even on error paths. 5 - //! 6 - //! ## usage 7 - //! 8 - //! ```zig 9 - //! const s = logfire.span("db.query", .{}); 10 - //! defer s.end(); 11 - //! // ... do work 12 - //! ``` 13 - 14 - const std = @import("std"); 15 - const root = @import("root.zig"); 16 - const Attribute = @import("attribute.zig").Attribute; 17 - 18 - pub const Span = struct { 19 - logfire: ?*root.Logfire, 20 - data: Data, 21 - active: bool, 22 - 23 - pub const max_attributes = 32; 24 - 25 - pub const Data = struct { 26 - name: []const u8, 27 - trace_id: [16]u8, 28 - span_id: [8]u8, 29 - parent_span_id: ?[8]u8 = null, 30 - start_time_ns: i128, 31 - end_time_ns: i128, 32 - attributes: [max_attributes]Attribute = undefined, 33 - attribute_count: usize = 0, 34 - }; 35 - 36 - /// create a span (called by Logfire.createSpan) 37 - pub fn init(logfire: *root.Logfire, name: []const u8, span_id: [8]u8, trace_id: ?[16]u8, parent_span_id: ?[8]u8, attrs: anytype) Span { 38 - var s = Span{ 39 - .logfire = logfire, 40 - .data = .{ 41 - .name = name, 42 - .trace_id = trace_id orelse [_]u8{0} ** 16, 43 - .span_id = span_id, 44 - .parent_span_id = parent_span_id, 45 - .start_time_ns = std.time.nanoTimestamp(), 46 - .end_time_ns = 0, 47 - }, 48 - .active = true, 49 - }; 50 - 51 - // store attributes 52 - s.data.attribute_count = Attribute.fromStruct(attrs, &s.data.attributes); 53 - 54 - return s; 55 - } 56 - 57 - /// create a no-op span (when logfire not configured) 58 - pub fn noop() Span { 59 - return .{ 60 - .logfire = null, 61 - .data = .{ 62 - .name = "", 63 - .trace_id = [_]u8{0} ** 16, 64 - .span_id = [_]u8{0} ** 8, 65 - .start_time_ns = 0, 66 - .end_time_ns = 0, 67 - }, 68 - .active = false, 69 - }; 70 - } 71 - 72 - /// end the span and record it 73 - pub fn end(self: *const Span) void { 74 - if (!self.active) return; 75 - if (self.logfire) |lf| { 76 - 
var data = self.data; 77 - data.end_time_ns = std.time.nanoTimestamp(); 78 - lf.recordSpanEnd(data); 79 - lf.spanEnded(self.data.parent_span_id); 80 - } 81 - } 82 - }; 83 - 84 - // tests 85 - 86 - test "span lifecycle" { 87 - const s = Span.noop(); 88 - defer s.end(); 89 - 90 - try std.testing.expect(!s.active); 91 - }