tangled
alpha
login
or
join now
atscan.net
/
plcbundle-go
1
fork
atom
[DEPRECATED] Go implementation of plcbundle
1
fork
atom
overview
issues
pulls
pipelines
lot of new tests
tree.fail
4 months ago
f5f4282b
52d734d4
+4820
-5
11 changed files
expand all
collapse all
unified
split
Makefile
cmd
plcbundle
commands
index.go
internal
bundleindex
index_test.go
mempool
mempool_test.go
storage
storage_test.go
sync
sync_test.go
types
types_test.go
server
helpers_test.go
server.go
server_test.go
types_test.go
+14
-4
Makefile
···
28
28
GOFMT=$(GOCMD) fmt
29
29
GOMOD=$(GOCMD) mod
30
30
31
31
+
# Test runner - auto-detect gotestsum
32
32
+
GOTESTSUM := $(shell command -v gotestsum 2> /dev/null)
33
33
+
31
34
# Build flags
32
35
LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.gitCommit=$(GIT_COMMIT) -X main.buildDate=$(BUILD_DATE)"
33
36
···
46
49
47
50
# Run tests
48
51
test:
49
49
-
@echo "Running tests..."
50
50
-
$(GOTEST) -v ./...
52
52
+
ifdef GOTESTSUM
53
53
+
@gotestsum -- ./...
54
54
+
else
55
55
+
@echo "Running tests (install gotestsum for better output: go install gotest.tools/gotestsum@latest)"
56
56
+
@$(GOTEST) -v ./...
57
57
+
endif
51
58
52
59
# Run tests with coverage
53
60
test-coverage:
54
54
-
@echo "Running tests with coverage..."
55
55
-
$(GOTEST) -v -cover ./...
61
61
+
ifdef GOTESTSUM
62
62
+
@gotestsum --format testname -- -cover ./...
63
63
+
else
64
64
+
@$(GOTEST) -v -cover ./...
65
65
+
endif
56
66
57
67
# Clean build artifacts
58
68
clean:
-1
cmd/plcbundle/commands/index.go
···
1
1
-
// repo/cmd/plcbundle/commands/index.go
2
1
package commands
3
2
4
3
import (
+844
internal/bundleindex/index_test.go
···
1
1
+
package bundleindex_test
2
2
+
3
3
+
import (
4
4
+
"fmt"
5
5
+
"os"
6
6
+
"path/filepath"
7
7
+
"sync"
8
8
+
"testing"
9
9
+
"time"
10
10
+
11
11
+
"tangled.org/atscan.net/plcbundle/internal/bundleindex"
12
12
+
"tangled.org/atscan.net/plcbundle/internal/types"
13
13
+
)
14
14
+
15
15
+
type testLogger struct {
16
16
+
t *testing.T
17
17
+
}
18
18
+
19
19
+
func (l *testLogger) Printf(format string, v ...interface{}) {
20
20
+
l.t.Logf(format, v...)
21
21
+
}
22
22
+
23
23
+
func (l *testLogger) Println(v ...interface{}) {
24
24
+
l.t.Log(v...)
25
25
+
}
26
26
+
27
27
+
// ====================================================================================
28
28
+
// INDEX CREATION & BASIC OPERATIONS
29
29
+
// ====================================================================================
30
30
+
31
31
+
func TestIndexCreation(t *testing.T) {
32
32
+
t.Run("NewIndex", func(t *testing.T) {
33
33
+
idx := bundleindex.NewIndex("https://plc.directory")
34
34
+
35
35
+
if idx == nil {
36
36
+
t.Fatal("NewIndex returned nil")
37
37
+
}
38
38
+
39
39
+
if idx.Version != types.INDEX_VERSION {
40
40
+
t.Errorf("version mismatch: got %s, want %s", idx.Version, types.INDEX_VERSION)
41
41
+
}
42
42
+
43
43
+
if idx.Origin != "https://plc.directory" {
44
44
+
t.Errorf("origin mismatch: got %s", idx.Origin)
45
45
+
}
46
46
+
47
47
+
if idx.Count() != 0 {
48
48
+
t.Error("new index should be empty")
49
49
+
}
50
50
+
})
51
51
+
52
52
+
t.Run("NewIndex_EmptyOrigin", func(t *testing.T) {
53
53
+
idx := bundleindex.NewIndex("")
54
54
+
55
55
+
if idx.Origin != "" {
56
56
+
t.Error("should allow empty origin")
57
57
+
}
58
58
+
})
59
59
+
}
60
60
+
61
61
+
func TestIndexAddBundle(t *testing.T) {
62
62
+
t.Run("AddSingleBundle", func(t *testing.T) {
63
63
+
idx := bundleindex.NewIndex("test-origin")
64
64
+
65
65
+
meta := &bundleindex.BundleMetadata{
66
66
+
BundleNumber: 1,
67
67
+
StartTime: time.Now(),
68
68
+
EndTime: time.Now().Add(time.Hour),
69
69
+
OperationCount: types.BUNDLE_SIZE,
70
70
+
DIDCount: 1000,
71
71
+
Hash: "hash123",
72
72
+
ContentHash: "content123",
73
73
+
CompressedHash: "compressed123",
74
74
+
CompressedSize: 1024,
75
75
+
UncompressedSize: 5120,
76
76
+
}
77
77
+
78
78
+
idx.AddBundle(meta)
79
79
+
80
80
+
if idx.Count() != 1 {
81
81
+
t.Errorf("count should be 1, got %d", idx.Count())
82
82
+
}
83
83
+
84
84
+
retrieved, err := idx.GetBundle(1)
85
85
+
if err != nil {
86
86
+
t.Fatalf("GetBundle failed: %v", err)
87
87
+
}
88
88
+
89
89
+
if retrieved.Hash != "hash123" {
90
90
+
t.Error("hash mismatch after retrieval")
91
91
+
}
92
92
+
})
93
93
+
94
94
+
t.Run("AddMultipleBundles_AutoSort", func(t *testing.T) {
95
95
+
idx := bundleindex.NewIndex("test-origin")
96
96
+
97
97
+
// Add bundles out of order: 3, 1, 2
98
98
+
for _, num := range []int{3, 1, 2} {
99
99
+
meta := &bundleindex.BundleMetadata{
100
100
+
BundleNumber: num,
101
101
+
StartTime: time.Now(),
102
102
+
EndTime: time.Now().Add(time.Hour),
103
103
+
OperationCount: types.BUNDLE_SIZE,
104
104
+
}
105
105
+
idx.AddBundle(meta)
106
106
+
}
107
107
+
108
108
+
bundles := idx.GetBundles()
109
109
+
110
110
+
// Should be sorted: 1, 2, 3
111
111
+
if bundles[0].BundleNumber != 1 {
112
112
+
t.Error("bundles not sorted")
113
113
+
}
114
114
+
if bundles[1].BundleNumber != 2 {
115
115
+
t.Error("bundles not sorted")
116
116
+
}
117
117
+
if bundles[2].BundleNumber != 3 {
118
118
+
t.Error("bundles not sorted")
119
119
+
}
120
120
+
})
121
121
+
122
122
+
t.Run("UpdateExistingBundle", func(t *testing.T) {
123
123
+
idx := bundleindex.NewIndex("test-origin")
124
124
+
125
125
+
original := &bundleindex.BundleMetadata{
126
126
+
BundleNumber: 1,
127
127
+
Hash: "original_hash",
128
128
+
StartTime: time.Now(),
129
129
+
EndTime: time.Now().Add(time.Hour),
130
130
+
OperationCount: types.BUNDLE_SIZE,
131
131
+
}
132
132
+
133
133
+
idx.AddBundle(original)
134
134
+
135
135
+
// Add again with different hash (update)
136
136
+
updated := &bundleindex.BundleMetadata{
137
137
+
BundleNumber: 1,
138
138
+
Hash: "updated_hash",
139
139
+
StartTime: time.Now(),
140
140
+
EndTime: time.Now().Add(time.Hour),
141
141
+
OperationCount: types.BUNDLE_SIZE,
142
142
+
}
143
143
+
144
144
+
idx.AddBundle(updated)
145
145
+
146
146
+
// Should have only 1 bundle (updated, not duplicated)
147
147
+
if idx.Count() != 1 {
148
148
+
t.Errorf("should have 1 bundle after update, got %d", idx.Count())
149
149
+
}
150
150
+
151
151
+
retrieved, _ := idx.GetBundle(1)
152
152
+
if retrieved.Hash != "updated_hash" {
153
153
+
t.Error("bundle was not updated")
154
154
+
}
155
155
+
})
156
156
+
}
157
157
+
158
158
+
// ====================================================================================
159
159
+
// SAVE & LOAD TESTS
160
160
+
// ====================================================================================
161
161
+
162
162
+
func TestIndexPersistence(t *testing.T) {
163
163
+
tmpDir := t.TempDir()
164
164
+
165
165
+
t.Run("SaveAndLoad", func(t *testing.T) {
166
166
+
indexPath := filepath.Join(tmpDir, "test_index.json")
167
167
+
168
168
+
// Create and populate index
169
169
+
idx := bundleindex.NewIndex("https://plc.directory")
170
170
+
171
171
+
for i := 1; i <= 5; i++ {
172
172
+
meta := &bundleindex.BundleMetadata{
173
173
+
BundleNumber: i,
174
174
+
StartTime: time.Now().Add(time.Duration(i-1) * time.Hour),
175
175
+
EndTime: time.Now().Add(time.Duration(i) * time.Hour),
176
176
+
OperationCount: types.BUNDLE_SIZE,
177
177
+
DIDCount: 1000 * i,
178
178
+
Hash: fmt.Sprintf("hash%d", i),
179
179
+
ContentHash: fmt.Sprintf("content%d", i),
180
180
+
CompressedHash: fmt.Sprintf("compressed%d", i),
181
181
+
CompressedSize: int64(1024 * i),
182
182
+
UncompressedSize: int64(5120 * i),
183
183
+
}
184
184
+
idx.AddBundle(meta)
185
185
+
}
186
186
+
187
187
+
// Save
188
188
+
if err := idx.Save(indexPath); err != nil {
189
189
+
t.Fatalf("Save failed: %v", err)
190
190
+
}
191
191
+
192
192
+
// Verify file exists
193
193
+
if _, err := os.Stat(indexPath); os.IsNotExist(err) {
194
194
+
t.Fatal("index file not created")
195
195
+
}
196
196
+
197
197
+
// Load
198
198
+
loaded, err := bundleindex.LoadIndex(indexPath)
199
199
+
if err != nil {
200
200
+
t.Fatalf("LoadIndex failed: %v", err)
201
201
+
}
202
202
+
203
203
+
// Verify data integrity
204
204
+
if loaded.Count() != 5 {
205
205
+
t.Errorf("loaded count mismatch: got %d, want 5", loaded.Count())
206
206
+
}
207
207
+
208
208
+
if loaded.Origin != "https://plc.directory" {
209
209
+
t.Error("origin not preserved")
210
210
+
}
211
211
+
212
212
+
if loaded.LastBundle != 5 {
213
213
+
t.Error("LastBundle not calculated correctly")
214
214
+
}
215
215
+
216
216
+
// Verify specific bundle
217
217
+
bundle3, err := loaded.GetBundle(3)
218
218
+
if err != nil {
219
219
+
t.Fatalf("GetBundle(3) failed: %v", err)
220
220
+
}
221
221
+
222
222
+
if bundle3.Hash != "hash3" {
223
223
+
t.Error("bundle data not preserved")
224
224
+
}
225
225
+
})
226
226
+
227
227
+
t.Run("AtomicSave", func(t *testing.T) {
228
228
+
indexPath := filepath.Join(tmpDir, "atomic_test.json")
229
229
+
230
230
+
idx := bundleindex.NewIndex("test")
231
231
+
idx.AddBundle(&bundleindex.BundleMetadata{
232
232
+
BundleNumber: 1,
233
233
+
StartTime: time.Now(),
234
234
+
EndTime: time.Now(),
235
235
+
OperationCount: types.BUNDLE_SIZE,
236
236
+
})
237
237
+
238
238
+
idx.Save(indexPath)
239
239
+
240
240
+
// Verify no .tmp file left behind
241
241
+
tmpPath := indexPath + ".tmp"
242
242
+
if _, err := os.Stat(tmpPath); !os.IsNotExist(err) {
243
243
+
t.Error("temporary file should not exist after successful save")
244
244
+
}
245
245
+
})
246
246
+
247
247
+
t.Run("LoadInvalidVersion", func(t *testing.T) {
248
248
+
indexPath := filepath.Join(tmpDir, "invalid_version.json")
249
249
+
250
250
+
// Write index with wrong version
251
251
+
invalidData := `{"version":"99.99","origin":"test","bundles":[]}`
252
252
+
os.WriteFile(indexPath, []byte(invalidData), 0644)
253
253
+
254
254
+
_, err := bundleindex.LoadIndex(indexPath)
255
255
+
if err == nil {
256
256
+
t.Error("should reject index with invalid version")
257
257
+
}
258
258
+
})
259
259
+
260
260
+
t.Run("LoadCorruptedJSON", func(t *testing.T) {
261
261
+
indexPath := filepath.Join(tmpDir, "corrupted.json")
262
262
+
263
263
+
os.WriteFile(indexPath, []byte("{invalid json"), 0644)
264
264
+
265
265
+
_, err := bundleindex.LoadIndex(indexPath)
266
266
+
if err == nil {
267
267
+
t.Error("should reject corrupted JSON")
268
268
+
}
269
269
+
})
270
270
+
}
271
271
+
272
272
+
// ====================================================================================
273
273
+
// QUERY OPERATIONS
274
274
+
// ====================================================================================
275
275
+
276
276
+
func TestIndexQueries(t *testing.T) {
277
277
+
idx := bundleindex.NewIndex("test")
278
278
+
279
279
+
// Populate with bundles
280
280
+
for i := 1; i <= 10; i++ {
281
281
+
meta := &bundleindex.BundleMetadata{
282
282
+
BundleNumber: i,
283
283
+
StartTime: time.Now().Add(time.Duration(i-1) * time.Hour),
284
284
+
EndTime: time.Now().Add(time.Duration(i) * time.Hour),
285
285
+
OperationCount: types.BUNDLE_SIZE,
286
286
+
CompressedSize: int64(i * 1000),
287
287
+
}
288
288
+
idx.AddBundle(meta)
289
289
+
}
290
290
+
291
291
+
t.Run("GetBundle", func(t *testing.T) {
292
292
+
meta, err := idx.GetBundle(5)
293
293
+
if err != nil {
294
294
+
t.Fatalf("GetBundle failed: %v", err)
295
295
+
}
296
296
+
297
297
+
if meta.BundleNumber != 5 {
298
298
+
t.Error("wrong bundle returned")
299
299
+
}
300
300
+
})
301
301
+
302
302
+
t.Run("GetBundle_NotFound", func(t *testing.T) {
303
303
+
_, err := idx.GetBundle(999)
304
304
+
if err == nil {
305
305
+
t.Error("should return error for nonexistent bundle")
306
306
+
}
307
307
+
})
308
308
+
309
309
+
t.Run("GetLastBundle", func(t *testing.T) {
310
310
+
last := idx.GetLastBundle()
311
311
+
312
312
+
if last == nil {
313
313
+
t.Fatal("GetLastBundle returned nil")
314
314
+
}
315
315
+
316
316
+
if last.BundleNumber != 10 {
317
317
+
t.Errorf("last bundle should be 10, got %d", last.BundleNumber)
318
318
+
}
319
319
+
})
320
320
+
321
321
+
t.Run("GetLastBundle_Empty", func(t *testing.T) {
322
322
+
emptyIdx := bundleindex.NewIndex("test")
323
323
+
324
324
+
last := emptyIdx.GetLastBundle()
325
325
+
326
326
+
if last != nil {
327
327
+
t.Error("empty index should return nil for GetLastBundle")
328
328
+
}
329
329
+
})
330
330
+
331
331
+
t.Run("GetBundleRange", func(t *testing.T) {
332
332
+
bundles := idx.GetBundleRange(3, 7)
333
333
+
334
334
+
if len(bundles) != 5 {
335
335
+
t.Errorf("expected 5 bundles, got %d", len(bundles))
336
336
+
}
337
337
+
338
338
+
if bundles[0].BundleNumber != 3 || bundles[4].BundleNumber != 7 {
339
339
+
t.Error("range boundaries incorrect")
340
340
+
}
341
341
+
})
342
342
+
343
343
+
t.Run("GetBundleRange_OutOfBounds", func(t *testing.T) {
344
344
+
bundles := idx.GetBundleRange(100, 200)
345
345
+
346
346
+
if len(bundles) != 0 {
347
347
+
t.Errorf("expected 0 bundles for out-of-range query, got %d", len(bundles))
348
348
+
}
349
349
+
})
350
350
+
351
351
+
t.Run("GetBundles_ReturnsShallowCopy", func(t *testing.T) {
352
352
+
bundles1 := idx.GetBundles()
353
353
+
bundles2 := idx.GetBundles()
354
354
+
355
355
+
// Should be different slices
356
356
+
if &bundles1[0] == &bundles2[0] {
357
357
+
t.Error("GetBundles should return copy, not same slice")
358
358
+
}
359
359
+
360
360
+
// But same data
361
361
+
if bundles1[0].BundleNumber != bundles2[0].BundleNumber {
362
362
+
t.Error("bundle data should be same")
363
363
+
}
364
364
+
})
365
365
+
}
366
366
+
367
367
+
// ====================================================================================
368
368
+
// GAP DETECTION - CRITICAL FOR INTEGRITY
369
369
+
// ====================================================================================
370
370
+
371
371
+
func TestIndexFindGaps(t *testing.T) {
372
372
+
t.Run("NoGaps", func(t *testing.T) {
373
373
+
idx := bundleindex.NewIndex("test")
374
374
+
375
375
+
for i := 1; i <= 10; i++ {
376
376
+
idx.AddBundle(createTestMetadata(i))
377
377
+
}
378
378
+
379
379
+
gaps := idx.FindGaps()
380
380
+
381
381
+
if len(gaps) != 0 {
382
382
+
t.Errorf("expected no gaps, found %d: %v", len(gaps), gaps)
383
383
+
}
384
384
+
})
385
385
+
386
386
+
t.Run("SingleGap", func(t *testing.T) {
387
387
+
idx := bundleindex.NewIndex("test")
388
388
+
389
389
+
// Add bundles 1, 2, 4, 5 (missing 3)
390
390
+
for _, num := range []int{1, 2, 4, 5} {
391
391
+
idx.AddBundle(createTestMetadata(num))
392
392
+
}
393
393
+
394
394
+
gaps := idx.FindGaps()
395
395
+
396
396
+
if len(gaps) != 1 {
397
397
+
t.Errorf("expected 1 gap, got %d", len(gaps))
398
398
+
}
399
399
+
400
400
+
if len(gaps) > 0 && gaps[0] != 3 {
401
401
+
t.Errorf("expected gap at 3, got %d", gaps[0])
402
402
+
}
403
403
+
})
404
404
+
405
405
+
t.Run("MultipleGaps", func(t *testing.T) {
406
406
+
idx := bundleindex.NewIndex("test")
407
407
+
408
408
+
// Add bundles 1, 2, 5, 6, 9, 10 (missing 3, 4, 7, 8)
409
409
+
for _, num := range []int{1, 2, 5, 6, 9, 10} {
410
410
+
idx.AddBundle(createTestMetadata(num))
411
411
+
}
412
412
+
413
413
+
gaps := idx.FindGaps()
414
414
+
415
415
+
expectedGaps := []int{3, 4, 7, 8}
416
416
+
if len(gaps) != len(expectedGaps) {
417
417
+
t.Errorf("expected %d gaps, got %d", len(expectedGaps), len(gaps))
418
418
+
}
419
419
+
420
420
+
for i, expected := range expectedGaps {
421
421
+
if gaps[i] != expected {
422
422
+
t.Errorf("gap %d: got %d, want %d", i, gaps[i], expected)
423
423
+
}
424
424
+
}
425
425
+
})
426
426
+
427
427
+
t.Run("FindGaps_EmptyIndex", func(t *testing.T) {
428
428
+
idx := bundleindex.NewIndex("test")
429
429
+
430
430
+
gaps := idx.FindGaps()
431
431
+
432
432
+
if len(gaps) > 0 {
433
433
+
t.Error("empty index should have no gaps")
434
434
+
}
435
435
+
})
436
436
+
437
437
+
t.Run("FindGaps_NonSequentialStart", func(t *testing.T) {
438
438
+
idx := bundleindex.NewIndex("test")
439
439
+
440
440
+
// Start at bundle 100
441
441
+
for i := 100; i <= 105; i++ {
442
442
+
idx.AddBundle(createTestMetadata(i))
443
443
+
}
444
444
+
445
445
+
gaps := idx.FindGaps()
446
446
+
447
447
+
// No gaps between 100-105
448
448
+
if len(gaps) != 0 {
449
449
+
t.Errorf("expected no gaps, got %d", len(gaps))
450
450
+
}
451
451
+
})
452
452
+
}
453
453
+
454
454
+
// ====================================================================================
455
455
+
// STATISTICS & DERIVED FIELDS
456
456
+
// ====================================================================================
457
457
+
458
458
+
func TestIndexStatistics(t *testing.T) {
459
459
+
idx := bundleindex.NewIndex("test")
460
460
+
461
461
+
t.Run("StatsEmpty", func(t *testing.T) {
462
462
+
stats := idx.GetStats()
463
463
+
464
464
+
if stats["bundle_count"].(int) != 0 {
465
465
+
t.Error("empty index should have count 0")
466
466
+
}
467
467
+
})
468
468
+
469
469
+
t.Run("StatsPopulated", func(t *testing.T) {
470
470
+
totalSize := int64(0)
471
471
+
totalUncompressed := int64(0)
472
472
+
473
473
+
for i := 1; i <= 5; i++ {
474
474
+
meta := &bundleindex.BundleMetadata{
475
475
+
BundleNumber: i,
476
476
+
StartTime: time.Now().Add(time.Duration(i-1) * time.Hour),
477
477
+
EndTime: time.Now().Add(time.Duration(i) * time.Hour),
478
478
+
OperationCount: types.BUNDLE_SIZE,
479
479
+
CompressedSize: int64(1000 * i),
480
480
+
UncompressedSize: int64(5000 * i),
481
481
+
}
482
482
+
idx.AddBundle(meta)
483
483
+
totalSize += meta.CompressedSize
484
484
+
totalUncompressed += meta.UncompressedSize
485
485
+
}
486
486
+
487
487
+
stats := idx.GetStats()
488
488
+
489
489
+
if stats["bundle_count"].(int) != 5 {
490
490
+
t.Error("bundle count mismatch")
491
491
+
}
492
492
+
493
493
+
if stats["first_bundle"].(int) != 1 {
494
494
+
t.Error("first_bundle mismatch")
495
495
+
}
496
496
+
497
497
+
if stats["last_bundle"].(int) != 5 {
498
498
+
t.Error("last_bundle mismatch")
499
499
+
}
500
500
+
501
501
+
if stats["total_size"].(int64) != totalSize {
502
502
+
t.Errorf("total_size mismatch: got %d, want %d", stats["total_size"].(int64), totalSize)
503
503
+
}
504
504
+
505
505
+
if stats["total_uncompressed_size"].(int64) != totalUncompressed {
506
506
+
t.Error("total_uncompressed_size mismatch")
507
507
+
}
508
508
+
509
509
+
if _, ok := stats["start_time"]; !ok {
510
510
+
t.Error("stats missing start_time")
511
511
+
}
512
512
+
513
513
+
if _, ok := stats["end_time"]; !ok {
514
514
+
t.Error("stats missing end_time")
515
515
+
}
516
516
+
517
517
+
if stats["gaps"].(int) != 0 {
518
518
+
t.Error("should have no gaps")
519
519
+
}
520
520
+
})
521
521
+
522
522
+
t.Run("StatsRecalculateAfterAdd", func(t *testing.T) {
523
523
+
idx := bundleindex.NewIndex("test")
524
524
+
525
525
+
idx.AddBundle(&bundleindex.BundleMetadata{
526
526
+
BundleNumber: 1,
527
527
+
StartTime: time.Now(),
528
528
+
EndTime: time.Now(),
529
529
+
OperationCount: types.BUNDLE_SIZE,
530
530
+
CompressedSize: 1000,
531
531
+
})
532
532
+
533
533
+
stats1 := idx.GetStats()
534
534
+
size1 := stats1["total_size"].(int64)
535
535
+
536
536
+
// Add another bundle
537
537
+
idx.AddBundle(&bundleindex.BundleMetadata{
538
538
+
BundleNumber: 2,
539
539
+
StartTime: time.Now(),
540
540
+
EndTime: time.Now(),
541
541
+
OperationCount: types.BUNDLE_SIZE,
542
542
+
CompressedSize: 2000,
543
543
+
})
544
544
+
545
545
+
stats2 := idx.GetStats()
546
546
+
size2 := stats2["total_size"].(int64)
547
547
+
548
548
+
if size2 != size1+2000 {
549
549
+
t.Errorf("total_size not recalculated: got %d, want %d", size2, size1+2000)
550
550
+
}
551
551
+
552
552
+
if stats2["last_bundle"].(int) != 2 {
553
553
+
t.Error("last_bundle not recalculated")
554
554
+
}
555
555
+
})
556
556
+
}
557
557
+
558
558
+
// ====================================================================================
559
559
+
// REBUILD OPERATION
560
560
+
// ====================================================================================
561
561
+
562
562
+
func TestIndexRebuild(t *testing.T) {
563
563
+
t.Run("RebuildFromMetadata", func(t *testing.T) {
564
564
+
idx := bundleindex.NewIndex("original")
565
565
+
566
566
+
// Add some bundles
567
567
+
for i := 1; i <= 3; i++ {
568
568
+
idx.AddBundle(createTestMetadata(i))
569
569
+
}
570
570
+
571
571
+
if idx.Count() != 3 {
572
572
+
t.Fatal("setup failed")
573
573
+
}
574
574
+
575
575
+
// Create new metadata for rebuild
576
576
+
newMetadata := []*bundleindex.BundleMetadata{
577
577
+
createTestMetadata(1),
578
578
+
createTestMetadata(2),
579
579
+
createTestMetadata(5),
580
580
+
createTestMetadata(6),
581
581
+
}
582
582
+
583
583
+
// Rebuild
584
584
+
idx.Rebuild(newMetadata)
585
585
+
586
586
+
// Should now have 4 bundles
587
587
+
if idx.Count() != 4 {
588
588
+
t.Errorf("after rebuild, expected 4 bundles, got %d", idx.Count())
589
589
+
}
590
590
+
591
591
+
// Should have new bundles 5, 6
592
592
+
if _, err := idx.GetBundle(5); err != nil {
593
593
+
t.Error("should have bundle 5 after rebuild")
594
594
+
}
595
595
+
596
596
+
// Should not have bundle 3
597
597
+
if _, err := idx.GetBundle(3); err == nil {
598
598
+
t.Error("should not have bundle 3 after rebuild")
599
599
+
}
600
600
+
601
601
+
// Origin should be preserved
602
602
+
if idx.Origin != "original" {
603
603
+
t.Error("origin should be preserved during rebuild")
604
604
+
}
605
605
+
})
606
606
+
607
607
+
t.Run("RebuildAutoSorts", func(t *testing.T) {
608
608
+
idx := bundleindex.NewIndex("test")
609
609
+
610
610
+
// Rebuild with unsorted data
611
611
+
unsorted := []*bundleindex.BundleMetadata{
612
612
+
createTestMetadata(5),
613
613
+
createTestMetadata(2),
614
614
+
createTestMetadata(8),
615
615
+
createTestMetadata(1),
616
616
+
}
617
617
+
618
618
+
idx.Rebuild(unsorted)
619
619
+
620
620
+
bundles := idx.GetBundles()
621
621
+
622
622
+
// Should be sorted
623
623
+
for i := 0; i < len(bundles)-1; i++ {
624
624
+
if bundles[i].BundleNumber >= bundles[i+1].BundleNumber {
625
625
+
t.Error("bundles not sorted after rebuild")
626
626
+
}
627
627
+
}
628
628
+
})
629
629
+
}
630
630
+
631
631
+
// ====================================================================================
632
632
+
// CLEAR OPERATION
633
633
+
// ====================================================================================
634
634
+
635
635
+
func TestIndexClear(t *testing.T) {
636
636
+
idx := bundleindex.NewIndex("test")
637
637
+
638
638
+
// Populate
639
639
+
for i := 1; i <= 10; i++ {
640
640
+
idx.AddBundle(createTestMetadata(i))
641
641
+
}
642
642
+
643
643
+
if idx.Count() != 10 {
644
644
+
t.Fatal("setup failed")
645
645
+
}
646
646
+
647
647
+
// Clear
648
648
+
idx.Clear()
649
649
+
650
650
+
if idx.Count() != 0 {
651
651
+
t.Error("count should be 0 after clear")
652
652
+
}
653
653
+
654
654
+
if idx.LastBundle != 0 {
655
655
+
t.Error("LastBundle should be 0 after clear")
656
656
+
}
657
657
+
658
658
+
if idx.TotalSize != 0 {
659
659
+
t.Error("TotalSize should be 0 after clear")
660
660
+
}
661
661
+
662
662
+
// Should be able to add after clear
663
663
+
idx.AddBundle(createTestMetadata(1))
664
664
+
665
665
+
if idx.Count() != 1 {
666
666
+
t.Error("should be able to add after clear")
667
667
+
}
668
668
+
}
669
669
+
670
670
+
// ====================================================================================
671
671
+
// CONCURRENCY TESTS
672
672
+
// ====================================================================================
673
673
+
674
674
+
func TestIndexConcurrency(t *testing.T) {
675
675
+
t.Run("ConcurrentReads", func(t *testing.T) {
676
676
+
idx := bundleindex.NewIndex("test")
677
677
+
678
678
+
// Populate
679
679
+
for i := 1; i <= 100; i++ {
680
680
+
idx.AddBundle(createTestMetadata(i))
681
681
+
}
682
682
+
683
683
+
// 100 concurrent readers
684
684
+
var wg sync.WaitGroup
685
685
+
errors := make(chan error, 100)
686
686
+
687
687
+
for i := 0; i < 100; i++ {
688
688
+
wg.Add(1)
689
689
+
go func(id int) {
690
690
+
defer wg.Done()
691
691
+
692
692
+
// Various read operations
693
693
+
idx.Count()
694
694
+
idx.GetLastBundle()
695
695
+
idx.GetBundles()
696
696
+
idx.FindGaps()
697
697
+
idx.GetStats()
698
698
+
699
699
+
if _, err := idx.GetBundle(id%100 + 1); err != nil {
700
700
+
errors <- err
701
701
+
}
702
702
+
}(i)
703
703
+
}
704
704
+
705
705
+
wg.Wait()
706
706
+
close(errors)
707
707
+
708
708
+
for err := range errors {
709
709
+
t.Errorf("concurrent read error: %v", err)
710
710
+
}
711
711
+
})
712
712
+
713
713
+
t.Run("ConcurrentReadsDuringSave", func(t *testing.T) {
714
714
+
tmpDir := t.TempDir()
715
715
+
indexPath := filepath.Join(tmpDir, "concurrent.json")
716
716
+
717
717
+
idx := bundleindex.NewIndex("test")
718
718
+
719
719
+
for i := 1; i <= 50; i++ {
720
720
+
idx.AddBundle(createTestMetadata(i))
721
721
+
}
722
722
+
723
723
+
var wg sync.WaitGroup
724
724
+
725
725
+
// Saver goroutine
726
726
+
wg.Add(1)
727
727
+
go func() {
728
728
+
defer wg.Done()
729
729
+
for i := 0; i < 10; i++ {
730
730
+
idx.Save(indexPath)
731
731
+
time.Sleep(10 * time.Millisecond)
732
732
+
}
733
733
+
}()
734
734
+
735
735
+
// Reader goroutines
736
736
+
for i := 0; i < 10; i++ {
737
737
+
wg.Add(1)
738
738
+
go func() {
739
739
+
defer wg.Done()
740
740
+
for j := 0; j < 50; j++ {
741
741
+
idx.Count()
742
742
+
idx.GetBundles()
743
743
+
time.Sleep(5 * time.Millisecond)
744
744
+
}
745
745
+
}()
746
746
+
}
747
747
+
748
748
+
wg.Wait()
749
749
+
})
750
750
+
}
751
751
+
752
752
+
// ====================================================================================
753
753
+
// REMOTE UPDATE TESTS (FOR CLONING)
754
754
+
// ====================================================================================
755
755
+
756
756
+
func TestIndexUpdateFromRemote(t *testing.T) {
757
757
+
758
758
+
t.Run("UpdateFromRemote_Basic", func(t *testing.T) {
759
759
+
idx := bundleindex.NewIndex("test")
760
760
+
761
761
+
// Local has bundles 1-3
762
762
+
for i := 1; i <= 3; i++ {
763
763
+
idx.AddBundle(createTestMetadata(i))
764
764
+
}
765
765
+
766
766
+
// Remote has bundles 1-5
767
767
+
remoteMeta := make(map[int]*bundleindex.BundleMetadata)
768
768
+
for i := 1; i <= 5; i++ {
769
769
+
remoteMeta[i] = createTestMetadata(i)
770
770
+
}
771
771
+
772
772
+
bundlesToUpdate := []int{4, 5}
773
773
+
774
774
+
// Mock file existence (4 and 5 exist)
775
775
+
fileExists := func(bundleNum int) bool {
776
776
+
return bundleNum == 4 || bundleNum == 5
777
777
+
}
778
778
+
779
779
+
logger := &testLogger{t: &testing.T{}}
780
780
+
781
781
+
err := idx.UpdateFromRemote(bundlesToUpdate, remoteMeta, fileExists, false, logger)
782
782
+
if err != nil {
783
783
+
t.Fatalf("UpdateFromRemote failed: %v", err)
784
784
+
}
785
785
+
786
786
+
// Should now have 5 bundles
787
787
+
if idx.Count() != 5 {
788
788
+
t.Errorf("expected 5 bundles after update, got %d", idx.Count())
789
789
+
}
790
790
+
})
791
791
+
792
792
+
t.Run("UpdateFromRemote_SkipsMissingFiles", func(t *testing.T) {
793
793
+
idx := bundleindex.NewIndex("test")
794
794
+
795
795
+
remoteMeta := map[int]*bundleindex.BundleMetadata{
796
796
+
1: createTestMetadata(1),
797
797
+
2: createTestMetadata(2),
798
798
+
}
799
799
+
800
800
+
bundlesToUpdate := []int{1, 2}
801
801
+
802
802
+
// Only bundle 1 exists locally
803
803
+
fileExists := func(bundleNum int) bool {
804
804
+
return bundleNum == 1
805
805
+
}
806
806
+
807
807
+
logger := &testLogger{t: &testing.T{}}
808
808
+
809
809
+
err := idx.UpdateFromRemote(bundlesToUpdate, remoteMeta, fileExists, false, logger)
810
810
+
if err != nil {
811
811
+
t.Fatalf("UpdateFromRemote failed: %v", err)
812
812
+
}
813
813
+
814
814
+
// Should only have bundle 1
815
815
+
if idx.Count() != 1 {
816
816
+
t.Errorf("expected 1 bundle, got %d", idx.Count())
817
817
+
}
818
818
+
819
819
+
if _, err := idx.GetBundle(2); err == nil {
820
820
+
t.Error("should not have bundle 2 (file missing)")
821
821
+
}
822
822
+
})
823
823
+
}
824
824
+
825
825
+
// ====================================================================================
826
826
+
// HELPER FUNCTIONS
827
827
+
// ====================================================================================
828
828
+
829
829
+
func createTestMetadata(bundleNum int) *bundleindex.BundleMetadata {
830
830
+
return &bundleindex.BundleMetadata{
831
831
+
BundleNumber: bundleNum,
832
832
+
StartTime: time.Now().Add(time.Duration(bundleNum-1) * time.Hour),
833
833
+
EndTime: time.Now().Add(time.Duration(bundleNum) * time.Hour),
834
834
+
OperationCount: types.BUNDLE_SIZE,
835
835
+
DIDCount: 1000,
836
836
+
Hash: fmt.Sprintf("hash%d", bundleNum),
837
837
+
ContentHash: fmt.Sprintf("content%d", bundleNum),
838
838
+
Parent: fmt.Sprintf("parent%d", bundleNum-1),
839
839
+
CompressedHash: fmt.Sprintf("compressed%d", bundleNum),
840
840
+
CompressedSize: int64(1000 * bundleNum),
841
841
+
UncompressedSize: int64(5000 * bundleNum),
842
842
+
CreatedAt: time.Now(),
843
843
+
}
844
844
+
}
+908
internal/mempool/mempool_test.go
···
1
1
+
package mempool_test
2
2
+
3
3
+
import (
4
4
+
"fmt"
5
5
+
"os"
6
6
+
"path/filepath"
7
7
+
"sync"
8
8
+
"testing"
9
9
+
"time"
10
10
+
11
11
+
"tangled.org/atscan.net/plcbundle/internal/mempool"
12
12
+
"tangled.org/atscan.net/plcbundle/internal/plcclient"
13
13
+
"tangled.org/atscan.net/plcbundle/internal/types"
14
14
+
)
15
15
+
16
16
+
type testLogger struct {
17
17
+
t *testing.T
18
18
+
}
19
19
+
20
20
+
func (l *testLogger) Printf(format string, v ...interface{}) {
21
21
+
l.t.Logf(format, v...)
22
22
+
}
23
23
+
24
24
+
func (l *testLogger) Println(v ...interface{}) {
25
25
+
l.t.Log(v...)
26
26
+
}
27
27
+
28
28
+
// ====================================================================================
29
29
+
// CHRONOLOGICAL VALIDATION - MOST CRITICAL
30
30
+
// ====================================================================================
31
31
+
32
32
+
func TestMempoolChronologicalStrict(t *testing.T) {
33
33
+
tmpDir := t.TempDir()
34
34
+
logger := &testLogger{t: t}
35
35
+
baseTime := time.Now().Add(-time.Hour)
36
36
+
37
37
+
t.Run("RejectOutOfOrder", func(t *testing.T) {
38
38
+
minTime := baseTime
39
39
+
m, err := mempool.NewMempool(tmpDir, 1, minTime, logger)
40
40
+
if err != nil {
41
41
+
t.Fatalf("NewMempool failed: %v", err)
42
42
+
}
43
43
+
44
44
+
// Add operations in order: 1, 2, 4
45
45
+
ops := []plcclient.PLCOperation{
46
46
+
{CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)},
47
47
+
{CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)},
48
48
+
{CID: "cid4", CreatedAt: baseTime.Add(4 * time.Second)},
49
49
+
}
50
50
+
51
51
+
_, err = m.Add(ops)
52
52
+
if err != nil {
53
53
+
t.Fatalf("Add failed: %v", err)
54
54
+
}
55
55
+
56
56
+
// Now try to add operation 3 (out of order)
57
57
+
outOfOrder := []plcclient.PLCOperation{
58
58
+
{CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second)},
59
59
+
}
60
60
+
61
61
+
_, err = m.Add(outOfOrder)
62
62
+
if err == nil {
63
63
+
t.Error("expected chronological validation error, got nil")
64
64
+
}
65
65
+
66
66
+
if m.Count() != 3 {
67
67
+
t.Errorf("count should still be 3, got %d", m.Count())
68
68
+
}
69
69
+
})
70
70
+
71
71
+
t.Run("RejectBeforeMinTimestamp", func(t *testing.T) {
72
72
+
minTime := baseTime.Add(10 * time.Second)
73
73
+
m, err := mempool.NewMempool(tmpDir, 2, minTime, logger)
74
74
+
if err != nil {
75
75
+
t.Fatalf("NewMempool failed: %v", err)
76
76
+
}
77
77
+
78
78
+
// Try to add operation before min timestamp
79
79
+
tooEarly := []plcclient.PLCOperation{
80
80
+
{CID: "cid1", CreatedAt: baseTime}, // Before minTime
81
81
+
}
82
82
+
83
83
+
_, err = m.Add(tooEarly)
84
84
+
if err == nil {
85
85
+
t.Error("expected error for operation before min timestamp")
86
86
+
}
87
87
+
})
88
88
+
89
89
+
t.Run("AllowEqualTimestamps", func(t *testing.T) {
90
90
+
minTime := baseTime
91
91
+
m, err := mempool.NewMempool(tmpDir, 3, minTime, logger)
92
92
+
if err != nil {
93
93
+
t.Fatalf("NewMempool failed: %v", err)
94
94
+
}
95
95
+
96
96
+
// Multiple operations with same timestamp (happens in real PLC data)
97
97
+
sameTime := baseTime.Add(5 * time.Second)
98
98
+
ops := []plcclient.PLCOperation{
99
99
+
{CID: "cid1", CreatedAt: sameTime},
100
100
+
{CID: "cid2", CreatedAt: sameTime},
101
101
+
{CID: "cid3", CreatedAt: sameTime},
102
102
+
}
103
103
+
104
104
+
added, err := m.Add(ops)
105
105
+
if err != nil {
106
106
+
t.Fatalf("should allow equal timestamps: %v", err)
107
107
+
}
108
108
+
109
109
+
if added != 3 {
110
110
+
t.Errorf("expected 3 added, got %d", added)
111
111
+
}
112
112
+
})
113
113
+
114
114
+
t.Run("ChronologicalAfterReload", func(t *testing.T) {
115
115
+
minTime := baseTime
116
116
+
m, err := mempool.NewMempool(tmpDir, 4, minTime, logger)
117
117
+
if err != nil {
118
118
+
t.Fatalf("NewMempool failed: %v", err)
119
119
+
}
120
120
+
121
121
+
// Add some operations
122
122
+
ops1 := []plcclient.PLCOperation{
123
123
+
{CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)},
124
124
+
{CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)},
125
125
+
}
126
126
+
m.Add(ops1)
127
127
+
m.Save()
128
128
+
129
129
+
// Reload mempool
130
130
+
m2, err := mempool.NewMempool(tmpDir, 4, minTime, logger)
131
131
+
if err != nil {
132
132
+
t.Fatalf("NewMempool reload failed: %v", err)
133
133
+
}
134
134
+
135
135
+
// Try to add out-of-order operation
136
136
+
outOfOrder := []plcclient.PLCOperation{
137
137
+
{CID: "cid0", CreatedAt: baseTime}, // Before loaded ops
138
138
+
}
139
139
+
140
140
+
_, err = m2.Add(outOfOrder)
141
141
+
if err == nil {
142
142
+
t.Error("should reject out-of-order after reload")
143
143
+
}
144
144
+
145
145
+
// Add valid operation after loaded ones
146
146
+
validOps := []plcclient.PLCOperation{
147
147
+
{CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second)},
148
148
+
}
149
149
+
150
150
+
added, err := m2.Add(validOps)
151
151
+
if err != nil {
152
152
+
t.Fatalf("should accept in-order operation: %v", err)
153
153
+
}
154
154
+
155
155
+
if added != 1 {
156
156
+
t.Error("should have added 1 operation")
157
157
+
}
158
158
+
})
159
159
+
160
160
+
t.Run("StrictIncreasingOrder", func(t *testing.T) {
161
161
+
minTime := baseTime
162
162
+
m, err := mempool.NewMempool(tmpDir, 5, minTime, logger)
163
163
+
if err != nil {
164
164
+
t.Fatalf("NewMempool failed: %v", err)
165
165
+
}
166
166
+
167
167
+
// Each operation must be >= previous timestamp
168
168
+
ops := []plcclient.PLCOperation{
169
169
+
{CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)},
170
170
+
{CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)},
171
171
+
{CID: "cid3", CreatedAt: baseTime.Add(2 * time.Second)}, // Equal - OK
172
172
+
{CID: "cid4", CreatedAt: baseTime.Add(3 * time.Second)},
173
173
+
}
174
174
+
175
175
+
added, err := m.Add(ops)
176
176
+
if err != nil {
177
177
+
t.Fatalf("should allow non-decreasing timestamps: %v", err)
178
178
+
}
179
179
+
180
180
+
if added != 4 {
181
181
+
t.Errorf("expected 4 added, got %d", added)
182
182
+
}
183
183
+
})
184
184
+
}
185
185
+
186
186
+
// ====================================================================================
187
187
+
// DUPLICATE PREVENTION
188
188
+
// ====================================================================================
189
189
+
190
190
+
func TestMempoolDuplicatePrevention(t *testing.T) {
191
191
+
tmpDir := t.TempDir()
192
192
+
logger := &testLogger{t: t}
193
193
+
baseTime := time.Now().Add(-time.Hour)
194
194
+
195
195
+
t.Run("SameCIDTwice", func(t *testing.T) {
196
196
+
minTime := baseTime
197
197
+
m, err := mempool.NewMempool(tmpDir, 6, minTime, logger)
198
198
+
if err != nil {
199
199
+
t.Fatalf("NewMempool failed: %v", err)
200
200
+
}
201
201
+
202
202
+
op := plcclient.PLCOperation{
203
203
+
CID: "duplicate_cid",
204
204
+
DID: "did:plc:test",
205
205
+
CreatedAt: baseTime.Add(1 * time.Second),
206
206
+
}
207
207
+
208
208
+
// Add first time
209
209
+
added, err := m.Add([]plcclient.PLCOperation{op})
210
210
+
if err != nil {
211
211
+
t.Fatalf("first add failed: %v", err)
212
212
+
}
213
213
+
if added != 1 {
214
214
+
t.Error("first add should succeed")
215
215
+
}
216
216
+
217
217
+
// Add same CID again (should be silently skipped)
218
218
+
added, err = m.Add([]plcclient.PLCOperation{op})
219
219
+
if err != nil {
220
220
+
t.Fatalf("duplicate add should not error: %v", err)
221
221
+
}
222
222
+
if added != 0 {
223
223
+
t.Errorf("duplicate should be skipped, but added=%d", added)
224
224
+
}
225
225
+
226
226
+
if m.Count() != 1 {
227
227
+
t.Errorf("count should be 1, got %d", m.Count())
228
228
+
}
229
229
+
})
230
230
+
231
231
+
t.Run("DuplicateAcrossSaveLoad", func(t *testing.T) {
232
232
+
minTime := baseTime
233
233
+
m, err := mempool.NewMempool(tmpDir, 7, minTime, logger)
234
234
+
if err != nil {
235
235
+
t.Fatalf("NewMempool failed: %v", err)
236
236
+
}
237
237
+
238
238
+
op := plcclient.PLCOperation{
239
239
+
CID: "persistent_cid",
240
240
+
DID: "did:plc:test",
241
241
+
CreatedAt: baseTime.Add(1 * time.Second),
242
242
+
}
243
243
+
244
244
+
// Add and save
245
245
+
m.Add([]plcclient.PLCOperation{op})
246
246
+
m.Save()
247
247
+
248
248
+
// Reload
249
249
+
m2, err := mempool.NewMempool(tmpDir, 7, minTime, logger)
250
250
+
if err != nil {
251
251
+
t.Fatalf("reload failed: %v", err)
252
252
+
}
253
253
+
254
254
+
// Try to add same operation
255
255
+
added, err := m2.Add([]plcclient.PLCOperation{op})
256
256
+
if err != nil {
257
257
+
t.Fatalf("add after reload failed: %v", err)
258
258
+
}
259
259
+
260
260
+
if added != 0 {
261
261
+
t.Errorf("duplicate should be skipped after reload, added=%d", added)
262
262
+
}
263
263
+
264
264
+
if m2.Count() != 1 {
265
265
+
t.Errorf("count should be 1, got %d", m2.Count())
266
266
+
}
267
267
+
})
268
268
+
269
269
+
t.Run("DuplicatesInBatch", func(t *testing.T) {
270
270
+
minTime := baseTime
271
271
+
m, err := mempool.NewMempool(tmpDir, 8, minTime, logger)
272
272
+
if err != nil {
273
273
+
t.Fatalf("NewMempool failed: %v", err)
274
274
+
}
275
275
+
276
276
+
// Batch contains duplicates
277
277
+
ops := []plcclient.PLCOperation{
278
278
+
{CID: "cid1", DID: "did:plc:001", CreatedAt: baseTime.Add(1 * time.Second)},
279
279
+
{CID: "cid2", DID: "did:plc:002", CreatedAt: baseTime.Add(2 * time.Second)},
280
280
+
{CID: "cid1", DID: "did:plc:001", CreatedAt: baseTime.Add(3 * time.Second)}, // Duplicate CID
281
281
+
}
282
282
+
283
283
+
added, err := m.Add(ops)
284
284
+
if err != nil {
285
285
+
t.Fatalf("Add failed: %v", err)
286
286
+
}
287
287
+
288
288
+
// Should only add 2 (skip duplicate)
289
289
+
if added != 2 {
290
290
+
t.Errorf("expected 2 unique operations, added %d", added)
291
291
+
}
292
292
+
293
293
+
if m.Count() != 2 {
294
294
+
t.Errorf("count should be 2, got %d", m.Count())
295
295
+
}
296
296
+
})
297
297
+
}
298
298
+
299
299
+
// ====================================================================================
300
300
+
// PERSISTENCE & CORRUPTION HANDLING
301
301
+
// ====================================================================================
302
302
+
303
303
+
func TestMempoolPersistence(t *testing.T) {
304
304
+
tmpDir := t.TempDir()
305
305
+
logger := &testLogger{t: t}
306
306
+
baseTime := time.Now().Add(-time.Hour)
307
307
+
308
308
+
t.Run("SaveAndLoad", func(t *testing.T) {
309
309
+
minTime := baseTime
310
310
+
m, err := mempool.NewMempool(tmpDir, 9, minTime, logger)
311
311
+
if err != nil {
312
312
+
t.Fatalf("NewMempool failed: %v", err)
313
313
+
}
314
314
+
315
315
+
ops := makeTestOperations(50)
316
316
+
m.Add(ops)
317
317
+
318
318
+
if err := m.Save(); err != nil {
319
319
+
t.Fatalf("Save failed: %v", err)
320
320
+
}
321
321
+
322
322
+
// Reload
323
323
+
m2, err := mempool.NewMempool(tmpDir, 9, minTime, logger)
324
324
+
if err != nil {
325
325
+
t.Fatalf("reload failed: %v", err)
326
326
+
}
327
327
+
328
328
+
if m2.Count() != 50 {
329
329
+
t.Errorf("after reload, expected 50 ops, got %d", m2.Count())
330
330
+
}
331
331
+
332
332
+
// Verify data integrity
333
333
+
loaded := m2.Peek(50)
334
334
+
for i := 0; i < 50; i++ {
335
335
+
if loaded[i].CID != ops[i].CID {
336
336
+
t.Errorf("op %d CID mismatch after reload", i)
337
337
+
}
338
338
+
}
339
339
+
})
340
340
+
341
341
+
// Fix the IncrementalSave test - line ~353
342
342
+
t.Run("IncrementalSave", func(t *testing.T) {
343
343
+
minTime := baseTime
344
344
+
m, err := mempool.NewMempool(tmpDir, 10, minTime, logger)
345
345
+
if err != nil {
346
346
+
t.Fatalf("NewMempool failed: %v", err)
347
347
+
}
348
348
+
349
349
+
// Add 10 ops and save
350
350
+
ops1 := makeTestOperations(10)
351
351
+
m.Add(ops1)
352
352
+
m.Save()
353
353
+
354
354
+
// Add 10 more and save
355
355
+
// FIX: makeTestOperationsFrom(start, COUNT) - so we want (10, 10) not (10, 20)
356
356
+
ops2 := makeTestOperationsFrom(10, 10) // ← Changed from (10, 20)
357
357
+
m.Add(ops2)
358
358
+
m.Save()
359
359
+
360
360
+
// Reload - should have all 20
361
361
+
m2, err := mempool.NewMempool(tmpDir, 10, minTime, logger)
362
362
+
if err != nil {
363
363
+
t.Fatalf("reload failed: %v", err)
364
364
+
}
365
365
+
366
366
+
if m2.Count() != 20 {
367
367
+
t.Errorf("expected 20 ops after incremental saves, got %d", m2.Count())
368
368
+
}
369
369
+
})
370
370
+
371
371
+
t.Run("CorruptedMempoolFile", func(t *testing.T) {
372
372
+
minTime := baseTime
373
373
+
mempoolFile := filepath.Join(tmpDir, "plc_mempool_000011.jsonl")
374
374
+
375
375
+
// Write corrupted data
376
376
+
os.WriteFile(mempoolFile, []byte("{invalid json\n{also bad"), 0644)
377
377
+
378
378
+
// Should error on load
379
379
+
_, err := mempool.NewMempool(tmpDir, 11, minTime, logger)
380
380
+
if err == nil {
381
381
+
t.Error("expected error loading corrupted mempool")
382
382
+
}
383
383
+
})
384
384
+
385
385
+
t.Run("DeleteMempool", func(t *testing.T) {
386
386
+
minTime := baseTime
387
387
+
m, err := mempool.NewMempool(tmpDir, 12, minTime, logger)
388
388
+
if err != nil {
389
389
+
t.Fatalf("NewMempool failed: %v", err)
390
390
+
}
391
391
+
392
392
+
ops := makeTestOperations(10)
393
393
+
m.Add(ops)
394
394
+
m.Save()
395
395
+
396
396
+
// Verify file exists
397
397
+
mempoolFile := filepath.Join(tmpDir, "plc_mempool_000012.jsonl")
398
398
+
if _, err := os.Stat(mempoolFile); os.IsNotExist(err) {
399
399
+
t.Fatal("mempool file should exist after save")
400
400
+
}
401
401
+
402
402
+
// Delete
403
403
+
if err := m.Delete(); err != nil {
404
404
+
t.Fatalf("Delete failed: %v", err)
405
405
+
}
406
406
+
407
407
+
// Verify file gone
408
408
+
if _, err := os.Stat(mempoolFile); !os.IsNotExist(err) {
409
409
+
t.Error("mempool file should be deleted")
410
410
+
}
411
411
+
})
412
412
+
}
413
413
+
414
414
+
// ====================================================================================
415
415
+
// TAKE OPERATIONS - CRITICAL FOR BUNDLING
416
416
+
// ====================================================================================
417
417
+
418
418
+
func TestMempoolTakeOperations(t *testing.T) {
419
419
+
tmpDir := t.TempDir()
420
420
+
logger := &testLogger{t: t}
421
421
+
baseTime := time.Now().Add(-time.Hour)
422
422
+
423
423
+
t.Run("TakeExact", func(t *testing.T) {
424
424
+
minTime := baseTime
425
425
+
m, err := mempool.NewMempool(tmpDir, 13, minTime, logger)
426
426
+
if err != nil {
427
427
+
t.Fatalf("NewMempool failed: %v", err)
428
428
+
}
429
429
+
430
430
+
m.Add(makeTestOperations(100))
431
431
+
432
432
+
taken, err := m.Take(50)
433
433
+
if err != nil {
434
434
+
t.Fatalf("Take failed: %v", err)
435
435
+
}
436
436
+
437
437
+
if len(taken) != 50 {
438
438
+
t.Errorf("expected 50 operations, got %d", len(taken))
439
439
+
}
440
440
+
441
441
+
if m.Count() != 50 {
442
442
+
t.Errorf("expected 50 remaining, got %d", m.Count())
443
443
+
}
444
444
+
})
445
445
+
446
446
+
t.Run("TakeMoreThanAvailable", func(t *testing.T) {
447
447
+
minTime := baseTime
448
448
+
m, err := mempool.NewMempool(tmpDir, 14, minTime, logger)
449
449
+
if err != nil {
450
450
+
t.Fatalf("NewMempool failed: %v", err)
451
451
+
}
452
452
+
453
453
+
m.Add(makeTestOperations(30))
454
454
+
455
455
+
// Try to take 100 (only 30 available)
456
456
+
taken, err := m.Take(100)
457
457
+
if err != nil {
458
458
+
t.Fatalf("Take failed: %v", err)
459
459
+
}
460
460
+
461
461
+
if len(taken) != 30 {
462
462
+
t.Errorf("expected 30 operations (all available), got %d", len(taken))
463
463
+
}
464
464
+
465
465
+
if m.Count() != 0 {
466
466
+
t.Errorf("mempool should be empty, got %d", m.Count())
467
467
+
}
468
468
+
})
469
469
+
470
470
+
t.Run("TakePreservesOrder", func(t *testing.T) {
471
471
+
minTime := baseTime
472
472
+
m, err := mempool.NewMempool(tmpDir, 15, minTime, logger)
473
473
+
if err != nil {
474
474
+
t.Fatalf("NewMempool failed: %v", err)
475
475
+
}
476
476
+
477
477
+
ops := makeTestOperations(100)
478
478
+
m.Add(ops)
479
479
+
480
480
+
taken, err := m.Take(50)
481
481
+
if err != nil {
482
482
+
t.Fatalf("Take failed: %v", err)
483
483
+
}
484
484
+
485
485
+
// Verify first 50 match
486
486
+
for i := 0; i < 50; i++ {
487
487
+
if taken[i].CID != ops[i].CID {
488
488
+
t.Errorf("operation %d mismatch: got %s, want %s", i, taken[i].CID, ops[i].CID)
489
489
+
}
490
490
+
}
491
491
+
492
492
+
// Remaining should be ops[50:100]
493
493
+
remaining := m.Peek(50)
494
494
+
for i := 0; i < 50; i++ {
495
495
+
if remaining[i].CID != ops[50+i].CID {
496
496
+
t.Errorf("remaining op %d mismatch", i)
497
497
+
}
498
498
+
}
499
499
+
})
500
500
+
501
501
+
t.Run("TakeFromEmpty", func(t *testing.T) {
502
502
+
minTime := baseTime
503
503
+
m, err := mempool.NewMempool(tmpDir, 16, minTime, logger)
504
504
+
if err != nil {
505
505
+
t.Fatalf("NewMempool failed: %v", err)
506
506
+
}
507
507
+
508
508
+
taken, err := m.Take(10)
509
509
+
if err != nil {
510
510
+
t.Fatalf("Take from empty failed: %v", err)
511
511
+
}
512
512
+
513
513
+
if len(taken) != 0 {
514
514
+
t.Errorf("expected 0 operations from empty mempool, got %d", len(taken))
515
515
+
}
516
516
+
})
517
517
+
}
518
518
+
519
519
+
// ====================================================================================
520
520
+
// VALIDATION TESTS
521
521
+
// ====================================================================================
522
522
+
523
523
+
func TestMempoolValidation(t *testing.T) {
524
524
+
tmpDir := t.TempDir()
525
525
+
logger := &testLogger{t: t}
526
526
+
baseTime := time.Now().Add(-time.Hour)
527
527
+
528
528
+
t.Run("ValidateChronological", func(t *testing.T) {
529
529
+
minTime := baseTime
530
530
+
m, err := mempool.NewMempool(tmpDir, 17, minTime, logger)
531
531
+
if err != nil {
532
532
+
t.Fatalf("NewMempool failed: %v", err)
533
533
+
}
534
534
+
535
535
+
ops := makeTestOperations(100)
536
536
+
m.Add(ops)
537
537
+
538
538
+
if err := m.Validate(); err != nil {
539
539
+
t.Errorf("Validate failed on valid mempool: %v", err)
540
540
+
}
541
541
+
})
542
542
+
543
543
+
t.Run("ValidateDetectsMinTimestampViolation", func(t *testing.T) {
544
544
+
minTime := baseTime.Add(10 * time.Second)
545
545
+
_, err := mempool.NewMempool(tmpDir, 18, minTime, logger)
546
546
+
if err != nil {
547
547
+
t.Fatalf("NewMempool failed: %v", err)
548
548
+
}
549
549
+
550
550
+
// Manually add operation before min (bypassing Add validation)
551
551
+
// This simulates corrupted state
552
552
+
ops := makeTestOperations(10)
553
553
+
ops[0].CreatedAt = baseTime // Before minTime
554
554
+
555
555
+
// Note: This is hard to test since Add enforces validation
556
556
+
// Better to test through file corruption
557
557
+
})
558
558
+
559
559
+
t.Run("ValidateDetectsDuplicateCIDs", func(t *testing.T) {
560
560
+
// Test for duplicate CID detection
561
561
+
// Similar challenge - Add prevents duplicates
562
562
+
// Would need to manually construct corrupted state
563
563
+
})
564
564
+
}
565
565
+
566
566
+
// ====================================================================================
567
567
+
// CONCURRENCY TESTS
568
568
+
// ====================================================================================
569
569
+
570
570
+
func TestMempoolConcurrency(t *testing.T) {
571
571
+
tmpDir := t.TempDir()
572
572
+
logger := &testLogger{t: t}
573
573
+
baseTime := time.Now().Add(-time.Hour)
574
574
+
575
575
+
t.Run("ConcurrentReads", func(t *testing.T) {
576
576
+
minTime := baseTime
577
577
+
m, err := mempool.NewMempool(tmpDir, 19, minTime, logger)
578
578
+
if err != nil {
579
579
+
t.Fatalf("NewMempool failed: %v", err)
580
580
+
}
581
581
+
582
582
+
m.Add(makeTestOperations(1000))
583
583
+
584
584
+
// 100 concurrent readers
585
585
+
var wg sync.WaitGroup
586
586
+
for i := 0; i < 100; i++ {
587
587
+
wg.Add(1)
588
588
+
go func() {
589
589
+
defer wg.Done()
590
590
+
count := m.Count()
591
591
+
if count != 1000 {
592
592
+
t.Errorf("count mismatch: got %d", count)
593
593
+
}
594
594
+
595
595
+
peek := m.Peek(10)
596
596
+
if len(peek) != 10 {
597
597
+
t.Errorf("peek mismatch: got %d", len(peek))
598
598
+
}
599
599
+
}()
600
600
+
}
601
601
+
wg.Wait()
602
602
+
})
603
603
+
604
604
+
t.Run("ConcurrentAddAndRead", func(t *testing.T) {
605
605
+
minTime := baseTime
606
606
+
m, err := mempool.NewMempool(tmpDir, 20, minTime, logger)
607
607
+
if err != nil {
608
608
+
t.Fatalf("NewMempool failed: %v", err)
609
609
+
}
610
610
+
611
611
+
var wg sync.WaitGroup
612
612
+
errors := make(chan error, 100)
613
613
+
614
614
+
// Writer goroutine
615
615
+
wg.Add(1)
616
616
+
go func() {
617
617
+
defer wg.Done()
618
618
+
for i := 0; i < 10; i++ {
619
619
+
ops := []plcclient.PLCOperation{
620
620
+
{CID: fmt.Sprintf("cid%d", i*100), CreatedAt: baseTime.Add(time.Duration(i*100) * time.Second)},
621
621
+
}
622
622
+
if _, err := m.Add(ops); err != nil {
623
623
+
errors <- err
624
624
+
}
625
625
+
time.Sleep(10 * time.Millisecond)
626
626
+
}
627
627
+
}()
628
628
+
629
629
+
// Reader goroutines
630
630
+
for i := 0; i < 10; i++ {
631
631
+
wg.Add(1)
632
632
+
go func() {
633
633
+
defer wg.Done()
634
634
+
for j := 0; j < 20; j++ {
635
635
+
m.Count()
636
636
+
m.Peek(5)
637
637
+
time.Sleep(5 * time.Millisecond)
638
638
+
}
639
639
+
}()
640
640
+
}
641
641
+
642
642
+
wg.Wait()
643
643
+
close(errors)
644
644
+
645
645
+
for err := range errors {
646
646
+
t.Errorf("concurrent operation error: %v", err)
647
647
+
}
648
648
+
})
649
649
+
}
650
650
+
651
651
+
// ====================================================================================
652
652
+
// STATS & METADATA TESTS
653
653
+
// ====================================================================================
654
654
+
655
655
+
func TestMempoolStats(t *testing.T) {
656
656
+
tmpDir := t.TempDir()
657
657
+
logger := &testLogger{t: t}
658
658
+
baseTime := time.Now().Add(-time.Hour)
659
659
+
660
660
+
t.Run("StatsEmpty", func(t *testing.T) {
661
661
+
minTime := baseTime
662
662
+
m, err := mempool.NewMempool(tmpDir, 21, minTime, logger)
663
663
+
if err != nil {
664
664
+
t.Fatalf("NewMempool failed: %v", err)
665
665
+
}
666
666
+
667
667
+
stats := m.Stats()
668
668
+
669
669
+
if stats["count"].(int) != 0 {
670
670
+
t.Error("empty mempool should have count 0")
671
671
+
}
672
672
+
673
673
+
if stats["can_create_bundle"].(bool) {
674
674
+
t.Error("empty mempool cannot create bundle")
675
675
+
}
676
676
+
677
677
+
if stats["target_bundle"].(int) != 21 {
678
678
+
t.Error("target bundle mismatch")
679
679
+
}
680
680
+
})
681
681
+
682
682
+
t.Run("StatsPopulated", func(t *testing.T) {
683
683
+
minTime := baseTime
684
684
+
m, err := mempool.NewMempool(tmpDir, 22, minTime, logger)
685
685
+
if err != nil {
686
686
+
t.Fatalf("NewMempool failed: %v", err)
687
687
+
}
688
688
+
689
689
+
ops := makeTestOperations(100)
690
690
+
m.Add(ops)
691
691
+
692
692
+
stats := m.Stats()
693
693
+
694
694
+
if stats["count"].(int) != 100 {
695
695
+
t.Error("count mismatch in stats")
696
696
+
}
697
697
+
698
698
+
if _, ok := stats["first_time"]; !ok {
699
699
+
t.Error("stats missing first_time")
700
700
+
}
701
701
+
702
702
+
if _, ok := stats["last_time"]; !ok {
703
703
+
t.Error("stats missing last_time")
704
704
+
}
705
705
+
706
706
+
if _, ok := stats["size_bytes"]; !ok {
707
707
+
t.Error("stats missing size_bytes")
708
708
+
}
709
709
+
710
710
+
if stats["did_count"].(int) != 100 {
711
711
+
t.Error("did_count should match operation count for unique DIDs")
712
712
+
}
713
713
+
})
714
714
+
715
715
+
t.Run("StatsCanCreateBundle", func(t *testing.T) {
716
716
+
minTime := baseTime
717
717
+
m, err := mempool.NewMempool(tmpDir, 23, minTime, logger)
718
718
+
if err != nil {
719
719
+
t.Fatalf("NewMempool failed: %v", err)
720
720
+
}
721
721
+
722
722
+
// Add exactly BUNDLE_SIZE operations
723
723
+
m.Add(makeTestOperations(types.BUNDLE_SIZE))
724
724
+
725
725
+
stats := m.Stats()
726
726
+
727
727
+
if !stats["can_create_bundle"].(bool) {
728
728
+
t.Error("should be able to create bundle with BUNDLE_SIZE operations")
729
729
+
}
730
730
+
})
731
731
+
}
732
732
+
733
733
+
// ====================================================================================
734
734
+
// DID SEARCH TESTS
735
735
+
// ====================================================================================
736
736
+
737
737
+
func TestMempoolDIDSearch(t *testing.T) {
738
738
+
tmpDir := t.TempDir()
739
739
+
logger := &testLogger{t: t}
740
740
+
baseTime := time.Now().Add(-time.Hour)
741
741
+
742
742
+
t.Run("FindDIDOperations", func(t *testing.T) {
743
743
+
minTime := baseTime
744
744
+
m, err := mempool.NewMempool(tmpDir, 24, minTime, logger)
745
745
+
if err != nil {
746
746
+
t.Fatalf("NewMempool failed: %v", err)
747
747
+
}
748
748
+
749
749
+
targetDID := "did:plc:target"
750
750
+
751
751
+
ops := []plcclient.PLCOperation{
752
752
+
{DID: "did:plc:other1", CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)},
753
753
+
{DID: targetDID, CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)},
754
754
+
{DID: "did:plc:other2", CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second)},
755
755
+
{DID: targetDID, CID: "cid4", CreatedAt: baseTime.Add(4 * time.Second)},
756
756
+
{DID: "did:plc:other3", CID: "cid5", CreatedAt: baseTime.Add(5 * time.Second)},
757
757
+
}
758
758
+
759
759
+
m.Add(ops)
760
760
+
761
761
+
// Search
762
762
+
found := m.FindDIDOperations(targetDID)
763
763
+
764
764
+
if len(found) != 2 {
765
765
+
t.Errorf("expected 2 operations for %s, got %d", targetDID, len(found))
766
766
+
}
767
767
+
768
768
+
if found[0].CID != "cid2" || found[1].CID != "cid4" {
769
769
+
t.Error("wrong operations returned")
770
770
+
}
771
771
+
})
772
772
+
773
773
+
t.Run("FindLatestDIDOperation", func(t *testing.T) {
774
774
+
minTime := baseTime
775
775
+
m, err := mempool.NewMempool(tmpDir, 25, minTime, logger)
776
776
+
if err != nil {
777
777
+
t.Fatalf("NewMempool failed: %v", err)
778
778
+
}
779
779
+
780
780
+
targetDID := "did:plc:target"
781
781
+
782
782
+
ops := []plcclient.PLCOperation{
783
783
+
{DID: targetDID, CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second), Nullified: false},
784
784
+
{DID: targetDID, CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second), Nullified: false},
785
785
+
{DID: targetDID, CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second), Nullified: true}, // Nullified
786
786
+
}
787
787
+
788
788
+
m.Add(ops)
789
789
+
790
790
+
// Should return cid2 (latest non-nullified)
791
791
+
latest := m.FindLatestDIDOperation(targetDID)
792
792
+
793
793
+
if latest == nil {
794
794
+
t.Fatal("expected to find operation, got nil")
795
795
+
}
796
796
+
797
797
+
if latest.CID != "cid2" {
798
798
+
t.Errorf("expected cid2 (latest non-nullified), got %s", latest.CID)
799
799
+
}
800
800
+
})
801
801
+
802
802
+
t.Run("FindLatestDIDOperation_AllNullified", func(t *testing.T) {
803
803
+
minTime := baseTime
804
804
+
m, err := mempool.NewMempool(tmpDir, 26, minTime, logger)
805
805
+
if err != nil {
806
806
+
t.Fatalf("NewMempool failed: %v", err)
807
807
+
}
808
808
+
809
809
+
targetDID := "did:plc:target"
810
810
+
811
811
+
ops := []plcclient.PLCOperation{
812
812
+
{DID: targetDID, CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second), Nullified: true},
813
813
+
{DID: targetDID, CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second), Nullified: true},
814
814
+
}
815
815
+
816
816
+
m.Add(ops)
817
817
+
818
818
+
latest := m.FindLatestDIDOperation(targetDID)
819
819
+
820
820
+
if latest != nil {
821
821
+
t.Error("should return nil when all operations are nullified")
822
822
+
}
823
823
+
})
824
824
+
825
825
+
t.Run("FindDIDOperations_NotFound", func(t *testing.T) {
826
826
+
minTime := baseTime
827
827
+
m, err := mempool.NewMempool(tmpDir, 27, minTime, logger)
828
828
+
if err != nil {
829
829
+
t.Fatalf("NewMempool failed: %v", err)
830
830
+
}
831
831
+
832
832
+
m.Add(makeTestOperations(100))
833
833
+
834
834
+
found := m.FindDIDOperations("did:plc:nonexistent")
835
835
+
836
836
+
if len(found) != 0 {
837
837
+
t.Errorf("expected empty result, got %d operations", len(found))
838
838
+
}
839
839
+
})
840
840
+
}
841
841
+
842
842
+
// ====================================================================================
843
843
+
// CLEAR OPERATION TESTS
844
844
+
// ====================================================================================
845
845
+
846
846
+
func TestMempoolClear(t *testing.T) {
847
847
+
tmpDir := t.TempDir()
848
848
+
logger := &testLogger{t: t}
849
849
+
baseTime := time.Now().Add(-time.Hour)
850
850
+
851
851
+
t.Run("ClearPopulated", func(t *testing.T) {
852
852
+
minTime := baseTime
853
853
+
m, err := mempool.NewMempool(tmpDir, 28, minTime, logger)
854
854
+
if err != nil {
855
855
+
t.Fatalf("NewMempool failed: %v", err)
856
856
+
}
857
857
+
858
858
+
m.Add(makeTestOperations(100))
859
859
+
860
860
+
if m.Count() != 100 {
861
861
+
t.Fatal("setup failed")
862
862
+
}
863
863
+
864
864
+
m.Clear()
865
865
+
866
866
+
if m.Count() != 0 {
867
867
+
t.Errorf("after clear, count should be 0, got %d", m.Count())
868
868
+
}
869
869
+
870
870
+
// Should be able to add new operations
871
871
+
newOps := []plcclient.PLCOperation{
872
872
+
{CID: "new1", CreatedAt: baseTime.Add(200 * time.Second)},
873
873
+
}
874
874
+
875
875
+
added, err := m.Add(newOps)
876
876
+
if err != nil {
877
877
+
t.Fatalf("Add after clear failed: %v", err)
878
878
+
}
879
879
+
880
880
+
if added != 1 {
881
881
+
t.Error("should be able to add after clear")
882
882
+
}
883
883
+
})
884
884
+
}
885
885
+
886
886
+
// ====================================================================================
887
887
+
// HELPER FUNCTIONS
888
888
+
// ====================================================================================
889
889
+
890
890
+
func makeTestOperations(count int) []plcclient.PLCOperation {
891
891
+
return makeTestOperationsFrom(0, count)
892
892
+
}
893
893
+
894
894
+
func makeTestOperationsFrom(start, count int) []plcclient.PLCOperation {
895
895
+
ops := make([]plcclient.PLCOperation, count)
896
896
+
baseTime := time.Now().Add(-time.Hour)
897
897
+
898
898
+
for i := 0; i < count; i++ {
899
899
+
idx := start + i
900
900
+
ops[i] = plcclient.PLCOperation{
901
901
+
DID: fmt.Sprintf("did:plc:test%06d", idx),
902
902
+
CID: fmt.Sprintf("bafy%06d", idx),
903
903
+
CreatedAt: baseTime.Add(time.Duration(idx) * time.Second),
904
904
+
}
905
905
+
}
906
906
+
907
907
+
return ops
908
908
+
}
+867
internal/storage/storage_test.go
···
1
1
+
package storage_test
2
2
+
3
3
+
import (
4
4
+
"bufio"
5
5
+
"bytes"
6
6
+
"fmt"
7
7
+
"os"
8
8
+
"path/filepath"
9
9
+
"sync"
10
10
+
"testing"
11
11
+
"time"
12
12
+
13
13
+
"tangled.org/atscan.net/plcbundle/internal/plcclient"
14
14
+
"tangled.org/atscan.net/plcbundle/internal/storage"
15
15
+
)
16
16
+
17
17
+
// testLogger adapts *testing.T to the logger interface expected by the
// storage package, so package log output lands in the test log.
type testLogger struct {
	t *testing.T
}

// Printf forwards formatted log output to the test log.
func (tl *testLogger) Printf(format string, v ...interface{}) {
	tl.t.Logf(format, v...)
}

// Println forwards plain log output to the test log.
func (tl *testLogger) Println(v ...interface{}) {
	tl.t.Log(v...)
}
28
28
+
29
29
+
// ====================================================================================
30
30
+
// COMPRESSION TESTS
31
31
+
// ====================================================================================
32
32
+
33
33
+
func TestStorageCompression(t *testing.T) {
34
34
+
tmpDir := t.TempDir()
35
35
+
logger := &testLogger{t: t}
36
36
+
ops, err := storage.NewOperations(logger)
37
37
+
if err != nil {
38
38
+
t.Fatalf("NewOperations failed: %v", err)
39
39
+
}
40
40
+
defer ops.Close()
41
41
+
42
42
+
t.Run("RoundTripCompression", func(t *testing.T) {
43
43
+
tests := []struct {
44
44
+
name string
45
45
+
count int
46
46
+
}{
47
47
+
{"Empty", 0},
48
48
+
{"Single", 1},
49
49
+
{"Small", 10},
50
50
+
{"Medium", 100},
51
51
+
{"Large", 1000},
52
52
+
{"FullBundle", 10000},
53
53
+
}
54
54
+
55
55
+
for _, tt := range tests {
56
56
+
t.Run(tt.name, func(t *testing.T) {
57
57
+
if tt.count == 0 {
58
58
+
return // Skip empty for now
59
59
+
}
60
60
+
61
61
+
original := makeTestOperations(tt.count)
62
62
+
path := filepath.Join(tmpDir, tt.name+".jsonl.zst")
63
63
+
64
64
+
// Save
65
65
+
_, _, _, _, err := ops.SaveBundle(path, original)
66
66
+
if err != nil {
67
67
+
t.Fatalf("SaveBundle failed: %v", err)
68
68
+
}
69
69
+
70
70
+
// Load
71
71
+
loaded, err := ops.LoadBundle(path)
72
72
+
if err != nil {
73
73
+
t.Fatalf("LoadBundle failed: %v", err)
74
74
+
}
75
75
+
76
76
+
// Verify count
77
77
+
if len(loaded) != len(original) {
78
78
+
t.Errorf("count mismatch: got %d, want %d", len(loaded), len(original))
79
79
+
}
80
80
+
81
81
+
// Verify each operation
82
82
+
for i := range original {
83
83
+
if loaded[i].DID != original[i].DID {
84
84
+
t.Errorf("op %d DID mismatch: got %s, want %s", i, loaded[i].DID, original[i].DID)
85
85
+
}
86
86
+
if loaded[i].CID != original[i].CID {
87
87
+
t.Errorf("op %d CID mismatch: got %s, want %s", i, loaded[i].CID, original[i].CID)
88
88
+
}
89
89
+
if !loaded[i].CreatedAt.Equal(original[i].CreatedAt) {
90
90
+
t.Errorf("op %d timestamp mismatch", i)
91
91
+
}
92
92
+
}
93
93
+
})
94
94
+
}
95
95
+
})
96
96
+
97
97
+
t.Run("CompressionRatio", func(t *testing.T) {
98
98
+
operations := makeTestOperations(10000)
99
99
+
path := filepath.Join(tmpDir, "compression_test.jsonl.zst")
100
100
+
101
101
+
_, _, uncompSize, compSize, err := ops.SaveBundle(path, operations)
102
102
+
if err != nil {
103
103
+
t.Fatalf("SaveBundle failed: %v", err)
104
104
+
}
105
105
+
106
106
+
if compSize >= uncompSize {
107
107
+
t.Errorf("compression failed: compressed=%d >= uncompressed=%d", compSize, uncompSize)
108
108
+
}
109
109
+
110
110
+
ratio := float64(uncompSize) / float64(compSize)
111
111
+
if ratio < 2.0 {
112
112
+
t.Errorf("poor compression ratio: %.2fx (expected > 2.0x)", ratio)
113
113
+
}
114
114
+
115
115
+
t.Logf("Compression ratio: %.2fx (%d → %d bytes)", ratio, uncompSize, compSize)
116
116
+
})
117
117
+
118
118
+
t.Run("CompressedDataIntegrity", func(t *testing.T) {
119
119
+
operations := makeTestOperations(100)
120
120
+
path := filepath.Join(tmpDir, "integrity_test.jsonl.zst")
121
121
+
122
122
+
contentHash, compHash, _, _, err := ops.SaveBundle(path, operations)
123
123
+
if err != nil {
124
124
+
t.Fatalf("SaveBundle failed: %v", err)
125
125
+
}
126
126
+
127
127
+
// Recalculate hashes
128
128
+
calcCompHash, _, calcContentHash, _, err := ops.CalculateFileHashes(path)
129
129
+
if err != nil {
130
130
+
t.Fatalf("CalculateFileHashes failed: %v", err)
131
131
+
}
132
132
+
133
133
+
if calcCompHash != compHash {
134
134
+
t.Errorf("compressed hash mismatch: got %s, want %s", calcCompHash, compHash)
135
135
+
}
136
136
+
137
137
+
if calcContentHash != contentHash {
138
138
+
t.Errorf("content hash mismatch: got %s, want %s", calcContentHash, contentHash)
139
139
+
}
140
140
+
})
141
141
+
}
142
142
+
143
143
+
// ====================================================================================
144
144
+
// HASHING TESTS - CRITICAL FOR CHAIN INTEGRITY
145
145
+
// ====================================================================================
146
146
+
147
147
+
func TestStorageHashing(t *testing.T) {
148
148
+
logger := &testLogger{t: t}
149
149
+
ops, err := storage.NewOperations(logger)
150
150
+
if err != nil {
151
151
+
t.Fatalf("NewOperations failed: %v", err)
152
152
+
}
153
153
+
defer ops.Close()
154
154
+
155
155
+
t.Run("HashDeterminism", func(t *testing.T) {
156
156
+
data := []byte("test data for hashing")
157
157
+
158
158
+
// Calculate hash multiple times
159
159
+
hashes := make([]string, 100)
160
160
+
for i := 0; i < 100; i++ {
161
161
+
hashes[i] = ops.Hash(data)
162
162
+
}
163
163
+
164
164
+
// All should be identical
165
165
+
firstHash := hashes[0]
166
166
+
for i, h := range hashes {
167
167
+
if h != firstHash {
168
168
+
t.Errorf("hash %d differs: got %s, want %s", i, h, firstHash)
169
169
+
}
170
170
+
}
171
171
+
172
172
+
// Verify it's actually a valid SHA256 hex (64 chars)
173
173
+
if len(firstHash) != 64 {
174
174
+
t.Errorf("invalid hash length: got %d, want 64", len(firstHash))
175
175
+
}
176
176
+
})
177
177
+
178
178
+
t.Run("ChainHashCalculation", func(t *testing.T) {
179
179
+
contentHash := "abc123def456"
180
180
+
181
181
+
// Genesis bundle (no parent)
182
182
+
genesisHash := ops.CalculateChainHash("", contentHash)
183
183
+
expectedGenesis := ops.Hash([]byte("plcbundle:genesis:" + contentHash))
184
184
+
if genesisHash != expectedGenesis {
185
185
+
t.Errorf("genesis hash mismatch: got %s, want %s", genesisHash, expectedGenesis)
186
186
+
}
187
187
+
188
188
+
// Second bundle (has parent)
189
189
+
parentHash := genesisHash
190
190
+
childHash := ops.CalculateChainHash(parentHash, contentHash)
191
191
+
expectedChild := ops.Hash([]byte(parentHash + ":" + contentHash))
192
192
+
if childHash != expectedChild {
193
193
+
t.Errorf("child hash mismatch: got %s, want %s", childHash, expectedChild)
194
194
+
}
195
195
+
196
196
+
// Chain continues
197
197
+
grandchildHash := ops.CalculateChainHash(childHash, contentHash)
198
198
+
expectedGrandchild := ops.Hash([]byte(childHash + ":" + contentHash))
199
199
+
if grandchildHash != expectedGrandchild {
200
200
+
t.Errorf("grandchild hash mismatch")
201
201
+
}
202
202
+
})
203
203
+
204
204
+
t.Run("HashSensitivity", func(t *testing.T) {
205
205
+
// Small changes should produce completely different hashes
206
206
+
data1 := []byte("test data")
207
207
+
data2 := []byte("test datb") // Changed one char
208
208
+
data3 := []byte("test data ") // Added space
209
209
+
210
210
+
hash1 := ops.Hash(data1)
211
211
+
hash2 := ops.Hash(data2)
212
212
+
hash3 := ops.Hash(data3)
213
213
+
214
214
+
if hash1 == hash2 {
215
215
+
t.Error("different data produced same hash (collision!)")
216
216
+
}
217
217
+
if hash1 == hash3 {
218
218
+
t.Error("different data produced same hash (collision!)")
219
219
+
}
220
220
+
})
221
221
+
222
222
+
t.Run("EmptyDataHash", func(t *testing.T) {
223
223
+
hash := ops.Hash([]byte{})
224
224
+
if len(hash) != 64 {
225
225
+
t.Errorf("empty data hash invalid length: %d", len(hash))
226
226
+
}
227
227
+
// SHA256 of empty string is known constant
228
228
+
// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
229
229
+
expected := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
230
230
+
if hash != expected {
231
231
+
t.Errorf("empty data hash mismatch: got %s, want %s", hash, expected)
232
232
+
}
233
233
+
})
234
234
+
}
235
235
+
236
236
+
// ====================================================================================
237
237
+
// CONCURRENCY TESTS - CRITICAL FOR PRODUCTION
238
238
+
// ====================================================================================
239
239
+
240
240
+
func TestStorageConcurrency(t *testing.T) {
241
241
+
tmpDir := t.TempDir()
242
242
+
logger := &testLogger{t: t}
243
243
+
ops, err := storage.NewOperations(logger)
244
244
+
if err != nil {
245
245
+
t.Fatalf("NewOperations failed: %v", err)
246
246
+
}
247
247
+
defer ops.Close()
248
248
+
249
249
+
t.Run("ParallelBundleReads", func(t *testing.T) {
250
250
+
// Create test bundle
251
251
+
operations := makeTestOperations(10000)
252
252
+
path := filepath.Join(tmpDir, "parallel_test.jsonl.zst")
253
253
+
_, _, _, _, err := ops.SaveBundle(path, operations)
254
254
+
if err != nil {
255
255
+
t.Fatalf("SaveBundle failed: %v", err)
256
256
+
}
257
257
+
258
258
+
// Read from 100 goroutines simultaneously
259
259
+
var wg sync.WaitGroup
260
260
+
errors := make(chan error, 100)
261
261
+
262
262
+
for i := 0; i < 100; i++ {
263
263
+
wg.Add(1)
264
264
+
go func(id int) {
265
265
+
defer wg.Done()
266
266
+
loaded, err := ops.LoadBundle(path)
267
267
+
if err != nil {
268
268
+
errors <- err
269
269
+
return
270
270
+
}
271
271
+
if len(loaded) != 10000 {
272
272
+
errors <- err
273
273
+
}
274
274
+
}(i)
275
275
+
}
276
276
+
277
277
+
wg.Wait()
278
278
+
close(errors)
279
279
+
280
280
+
for err := range errors {
281
281
+
t.Errorf("concurrent read error: %v", err)
282
282
+
}
283
283
+
})
284
284
+
285
285
+
t.Run("LoadOperationAtPositionConcurrency", func(t *testing.T) {
286
286
+
// Critical test - this is heavily used by DID lookups
287
287
+
operations := makeTestOperations(10000)
288
288
+
path := filepath.Join(tmpDir, "position_test.jsonl.zst")
289
289
+
_, _, _, _, err := ops.SaveBundle(path, operations)
290
290
+
if err != nil {
291
291
+
t.Fatalf("SaveBundle failed: %v", err)
292
292
+
}
293
293
+
294
294
+
// 200 concurrent position reads
295
295
+
var wg sync.WaitGroup
296
296
+
errors := make(chan error, 200)
297
297
+
298
298
+
for i := 0; i < 200; i++ {
299
299
+
wg.Add(1)
300
300
+
go func(position int) {
301
301
+
defer wg.Done()
302
302
+
op, err := ops.LoadOperationAtPosition(path, position%10000)
303
303
+
if err != nil {
304
304
+
errors <- err
305
305
+
return
306
306
+
}
307
307
+
if op == nil {
308
308
+
errors <- err
309
309
+
}
310
310
+
}(i)
311
311
+
}
312
312
+
313
313
+
wg.Wait()
314
314
+
close(errors)
315
315
+
316
316
+
for err := range errors {
317
317
+
t.Errorf("concurrent position read error: %v", err)
318
318
+
}
319
319
+
})
320
320
+
321
321
+
t.Run("ConcurrentHashVerification", func(t *testing.T) {
322
322
+
operations := makeTestOperations(1000)
323
323
+
path := filepath.Join(tmpDir, "verify_test.jsonl.zst")
324
324
+
_, compHash, _, _, err := ops.SaveBundle(path, operations)
325
325
+
if err != nil {
326
326
+
t.Fatalf("SaveBundle failed: %v", err)
327
327
+
}
328
328
+
329
329
+
var wg sync.WaitGroup
330
330
+
for i := 0; i < 50; i++ {
331
331
+
wg.Add(1)
332
332
+
go func() {
333
333
+
defer wg.Done()
334
334
+
valid, _, err := ops.VerifyHash(path, compHash)
335
335
+
if err != nil {
336
336
+
t.Errorf("VerifyHash failed: %v", err)
337
337
+
}
338
338
+
if !valid {
339
339
+
t.Error("hash verification failed")
340
340
+
}
341
341
+
}()
342
342
+
}
343
343
+
wg.Wait()
344
344
+
})
345
345
+
}
346
346
+
347
347
+
// ====================================================================================
348
348
+
// EDGE CASES & ERROR HANDLING
349
349
+
// ====================================================================================
350
350
+
351
351
+
func TestStorageEdgeCases(t *testing.T) {
352
352
+
tmpDir := t.TempDir()
353
353
+
logger := &testLogger{t: t}
354
354
+
ops, err := storage.NewOperations(logger)
355
355
+
if err != nil {
356
356
+
t.Fatalf("NewOperations failed: %v", err)
357
357
+
}
358
358
+
defer ops.Close()
359
359
+
360
360
+
t.Run("CorruptedZstdFile", func(t *testing.T) {
361
361
+
path := filepath.Join(tmpDir, "corrupted.jsonl.zst")
362
362
+
// Write invalid zstd data
363
363
+
os.WriteFile(path, []byte("this is not valid zstd data"), 0644)
364
364
+
365
365
+
_, err := ops.LoadBundle(path)
366
366
+
if err == nil {
367
367
+
t.Error("expected error loading corrupted file, got nil")
368
368
+
}
369
369
+
})
370
370
+
371
371
+
t.Run("TruncatedFile", func(t *testing.T) {
372
372
+
operations := makeTestOperations(100)
373
373
+
path := filepath.Join(tmpDir, "truncated.jsonl.zst")
374
374
+
ops.SaveBundle(path, operations)
375
375
+
376
376
+
// Read and truncate
377
377
+
data, _ := os.ReadFile(path)
378
378
+
os.WriteFile(path, data[:len(data)/2], 0644)
379
379
+
380
380
+
_, err := ops.LoadBundle(path)
381
381
+
if err == nil {
382
382
+
t.Error("expected error loading truncated file, got nil")
383
383
+
}
384
384
+
})
385
385
+
386
386
+
t.Run("InvalidJSONL", func(t *testing.T) {
387
387
+
path := filepath.Join(tmpDir, "invalid.jsonl.zst")
388
388
+
invalidData := []byte("{invalid json}\n{also invalid}")
389
389
+
390
390
+
// Manually compress invalid data
391
391
+
operations := makeTestOperations(10)
392
392
+
ops.SaveBundle(path, operations) // Create valid file first
393
393
+
394
394
+
// Now corrupt it with invalid JSON
395
395
+
// This is hard to test properly since SaveBundle enforces valid data
396
396
+
// Better to test ParseJSONL directly
397
397
+
_, err := ops.ParseJSONL(invalidData)
398
398
+
if err == nil {
399
399
+
t.Error("expected error parsing invalid JSONL, got nil")
400
400
+
}
401
401
+
})
402
402
+
403
403
+
t.Run("NonExistentFile", func(t *testing.T) {
404
404
+
_, err := ops.LoadBundle("/nonexistent/path/file.jsonl.zst")
405
405
+
if err == nil {
406
406
+
t.Error("expected error loading nonexistent file, got nil")
407
407
+
}
408
408
+
})
409
409
+
410
410
+
t.Run("InvalidPosition", func(t *testing.T) {
411
411
+
operations := makeTestOperations(100)
412
412
+
path := filepath.Join(tmpDir, "position_test.jsonl.zst")
413
413
+
ops.SaveBundle(path, operations)
414
414
+
415
415
+
// Negative position
416
416
+
_, err := ops.LoadOperationAtPosition(path, -1)
417
417
+
if err == nil {
418
418
+
t.Error("expected error for negative position")
419
419
+
}
420
420
+
421
421
+
// Position beyond file
422
422
+
_, err = ops.LoadOperationAtPosition(path, 10000)
423
423
+
if err == nil {
424
424
+
t.Error("expected error for position beyond file")
425
425
+
}
426
426
+
})
427
427
+
}
428
428
+
429
429
+
// ====================================================================================
430
430
+
// BOUNDARY CONDITIONS - CRITICAL FOR BUNDLE CHAINING
431
431
+
// ====================================================================================
432
432
+
433
433
+
func TestStorageBoundaryConditions(t *testing.T) {
434
434
+
logger := &testLogger{t: t}
435
435
+
ops, err := storage.NewOperations(logger)
436
436
+
if err != nil {
437
437
+
t.Fatalf("NewOperations failed: %v", err)
438
438
+
}
439
439
+
defer ops.Close()
440
440
+
441
441
+
t.Run("GetBoundaryCIDs_SingleOperation", func(t *testing.T) {
442
442
+
baseTime := time.Now()
443
443
+
operations := []plcclient.PLCOperation{
444
444
+
{CID: "cid1", CreatedAt: baseTime},
445
445
+
}
446
446
+
447
447
+
boundaryTime, cids := ops.GetBoundaryCIDs(operations)
448
448
+
449
449
+
if !boundaryTime.Equal(baseTime) {
450
450
+
t.Error("boundary time mismatch")
451
451
+
}
452
452
+
if len(cids) != 1 {
453
453
+
t.Errorf("expected 1 boundary CID, got %d", len(cids))
454
454
+
}
455
455
+
if !cids["cid1"] {
456
456
+
t.Error("expected cid1 in boundary set")
457
457
+
}
458
458
+
})
459
459
+
460
460
+
t.Run("GetBoundaryCIDs_MultipleSameTimestamp", func(t *testing.T) {
461
461
+
// CRITICAL: Operations with identical timestamps (happens in real data)
462
462
+
baseTime := time.Now()
463
463
+
operations := []plcclient.PLCOperation{
464
464
+
{CID: "cid1", CreatedAt: baseTime.Add(-2 * time.Second)},
465
465
+
{CID: "cid2", CreatedAt: baseTime.Add(-1 * time.Second)},
466
466
+
{CID: "cid3", CreatedAt: baseTime}, // Last timestamp
467
467
+
{CID: "cid4", CreatedAt: baseTime}, // Same as cid3
468
468
+
{CID: "cid5", CreatedAt: baseTime}, // Same as cid3
469
469
+
}
470
470
+
471
471
+
boundaryTime, cids := ops.GetBoundaryCIDs(operations)
472
472
+
473
473
+
if !boundaryTime.Equal(baseTime) {
474
474
+
t.Error("boundary time should be last operation time")
475
475
+
}
476
476
+
477
477
+
// Should return ALL CIDs with the last timestamp
478
478
+
if len(cids) != 3 {
479
479
+
t.Errorf("expected 3 boundary CIDs, got %d", len(cids))
480
480
+
}
481
481
+
482
482
+
for _, expectedCID := range []string{"cid3", "cid4", "cid5"} {
483
483
+
if !cids[expectedCID] {
484
484
+
t.Errorf("expected %s in boundary set", expectedCID)
485
485
+
}
486
486
+
}
487
487
+
488
488
+
// Earlier CIDs should NOT be in set
489
489
+
if cids["cid1"] || cids["cid2"] {
490
490
+
t.Error("earlier CIDs should not be in boundary set")
491
491
+
}
492
492
+
})
493
493
+
494
494
+
t.Run("GetBoundaryCIDs_AllSameTimestamp", func(t *testing.T) {
495
495
+
baseTime := time.Now()
496
496
+
operations := []plcclient.PLCOperation{
497
497
+
{CID: "cid1", CreatedAt: baseTime},
498
498
+
{CID: "cid2", CreatedAt: baseTime},
499
499
+
{CID: "cid3", CreatedAt: baseTime},
500
500
+
}
501
501
+
502
502
+
_, cids := ops.GetBoundaryCIDs(operations)
503
503
+
504
504
+
if len(cids) != 3 {
505
505
+
t.Errorf("expected all 3 CIDs, got %d", len(cids))
506
506
+
}
507
507
+
})
508
508
+
509
509
+
t.Run("GetBoundaryCIDs_EmptyOperations", func(t *testing.T) {
510
510
+
operations := []plcclient.PLCOperation{}
511
511
+
boundaryTime, cids := ops.GetBoundaryCIDs(operations)
512
512
+
513
513
+
if !boundaryTime.IsZero() {
514
514
+
t.Error("expected zero time for empty operations")
515
515
+
}
516
516
+
if len(cids) > 0 {
517
517
+
t.Error("expected nil or empty CID set")
518
518
+
}
519
519
+
})
520
520
+
521
521
+
t.Run("StripBoundaryDuplicates_ActualDuplication", func(t *testing.T) {
522
522
+
// CRITICAL: This prevents duplicate operations across bundle boundaries
523
523
+
baseTime := time.Now()
524
524
+
boundaryTimestamp := baseTime.Format(time.RFC3339Nano)
525
525
+
526
526
+
prevBoundaryCIDs := map[string]bool{
527
527
+
"cid3": true,
528
528
+
"cid4": true,
529
529
+
}
530
530
+
531
531
+
operations := []plcclient.PLCOperation{
532
532
+
{CID: "cid3", CreatedAt: baseTime}, // Duplicate - should be stripped
533
533
+
{CID: "cid4", CreatedAt: baseTime}, // Duplicate - should be stripped
534
534
+
{CID: "cid5", CreatedAt: baseTime}, // New - should be kept
535
535
+
{CID: "cid6", CreatedAt: baseTime.Add(1 * time.Second)}, // After boundary - kept
536
536
+
}
537
537
+
538
538
+
result := ops.StripBoundaryDuplicates(operations, boundaryTimestamp, prevBoundaryCIDs)
539
539
+
540
540
+
if len(result) != 2 {
541
541
+
t.Errorf("expected 2 operations after stripping, got %d", len(result))
542
542
+
}
543
543
+
544
544
+
if result[0].CID != "cid5" {
545
545
+
t.Errorf("expected cid5 first, got %s", result[0].CID)
546
546
+
}
547
547
+
if result[1].CID != "cid6" {
548
548
+
t.Errorf("expected cid6 second, got %s", result[1].CID)
549
549
+
}
550
550
+
})
551
551
+
552
552
+
t.Run("StripBoundaryDuplicates_NoDuplicates", func(t *testing.T) {
553
553
+
baseTime := time.Now()
554
554
+
boundaryTimestamp := baseTime.Format(time.RFC3339Nano)
555
555
+
556
556
+
prevBoundaryCIDs := map[string]bool{
557
557
+
"old_cid": true,
558
558
+
}
559
559
+
560
560
+
operations := []plcclient.PLCOperation{
561
561
+
{CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)},
562
562
+
{CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)},
563
563
+
}
564
564
+
565
565
+
result := ops.StripBoundaryDuplicates(operations, boundaryTimestamp, prevBoundaryCIDs)
566
566
+
567
567
+
if len(result) != 2 {
568
568
+
t.Errorf("expected 2 operations, got %d", len(result))
569
569
+
}
570
570
+
})
571
571
+
572
572
+
t.Run("StripBoundaryDuplicates_EmptyPrevious", func(t *testing.T) {
573
573
+
baseTime := time.Now()
574
574
+
operations := makeTestOperations(10)
575
575
+
576
576
+
result := ops.StripBoundaryDuplicates(operations, baseTime.Format(time.RFC3339Nano), nil)
577
577
+
578
578
+
if len(result) != len(operations) {
579
579
+
t.Error("should not strip anything with no previous boundary CIDs")
580
580
+
}
581
581
+
})
582
582
+
}
583
583
+
584
584
+
// ====================================================================================
585
585
+
// SERIALIZATION TESTS
586
586
+
// ====================================================================================
587
587
+
588
588
+
func TestStorageSerialization(t *testing.T) {
589
589
+
logger := &testLogger{t: t}
590
590
+
ops, err := storage.NewOperations(logger)
591
591
+
if err != nil {
592
592
+
t.Fatalf("NewOperations failed: %v", err)
593
593
+
}
594
594
+
defer ops.Close()
595
595
+
596
596
+
t.Run("SerializeJSONL_PreservesRawJSON", func(t *testing.T) {
597
597
+
rawJSON := []byte(`{"did":"did:plc:test","cid":"bafytest","createdAt":"2024-01-01T00:00:00.000Z"}`)
598
598
+
op := plcclient.PLCOperation{
599
599
+
DID: "did:plc:test",
600
600
+
CID: "bafytest",
601
601
+
CreatedAt: time.Now(),
602
602
+
RawJSON: rawJSON,
603
603
+
}
604
604
+
605
605
+
result := ops.SerializeJSONL([]plcclient.PLCOperation{op})
606
606
+
607
607
+
// Should use RawJSON directly
608
608
+
if !containsBytes(result, rawJSON) {
609
609
+
t.Error("SerializeJSONL did not preserve RawJSON")
610
610
+
}
611
611
+
})
612
612
+
613
613
+
t.Run("SerializeJSONL_MarshalsFallback", func(t *testing.T) {
614
614
+
op := plcclient.PLCOperation{
615
615
+
DID: "did:plc:test",
616
616
+
CID: "bafytest",
617
617
+
CreatedAt: time.Now(),
618
618
+
// No RawJSON - should marshal
619
619
+
}
620
620
+
621
621
+
result := ops.SerializeJSONL([]plcclient.PLCOperation{op})
622
622
+
623
623
+
if len(result) == 0 {
624
624
+
t.Error("SerializeJSONL returned empty result")
625
625
+
}
626
626
+
627
627
+
// Should contain the DID
628
628
+
if !containsBytes(result, []byte("did:plc:test")) {
629
629
+
t.Error("serialized data missing DID")
630
630
+
}
631
631
+
})
632
632
+
633
633
+
t.Run("ParseJSONL_RoundTrip", func(t *testing.T) {
634
634
+
original := makeTestOperations(100)
635
635
+
data := ops.SerializeJSONL(original)
636
636
+
637
637
+
parsed, err := ops.ParseJSONL(data)
638
638
+
if err != nil {
639
639
+
t.Fatalf("ParseJSONL failed: %v", err)
640
640
+
}
641
641
+
642
642
+
if len(parsed) != len(original) {
643
643
+
t.Errorf("count mismatch: got %d, want %d", len(parsed), len(original))
644
644
+
}
645
645
+
646
646
+
// Verify RawJSON is populated
647
647
+
for i, op := range parsed {
648
648
+
if len(op.RawJSON) == 0 {
649
649
+
t.Errorf("operation %d missing RawJSON", i)
650
650
+
}
651
651
+
}
652
652
+
})
653
653
+
}
654
654
+
655
655
+
// ====================================================================================
656
656
+
// UTILITY FUNCTION TESTS
657
657
+
// ====================================================================================
658
658
+
659
659
+
func TestStorageUtilities(t *testing.T) {
660
660
+
tmpDir := t.TempDir()
661
661
+
logger := &testLogger{t: t}
662
662
+
ops, err := storage.NewOperations(logger)
663
663
+
if err != nil {
664
664
+
t.Fatalf("NewOperations failed: %v", err)
665
665
+
}
666
666
+
defer ops.Close()
667
667
+
668
668
+
t.Run("ExtractUniqueDIDs", func(t *testing.T) {
669
669
+
operations := []plcclient.PLCOperation{
670
670
+
{DID: "did:plc:aaa"},
671
671
+
{DID: "did:plc:bbb"},
672
672
+
{DID: "did:plc:aaa"}, // Duplicate
673
673
+
{DID: "did:plc:ccc"},
674
674
+
{DID: "did:plc:bbb"}, // Duplicate
675
675
+
{DID: "did:plc:aaa"}, // Duplicate
676
676
+
}
677
677
+
678
678
+
dids := ops.ExtractUniqueDIDs(operations)
679
679
+
680
680
+
if len(dids) != 3 {
681
681
+
t.Errorf("expected 3 unique DIDs, got %d", len(dids))
682
682
+
}
683
683
+
684
684
+
// Verify all expected DIDs present
685
685
+
didSet := make(map[string]bool)
686
686
+
for _, did := range dids {
687
687
+
didSet[did] = true
688
688
+
}
689
689
+
690
690
+
for _, expectedDID := range []string{"did:plc:aaa", "did:plc:bbb", "did:plc:ccc"} {
691
691
+
if !didSet[expectedDID] {
692
692
+
t.Errorf("missing expected DID: %s", expectedDID)
693
693
+
}
694
694
+
}
695
695
+
})
696
696
+
697
697
+
t.Run("ExtractUniqueDIDs_Empty", func(t *testing.T) {
698
698
+
dids := ops.ExtractUniqueDIDs([]plcclient.PLCOperation{})
699
699
+
if len(dids) != 0 {
700
700
+
t.Error("expected empty result for empty input")
701
701
+
}
702
702
+
})
703
703
+
704
704
+
t.Run("FileExists", func(t *testing.T) {
705
705
+
existingFile := filepath.Join(tmpDir, "exists.txt")
706
706
+
os.WriteFile(existingFile, []byte("test"), 0644)
707
707
+
708
708
+
if !ops.FileExists(existingFile) {
709
709
+
t.Error("FileExists returned false for existing file")
710
710
+
}
711
711
+
712
712
+
if ops.FileExists(filepath.Join(tmpDir, "nonexistent.txt")) {
713
713
+
t.Error("FileExists returned true for nonexistent file")
714
714
+
}
715
715
+
})
716
716
+
717
717
+
t.Run("GetFileSize", func(t *testing.T) {
718
718
+
testFile := filepath.Join(tmpDir, "size_test.txt")
719
719
+
testData := []byte("exactly 12 b")
720
720
+
os.WriteFile(testFile, testData, 0644)
721
721
+
722
722
+
size, err := ops.GetFileSize(testFile)
723
723
+
if err != nil {
724
724
+
t.Fatalf("GetFileSize failed: %v", err)
725
725
+
}
726
726
+
727
727
+
if size != int64(len(testData)) {
728
728
+
t.Errorf("size mismatch: got %d, want %d", size, len(testData))
729
729
+
}
730
730
+
})
731
731
+
}
732
732
+
733
733
+
// ====================================================================================
734
734
+
// STREAMING TESTS
735
735
+
// ====================================================================================
736
736
+
737
737
+
func TestStorageStreaming(t *testing.T) {
738
738
+
tmpDir := t.TempDir()
739
739
+
logger := &testLogger{t: t}
740
740
+
ops, err := storage.NewOperations(logger)
741
741
+
if err != nil {
742
742
+
t.Fatalf("NewOperations failed: %v", err)
743
743
+
}
744
744
+
defer ops.Close()
745
745
+
746
746
+
t.Run("StreamRaw", func(t *testing.T) {
747
747
+
operations := makeTestOperations(100)
748
748
+
path := filepath.Join(tmpDir, "stream_raw.jsonl.zst")
749
749
+
_, _, _, _, err := ops.SaveBundle(path, operations)
750
750
+
if err != nil {
751
751
+
t.Fatalf("SaveBundle failed: %v", err)
752
752
+
}
753
753
+
754
754
+
reader, err := ops.StreamRaw(path)
755
755
+
if err != nil {
756
756
+
t.Fatalf("StreamRaw failed: %v", err)
757
757
+
}
758
758
+
defer reader.Close()
759
759
+
760
760
+
// Read all data
761
761
+
data := make([]byte, 1024*1024)
762
762
+
n, err := reader.Read(data)
763
763
+
if err != nil && err.Error() != "EOF" {
764
764
+
t.Fatalf("Read failed: %v", err)
765
765
+
}
766
766
+
767
767
+
if n == 0 {
768
768
+
t.Error("StreamRaw returned no data")
769
769
+
}
770
770
+
})
771
771
+
772
772
+
t.Run("StreamDecompressed", func(t *testing.T) {
773
773
+
operations := makeTestOperations(100)
774
774
+
path := filepath.Join(tmpDir, "stream_decomp.jsonl.zst")
775
775
+
ops.SaveBundle(path, operations)
776
776
+
777
777
+
reader, err := ops.StreamDecompressed(path)
778
778
+
if err != nil {
779
779
+
t.Fatalf("StreamDecompressed failed: %v", err)
780
780
+
}
781
781
+
defer reader.Close()
782
782
+
783
783
+
// Count JSONL lines
784
784
+
scanner := bufio.NewScanner(reader)
785
785
+
lineCount := 0
786
786
+
for scanner.Scan() {
787
787
+
lineCount++
788
788
+
}
789
789
+
790
790
+
if lineCount != 100 {
791
791
+
t.Errorf("expected 100 lines, got %d", lineCount)
792
792
+
}
793
793
+
})
794
794
+
}
795
795
+
796
796
+
// ====================================================================================
797
797
+
// PERFORMANCE / BENCHMARK TESTS
798
798
+
// ====================================================================================
799
799
+
800
800
+
func BenchmarkStorageOperations(b *testing.B) {
801
801
+
tmpDir := b.TempDir()
802
802
+
logger := &testLogger{t: &testing.T{}}
803
803
+
ops, _ := storage.NewOperations(logger)
804
804
+
defer ops.Close()
805
805
+
806
806
+
operations := makeTestOperations(10000)
807
807
+
808
808
+
b.Run("SaveBundle", func(b *testing.B) {
809
809
+
for i := 0; i < b.N; i++ {
810
810
+
path := filepath.Join(tmpDir, fmt.Sprintf("bench_%d.jsonl.zst", i))
811
811
+
ops.SaveBundle(path, operations)
812
812
+
}
813
813
+
})
814
814
+
815
815
+
// Create bundle for read benchmarks
816
816
+
testPath := filepath.Join(tmpDir, "bench_read.jsonl.zst")
817
817
+
ops.SaveBundle(testPath, operations)
818
818
+
819
819
+
b.Run("LoadBundle", func(b *testing.B) {
820
820
+
for i := 0; i < b.N; i++ {
821
821
+
ops.LoadBundle(testPath)
822
822
+
}
823
823
+
})
824
824
+
825
825
+
b.Run("LoadOperationAtPosition", func(b *testing.B) {
826
826
+
for i := 0; i < b.N; i++ {
827
827
+
ops.LoadOperationAtPosition(testPath, i%10000)
828
828
+
}
829
829
+
})
830
830
+
831
831
+
b.Run("Hash", func(b *testing.B) {
832
832
+
data := ops.SerializeJSONL(operations)
833
833
+
b.ResetTimer()
834
834
+
for i := 0; i < b.N; i++ {
835
835
+
ops.Hash(data)
836
836
+
}
837
837
+
})
838
838
+
839
839
+
b.Run("SerializeJSONL", func(b *testing.B) {
840
840
+
for i := 0; i < b.N; i++ {
841
841
+
ops.SerializeJSONL(operations)
842
842
+
}
843
843
+
})
844
844
+
}
845
845
+
846
846
+
// ====================================================================================
847
847
+
// HELPER FUNCTIONS
848
848
+
// ====================================================================================
849
849
+
850
850
+
func makeTestOperations(count int) []plcclient.PLCOperation {
851
851
+
ops := make([]plcclient.PLCOperation, count)
852
852
+
baseTime := time.Now().Add(-time.Hour)
853
853
+
854
854
+
for i := 0; i < count; i++ {
855
855
+
ops[i] = plcclient.PLCOperation{
856
856
+
DID: fmt.Sprintf("did:plc:test%06d", i),
857
857
+
CID: fmt.Sprintf("bafy%06d", i),
858
858
+
CreatedAt: baseTime.Add(time.Duration(i) * time.Second),
859
859
+
}
860
860
+
}
861
861
+
862
862
+
return ops
863
863
+
}
864
864
+
865
865
+
// containsBytes reports whether needle occurs anywhere within haystack.
func containsBytes(haystack, needle []byte) bool {
	return bytes.Index(haystack, needle) != -1
}
+742
internal/sync/sync_test.go
···
1
1
+
package sync_test
2
2
+
3
3
+
import (
4
4
+
"context"
5
5
+
"fmt"
6
6
+
"net/http"
7
7
+
"net/http/httptest"
8
8
+
"sync"
9
9
+
"sync/atomic"
10
10
+
"testing"
11
11
+
"time"
12
12
+
13
13
+
"github.com/goccy/go-json"
14
14
+
"tangled.org/atscan.net/plcbundle/internal/plcclient"
15
15
+
"tangled.org/atscan.net/plcbundle/internal/storage"
16
16
+
internalsync "tangled.org/atscan.net/plcbundle/internal/sync"
17
17
+
)
18
18
+
19
19
+
// testLogger adapts *testing.T to the Printf/Println logger interface used
// by the packages under test, routing log output through the test framework.
type testLogger struct {
	t *testing.T
}
22
22
+
23
23
+
// Printf forwards formatted log output to the test log.
func (l *testLogger) Printf(format string, v ...interface{}) {
	l.t.Logf(format, v...)
}
26
26
+
27
27
+
// Println forwards plain log output to the test log.
func (l *testLogger) Println(v ...interface{}) {
	l.t.Log(v...)
}
30
30
+
31
31
+
// Mock mempool for testing
32
32
+
// mockMempool is an in-memory stand-in for the real mempool used by the
// fetcher tests. Add deduplicates by CID; Save calls are counted so tests
// can assert persistence happened.
type mockMempool struct {
	operations []plcclient.PLCOperation // accepted operations, in arrival order
	mu         sync.Mutex               // guards operations
	saveCount  int32                    // number of Save calls; accessed atomically
}
37
37
+
38
38
+
func newMockMempool() *mockMempool {
39
39
+
return &mockMempool{
40
40
+
operations: make([]plcclient.PLCOperation, 0),
41
41
+
}
42
42
+
}
43
43
+
44
44
+
func (m *mockMempool) Add(ops []plcclient.PLCOperation) (int, error) {
45
45
+
m.mu.Lock()
46
46
+
defer m.mu.Unlock()
47
47
+
48
48
+
// Build existing CID set (like real mempool does)
49
49
+
existingCIDs := make(map[string]bool)
50
50
+
for _, op := range m.operations {
51
51
+
existingCIDs[op.CID] = true
52
52
+
}
53
53
+
54
54
+
// Only add new operations (deduplicate by CID)
55
55
+
addedCount := 0
56
56
+
for _, op := range ops {
57
57
+
if !existingCIDs[op.CID] {
58
58
+
m.operations = append(m.operations, op)
59
59
+
existingCIDs[op.CID] = true
60
60
+
addedCount++
61
61
+
}
62
62
+
}
63
63
+
64
64
+
return addedCount, nil // ← Return actual added count
65
65
+
}
66
66
+
67
67
+
// Save records a persistence request; the counter is incremented atomically
// so tests can read it from other goroutines.
func (m *mockMempool) Save() error {
	atomic.AddInt32(&m.saveCount, 1)
	return nil
}
71
71
+
72
72
+
// SaveIfNeeded always saves in the mock (the real implementation may skip
// when nothing changed).
func (m *mockMempool) SaveIfNeeded() error {
	return m.Save()
}
75
75
+
76
76
+
func (m *mockMempool) Count() int {
77
77
+
m.mu.Lock()
78
78
+
defer m.mu.Unlock()
79
79
+
return len(m.operations)
80
80
+
}
81
81
+
82
82
+
func (m *mockMempool) GetLastTime() string {
83
83
+
m.mu.Lock()
84
84
+
defer m.mu.Unlock()
85
85
+
if len(m.operations) == 0 {
86
86
+
return ""
87
87
+
}
88
88
+
return m.operations[len(m.operations)-1].CreatedAt.Format(time.RFC3339Nano)
89
89
+
}
90
90
+
91
91
+
// ====================================================================================
92
92
+
// FETCHER TESTS - DEDUPLICATION & RETRY LOGIC
93
93
+
// ====================================================================================
94
94
+
95
95
+
func TestFetcherDeduplication(t *testing.T) {
96
96
+
t.Run("BoundaryDuplicateHandling", func(t *testing.T) {
97
97
+
// Setup mock server
98
98
+
baseTime := time.Now()
99
99
+
boundaryTime := baseTime.Add(5 * time.Second)
100
100
+
101
101
+
// Simulate operations at bundle boundary
102
102
+
mockOps := []plcclient.PLCOperation{
103
103
+
{DID: "did:plc:001", CID: "cid1", CreatedAt: boundaryTime},
104
104
+
{DID: "did:plc:002", CID: "cid2", CreatedAt: boundaryTime},
105
105
+
{DID: "did:plc:003", CID: "cid3", CreatedAt: boundaryTime.Add(1 * time.Second)},
106
106
+
}
107
107
+
108
108
+
server := createMockPLCServer(t, mockOps)
109
109
+
defer server.Close()
110
110
+
111
111
+
// Create fetcher
112
112
+
client := plcclient.NewClient(server.URL)
113
113
+
defer client.Close()
114
114
+
115
115
+
logger := &testLogger{t: t}
116
116
+
ops, _ := storage.NewOperations(logger)
117
117
+
defer ops.Close()
118
118
+
119
119
+
fetcher := internalsync.NewFetcher(client, ops, logger)
120
120
+
121
121
+
// Previous bundle had cid1 and cid2 at boundary
122
122
+
prevBoundaryCIDs := map[string]bool{
123
123
+
"cid1": true,
124
124
+
"cid2": true,
125
125
+
}
126
126
+
127
127
+
mempool := newMockMempool()
128
128
+
129
129
+
// Fetch
130
130
+
newOps, fetchCount, err := fetcher.FetchToMempool(
131
131
+
context.Background(),
132
132
+
boundaryTime.Add(-1*time.Second).Format(time.RFC3339Nano),
133
133
+
prevBoundaryCIDs,
134
134
+
10,
135
135
+
true, // quiet
136
136
+
mempool,
137
137
+
0,
138
138
+
)
139
139
+
140
140
+
if err != nil {
141
141
+
t.Fatalf("FetchToMempool failed: %v", err)
142
142
+
}
143
143
+
144
144
+
// Should have filtered out cid1 and cid2 (duplicates)
145
145
+
// Only cid3 should be returned
146
146
+
if len(newOps) != 1 {
147
147
+
t.Errorf("expected 1 unique operation, got %d", len(newOps))
148
148
+
}
149
149
+
150
150
+
if len(newOps) > 0 && newOps[0].CID != "cid3" {
151
151
+
t.Errorf("expected cid3, got %s", newOps[0].CID)
152
152
+
}
153
153
+
154
154
+
if fetchCount == 0 {
155
155
+
t.Error("expected at least one fetch")
156
156
+
}
157
157
+
})
158
158
+
159
159
+
t.Run("ConcurrentFetchDedup", func(t *testing.T) {
160
160
+
baseTime := time.Now()
161
161
+
mockOps := make([]plcclient.PLCOperation, 50)
162
162
+
for i := 0; i < 50; i++ {
163
163
+
mockOps[i] = plcclient.PLCOperation{
164
164
+
DID: fmt.Sprintf("did:plc:%03d", i),
165
165
+
CID: fmt.Sprintf("cid%03d", i),
166
166
+
CreatedAt: baseTime.Add(time.Duration(i) * time.Second),
167
167
+
}
168
168
+
}
169
169
+
170
170
+
server := createMockPLCServer(t, mockOps)
171
171
+
defer server.Close()
172
172
+
173
173
+
client := plcclient.NewClient(server.URL)
174
174
+
defer client.Close()
175
175
+
176
176
+
logger := &testLogger{t: t}
177
177
+
storageOps, _ := storage.NewOperations(logger)
178
178
+
defer storageOps.Close()
179
179
+
180
180
+
fetcher := internalsync.NewFetcher(client, storageOps, logger)
181
181
+
mempool := newMockMempool()
182
182
+
183
183
+
// First fetch
184
184
+
initialCount := mempool.Count()
185
185
+
_, _, err := fetcher.FetchToMempool(
186
186
+
context.Background(),
187
187
+
"",
188
188
+
nil,
189
189
+
30,
190
190
+
true,
191
191
+
mempool,
192
192
+
0,
193
193
+
)
194
194
+
if err != nil {
195
195
+
t.Fatalf("First fetch failed: %v", err)
196
196
+
}
197
197
+
198
198
+
countAfterFirst := mempool.Count()
199
199
+
addedFirst := countAfterFirst - initialCount
200
200
+
201
201
+
if addedFirst == 0 {
202
202
+
t.Fatal("first fetch should add operations")
203
203
+
}
204
204
+
205
205
+
// Second fetch with same cursor - mempool deduplicates
206
206
+
countBeforeSecond := mempool.Count()
207
207
+
_, _, err = fetcher.FetchToMempool(
208
208
+
context.Background(),
209
209
+
"", // Same cursor - fetches same data
210
210
+
nil,
211
211
+
30,
212
212
+
true,
213
213
+
mempool,
214
214
+
1,
215
215
+
)
216
216
+
if err != nil {
217
217
+
t.Fatalf("Second fetch failed: %v", err)
218
218
+
}
219
219
+
220
220
+
countAfterSecond := mempool.Count()
221
221
+
addedSecond := countAfterSecond - countBeforeSecond
222
222
+
223
223
+
// Mempool's Add() method deduplicates by CID
224
224
+
// So second fetch should add 0 (all duplicates)
225
225
+
if addedSecond != 0 {
226
226
+
t.Errorf("expected 0 new ops in mempool after second fetch (duplicates), got %d", addedSecond)
227
227
+
}
228
228
+
229
229
+
t.Logf("First fetch: +%d ops, Second fetch: +%d ops (deduped)", addedFirst, addedSecond)
230
230
+
})
231
231
+
232
232
+
t.Run("EmptyBoundaryCIDs", func(t *testing.T) {
233
233
+
baseTime := time.Now()
234
234
+
mockOps := []plcclient.PLCOperation{
235
235
+
{DID: "did:plc:001", CID: "cid1", CreatedAt: baseTime},
236
236
+
}
237
237
+
238
238
+
server := createMockPLCServer(t, mockOps)
239
239
+
defer server.Close()
240
240
+
241
241
+
client := plcclient.NewClient(server.URL)
242
242
+
defer client.Close()
243
243
+
244
244
+
logger := &testLogger{t: t}
245
245
+
storageOps, _ := storage.NewOperations(logger)
246
246
+
defer storageOps.Close()
247
247
+
248
248
+
fetcher := internalsync.NewFetcher(client, storageOps, logger)
249
249
+
mempool := newMockMempool()
250
250
+
251
251
+
// Fetch with no boundary CIDs (genesis bundle)
252
252
+
newOps, _, err := fetcher.FetchToMempool(
253
253
+
context.Background(),
254
254
+
"",
255
255
+
nil, // No previous boundary
256
256
+
10,
257
257
+
true,
258
258
+
mempool,
259
259
+
0,
260
260
+
)
261
261
+
262
262
+
if err != nil {
263
263
+
t.Fatalf("FetchToMempool failed: %v", err)
264
264
+
}
265
265
+
266
266
+
if len(newOps) != 1 {
267
267
+
t.Errorf("expected 1 operation, got %d", len(newOps))
268
268
+
}
269
269
+
})
270
270
+
}
271
271
+
272
272
+
func TestFetcherRetry(t *testing.T) {
273
273
+
t.Run("TransientFailures", func(t *testing.T) {
274
274
+
attemptCount := 0
275
275
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
276
276
+
attemptCount++
277
277
+
278
278
+
if attemptCount < 3 {
279
279
+
// Fail first 2 attempts
280
280
+
w.WriteHeader(500)
281
281
+
return
282
282
+
}
283
283
+
284
284
+
// Succeed on 3rd attempt
285
285
+
w.Header().Set("Content-Type", "application/x-ndjson")
286
286
+
op := plcclient.PLCOperation{
287
287
+
DID: "did:plc:test",
288
288
+
CID: "cid1",
289
289
+
CreatedAt: time.Now(),
290
290
+
}
291
291
+
json.NewEncoder(w).Encode(op)
292
292
+
}))
293
293
+
defer server.Close()
294
294
+
295
295
+
client := plcclient.NewClient(server.URL)
296
296
+
defer client.Close()
297
297
+
298
298
+
// Should retry and eventually succeed
299
299
+
_, err := client.Export(context.Background(), plcclient.ExportOptions{Count: 1})
300
300
+
if err != nil {
301
301
+
t.Fatalf("expected retry to succeed, got error: %v", err)
302
302
+
}
303
303
+
304
304
+
if attemptCount < 3 {
305
305
+
t.Errorf("expected at least 3 attempts, got %d", attemptCount)
306
306
+
}
307
307
+
})
308
308
+
309
309
+
t.Run("RateLimitHandling", func(t *testing.T) {
310
310
+
attemptCount := 0
311
311
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
312
312
+
attemptCount++
313
313
+
314
314
+
if attemptCount == 1 {
315
315
+
// Return 429 with Retry-After
316
316
+
w.Header().Set("Retry-After", "1")
317
317
+
w.WriteHeader(429)
318
318
+
return
319
319
+
}
320
320
+
321
321
+
// Success
322
322
+
w.Header().Set("Content-Type", "application/x-ndjson")
323
323
+
op := plcclient.PLCOperation{
324
324
+
DID: "did:plc:test",
325
325
+
CID: "cid1",
326
326
+
CreatedAt: time.Now(),
327
327
+
}
328
328
+
json.NewEncoder(w).Encode(op)
329
329
+
}))
330
330
+
defer server.Close()
331
331
+
332
332
+
client := plcclient.NewClient(server.URL)
333
333
+
defer client.Close()
334
334
+
335
335
+
startTime := time.Now()
336
336
+
_, err := client.Export(context.Background(), plcclient.ExportOptions{Count: 1})
337
337
+
elapsed := time.Since(startTime)
338
338
+
339
339
+
if err != nil {
340
340
+
t.Fatalf("expected success after rate limit, got: %v", err)
341
341
+
}
342
342
+
343
343
+
// Should have waited at least 1 second
344
344
+
if elapsed < 1*time.Second {
345
345
+
t.Errorf("expected wait for rate limit, elapsed: %v", elapsed)
346
346
+
}
347
347
+
348
348
+
if attemptCount != 2 {
349
349
+
t.Errorf("expected 2 attempts, got %d", attemptCount)
350
350
+
}
351
351
+
})
352
352
+
353
353
+
t.Run("ContextCancellation", func(t *testing.T) {
354
354
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
355
355
+
// Slow response
356
356
+
time.Sleep(5 * time.Second)
357
357
+
}))
358
358
+
defer server.Close()
359
359
+
360
360
+
client := plcclient.NewClient(server.URL)
361
361
+
defer client.Close()
362
362
+
363
363
+
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
364
364
+
defer cancel()
365
365
+
366
366
+
_, err := client.Export(ctx, plcclient.ExportOptions{Count: 1})
367
367
+
if err == nil {
368
368
+
t.Error("expected timeout error, got nil")
369
369
+
}
370
370
+
})
371
371
+
}
372
372
+
373
373
+
func TestFetcherMempoolIntegration(t *testing.T) {
374
374
+
t.Run("AutoSaveAfterFetch", func(t *testing.T) {
375
375
+
baseTime := time.Now()
376
376
+
mockOps := []plcclient.PLCOperation{
377
377
+
{DID: "did:plc:001", CID: "cid1", CreatedAt: baseTime},
378
378
+
{DID: "did:plc:002", CID: "cid2", CreatedAt: baseTime.Add(1 * time.Second)},
379
379
+
}
380
380
+
381
381
+
server := createMockPLCServer(t, mockOps)
382
382
+
defer server.Close()
383
383
+
384
384
+
client := plcclient.NewClient(server.URL)
385
385
+
defer client.Close()
386
386
+
387
387
+
logger := &testLogger{t: t}
388
388
+
storageOps, _ := storage.NewOperations(logger)
389
389
+
defer storageOps.Close()
390
390
+
391
391
+
fetcher := internalsync.NewFetcher(client, storageOps, logger)
392
392
+
mempool := newMockMempool()
393
393
+
394
394
+
_, _, err := fetcher.FetchToMempool(
395
395
+
context.Background(),
396
396
+
"",
397
397
+
nil,
398
398
+
10,
399
399
+
true,
400
400
+
mempool,
401
401
+
0,
402
402
+
)
403
403
+
404
404
+
if err != nil {
405
405
+
t.Fatalf("FetchToMempool failed: %v", err)
406
406
+
}
407
407
+
408
408
+
// Verify mempool.SaveIfNeeded was called
409
409
+
if mempool.saveCount == 0 {
410
410
+
t.Error("expected mempool to be saved after fetch")
411
411
+
}
412
412
+
})
413
413
+
}
414
414
+
415
415
+
// ====================================================================================
416
416
+
// CLONER TESTS
417
417
+
// ====================================================================================
418
418
+
419
419
+
func TestClonerAtomicity(t *testing.T) {
420
420
+
// Note: Cloner tests would need more complex mocking
421
421
+
// Including mock HTTP server, file system operations, etc.
422
422
+
// This is a template showing what to test
423
423
+
424
424
+
t.Run("InterruptedClone", func(t *testing.T) {
425
425
+
// TODO: Test context cancellation mid-download
426
426
+
// Verify:
427
427
+
// - .tmp files are cleaned up OR kept for resume
428
428
+
// - Index not updated for incomplete downloads
429
429
+
// - Partial progress can resume with --resume flag
430
430
+
})
431
431
+
432
432
+
t.Run("HashVerificationFailure", func(t *testing.T) {
433
433
+
// TODO: Mock server returns file with wrong hash
434
434
+
// Verify:
435
435
+
// - File is deleted (or .tmp is not renamed)
436
436
+
// - Bundle NOT added to index
437
437
+
// - Error returned to user
438
438
+
})
439
439
+
440
440
+
t.Run("IndexUpdateTiming", func(t *testing.T) {
441
441
+
// CRITICAL: Index must only update AFTER file write succeeds
442
442
+
// TODO: Implement test that verifies ordering
443
443
+
})
444
444
+
}
445
445
+
446
446
+
// ====================================================================================
447
447
+
// SYNC LOOP TESTS
448
448
+
// ====================================================================================
449
449
+
450
450
+
func TestSyncLoopBehavior(t *testing.T) {
451
451
+
t.Run("CatchUpDetection", func(t *testing.T) {
452
452
+
// Mock manager
453
453
+
mockMgr := &mockSyncManager{
454
454
+
lastBundle: 5,
455
455
+
mempoolCount: 500,
456
456
+
}
457
457
+
458
458
+
logger := &testLogger{t: t}
459
459
+
config := &internalsync.SyncLoopConfig{
460
460
+
MaxBundles: 0,
461
461
+
Verbose: false,
462
462
+
Logger: logger,
463
463
+
}
464
464
+
465
465
+
// First sync should detect "caught up" when no progress
466
466
+
synced, err := internalsync.SyncOnce(context.Background(), mockMgr, config, false)
467
467
+
468
468
+
if err != nil {
469
469
+
t.Fatalf("SyncOnce failed: %v", err)
470
470
+
}
471
471
+
472
472
+
// Should return 0 if already caught up
473
473
+
if synced != 0 {
474
474
+
t.Logf("Note: synced %d bundles (manager may not be caught up)", synced)
475
475
+
}
476
476
+
})
477
477
+
478
478
+
t.Run("MaxBundlesLimit", func(t *testing.T) {
479
479
+
mockMgr := &mockSyncManager{
480
480
+
lastBundle: 0,
481
481
+
mempoolCount: 10000, // Always has enough for bundle
482
482
+
}
483
483
+
484
484
+
logger := &testLogger{t: t}
485
485
+
config := &internalsync.SyncLoopConfig{
486
486
+
MaxBundles: 3,
487
487
+
Verbose: false,
488
488
+
Logger: logger,
489
489
+
}
490
490
+
491
491
+
ctx := context.Background()
492
492
+
synced, err := internalsync.SyncOnce(ctx, mockMgr, config, false)
493
493
+
494
494
+
if err != nil {
495
495
+
t.Fatalf("SyncOnce failed: %v", err)
496
496
+
}
497
497
+
498
498
+
// Should respect max limit
499
499
+
if synced > 3 {
500
500
+
t.Errorf("synced %d bundles, but max was 3", synced)
501
501
+
}
502
502
+
})
503
503
+
504
504
+
t.Run("GracefulShutdown", func(t *testing.T) {
505
505
+
mockMgr := &mockSyncManager{
506
506
+
lastBundle: 0,
507
507
+
mempoolCount: 10000,
508
508
+
fetchDelay: 50 * time.Millisecond,
509
509
+
}
510
510
+
511
511
+
logger := &testLogger{t: t}
512
512
+
config := &internalsync.SyncLoopConfig{
513
513
+
Interval: 100 * time.Millisecond,
514
514
+
MaxBundles: 0,
515
515
+
Verbose: false,
516
516
+
Logger: logger,
517
517
+
}
518
518
+
519
519
+
ctx, cancel := context.WithCancel(context.Background())
520
520
+
521
521
+
// Start sync loop in goroutine
522
522
+
done := make(chan error, 1)
523
523
+
go func() {
524
524
+
done <- internalsync.RunSyncLoop(ctx, mockMgr, config)
525
525
+
}()
526
526
+
527
527
+
// Let it run briefly (should complete at least one cycle)
528
528
+
time.Sleep(250 * time.Millisecond)
529
529
+
530
530
+
// Cancel context
531
531
+
cancel()
532
532
+
533
533
+
// Should exit gracefully with context.Canceled error
534
534
+
select {
535
535
+
case err := <-done:
536
536
+
// Expected: context.Canceled or nil
537
537
+
if err != nil && err != context.Canceled {
538
538
+
t.Errorf("unexpected error on shutdown: %v", err)
539
539
+
}
540
540
+
t.Logf("Sync loop stopped cleanly: %v", err)
541
541
+
542
542
+
case <-time.After(2 * time.Second):
543
543
+
t.Error("sync loop did not stop within timeout after context cancellation")
544
544
+
}
545
545
+
546
546
+
// NOTE: Mempool saving on shutdown is handled by the caller (commands/server),
547
547
+
// not by the sync loop itself. The sync loop only respects context cancellation.
548
548
+
//
549
549
+
// For mempool save testing, see command-level tests.
550
550
+
})
551
551
+
}
552
552
+
553
553
+
// ====================================================================================
554
554
+
// BUNDLER TESTS
555
555
+
// ====================================================================================
556
556
+
557
557
+
func TestBundlerCreateBundle(t *testing.T) {
558
558
+
logger := &testLogger{t: t}
559
559
+
storageOps, _ := storage.NewOperations(logger)
560
560
+
defer storageOps.Close()
561
561
+
562
562
+
t.Run("BasicBundleCreation", func(t *testing.T) {
563
563
+
operations := makeTestOperations(10000)
564
564
+
cursor := operations[len(operations)-1].CreatedAt.Format(time.RFC3339Nano)
565
565
+
566
566
+
bundle := internalsync.CreateBundle(1, operations, cursor, "", storageOps)
567
567
+
568
568
+
if bundle.BundleNumber != 1 {
569
569
+
t.Errorf("wrong bundle number: got %d, want 1", bundle.BundleNumber)
570
570
+
}
571
571
+
572
572
+
if len(bundle.Operations) != 10000 {
573
573
+
t.Errorf("wrong operation count: got %d, want 10000", len(bundle.Operations))
574
574
+
}
575
575
+
576
576
+
if bundle.DIDCount == 0 {
577
577
+
t.Error("DIDCount should not be zero")
578
578
+
}
579
579
+
580
580
+
if len(bundle.BoundaryCIDs) == 0 {
581
581
+
t.Error("BoundaryCIDs should not be empty")
582
582
+
}
583
583
+
584
584
+
if bundle.Cursor != cursor {
585
585
+
t.Error("cursor mismatch")
586
586
+
}
587
587
+
})
588
588
+
589
589
+
t.Run("GenesisBundle", func(t *testing.T) {
590
590
+
operations := makeTestOperations(10000)
591
591
+
cursor := operations[len(operations)-1].CreatedAt.Format(time.RFC3339Nano)
592
592
+
593
593
+
bundle := internalsync.CreateBundle(1, operations, cursor, "", storageOps)
594
594
+
595
595
+
// Genesis should have empty parent
596
596
+
if bundle.Parent != "" {
597
597
+
t.Errorf("genesis bundle should have empty parent, got %s", bundle.Parent)
598
598
+
}
599
599
+
})
600
600
+
601
601
+
t.Run("ChainedBundle", func(t *testing.T) {
602
602
+
operations := makeTestOperations(10000)
603
603
+
cursor := operations[len(operations)-1].CreatedAt.Format(time.RFC3339Nano)
604
604
+
parentHash := "parent_hash_from_bundle_1"
605
605
+
606
606
+
bundle := internalsync.CreateBundle(2, operations, cursor, parentHash, storageOps)
607
607
+
608
608
+
if bundle.Parent != parentHash {
609
609
+
t.Errorf("parent mismatch: got %s, want %s", bundle.Parent, parentHash)
610
610
+
}
611
611
+
612
612
+
if bundle.BundleNumber != 2 {
613
613
+
t.Error("bundle number should be 2")
614
614
+
}
615
615
+
})
616
616
+
617
617
+
t.Run("BoundaryTimestamps", func(t *testing.T) {
618
618
+
baseTime := time.Now()
619
619
+
620
620
+
// Create operations where last 5 share same timestamp
621
621
+
operations := makeTestOperations(10000)
622
622
+
for i := 9995; i < 10000; i++ {
623
623
+
operations[i].CreatedAt = baseTime
624
624
+
}
625
625
+
626
626
+
cursor := baseTime.Format(time.RFC3339Nano)
627
627
+
bundle := internalsync.CreateBundle(1, operations, cursor, "", storageOps)
628
628
+
629
629
+
// Should capture all 5 CIDs at boundary
630
630
+
if len(bundle.BoundaryCIDs) != 5 {
631
631
+
t.Errorf("expected 5 boundary CIDs, got %d", len(bundle.BoundaryCIDs))
632
632
+
}
633
633
+
})
634
634
+
}
635
635
+
636
636
+
// ====================================================================================
637
637
+
// MOCK SERVER & HELPERS
638
638
+
// ====================================================================================
639
639
+
640
640
+
func createMockPLCServer(_ *testing.T, operations []plcclient.PLCOperation) *httptest.Server {
641
641
+
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
642
642
+
if r.URL.Path != "/export" {
643
643
+
w.WriteHeader(404)
644
644
+
return
645
645
+
}
646
646
+
647
647
+
w.Header().Set("Content-Type", "application/x-ndjson")
648
648
+
649
649
+
// Return operations as JSONL
650
650
+
for _, op := range operations {
651
651
+
json.NewEncoder(w).Encode(op)
652
652
+
}
653
653
+
}))
654
654
+
}
655
655
+
656
656
+
func makeTestOperations(count int) []plcclient.PLCOperation {
657
657
+
ops := make([]plcclient.PLCOperation, count)
658
658
+
baseTime := time.Now().Add(-time.Hour)
659
659
+
660
660
+
for i := 0; i < count; i++ {
661
661
+
ops[i] = plcclient.PLCOperation{
662
662
+
DID: fmt.Sprintf("did:plc:test%06d", i),
663
663
+
CID: fmt.Sprintf("bafy%06d", i),
664
664
+
CreatedAt: baseTime.Add(time.Duration(i) * time.Second),
665
665
+
}
666
666
+
}
667
667
+
668
668
+
return ops
669
669
+
}
670
670
+
671
671
+
// Mock sync manager for testing
672
672
+
type mockSyncManager struct {
673
673
+
lastBundle int
674
674
+
mempoolCount int
675
675
+
fetchDelay time.Duration
676
676
+
mempoolSaveCount int
677
677
+
mu sync.Mutex
678
678
+
}
679
679
+
680
680
+
func (m *mockSyncManager) GetLastBundleNumber() int {
681
681
+
m.mu.Lock()
682
682
+
defer m.mu.Unlock()
683
683
+
return m.lastBundle
684
684
+
}
685
685
+
686
686
+
func (m *mockSyncManager) GetMempoolCount() int {
687
687
+
m.mu.Lock()
688
688
+
defer m.mu.Unlock()
689
689
+
return m.mempoolCount
690
690
+
}
691
691
+
692
692
+
func (m *mockSyncManager) FetchAndSaveNextBundle(ctx context.Context, quiet bool) (int, time.Duration, error) {
693
693
+
m.mu.Lock()
694
694
+
defer m.mu.Unlock()
695
695
+
696
696
+
if m.fetchDelay > 0 {
697
697
+
time.Sleep(m.fetchDelay)
698
698
+
}
699
699
+
700
700
+
// Simulate creating bundle if we have enough ops
701
701
+
if m.mempoolCount >= 10000 {
702
702
+
m.lastBundle++
703
703
+
m.mempoolCount -= 10000
704
704
+
return m.lastBundle, 10 * time.Millisecond, nil
705
705
+
}
706
706
+
707
707
+
// Not enough ops
708
708
+
return 0, 0, fmt.Errorf("insufficient operations")
709
709
+
}
710
710
+
711
711
+
func (m *mockSyncManager) SaveMempool() error {
712
712
+
m.mu.Lock()
713
713
+
defer m.mu.Unlock()
714
714
+
m.mempoolSaveCount++
715
715
+
return nil
716
716
+
}
717
717
+
718
718
+
func TestMockMempoolDeduplication(t *testing.T) {
719
719
+
m := newMockMempool()
720
720
+
721
721
+
op1 := plcclient.PLCOperation{
722
722
+
CID: "duplicate_cid",
723
723
+
DID: "did:plc:test",
724
724
+
CreatedAt: time.Now(),
725
725
+
}
726
726
+
727
727
+
// Add first time
728
728
+
added, _ := m.Add([]plcclient.PLCOperation{op1})
729
729
+
if added != 1 {
730
730
+
t.Fatalf("first add should return 1, got %d", added)
731
731
+
}
732
732
+
733
733
+
// Add same CID again
734
734
+
added, _ = m.Add([]plcclient.PLCOperation{op1})
735
735
+
if added != 0 {
736
736
+
t.Fatalf("duplicate add should return 0, got %d", added)
737
737
+
}
738
738
+
739
739
+
if m.Count() != 1 {
740
740
+
t.Fatalf("count should be 1, got %d", m.Count())
741
741
+
}
742
742
+
}
+218
internal/types/types_test.go
···
1
1
+
package types_test
2
2
+
3
3
+
import (
4
4
+
"bytes"
5
5
+
"fmt"
6
6
+
"testing"
7
7
+
8
8
+
"tangled.org/atscan.net/plcbundle/internal/types"
9
9
+
)
10
10
+
11
11
+
// ====================================================================================
12
12
+
// CONSTANT VALIDATION TESTS
13
13
+
// ====================================================================================
14
14
+
15
15
+
func TestConstants(t *testing.T) {
16
16
+
t.Run("BundleSize", func(t *testing.T) {
17
17
+
if types.BUNDLE_SIZE != 10000 {
18
18
+
t.Errorf("BUNDLE_SIZE = %d, want 10000", types.BUNDLE_SIZE)
19
19
+
}
20
20
+
21
21
+
// Ensure it's a reasonable size
22
22
+
if types.BUNDLE_SIZE < 1000 {
23
23
+
t.Error("BUNDLE_SIZE too small")
24
24
+
}
25
25
+
26
26
+
if types.BUNDLE_SIZE > 100000 {
27
27
+
t.Error("BUNDLE_SIZE too large")
28
28
+
}
29
29
+
})
30
30
+
31
31
+
t.Run("IndexFile", func(t *testing.T) {
32
32
+
if types.INDEX_FILE != "plc_bundles.json" {
33
33
+
t.Errorf("INDEX_FILE = %s, want plc_bundles.json", types.INDEX_FILE)
34
34
+
}
35
35
+
36
36
+
// Should be a valid filename
37
37
+
if types.INDEX_FILE == "" {
38
38
+
t.Error("INDEX_FILE should not be empty")
39
39
+
}
40
40
+
41
41
+
// Should have .json extension
42
42
+
if len(types.INDEX_FILE) < 5 || types.INDEX_FILE[len(types.INDEX_FILE)-5:] != ".json" {
43
43
+
t.Error("INDEX_FILE should have .json extension")
44
44
+
}
45
45
+
})
46
46
+
47
47
+
t.Run("IndexVersion", func(t *testing.T) {
48
48
+
if types.INDEX_VERSION != "1.0" {
49
49
+
t.Errorf("INDEX_VERSION = %s, want 1.0", types.INDEX_VERSION)
50
50
+
}
51
51
+
52
52
+
// Should follow semantic versioning format (at least major.minor)
53
53
+
if len(types.INDEX_VERSION) < 3 {
54
54
+
t.Error("INDEX_VERSION should follow semantic versioning")
55
55
+
}
56
56
+
})
57
57
+
}
58
58
+
59
59
+
// ====================================================================================
60
60
+
// LOGGER INTERFACE COMPLIANCE TESTS
61
61
+
// ====================================================================================
62
62
+
63
63
+
func TestLoggerInterface(t *testing.T) {
64
64
+
t.Run("MockLoggerImplementsInterface", func(t *testing.T) {
65
65
+
var logger types.Logger = &mockLogger{}
66
66
+
67
67
+
// Should compile and not panic
68
68
+
logger.Printf("test %s", "message")
69
69
+
logger.Println("test", "message")
70
70
+
})
71
71
+
72
72
+
t.Run("BufferedLoggerImplementation", func(t *testing.T) {
73
73
+
buf := &bytes.Buffer{}
74
74
+
logger := &bufferedLogger{buf: buf}
75
75
+
76
76
+
// Cast to interface
77
77
+
var _ types.Logger = logger
78
78
+
79
79
+
logger.Printf("formatted %s %d", "message", 42)
80
80
+
logger.Println("plain", "message")
81
81
+
82
82
+
output := buf.String()
83
83
+
84
84
+
if !containsString(output, "formatted message 42") {
85
85
+
t.Error("Printf output not captured")
86
86
+
}
87
87
+
88
88
+
if !containsString(output, "plain message") {
89
89
+
t.Error("Println output not captured")
90
90
+
}
91
91
+
})
92
92
+
93
93
+
t.Run("NullLoggerImplementation", func(t *testing.T) {
94
94
+
// Logger that discards all output
95
95
+
logger := &nullLogger{}
96
96
+
97
97
+
// Should not panic
98
98
+
var _ types.Logger = logger
99
99
+
logger.Printf("test %s", "ignored")
100
100
+
logger.Println("also", "ignored")
101
101
+
})
102
102
+
103
103
+
t.Run("MultiLoggerImplementation", func(t *testing.T) {
104
104
+
// Logger that writes to multiple destinations
105
105
+
buf1 := &bytes.Buffer{}
106
106
+
buf2 := &bytes.Buffer{}
107
107
+
108
108
+
logger := &multiLogger{
109
109
+
loggers: []types.Logger{
110
110
+
&bufferedLogger{buf: buf1},
111
111
+
&bufferedLogger{buf: buf2},
112
112
+
},
113
113
+
}
114
114
+
115
115
+
var _ types.Logger = logger
116
116
+
117
117
+
logger.Printf("test %s", "message")
118
118
+
119
119
+
// Both buffers should have the message
120
120
+
if !containsString(buf1.String(), "test message") {
121
121
+
t.Error("first logger didn't receive message")
122
122
+
}
123
123
+
124
124
+
if !containsString(buf2.String(), "test message") {
125
125
+
t.Error("second logger didn't receive message")
126
126
+
}
127
127
+
})
128
128
+
}
129
129
+
130
130
+
// ====================================================================================
131
131
+
// CONSTANT USAGE IN CALCULATIONS
132
132
+
// ====================================================================================
133
133
+
134
134
+
func TestConstantUsage(t *testing.T) {
135
135
+
t.Run("GlobalPositionCalculation", func(t *testing.T) {
136
136
+
// Global position = bundleNumber * BUNDLE_SIZE + position
137
137
+
bundleNumber := 42
138
138
+
position := 1337
139
139
+
140
140
+
globalPos := bundleNumber*types.BUNDLE_SIZE + position
141
141
+
expected := 420000 + 1337
142
142
+
143
143
+
if globalPos != expected {
144
144
+
t.Errorf("global position calculation incorrect: got %d, want %d", globalPos, expected)
145
145
+
}
146
146
+
})
147
147
+
148
148
+
t.Run("BundleFromGlobalPosition", func(t *testing.T) {
149
149
+
globalPos := 88410345
150
150
+
151
151
+
bundleNumber := globalPos / types.BUNDLE_SIZE
152
152
+
position := globalPos % types.BUNDLE_SIZE
153
153
+
154
154
+
if bundleNumber != 8841 {
155
155
+
t.Errorf("bundle calculation wrong: got %d, want 8841", bundleNumber)
156
156
+
}
157
157
+
158
158
+
if position != 345 {
159
159
+
t.Errorf("position calculation wrong: got %d, want 345", position)
160
160
+
}
161
161
+
})
162
162
+
163
163
+
t.Run("OperationCountPerBundle", func(t *testing.T) {
164
164
+
// Each bundle should have exactly BUNDLE_SIZE operations
165
165
+
bundleCount := 100
166
166
+
totalOps := bundleCount * types.BUNDLE_SIZE
167
167
+
168
168
+
if totalOps != 1000000 {
169
169
+
t.Errorf("total ops calculation: got %d, want 1000000", totalOps)
170
170
+
}
171
171
+
})
172
172
+
}
173
173
+
174
174
+
// ====================================================================================
175
175
+
// HELPER IMPLEMENTATIONS
176
176
+
// ====================================================================================
177
177
+
178
178
+
type mockLogger struct{}
179
179
+
180
180
+
func (l *mockLogger) Printf(format string, v ...interface{}) {}
181
181
+
func (l *mockLogger) Println(v ...interface{}) {}
182
182
+
183
183
+
type bufferedLogger struct {
184
184
+
buf *bytes.Buffer
185
185
+
}
186
186
+
187
187
+
func (l *bufferedLogger) Printf(format string, v ...interface{}) {
188
188
+
fmt.Fprintf(l.buf, format+"\n", v...)
189
189
+
}
190
190
+
191
191
+
func (l *bufferedLogger) Println(v ...interface{}) {
192
192
+
fmt.Fprintln(l.buf, v...)
193
193
+
}
194
194
+
195
195
+
type nullLogger struct{}
196
196
+
197
197
+
func (l *nullLogger) Printf(format string, v ...interface{}) {}
198
198
+
func (l *nullLogger) Println(v ...interface{}) {}
199
199
+
200
200
+
type multiLogger struct {
201
201
+
loggers []types.Logger
202
202
+
}
203
203
+
204
204
+
func (l *multiLogger) Printf(format string, v ...interface{}) {
205
205
+
for _, logger := range l.loggers {
206
206
+
logger.Printf(format, v...)
207
207
+
}
208
208
+
}
209
209
+
210
210
+
func (l *multiLogger) Println(v ...interface{}) {
211
211
+
for _, logger := range l.loggers {
212
212
+
logger.Println(v...)
213
213
+
}
214
214
+
}
215
215
+
216
216
+
func containsString(haystack, needle string) bool {
217
217
+
return bytes.Contains([]byte(haystack), []byte(needle))
218
218
+
}
+32
server/helpers_test.go
···
1
1
+
// repo/server/helpers_test.go
2
2
+
package server_test
3
3
+
4
4
+
import (
5
5
+
"io"
6
6
+
"net/http"
7
7
+
"net/http/httptest"
8
8
+
"testing"
9
9
+
)
10
10
+
11
11
+
func TestServerHelperFunctions(t *testing.T) {
12
12
+
// Note: Many helper functions are unexported, so we test them indirectly
13
13
+
14
14
+
t.Run("FormatNumber_ViaOutput", func(t *testing.T) {
15
15
+
// This tests the formatNumber function indirectly
16
16
+
srv, _, cleanup := setupTestServer(t, false)
17
17
+
defer cleanup()
18
18
+
19
19
+
ts := httptest.NewServer(srv)
20
20
+
defer ts.Close()
21
21
+
22
22
+
resp, _ := http.Get(ts.URL + "/")
23
23
+
body, _ := io.ReadAll(resp.Body)
24
24
+
resp.Body.Close()
25
25
+
26
26
+
// Large numbers should be formatted with commas
27
27
+
// Check if output looks reasonable
28
28
+
if len(body) == 0 {
29
29
+
t.Error("root page is empty")
30
30
+
}
31
31
+
})
32
32
+
}
+5
server/server.go
···
106
106
func (s *Server) GetStartTime() time.Time {
107
107
return s.startTime
108
108
}
109
109
+
110
110
+
// Add this method to Server
111
111
+
func (s *Server) Handler() http.Handler {
112
112
+
return s.createHandler()
113
113
+
}
+1068
server/server_test.go
···
1
1
+
package server_test
2
2
+
3
3
+
import (
4
4
+
"bytes"
5
5
+
"context"
6
6
+
"encoding/json"
7
7
+
"fmt"
8
8
+
"io"
9
9
+
"net/http"
10
10
+
"net/http/httptest"
11
11
+
"path/filepath"
12
12
+
"strings"
13
13
+
"sync"
14
14
+
"testing"
15
15
+
"time"
16
16
+
17
17
+
"github.com/gorilla/websocket"
18
18
+
"tangled.org/atscan.net/plcbundle/bundle"
19
19
+
"tangled.org/atscan.net/plcbundle/internal/bundleindex"
20
20
+
"tangled.org/atscan.net/plcbundle/internal/plcclient"
21
21
+
"tangled.org/atscan.net/plcbundle/internal/storage"
22
22
+
"tangled.org/atscan.net/plcbundle/server"
23
23
+
)
24
24
+
25
25
+
type testLogger struct {
26
26
+
t *testing.T
27
27
+
}
28
28
+
29
29
+
func (l *testLogger) Printf(format string, v ...interface{}) {
30
30
+
l.t.Logf(format, v...)
31
31
+
}
32
32
+
33
33
+
func (l *testLogger) Println(v ...interface{}) {
34
34
+
l.t.Log(v...)
35
35
+
}
36
36
+
37
37
+
// ====================================================================================
38
38
+
// HTTP ENDPOINT TESTS
39
39
+
// ====================================================================================
40
40
+
41
41
+
func TestServerHTTPEndpoints(t *testing.T) {
42
42
+
handler, _, cleanup := setupTestServer(t, false)
43
43
+
defer cleanup()
44
44
+
45
45
+
ts := httptest.NewServer(handler)
46
46
+
defer ts.Close()
47
47
+
48
48
+
t.Run("RootEndpoint", func(t *testing.T) {
49
49
+
resp, err := http.Get(ts.URL + "/")
50
50
+
if err != nil {
51
51
+
t.Fatalf("GET / failed: %v", err)
52
52
+
}
53
53
+
defer resp.Body.Close()
54
54
+
55
55
+
if resp.StatusCode != 200 {
56
56
+
t.Errorf("expected 200, got %d", resp.StatusCode)
57
57
+
}
58
58
+
59
59
+
body, _ := io.ReadAll(resp.Body)
60
60
+
bodyStr := string(body)
61
61
+
62
62
+
// Should contain welcome message
63
63
+
if !strings.Contains(bodyStr, "plcbundle server") {
64
64
+
t.Error("root page missing title")
65
65
+
}
66
66
+
67
67
+
// Should show API endpoints
68
68
+
if !strings.Contains(bodyStr, "API Endpoints") {
69
69
+
t.Error("root page missing API documentation")
70
70
+
}
71
71
+
})
72
72
+
73
73
+
t.Run("IndexJSON", func(t *testing.T) {
74
74
+
resp, err := http.Get(ts.URL + "/index.json")
75
75
+
if err != nil {
76
76
+
t.Fatalf("GET /index.json failed: %v", err)
77
77
+
}
78
78
+
defer resp.Body.Close()
79
79
+
80
80
+
if resp.StatusCode != 200 {
81
81
+
t.Errorf("expected 200, got %d", resp.StatusCode)
82
82
+
}
83
83
+
84
84
+
// Should be JSON
85
85
+
contentType := resp.Header.Get("Content-Type")
86
86
+
if !strings.Contains(contentType, "application/json") {
87
87
+
t.Errorf("wrong content type: %s", contentType)
88
88
+
}
89
89
+
90
90
+
// Parse JSON
91
91
+
var idx bundleindex.Index
92
92
+
if err := json.NewDecoder(resp.Body).Decode(&idx); err != nil {
93
93
+
t.Fatalf("failed to parse index JSON: %v", err)
94
94
+
}
95
95
+
96
96
+
if idx.Version != "1.0" {
97
97
+
t.Errorf("index version mismatch: got %s", idx.Version)
98
98
+
}
99
99
+
})
100
100
+
101
101
+
t.Run("BundleMetadata", func(t *testing.T) {
102
102
+
resp, err := http.Get(ts.URL + "/bundle/1")
103
103
+
if err != nil {
104
104
+
t.Fatalf("GET /bundle/1 failed: %v", err)
105
105
+
}
106
106
+
defer resp.Body.Close()
107
107
+
108
108
+
if resp.StatusCode != 200 {
109
109
+
t.Errorf("expected 200, got %d", resp.StatusCode)
110
110
+
}
111
111
+
112
112
+
var meta bundleindex.BundleMetadata
113
113
+
if err := json.NewDecoder(resp.Body).Decode(&meta); err != nil {
114
114
+
t.Fatalf("failed to parse bundle metadata: %v", err)
115
115
+
}
116
116
+
117
117
+
if meta.BundleNumber != 1 {
118
118
+
t.Error("wrong bundle returned")
119
119
+
}
120
120
+
121
121
+
// Verify it has the fields we set
122
122
+
if meta.ContentHash == "" {
123
123
+
t.Error("metadata missing content hash")
124
124
+
}
125
125
+
})
126
126
+
127
127
+
t.Run("BundleMetadata_NotFound", func(t *testing.T) {
128
128
+
resp, err := http.Get(ts.URL + "/bundle/9999")
129
129
+
if err != nil {
130
130
+
t.Fatalf("GET /bundle/9999 failed: %v", err)
131
131
+
}
132
132
+
defer resp.Body.Close()
133
133
+
134
134
+
if resp.StatusCode != 404 {
135
135
+
t.Errorf("expected 404 for nonexistent bundle, got %d", resp.StatusCode)
136
136
+
}
137
137
+
})
138
138
+
139
139
+
t.Run("BundleMetadata_InvalidNumber", func(t *testing.T) {
140
140
+
resp, err := http.Get(ts.URL + "/bundle/invalid")
141
141
+
if err != nil {
142
142
+
t.Fatalf("GET /bundle/invalid failed: %v", err)
143
143
+
}
144
144
+
defer resp.Body.Close()
145
145
+
146
146
+
if resp.StatusCode != 400 {
147
147
+
t.Errorf("expected 400 for invalid bundle number, got %d", resp.StatusCode)
148
148
+
}
149
149
+
})
150
150
+
151
151
+
t.Run("BundleData_Raw", func(t *testing.T) {
152
152
+
resp, err := http.Get(ts.URL + "/data/1")
153
153
+
if err != nil {
154
154
+
t.Fatalf("GET /data/1 failed: %v", err)
155
155
+
}
156
156
+
defer resp.Body.Close()
157
157
+
158
158
+
if resp.StatusCode != 200 {
159
159
+
// If 500, read error body
160
160
+
if resp.StatusCode == 500 {
161
161
+
body, _ := io.ReadAll(resp.Body)
162
162
+
t.Fatalf("expected 200, got 500. Error: %s", string(body))
163
163
+
}
164
164
+
t.Errorf("expected 200, got %d", resp.StatusCode)
165
165
+
}
166
166
+
167
167
+
// Should be zstd compressed
168
168
+
contentType := resp.Header.Get("Content-Type")
169
169
+
if !strings.Contains(contentType, "application/zstd") {
170
170
+
t.Errorf("wrong content type for raw data: %s", contentType)
171
171
+
}
172
172
+
173
173
+
// Should have content-disposition header
174
174
+
disposition := resp.Header.Get("Content-Disposition")
175
175
+
if !strings.Contains(disposition, "000001.jsonl.zst") {
176
176
+
t.Errorf("wrong disposition header: %s", disposition)
177
177
+
}
178
178
+
179
179
+
// Should have data
180
180
+
data, _ := io.ReadAll(resp.Body)
181
181
+
if len(data) == 0 {
182
182
+
t.Error("bundle data is empty")
183
183
+
}
184
184
+
185
185
+
t.Logf("Bundle data size: %d bytes", len(data))
186
186
+
})
187
187
+
188
188
+
t.Run("BundleJSONL_Decompressed", func(t *testing.T) {
189
189
+
resp, err := http.Get(ts.URL + "/jsonl/1")
190
190
+
if err != nil {
191
191
+
t.Fatalf("GET /jsonl/1 failed: %v", err)
192
192
+
}
193
193
+
defer resp.Body.Close()
194
194
+
195
195
+
if resp.StatusCode != 200 {
196
196
+
t.Errorf("expected 200, got %d", resp.StatusCode)
197
197
+
}
198
198
+
199
199
+
// Should be JSONL
200
200
+
contentType := resp.Header.Get("Content-Type")
201
201
+
if !strings.Contains(contentType, "application/x-ndjson") {
202
202
+
t.Errorf("wrong content type for JSONL: %s", contentType)
203
203
+
}
204
204
+
205
205
+
// Count lines
206
206
+
data, _ := io.ReadAll(resp.Body)
207
207
+
lines := bytes.Count(data, []byte("\n"))
208
208
+
209
209
+
if lines == 0 {
210
210
+
t.Error("JSONL should have lines")
211
211
+
}
212
212
+
})
213
213
+
214
214
+
t.Run("StatusEndpoint", func(t *testing.T) {
215
215
+
resp, err := http.Get(ts.URL + "/status")
216
216
+
if err != nil {
217
217
+
t.Fatalf("GET /status failed: %v", err)
218
218
+
}
219
219
+
defer resp.Body.Close()
220
220
+
221
221
+
if resp.StatusCode != 200 {
222
222
+
t.Errorf("expected 200, got %d", resp.StatusCode)
223
223
+
}
224
224
+
225
225
+
var status server.StatusResponse
226
226
+
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
227
227
+
t.Fatalf("failed to parse status JSON: %v", err)
228
228
+
}
229
229
+
230
230
+
// Verify structure
231
231
+
if status.Server.Version == "" {
232
232
+
t.Error("status missing server version")
233
233
+
}
234
234
+
235
235
+
if status.Bundles.Count < 0 {
236
236
+
t.Error("invalid bundle count")
237
237
+
}
238
238
+
239
239
+
if status.Server.UptimeSeconds < 0 {
240
240
+
t.Error("invalid uptime")
241
241
+
}
242
242
+
})
243
243
+
}
244
244
+
245
245
+
// ====================================================================================
246
246
+
// DID RESOLUTION ENDPOINT TESTS
247
247
+
// ====================================================================================
248
248
+
249
249
+
func TestServerDIDResolution(t *testing.T) {
250
250
+
handler, _, cleanup := setupTestServerWithResolver(t)
251
251
+
defer cleanup()
252
252
+
253
253
+
ts := httptest.NewServer(handler)
254
254
+
defer ts.Close()
255
255
+
256
256
+
// Use valid did:plc format: "did:plc:" + 24 chars base32 (a-z, 2-7 only)
257
257
+
testDID := "did:plc:abc234def567ghi234jkl456" // Valid format
258
258
+
259
259
+
t.Run("DIDDocument", func(t *testing.T) {
260
260
+
resp, err := http.Get(ts.URL + "/" + testDID)
261
261
+
if err != nil {
262
262
+
t.Fatalf("GET /%s failed: %v", testDID, err)
263
263
+
}
264
264
+
defer resp.Body.Close()
265
265
+
266
266
+
// Should be 404 (not in test data) or 500 (no DID index)
267
267
+
// NOT 400 (that means invalid format)
268
268
+
if resp.StatusCode == 400 {
269
269
+
body, _ := io.ReadAll(resp.Body)
270
270
+
t.Fatalf("got 400 (invalid DID format): %s", string(body))
271
271
+
}
272
272
+
273
273
+
if resp.StatusCode == 500 {
274
274
+
t.Log("Expected 500 (no DID index)")
275
275
+
return
276
276
+
}
277
277
+
278
278
+
if resp.StatusCode == 404 {
279
279
+
t.Log("Expected 404 (DID not found)")
280
280
+
return
281
281
+
}
282
282
+
283
283
+
if resp.StatusCode == 200 {
284
284
+
var doc plcclient.DIDDocument
285
285
+
if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil {
286
286
+
t.Fatalf("failed to parse DID document: %v", err)
287
287
+
}
288
288
+
}
289
289
+
})
290
290
+
291
291
+
t.Run("DIDData_RawState", func(t *testing.T) {
292
292
+
resp, err := http.Get(ts.URL + "/" + testDID + "/data")
293
293
+
if err != nil {
294
294
+
t.Fatalf("GET /%s/data failed: %v", testDID, err)
295
295
+
}
296
296
+
defer resp.Body.Close()
297
297
+
298
298
+
// /data endpoint validates format, so 400 is NOT acceptable for valid DID
299
299
+
if resp.StatusCode == 400 {
300
300
+
body, _ := io.ReadAll(resp.Body)
301
301
+
t.Fatalf("got 400 for valid DID format: %s", string(body))
302
302
+
}
303
303
+
304
304
+
// 404 or 500 acceptable (no data / no index)
305
305
+
if resp.StatusCode == 500 || resp.StatusCode == 404 {
306
306
+
t.Logf("Expected error (no DID index): status %d", resp.StatusCode)
307
307
+
return
308
308
+
}
309
309
+
310
310
+
if resp.StatusCode == 200 {
311
311
+
var state plcclient.DIDState
312
312
+
json.NewDecoder(resp.Body).Decode(&state)
313
313
+
}
314
314
+
})
315
315
+
316
316
+
t.Run("DIDAuditLog", func(t *testing.T) {
317
317
+
resp, err := http.Get(ts.URL + "/" + testDID + "/log/audit")
318
318
+
if err != nil {
319
319
+
t.Fatalf("request failed: %v", err)
320
320
+
}
321
321
+
defer resp.Body.Close()
322
322
+
323
323
+
// Should NOT be 400 for valid DID
324
324
+
if resp.StatusCode == 400 {
325
325
+
body, _ := io.ReadAll(resp.Body)
326
326
+
t.Fatalf("got 400 for valid DID format: %s", string(body))
327
327
+
}
328
328
+
329
329
+
// 404, 500 acceptable
330
330
+
if resp.StatusCode == 500 || resp.StatusCode == 404 {
331
331
+
t.Logf("Expected error (no DID index): status %d", resp.StatusCode)
332
332
+
return
333
333
+
}
334
334
+
})
335
335
+
336
336
+
// Test invalid formats on /data endpoint (which validates properly)
337
337
+
t.Run("InvalidDIDFormat_OnDataEndpoint", func(t *testing.T) {
338
338
+
// Test DIDs that START with "did:plc:" but are still invalid
339
339
+
// (routing checks prefix first, so "did:invalid:" returns 404 before validation)
340
340
+
invalidDIDs := []string{
341
341
+
"did:plc:short", // Too short (< 24 chars)
342
342
+
"did:plc:tooshort2345", // Still too short
343
343
+
"did:plc:contains0189invalidchars456", // Has 0,1,8,9 (invalid in base32)
344
344
+
"did:plc:UPPERCASENOTALLOWED1234", // Has uppercase
345
345
+
"did:plc:has-dashes-not-allowed12", // Has dashes
346
346
+
"did:plc:waytoolonggggggggggggggggg", // Too long (> 24 chars)
347
347
+
}
348
348
+
349
349
+
for _, invalidDID := range invalidDIDs {
350
350
+
resp, err := http.Get(ts.URL + "/" + invalidDID + "/data")
351
351
+
if err != nil {
352
352
+
t.Fatalf("request to %s failed: %v", invalidDID, err)
353
353
+
}
354
354
+
355
355
+
body, _ := io.ReadAll(resp.Body)
356
356
+
resp.Body.Close()
357
357
+
358
358
+
// /data endpoint validates format and should return 400
359
359
+
if resp.StatusCode != 400 {
360
360
+
t.Logf("DID %s: got %d (body: %s)", invalidDID, resp.StatusCode, string(body))
361
361
+
// Some might also return 500 if they pass initial checks
362
362
+
// but fail deeper validation - that's also acceptable
363
363
+
if resp.StatusCode != 500 {
364
364
+
t.Errorf("DID %s: expected 400 or 500, got %d", invalidDID, resp.StatusCode)
365
365
+
}
366
366
+
}
367
367
+
}
368
368
+
})
369
369
+
370
370
+
t.Run("InvalidDIDMethod_Returns404", func(t *testing.T) {
371
371
+
// DIDs with wrong method get 404 from routing (never reach validation)
372
372
+
wrongMethodDIDs := []string{
373
373
+
"did:invalid:format",
374
374
+
"did:web:example.com",
375
375
+
"did:key:z6MkhaXgBZDvotDkL5257faiztiGiC2QtKLGpbnnEGta2doK",
376
376
+
"notadid",
377
377
+
}
378
378
+
379
379
+
for _, did := range wrongMethodDIDs {
380
380
+
resp, err := http.Get(ts.URL + "/" + did + "/data")
381
381
+
if err != nil {
382
382
+
t.Fatalf("request failed: %v", err)
383
383
+
}
384
384
+
resp.Body.Close()
385
385
+
386
386
+
// Should get 404 (not a did:plc: path)
387
387
+
if resp.StatusCode != 404 {
388
388
+
t.Errorf("DID %s: expected 404 from routing, got %d", did, resp.StatusCode)
389
389
+
}
390
390
+
}
391
391
+
})
392
392
+
393
393
+
t.Run("NotADIDPath", func(t *testing.T) {
394
394
+
resp, err := http.Get(ts.URL + "/notadid")
395
395
+
if err != nil {
396
396
+
t.Fatalf("request failed: %v", err)
397
397
+
}
398
398
+
defer resp.Body.Close()
399
399
+
400
400
+
if resp.StatusCode != 404 {
401
401
+
t.Errorf("expected 404 for non-DID path, got %d", resp.StatusCode)
402
402
+
}
403
403
+
})
404
404
+
}
405
405
+
406
406
+
// ====================================================================================
407
407
+
// CORS MIDDLEWARE TESTS
408
408
+
// ====================================================================================
409
409
+
410
410
+
func TestServerCORS(t *testing.T) {
411
411
+
srv, _, cleanup := setupTestServer(t, false)
412
412
+
defer cleanup()
413
413
+
414
414
+
ts := httptest.NewServer(srv)
415
415
+
defer ts.Close()
416
416
+
417
417
+
t.Run("CORS_Headers_GET", func(t *testing.T) {
418
418
+
resp, err := http.Get(ts.URL + "/index.json")
419
419
+
if err != nil {
420
420
+
t.Fatalf("request failed: %v", err)
421
421
+
}
422
422
+
defer resp.Body.Close()
423
423
+
424
424
+
// Check CORS headers
425
425
+
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
426
426
+
t.Error("missing or wrong Access-Control-Allow-Origin header")
427
427
+
}
428
428
+
429
429
+
methods := resp.Header.Get("Access-Control-Allow-Methods")
430
430
+
if !strings.Contains(methods, "GET") {
431
431
+
t.Errorf("Access-Control-Allow-Methods missing GET: %s", methods)
432
432
+
}
433
433
+
})
434
434
+
435
435
+
t.Run("CORS_Preflight_OPTIONS", func(t *testing.T) {
436
436
+
req, _ := http.NewRequest("OPTIONS", ts.URL+"/index.json", nil)
437
437
+
req.Header.Set("Access-Control-Request-Method", "GET")
438
438
+
req.Header.Set("Access-Control-Request-Headers", "Content-Type")
439
439
+
440
440
+
resp, err := http.DefaultClient.Do(req)
441
441
+
if err != nil {
442
442
+
t.Fatalf("OPTIONS request failed: %v", err)
443
443
+
}
444
444
+
defer resp.Body.Close()
445
445
+
446
446
+
if resp.StatusCode != 204 {
447
447
+
t.Errorf("expected 204 for OPTIONS, got %d", resp.StatusCode)
448
448
+
}
449
449
+
450
450
+
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
451
451
+
t.Error("CORS headers missing on OPTIONS")
452
452
+
}
453
453
+
454
454
+
maxAge := resp.Header.Get("Access-Control-Max-Age")
455
455
+
if maxAge != "86400" {
456
456
+
t.Errorf("wrong max-age: %s", maxAge)
457
457
+
}
458
458
+
})
459
459
+
}
460
460
+
461
461
+
// ====================================================================================
462
462
+
// WEBSOCKET TESTS
463
463
+
// ====================================================================================
464
464
+
465
465
+
// TestServerWebSocket exercises the /ws endpoint: connecting, streaming
// operations from a cursor, rejecting an unparseable cursor, and
// closing the connection cleanly. The server is created with WebSocket
// support enabled.
func TestServerWebSocket(t *testing.T) {
	srv, _, cleanup := setupTestServer(t, true) // Enable WebSocket
	defer cleanup()

	ts := httptest.NewServer(srv)
	defer ts.Close()

	// Derive the ws:// URL from the test server's http:// URL.
	wsURL := "ws" + strings.TrimPrefix(ts.URL, "http") + "/ws"

	t.Run("WebSocket_Connect", func(t *testing.T) {
		ws, _, err := websocket.DefaultDialer.Dial(wsURL, nil)
		if err != nil {
			t.Fatalf("WebSocket dial failed: %v", err)
		}
		defer ws.Close()

		// Should connect successfully
		t.Log("WebSocket connected successfully")
	})

	t.Run("WebSocket_ReceiveOperations", func(t *testing.T) {
		// cursor=0 requests a replay from the beginning of the stream.
		ws, _, err := websocket.DefaultDialer.Dial(wsURL+"?cursor=0", nil)
		if err != nil {
			t.Fatalf("WebSocket dial failed: %v", err)
		}
		defer ws.Close()

		// Set read deadline
		ws.SetReadDeadline(time.Now().Add(5 * time.Second))

		// Read a message (should get operations or timeout)
		_, message, err := ws.ReadMessage()
		if err != nil {
			// Timeout is OK (no operations available)
			if !strings.Contains(err.Error(), "timeout") {
				t.Logf("Read error (may be OK if no ops): %v", err)
			}
			return
		}

		// If we got a message, verify it's valid JSON
		var op plcclient.PLCOperation
		if err := json.Unmarshal(message, &op); err != nil {
			t.Errorf("received invalid operation JSON: %v", err)
		}

		t.Logf("Received operation: %s", op.CID)
	})

	t.Run("WebSocket_InvalidCursor", func(t *testing.T) {
		// A plain GET is enough here: the handler should reject the bad
		// cursor before attempting the WebSocket upgrade.
		resp, err := http.Get(ts.URL + "/ws?cursor=invalid")
		if err != nil {
			t.Fatalf("request failed: %v", err)
		}
		defer resp.Body.Close()

		if resp.StatusCode != 400 {
			t.Errorf("expected 400 for invalid cursor, got %d", resp.StatusCode)
		}
	})

	t.Run("WebSocket_CloseGracefully", func(t *testing.T) {
		ws, _, err := websocket.DefaultDialer.Dial(wsURL, nil)
		if err != nil {
			t.Fatalf("WebSocket dial failed: %v", err)
		}

		// Close immediately
		err = ws.WriteMessage(websocket.CloseMessage,
			websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
		if err != nil {
			t.Logf("close message error (may be OK): %v", err)
		}

		ws.Close()
		t.Log("WebSocket closed gracefully")
	})
}
543
543
+
544
544
+
// ====================================================================================
545
545
+
// SYNC MODE TESTS
546
546
+
// ====================================================================================
547
547
+
548
548
+
func TestServerSyncMode(t *testing.T) {
549
549
+
srv, _, cleanup := setupTestServer(t, true)
550
550
+
defer cleanup()
551
551
+
552
552
+
ts := httptest.NewServer(srv)
553
553
+
defer ts.Close()
554
554
+
555
555
+
t.Run("MempoolEndpoint", func(t *testing.T) {
556
556
+
resp, err := http.Get(ts.URL + "/mempool")
557
557
+
if err != nil {
558
558
+
t.Fatalf("GET /mempool failed: %v", err)
559
559
+
}
560
560
+
defer resp.Body.Close()
561
561
+
562
562
+
if resp.StatusCode != 200 {
563
563
+
t.Errorf("expected 200, got %d", resp.StatusCode)
564
564
+
}
565
565
+
566
566
+
// Should be JSONL
567
567
+
contentType := resp.Header.Get("Content-Type")
568
568
+
if !strings.Contains(contentType, "application/x-ndjson") {
569
569
+
t.Errorf("wrong content type: %s", contentType)
570
570
+
}
571
571
+
})
572
572
+
573
573
+
t.Run("StatusWithMempool", func(t *testing.T) {
574
574
+
resp, err := http.Get(ts.URL + "/status")
575
575
+
if err != nil {
576
576
+
t.Fatalf("GET /status failed: %v", err)
577
577
+
}
578
578
+
defer resp.Body.Close()
579
579
+
580
580
+
var status server.StatusResponse
581
581
+
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
582
582
+
t.Fatalf("failed to parse status: %v", err)
583
583
+
}
584
584
+
585
585
+
// Sync mode should include mempool stats
586
586
+
if status.Server.SyncMode {
587
587
+
if status.Mempool == nil {
588
588
+
t.Error("sync mode status missing mempool")
589
589
+
}
590
590
+
}
591
591
+
})
592
592
+
}
593
593
+
594
594
+
// ====================================================================================
595
595
+
// CONCURRENT REQUEST TESTS
596
596
+
// ====================================================================================
597
597
+
598
598
+
// TestServerConcurrency hammers the read endpoints from many goroutines
// to flush out data races and shared-state bugs (most useful under
// `go test -race`).
func TestServerConcurrency(t *testing.T) {
	srv, _, cleanup := setupTestServer(t, false)
	defer cleanup()

	ts := httptest.NewServer(srv)
	defer ts.Close()

	t.Run("ConcurrentIndexRequests", func(t *testing.T) {
		var wg sync.WaitGroup
		// Buffered to capacity so no goroutine can block on send.
		errors := make(chan error, 100)

		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				resp, err := http.Get(ts.URL + "/index.json")
				if err != nil {
					errors <- err
					return
				}
				defer resp.Body.Close()

				if resp.StatusCode != 200 {
					errors <- fmt.Errorf("status %d", resp.StatusCode)
				}
			}()
		}

		wg.Wait()
		close(errors)

		// Drain after close so every failure is reported, not just the first.
		for err := range errors {
			t.Errorf("concurrent request error: %v", err)
		}
	})

	t.Run("ConcurrentBundleRequests", func(t *testing.T) {
		var wg sync.WaitGroup
		errors := make(chan error, 50)

		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func(bundleNum int) {
				defer wg.Done()

				// Spread requests across the three fixture bundles (1-3).
				resp, err := http.Get(fmt.Sprintf("%s/bundle/%d", ts.URL, bundleNum%3+1))
				if err != nil {
					errors <- err
					return
				}
				defer resp.Body.Close()

				if resp.StatusCode != 200 && resp.StatusCode != 404 {
					errors <- fmt.Errorf("unexpected status %d", resp.StatusCode)
				}
			}(i)
		}

		wg.Wait()
		close(errors)

		for err := range errors {
			t.Errorf("concurrent request error: %v", err)
		}
	})

	t.Run("MixedEndpointConcurrency", func(t *testing.T) {
		var wg sync.WaitGroup

		endpoints := []string{
			"/",
			"/index.json",
			"/bundle/1",
			"/data/1",
			"/jsonl/1",
			"/status",
		}

		for i := 0; i < 30; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()

				// Round-robin over the endpoint list.
				endpoint := endpoints[id%len(endpoints)]
				resp, err := http.Get(ts.URL + endpoint)
				if err != nil {
					// t.Errorf (unlike t.Fatalf) is safe from goroutines.
					t.Errorf("request to %s failed: %v", endpoint, err)
					return
				}
				defer resp.Body.Close()

				// Read body to completion
				io.ReadAll(resp.Body)
			}(i)
		}

		wg.Wait()
	})
}
698
698
+
699
699
+
// ====================================================================================
700
700
+
// ERROR HANDLING TESTS
701
701
+
// ====================================================================================
702
702
+
703
703
+
func TestServerErrorHandling(t *testing.T) {
704
704
+
srv, _, cleanup := setupTestServer(t, false)
705
705
+
defer cleanup()
706
706
+
707
707
+
ts := httptest.NewServer(srv)
708
708
+
defer ts.Close()
709
709
+
710
710
+
t.Run("404_NotFound", func(t *testing.T) {
711
711
+
resp, err := http.Get(ts.URL + "/nonexistent")
712
712
+
if err != nil {
713
713
+
t.Fatalf("request failed: %v", err)
714
714
+
}
715
715
+
defer resp.Body.Close()
716
716
+
717
717
+
if resp.StatusCode != 404 {
718
718
+
t.Errorf("expected 404, got %d", resp.StatusCode)
719
719
+
}
720
720
+
})
721
721
+
722
722
+
t.Run("405_MethodNotAllowed", func(t *testing.T) {
723
723
+
// POST to GET-only endpoint
724
724
+
resp, err := http.Post(ts.URL+"/index.json", "application/json", bytes.NewReader([]byte("{}")))
725
725
+
if err != nil {
726
726
+
t.Fatalf("request failed: %v", err)
727
727
+
}
728
728
+
defer resp.Body.Close()
729
729
+
730
730
+
if resp.StatusCode != 404 && resp.StatusCode != 405 {
731
731
+
t.Logf("Note: Got status %d (404/405 both acceptable)", resp.StatusCode)
732
732
+
}
733
733
+
})
734
734
+
735
735
+
t.Run("LargeRequestHandling", func(t *testing.T) {
736
736
+
// Request very large bundle number
737
737
+
resp, err := http.Get(ts.URL + "/bundle/999999")
738
738
+
if err != nil {
739
739
+
t.Fatalf("request failed: %v", err)
740
740
+
}
741
741
+
defer resp.Body.Close()
742
742
+
743
743
+
if resp.StatusCode != 404 {
744
744
+
t.Errorf("expected 404 for large bundle number, got %d", resp.StatusCode)
745
745
+
}
746
746
+
})
747
747
+
}
748
748
+
749
749
+
// ====================================================================================
750
750
+
// MIDDLEWARE TESTS
751
751
+
// ====================================================================================
752
752
+
753
753
+
func TestServerMiddleware(t *testing.T) {
754
754
+
srv, _, cleanup := setupTestServer(t, false)
755
755
+
defer cleanup()
756
756
+
757
757
+
ts := httptest.NewServer(srv)
758
758
+
defer ts.Close()
759
759
+
760
760
+
t.Run("JSON_ContentType", func(t *testing.T) {
761
761
+
resp, err := http.Get(ts.URL + "/index.json")
762
762
+
if err != nil {
763
763
+
t.Fatalf("request failed: %v", err)
764
764
+
}
765
765
+
defer resp.Body.Close()
766
766
+
767
767
+
contentType := resp.Header.Get("Content-Type")
768
768
+
if !strings.Contains(contentType, "application/json") {
769
769
+
t.Errorf("wrong content type: %s", contentType)
770
770
+
}
771
771
+
})
772
772
+
773
773
+
t.Run("CORS_AllowsAllOrigins", func(t *testing.T) {
774
774
+
req, _ := http.NewRequest("GET", ts.URL+"/index.json", nil)
775
775
+
req.Header.Set("Origin", "https://example.com")
776
776
+
777
777
+
resp, err := http.DefaultClient.Do(req)
778
778
+
if err != nil {
779
779
+
t.Fatalf("request failed: %v", err)
780
780
+
}
781
781
+
defer resp.Body.Close()
782
782
+
783
783
+
allowOrigin := resp.Header.Get("Access-Control-Allow-Origin")
784
784
+
if allowOrigin != "*" {
785
785
+
t.Errorf("CORS not allowing all origins: %s", allowOrigin)
786
786
+
}
787
787
+
})
788
788
+
}
789
789
+
790
790
+
// ====================================================================================
791
791
+
// HELPER FUNCTIONS & FORMATTERS
792
792
+
// ====================================================================================
793
793
+
794
794
+
func TestServerHelpers(t *testing.T) {
795
795
+
t.Run("FormatNumber", func(t *testing.T) {
796
796
+
// Note: formatNumber is not exported, so we test indirectly
797
797
+
// through endpoints that use it (like root page)
798
798
+
799
799
+
srv, _, cleanup := setupTestServer(t, false)
800
800
+
defer cleanup()
801
801
+
802
802
+
ts := httptest.NewServer(srv)
803
803
+
defer ts.Close()
804
804
+
805
805
+
resp, _ := http.Get(ts.URL + "/")
806
806
+
body, _ := io.ReadAll(resp.Body)
807
807
+
resp.Body.Close()
808
808
+
809
809
+
// Should have formatted numbers with commas
810
810
+
// (if there are any large numbers in output)
811
811
+
t.Logf("Root page length: %d bytes", len(body))
812
812
+
})
813
813
+
}
814
814
+
815
815
+
// ====================================================================================
816
816
+
// MEMORY & PERFORMANCE TESTS
817
817
+
// ====================================================================================
818
818
+
819
819
+
func TestServerPerformance(t *testing.T) {
820
820
+
if testing.Short() {
821
821
+
t.Skip("skipping performance test in short mode")
822
822
+
}
823
823
+
824
824
+
srv, _, cleanup := setupTestServer(t, false)
825
825
+
defer cleanup()
826
826
+
827
827
+
ts := httptest.NewServer(srv)
828
828
+
defer ts.Close()
829
829
+
830
830
+
t.Run("MemoryDebugEndpoint", func(t *testing.T) {
831
831
+
resp, err := http.Get(ts.URL + "/debug/memory")
832
832
+
if err != nil {
833
833
+
t.Fatalf("GET /debug/memory failed: %v", err)
834
834
+
}
835
835
+
defer resp.Body.Close()
836
836
+
837
837
+
if resp.StatusCode != 200 {
838
838
+
t.Errorf("expected 200, got %d", resp.StatusCode)
839
839
+
}
840
840
+
841
841
+
body, _ := io.ReadAll(resp.Body)
842
842
+
bodyStr := string(body)
843
843
+
844
844
+
if !strings.Contains(bodyStr, "Memory Stats") {
845
845
+
t.Error("memory debug output missing stats")
846
846
+
}
847
847
+
848
848
+
if !strings.Contains(bodyStr, "Alloc:") {
849
849
+
t.Error("memory debug missing allocation info")
850
850
+
}
851
851
+
})
852
852
+
853
853
+
t.Run("ResponseTime", func(t *testing.T) {
854
854
+
// Measure response time for index
855
855
+
start := time.Now()
856
856
+
resp, err := http.Get(ts.URL + "/index.json")
857
857
+
elapsed := time.Since(start)
858
858
+
859
859
+
if err != nil {
860
860
+
t.Fatalf("request failed: %v", err)
861
861
+
}
862
862
+
resp.Body.Close()
863
863
+
864
864
+
// Should be fast (< 100ms for index)
865
865
+
if elapsed > 100*time.Millisecond {
866
866
+
t.Logf("Warning: slow response time: %v", elapsed)
867
867
+
}
868
868
+
869
869
+
t.Logf("Index response time: %v", elapsed)
870
870
+
})
871
871
+
}
872
872
+
873
873
+
// ====================================================================================
874
874
+
// SERVER LIFECYCLE TESTS
875
875
+
// ====================================================================================
876
876
+
877
877
+
// TestServerLifecycle verifies the server can be started, shut down
// gracefully, and reports a sensible start time.
func TestServerLifecycle(t *testing.T) {
	t.Run("StartAndStop", func(t *testing.T) {
		mgr, mgrCleanup := setupTestManager(t)
		defer mgrCleanup()

		config := &server.Config{
			Addr:            "127.0.0.1:0", // Random port
			SyncMode:        false,
			EnableWebSocket: false,
			EnableResolver:  false,
			Version:         "test",
		}

		srv := server.New(mgr, config)

		// Start in goroutine
		errChan := make(chan error, 1)
		go func() {
			// This will block
			errChan <- srv.ListenAndServe()
		}()

		// Give it time to start
		time.Sleep(100 * time.Millisecond)

		// Shutdown
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		if err := srv.Shutdown(ctx); err != nil {
			t.Errorf("shutdown failed: %v", err)
		}

		// Should exit
		select {
		case err := <-errChan:
			// ErrServerClosed is the normal result of a graceful shutdown.
			if err != nil && err != http.ErrServerClosed {
				t.Errorf("unexpected error: %v", err)
			}
		case <-time.After(2 * time.Second):
			t.Error("server did not stop after shutdown")
		}
	})

	t.Run("GetStartTime", func(t *testing.T) {
		mgr, cleanup := setupTestManager(t)
		defer cleanup()

		config := &server.Config{
			Addr:    ":0",
			Version: "test",
		}

		// Bracket server.New so the reported start time can be bounded.
		before := time.Now()
		srv := server.New(mgr, config)
		after := time.Now()

		startTime := srv.GetStartTime()

		if startTime.Before(before) || startTime.After(after) {
			t.Error("start time not in expected range")
		}
	})
}
941
941
+
942
942
+
// ====================================================================================
943
943
+
// SETUP HELPERS
944
944
+
// ====================================================================================
945
945
+
946
946
+
func setupTestServer(t *testing.T, enableWebSocket bool) (http.Handler, *server.Server, func()) {
947
947
+
mgr, cleanup := setupTestManager(t)
948
948
+
949
949
+
config := &server.Config{
950
950
+
Addr: ":8080",
951
951
+
SyncMode: true,
952
952
+
SyncInterval: 1 * time.Minute,
953
953
+
EnableWebSocket: enableWebSocket,
954
954
+
EnableResolver: false,
955
955
+
Version: "test",
956
956
+
}
957
957
+
958
958
+
srv := server.New(mgr, config)
959
959
+
960
960
+
// Get handler from server
961
961
+
handler := srv.Handler() // Use new method
962
962
+
963
963
+
return handler, srv, cleanup
964
964
+
}
965
965
+
966
966
+
func setupTestServerWithResolver(t *testing.T) (http.Handler, *server.Server, func()) {
967
967
+
mgr, cleanup := setupTestManager(t)
968
968
+
969
969
+
config := &server.Config{
970
970
+
Addr: ":8080",
971
971
+
SyncMode: false,
972
972
+
EnableWebSocket: false,
973
973
+
EnableResolver: true,
974
974
+
Version: "test",
975
975
+
}
976
976
+
977
977
+
srv := server.New(mgr, config)
978
978
+
handler := srv.Handler()
979
979
+
980
980
+
return handler, srv, cleanup
981
981
+
}
982
982
+
983
983
+
// setupTestManager creates a bundle.Manager in a temp directory seeded
// with three real on-disk bundles (10k synthetic operations each) whose
// index metadata matches the actual files. Returns the manager and a
// cleanup function that closes both the manager and the shared storage
// operations.
func setupTestManager(t *testing.T) (*bundle.Manager, func()) {
	tmpDir := t.TempDir()

	config := bundle.DefaultConfig(tmpDir)
	config.AutoInit = true
	config.VerifyOnLoad = false // Disable verification in tests

	// Create storage operations ONCE and reuse
	logger := &testLogger{t: t}
	storageOps, err := storage.NewOperations(logger)
	if err != nil {
		t.Fatalf("failed to create storage operations: %v", err)
	}

	mgr, err := bundle.NewManager(config, nil)
	if err != nil {
		// Don't leak storageOps if manager construction fails.
		storageOps.Close()
		t.Fatalf("failed to create manager: %v", err)
	}

	// Add test bundles with actual files
	for i := 1; i <= 3; i++ {
		// Create actual bundle file FIRST
		path := filepath.Join(tmpDir, fmt.Sprintf("%06d.jsonl.zst", i))
		ops := makeMinimalTestOperations(10000, i*10000) // Unique ops per bundle

		contentHash, compHash, uncompSize, compSize, err := storageOps.SaveBundle(path, ops)
		if err != nil {
			t.Fatalf("failed to save test bundle %d: %v", i, err)
		}

		// Create metadata that matches the actual file
		meta := &bundleindex.BundleMetadata{
			BundleNumber:     i,
			StartTime:        ops[0].CreatedAt,
			EndTime:          ops[len(ops)-1].CreatedAt,
			OperationCount:   len(ops),
			DIDCount:         len(ops), // All unique in test data
			Hash:             fmt.Sprintf("hash%d", i),
			ContentHash:      contentHash, // Use actual hash
			CompressedHash:   compHash,    // Use actual hash
			CompressedSize:   compSize,    // Use actual size
			UncompressedSize: uncompSize,  // Use actual size
			CreatedAt:        time.Now(),
		}

		mgr.GetIndex().AddBundle(meta)
	}

	if err := mgr.SaveIndex(); err != nil {
		t.Fatalf("failed to save index: %v", err)
	}

	cleanup := func() {
		storageOps.Close()
		mgr.Close()
	}

	return mgr, cleanup
}
1043
1043
+
1044
1044
+
func makeMinimalTestOperations(count int, offset int) []plcclient.PLCOperation {
1045
1045
+
ops := make([]plcclient.PLCOperation, count)
1046
1046
+
baseTime := time.Now().Add(-time.Hour)
1047
1047
+
1048
1048
+
for i := 0; i < count; i++ {
1049
1049
+
idx := offset + i
1050
1050
+
1051
1051
+
// Create valid base32 DID identifier (24 chars, only a-z and 2-7)
1052
1052
+
// Convert index to base32-like string
1053
1053
+
identifier := fmt.Sprintf("%024d", idx)
1054
1054
+
// Replace invalid chars (0,1,8,9) with valid ones
1055
1055
+
identifier = strings.ReplaceAll(identifier, "0", "a")
1056
1056
+
identifier = strings.ReplaceAll(identifier, "1", "b")
1057
1057
+
identifier = strings.ReplaceAll(identifier, "8", "c")
1058
1058
+
identifier = strings.ReplaceAll(identifier, "9", "d")
1059
1059
+
1060
1060
+
ops[i] = plcclient.PLCOperation{
1061
1061
+
DID: "did:plc:" + identifier,
1062
1062
+
CID: fmt.Sprintf("bafytest%012d", idx),
1063
1063
+
CreatedAt: baseTime.Add(time.Duration(idx) * time.Second),
1064
1064
+
}
1065
1065
+
}
1066
1066
+
1067
1067
+
return ops
1068
1068
+
}
+122
server/types_test.go
···
1
1
+
package server_test
2
2
+
3
3
+
import (
4
4
+
"encoding/json"
5
5
+
"testing"
6
6
+
"time"
7
7
+
8
8
+
"tangled.org/atscan.net/plcbundle/server"
9
9
+
)
10
10
+
11
11
+
func TestServerResponseTypes(t *testing.T) {
12
12
+
t.Run("StatusResponse_JSON", func(t *testing.T) {
13
13
+
response := server.StatusResponse{
14
14
+
Server: server.ServerStatus{
15
15
+
Version: "1.0.0",
16
16
+
UptimeSeconds: 3600,
17
17
+
SyncMode: true,
18
18
+
SyncIntervalSeconds: 60,
19
19
+
WebSocketEnabled: true,
20
20
+
Origin: "https://plc.directory",
21
21
+
},
22
22
+
Bundles: server.BundleStatus{
23
23
+
Count: 100,
24
24
+
FirstBundle: 1,
25
25
+
LastBundle: 100,
26
26
+
TotalSize: 1024000,
27
27
+
UncompressedSize: 5120000,
28
28
+
CompressionRatio: 5.0,
29
29
+
TotalOperations: 1000000,
30
30
+
AvgOpsPerHour: 10000,
31
31
+
UpdatedAt: time.Now(),
32
32
+
HeadAgeSeconds: 30,
33
33
+
RootHash: "root_hash",
34
34
+
HeadHash: "head_hash",
35
35
+
Gaps: 0,
36
36
+
HasGaps: false,
37
37
+
},
38
38
+
}
39
39
+
40
40
+
// Should marshal to JSON
41
41
+
data, err := json.Marshal(response)
42
42
+
if err != nil {
43
43
+
t.Fatalf("failed to marshal StatusResponse: %v", err)
44
44
+
}
45
45
+
46
46
+
// Should unmarshal back
47
47
+
var decoded server.StatusResponse
48
48
+
if err := json.Unmarshal(data, &decoded); err != nil {
49
49
+
t.Fatalf("failed to unmarshal StatusResponse: %v", err)
50
50
+
}
51
51
+
52
52
+
// Verify round-trip
53
53
+
if decoded.Server.Version != "1.0.0" {
54
54
+
t.Error("version not preserved")
55
55
+
}
56
56
+
57
57
+
if decoded.Bundles.Count != 100 {
58
58
+
t.Error("bundle count not preserved")
59
59
+
}
60
60
+
})
61
61
+
62
62
+
t.Run("MempoolStatus_JSON", func(t *testing.T) {
63
63
+
status := server.MempoolStatus{
64
64
+
Count: 500,
65
65
+
TargetBundle: 42,
66
66
+
CanCreateBundle: false,
67
67
+
MinTimestamp: time.Now(),
68
68
+
Validated: true,
69
69
+
ProgressPercent: 5.0,
70
70
+
BundleSize: 10000,
71
71
+
OperationsNeeded: 9500,
72
72
+
FirstTime: time.Now().Add(-time.Hour),
73
73
+
LastTime: time.Now(),
74
74
+
TimespanSeconds: 3600,
75
75
+
LastOpAgeSeconds: 10,
76
76
+
EtaNextBundleSeconds: 1800,
77
77
+
}
78
78
+
79
79
+
data, err := json.Marshal(status)
80
80
+
if err != nil {
81
81
+
t.Fatalf("failed to marshal MempoolStatus: %v", err)
82
82
+
}
83
83
+
84
84
+
var decoded server.MempoolStatus
85
85
+
if err := json.Unmarshal(data, &decoded); err != nil {
86
86
+
t.Fatalf("failed to unmarshal MempoolStatus: %v", err)
87
87
+
}
88
88
+
89
89
+
if decoded.Count != 500 {
90
90
+
t.Error("count not preserved")
91
91
+
}
92
92
+
93
93
+
if decoded.ProgressPercent != 5.0 {
94
94
+
t.Error("progress not preserved")
95
95
+
}
96
96
+
})
97
97
+
98
98
+
t.Run("BundleStatus_WithGaps", func(t *testing.T) {
99
99
+
status := server.BundleStatus{
100
100
+
Count: 100,
101
101
+
Gaps: 3,
102
102
+
HasGaps: true,
103
103
+
GapNumbers: []int{5, 23, 67},
104
104
+
}
105
105
+
106
106
+
data, err := json.Marshal(status)
107
107
+
if err != nil {
108
108
+
t.Fatalf("marshal failed: %v", err)
109
109
+
}
110
110
+
111
111
+
var decoded server.BundleStatus
112
112
+
json.Unmarshal(data, &decoded)
113
113
+
114
114
+
if !decoded.HasGaps {
115
115
+
t.Error("HasGaps flag not preserved")
116
116
+
}
117
117
+
118
118
+
if len(decoded.GapNumbers) != 3 {
119
119
+
t.Error("gap numbers not preserved")
120
120
+
}
121
121
+
})
122
122
+
}