[DEPRECATED] Go implementation of plcbundle

Adds a lot of new tests.

+4820 -5
+14 -4
Makefile
··· 28 28 GOFMT=$(GOCMD) fmt 29 29 GOMOD=$(GOCMD) mod 30 30 31 + # Test runner - auto-detect gotestsum 32 + GOTESTSUM := $(shell command -v gotestsum 2> /dev/null) 33 + 31 34 # Build flags 32 35 LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.gitCommit=$(GIT_COMMIT) -X main.buildDate=$(BUILD_DATE)" 33 36 ··· 46 49 47 50 # Run tests 48 51 test: 49 - @echo "Running tests..." 50 - $(GOTEST) -v ./... 52 + ifdef GOTESTSUM 53 + @gotestsum -- ./... 54 + else 55 + @echo "Running tests (install gotestsum for better output: go install gotest.tools/gotestsum@latest)" 56 + @$(GOTEST) -v ./... 57 + endif 51 58 52 59 # Run tests with coverage 53 60 test-coverage: 54 - @echo "Running tests with coverage..." 55 - $(GOTEST) -v -cover ./... 61 + ifdef GOTESTSUM 62 + @gotestsum --format testname -- -cover ./... 63 + else 64 + @$(GOTEST) -v -cover ./... 65 + endif 56 66 57 67 # Clean build artifacts 58 68 clean:
-1
cmd/plcbundle/commands/index.go
··· 1 - // repo/cmd/plcbundle/commands/index.go 2 1 package commands 3 2 4 3 import (
+844
internal/bundleindex/index_test.go
··· 1 + package bundleindex_test 2 + 3 + import ( 4 + "fmt" 5 + "os" 6 + "path/filepath" 7 + "sync" 8 + "testing" 9 + "time" 10 + 11 + "tangled.org/atscan.net/plcbundle/internal/bundleindex" 12 + "tangled.org/atscan.net/plcbundle/internal/types" 13 + ) 14 + 15 + type testLogger struct { 16 + t *testing.T 17 + } 18 + 19 + func (l *testLogger) Printf(format string, v ...interface{}) { 20 + l.t.Logf(format, v...) 21 + } 22 + 23 + func (l *testLogger) Println(v ...interface{}) { 24 + l.t.Log(v...) 25 + } 26 + 27 + // ==================================================================================== 28 + // INDEX CREATION & BASIC OPERATIONS 29 + // ==================================================================================== 30 + 31 + func TestIndexCreation(t *testing.T) { 32 + t.Run("NewIndex", func(t *testing.T) { 33 + idx := bundleindex.NewIndex("https://plc.directory") 34 + 35 + if idx == nil { 36 + t.Fatal("NewIndex returned nil") 37 + } 38 + 39 + if idx.Version != types.INDEX_VERSION { 40 + t.Errorf("version mismatch: got %s, want %s", idx.Version, types.INDEX_VERSION) 41 + } 42 + 43 + if idx.Origin != "https://plc.directory" { 44 + t.Errorf("origin mismatch: got %s", idx.Origin) 45 + } 46 + 47 + if idx.Count() != 0 { 48 + t.Error("new index should be empty") 49 + } 50 + }) 51 + 52 + t.Run("NewIndex_EmptyOrigin", func(t *testing.T) { 53 + idx := bundleindex.NewIndex("") 54 + 55 + if idx.Origin != "" { 56 + t.Error("should allow empty origin") 57 + } 58 + }) 59 + } 60 + 61 + func TestIndexAddBundle(t *testing.T) { 62 + t.Run("AddSingleBundle", func(t *testing.T) { 63 + idx := bundleindex.NewIndex("test-origin") 64 + 65 + meta := &bundleindex.BundleMetadata{ 66 + BundleNumber: 1, 67 + StartTime: time.Now(), 68 + EndTime: time.Now().Add(time.Hour), 69 + OperationCount: types.BUNDLE_SIZE, 70 + DIDCount: 1000, 71 + Hash: "hash123", 72 + ContentHash: "content123", 73 + CompressedHash: "compressed123", 74 + CompressedSize: 1024, 75 + UncompressedSize: 5120, 76 + 
} 77 + 78 + idx.AddBundle(meta) 79 + 80 + if idx.Count() != 1 { 81 + t.Errorf("count should be 1, got %d", idx.Count()) 82 + } 83 + 84 + retrieved, err := idx.GetBundle(1) 85 + if err != nil { 86 + t.Fatalf("GetBundle failed: %v", err) 87 + } 88 + 89 + if retrieved.Hash != "hash123" { 90 + t.Error("hash mismatch after retrieval") 91 + } 92 + }) 93 + 94 + t.Run("AddMultipleBundles_AutoSort", func(t *testing.T) { 95 + idx := bundleindex.NewIndex("test-origin") 96 + 97 + // Add bundles out of order: 3, 1, 2 98 + for _, num := range []int{3, 1, 2} { 99 + meta := &bundleindex.BundleMetadata{ 100 + BundleNumber: num, 101 + StartTime: time.Now(), 102 + EndTime: time.Now().Add(time.Hour), 103 + OperationCount: types.BUNDLE_SIZE, 104 + } 105 + idx.AddBundle(meta) 106 + } 107 + 108 + bundles := idx.GetBundles() 109 + 110 + // Should be sorted: 1, 2, 3 111 + if bundles[0].BundleNumber != 1 { 112 + t.Error("bundles not sorted") 113 + } 114 + if bundles[1].BundleNumber != 2 { 115 + t.Error("bundles not sorted") 116 + } 117 + if bundles[2].BundleNumber != 3 { 118 + t.Error("bundles not sorted") 119 + } 120 + }) 121 + 122 + t.Run("UpdateExistingBundle", func(t *testing.T) { 123 + idx := bundleindex.NewIndex("test-origin") 124 + 125 + original := &bundleindex.BundleMetadata{ 126 + BundleNumber: 1, 127 + Hash: "original_hash", 128 + StartTime: time.Now(), 129 + EndTime: time.Now().Add(time.Hour), 130 + OperationCount: types.BUNDLE_SIZE, 131 + } 132 + 133 + idx.AddBundle(original) 134 + 135 + // Add again with different hash (update) 136 + updated := &bundleindex.BundleMetadata{ 137 + BundleNumber: 1, 138 + Hash: "updated_hash", 139 + StartTime: time.Now(), 140 + EndTime: time.Now().Add(time.Hour), 141 + OperationCount: types.BUNDLE_SIZE, 142 + } 143 + 144 + idx.AddBundle(updated) 145 + 146 + // Should have only 1 bundle (updated, not duplicated) 147 + if idx.Count() != 1 { 148 + t.Errorf("should have 1 bundle after update, got %d", idx.Count()) 149 + } 150 + 151 + retrieved, _ := 
idx.GetBundle(1) 152 + if retrieved.Hash != "updated_hash" { 153 + t.Error("bundle was not updated") 154 + } 155 + }) 156 + } 157 + 158 + // ==================================================================================== 159 + // SAVE & LOAD TESTS 160 + // ==================================================================================== 161 + 162 + func TestIndexPersistence(t *testing.T) { 163 + tmpDir := t.TempDir() 164 + 165 + t.Run("SaveAndLoad", func(t *testing.T) { 166 + indexPath := filepath.Join(tmpDir, "test_index.json") 167 + 168 + // Create and populate index 169 + idx := bundleindex.NewIndex("https://plc.directory") 170 + 171 + for i := 1; i <= 5; i++ { 172 + meta := &bundleindex.BundleMetadata{ 173 + BundleNumber: i, 174 + StartTime: time.Now().Add(time.Duration(i-1) * time.Hour), 175 + EndTime: time.Now().Add(time.Duration(i) * time.Hour), 176 + OperationCount: types.BUNDLE_SIZE, 177 + DIDCount: 1000 * i, 178 + Hash: fmt.Sprintf("hash%d", i), 179 + ContentHash: fmt.Sprintf("content%d", i), 180 + CompressedHash: fmt.Sprintf("compressed%d", i), 181 + CompressedSize: int64(1024 * i), 182 + UncompressedSize: int64(5120 * i), 183 + } 184 + idx.AddBundle(meta) 185 + } 186 + 187 + // Save 188 + if err := idx.Save(indexPath); err != nil { 189 + t.Fatalf("Save failed: %v", err) 190 + } 191 + 192 + // Verify file exists 193 + if _, err := os.Stat(indexPath); os.IsNotExist(err) { 194 + t.Fatal("index file not created") 195 + } 196 + 197 + // Load 198 + loaded, err := bundleindex.LoadIndex(indexPath) 199 + if err != nil { 200 + t.Fatalf("LoadIndex failed: %v", err) 201 + } 202 + 203 + // Verify data integrity 204 + if loaded.Count() != 5 { 205 + t.Errorf("loaded count mismatch: got %d, want 5", loaded.Count()) 206 + } 207 + 208 + if loaded.Origin != "https://plc.directory" { 209 + t.Error("origin not preserved") 210 + } 211 + 212 + if loaded.LastBundle != 5 { 213 + t.Error("LastBundle not calculated correctly") 214 + } 215 + 216 + // Verify specific bundle 
217 + bundle3, err := loaded.GetBundle(3) 218 + if err != nil { 219 + t.Fatalf("GetBundle(3) failed: %v", err) 220 + } 221 + 222 + if bundle3.Hash != "hash3" { 223 + t.Error("bundle data not preserved") 224 + } 225 + }) 226 + 227 + t.Run("AtomicSave", func(t *testing.T) { 228 + indexPath := filepath.Join(tmpDir, "atomic_test.json") 229 + 230 + idx := bundleindex.NewIndex("test") 231 + idx.AddBundle(&bundleindex.BundleMetadata{ 232 + BundleNumber: 1, 233 + StartTime: time.Now(), 234 + EndTime: time.Now(), 235 + OperationCount: types.BUNDLE_SIZE, 236 + }) 237 + 238 + idx.Save(indexPath) 239 + 240 + // Verify no .tmp file left behind 241 + tmpPath := indexPath + ".tmp" 242 + if _, err := os.Stat(tmpPath); !os.IsNotExist(err) { 243 + t.Error("temporary file should not exist after successful save") 244 + } 245 + }) 246 + 247 + t.Run("LoadInvalidVersion", func(t *testing.T) { 248 + indexPath := filepath.Join(tmpDir, "invalid_version.json") 249 + 250 + // Write index with wrong version 251 + invalidData := `{"version":"99.99","origin":"test","bundles":[]}` 252 + os.WriteFile(indexPath, []byte(invalidData), 0644) 253 + 254 + _, err := bundleindex.LoadIndex(indexPath) 255 + if err == nil { 256 + t.Error("should reject index with invalid version") 257 + } 258 + }) 259 + 260 + t.Run("LoadCorruptedJSON", func(t *testing.T) { 261 + indexPath := filepath.Join(tmpDir, "corrupted.json") 262 + 263 + os.WriteFile(indexPath, []byte("{invalid json"), 0644) 264 + 265 + _, err := bundleindex.LoadIndex(indexPath) 266 + if err == nil { 267 + t.Error("should reject corrupted JSON") 268 + } 269 + }) 270 + } 271 + 272 + // ==================================================================================== 273 + // QUERY OPERATIONS 274 + // ==================================================================================== 275 + 276 + func TestIndexQueries(t *testing.T) { 277 + idx := bundleindex.NewIndex("test") 278 + 279 + // Populate with bundles 280 + for i := 1; i <= 10; i++ { 281 + 
meta := &bundleindex.BundleMetadata{ 282 + BundleNumber: i, 283 + StartTime: time.Now().Add(time.Duration(i-1) * time.Hour), 284 + EndTime: time.Now().Add(time.Duration(i) * time.Hour), 285 + OperationCount: types.BUNDLE_SIZE, 286 + CompressedSize: int64(i * 1000), 287 + } 288 + idx.AddBundle(meta) 289 + } 290 + 291 + t.Run("GetBundle", func(t *testing.T) { 292 + meta, err := idx.GetBundle(5) 293 + if err != nil { 294 + t.Fatalf("GetBundle failed: %v", err) 295 + } 296 + 297 + if meta.BundleNumber != 5 { 298 + t.Error("wrong bundle returned") 299 + } 300 + }) 301 + 302 + t.Run("GetBundle_NotFound", func(t *testing.T) { 303 + _, err := idx.GetBundle(999) 304 + if err == nil { 305 + t.Error("should return error for nonexistent bundle") 306 + } 307 + }) 308 + 309 + t.Run("GetLastBundle", func(t *testing.T) { 310 + last := idx.GetLastBundle() 311 + 312 + if last == nil { 313 + t.Fatal("GetLastBundle returned nil") 314 + } 315 + 316 + if last.BundleNumber != 10 { 317 + t.Errorf("last bundle should be 10, got %d", last.BundleNumber) 318 + } 319 + }) 320 + 321 + t.Run("GetLastBundle_Empty", func(t *testing.T) { 322 + emptyIdx := bundleindex.NewIndex("test") 323 + 324 + last := emptyIdx.GetLastBundle() 325 + 326 + if last != nil { 327 + t.Error("empty index should return nil for GetLastBundle") 328 + } 329 + }) 330 + 331 + t.Run("GetBundleRange", func(t *testing.T) { 332 + bundles := idx.GetBundleRange(3, 7) 333 + 334 + if len(bundles) != 5 { 335 + t.Errorf("expected 5 bundles, got %d", len(bundles)) 336 + } 337 + 338 + if bundles[0].BundleNumber != 3 || bundles[4].BundleNumber != 7 { 339 + t.Error("range boundaries incorrect") 340 + } 341 + }) 342 + 343 + t.Run("GetBundleRange_OutOfBounds", func(t *testing.T) { 344 + bundles := idx.GetBundleRange(100, 200) 345 + 346 + if len(bundles) != 0 { 347 + t.Errorf("expected 0 bundles for out-of-range query, got %d", len(bundles)) 348 + } 349 + }) 350 + 351 + t.Run("GetBundles_ReturnsShallowCopy", func(t *testing.T) { 352 + 
bundles1 := idx.GetBundles() 353 + bundles2 := idx.GetBundles() 354 + 355 + // Should be different slices 356 + if &bundles1[0] == &bundles2[0] { 357 + t.Error("GetBundles should return copy, not same slice") 358 + } 359 + 360 + // But same data 361 + if bundles1[0].BundleNumber != bundles2[0].BundleNumber { 362 + t.Error("bundle data should be same") 363 + } 364 + }) 365 + } 366 + 367 + // ==================================================================================== 368 + // GAP DETECTION - CRITICAL FOR INTEGRITY 369 + // ==================================================================================== 370 + 371 + func TestIndexFindGaps(t *testing.T) { 372 + t.Run("NoGaps", func(t *testing.T) { 373 + idx := bundleindex.NewIndex("test") 374 + 375 + for i := 1; i <= 10; i++ { 376 + idx.AddBundle(createTestMetadata(i)) 377 + } 378 + 379 + gaps := idx.FindGaps() 380 + 381 + if len(gaps) != 0 { 382 + t.Errorf("expected no gaps, found %d: %v", len(gaps), gaps) 383 + } 384 + }) 385 + 386 + t.Run("SingleGap", func(t *testing.T) { 387 + idx := bundleindex.NewIndex("test") 388 + 389 + // Add bundles 1, 2, 4, 5 (missing 3) 390 + for _, num := range []int{1, 2, 4, 5} { 391 + idx.AddBundle(createTestMetadata(num)) 392 + } 393 + 394 + gaps := idx.FindGaps() 395 + 396 + if len(gaps) != 1 { 397 + t.Errorf("expected 1 gap, got %d", len(gaps)) 398 + } 399 + 400 + if len(gaps) > 0 && gaps[0] != 3 { 401 + t.Errorf("expected gap at 3, got %d", gaps[0]) 402 + } 403 + }) 404 + 405 + t.Run("MultipleGaps", func(t *testing.T) { 406 + idx := bundleindex.NewIndex("test") 407 + 408 + // Add bundles 1, 2, 5, 6, 9, 10 (missing 3, 4, 7, 8) 409 + for _, num := range []int{1, 2, 5, 6, 9, 10} { 410 + idx.AddBundle(createTestMetadata(num)) 411 + } 412 + 413 + gaps := idx.FindGaps() 414 + 415 + expectedGaps := []int{3, 4, 7, 8} 416 + if len(gaps) != len(expectedGaps) { 417 + t.Errorf("expected %d gaps, got %d", len(expectedGaps), len(gaps)) 418 + } 419 + 420 + for i, expected := range 
expectedGaps { 421 + if gaps[i] != expected { 422 + t.Errorf("gap %d: got %d, want %d", i, gaps[i], expected) 423 + } 424 + } 425 + }) 426 + 427 + t.Run("FindGaps_EmptyIndex", func(t *testing.T) { 428 + idx := bundleindex.NewIndex("test") 429 + 430 + gaps := idx.FindGaps() 431 + 432 + if len(gaps) > 0 { 433 + t.Error("empty index should have no gaps") 434 + } 435 + }) 436 + 437 + t.Run("FindGaps_NonSequentialStart", func(t *testing.T) { 438 + idx := bundleindex.NewIndex("test") 439 + 440 + // Start at bundle 100 441 + for i := 100; i <= 105; i++ { 442 + idx.AddBundle(createTestMetadata(i)) 443 + } 444 + 445 + gaps := idx.FindGaps() 446 + 447 + // No gaps between 100-105 448 + if len(gaps) != 0 { 449 + t.Errorf("expected no gaps, got %d", len(gaps)) 450 + } 451 + }) 452 + } 453 + 454 + // ==================================================================================== 455 + // STATISTICS & DERIVED FIELDS 456 + // ==================================================================================== 457 + 458 + func TestIndexStatistics(t *testing.T) { 459 + idx := bundleindex.NewIndex("test") 460 + 461 + t.Run("StatsEmpty", func(t *testing.T) { 462 + stats := idx.GetStats() 463 + 464 + if stats["bundle_count"].(int) != 0 { 465 + t.Error("empty index should have count 0") 466 + } 467 + }) 468 + 469 + t.Run("StatsPopulated", func(t *testing.T) { 470 + totalSize := int64(0) 471 + totalUncompressed := int64(0) 472 + 473 + for i := 1; i <= 5; i++ { 474 + meta := &bundleindex.BundleMetadata{ 475 + BundleNumber: i, 476 + StartTime: time.Now().Add(time.Duration(i-1) * time.Hour), 477 + EndTime: time.Now().Add(time.Duration(i) * time.Hour), 478 + OperationCount: types.BUNDLE_SIZE, 479 + CompressedSize: int64(1000 * i), 480 + UncompressedSize: int64(5000 * i), 481 + } 482 + idx.AddBundle(meta) 483 + totalSize += meta.CompressedSize 484 + totalUncompressed += meta.UncompressedSize 485 + } 486 + 487 + stats := idx.GetStats() 488 + 489 + if stats["bundle_count"].(int) != 5 { 
490 + t.Error("bundle count mismatch") 491 + } 492 + 493 + if stats["first_bundle"].(int) != 1 { 494 + t.Error("first_bundle mismatch") 495 + } 496 + 497 + if stats["last_bundle"].(int) != 5 { 498 + t.Error("last_bundle mismatch") 499 + } 500 + 501 + if stats["total_size"].(int64) != totalSize { 502 + t.Errorf("total_size mismatch: got %d, want %d", stats["total_size"].(int64), totalSize) 503 + } 504 + 505 + if stats["total_uncompressed_size"].(int64) != totalUncompressed { 506 + t.Error("total_uncompressed_size mismatch") 507 + } 508 + 509 + if _, ok := stats["start_time"]; !ok { 510 + t.Error("stats missing start_time") 511 + } 512 + 513 + if _, ok := stats["end_time"]; !ok { 514 + t.Error("stats missing end_time") 515 + } 516 + 517 + if stats["gaps"].(int) != 0 { 518 + t.Error("should have no gaps") 519 + } 520 + }) 521 + 522 + t.Run("StatsRecalculateAfterAdd", func(t *testing.T) { 523 + idx := bundleindex.NewIndex("test") 524 + 525 + idx.AddBundle(&bundleindex.BundleMetadata{ 526 + BundleNumber: 1, 527 + StartTime: time.Now(), 528 + EndTime: time.Now(), 529 + OperationCount: types.BUNDLE_SIZE, 530 + CompressedSize: 1000, 531 + }) 532 + 533 + stats1 := idx.GetStats() 534 + size1 := stats1["total_size"].(int64) 535 + 536 + // Add another bundle 537 + idx.AddBundle(&bundleindex.BundleMetadata{ 538 + BundleNumber: 2, 539 + StartTime: time.Now(), 540 + EndTime: time.Now(), 541 + OperationCount: types.BUNDLE_SIZE, 542 + CompressedSize: 2000, 543 + }) 544 + 545 + stats2 := idx.GetStats() 546 + size2 := stats2["total_size"].(int64) 547 + 548 + if size2 != size1+2000 { 549 + t.Errorf("total_size not recalculated: got %d, want %d", size2, size1+2000) 550 + } 551 + 552 + if stats2["last_bundle"].(int) != 2 { 553 + t.Error("last_bundle not recalculated") 554 + } 555 + }) 556 + } 557 + 558 + // ==================================================================================== 559 + // REBUILD OPERATION 560 + // 
==================================================================================== 561 + 562 + func TestIndexRebuild(t *testing.T) { 563 + t.Run("RebuildFromMetadata", func(t *testing.T) { 564 + idx := bundleindex.NewIndex("original") 565 + 566 + // Add some bundles 567 + for i := 1; i <= 3; i++ { 568 + idx.AddBundle(createTestMetadata(i)) 569 + } 570 + 571 + if idx.Count() != 3 { 572 + t.Fatal("setup failed") 573 + } 574 + 575 + // Create new metadata for rebuild 576 + newMetadata := []*bundleindex.BundleMetadata{ 577 + createTestMetadata(1), 578 + createTestMetadata(2), 579 + createTestMetadata(5), 580 + createTestMetadata(6), 581 + } 582 + 583 + // Rebuild 584 + idx.Rebuild(newMetadata) 585 + 586 + // Should now have 4 bundles 587 + if idx.Count() != 4 { 588 + t.Errorf("after rebuild, expected 4 bundles, got %d", idx.Count()) 589 + } 590 + 591 + // Should have new bundles 5, 6 592 + if _, err := idx.GetBundle(5); err != nil { 593 + t.Error("should have bundle 5 after rebuild") 594 + } 595 + 596 + // Should not have bundle 3 597 + if _, err := idx.GetBundle(3); err == nil { 598 + t.Error("should not have bundle 3 after rebuild") 599 + } 600 + 601 + // Origin should be preserved 602 + if idx.Origin != "original" { 603 + t.Error("origin should be preserved during rebuild") 604 + } 605 + }) 606 + 607 + t.Run("RebuildAutoSorts", func(t *testing.T) { 608 + idx := bundleindex.NewIndex("test") 609 + 610 + // Rebuild with unsorted data 611 + unsorted := []*bundleindex.BundleMetadata{ 612 + createTestMetadata(5), 613 + createTestMetadata(2), 614 + createTestMetadata(8), 615 + createTestMetadata(1), 616 + } 617 + 618 + idx.Rebuild(unsorted) 619 + 620 + bundles := idx.GetBundles() 621 + 622 + // Should be sorted 623 + for i := 0; i < len(bundles)-1; i++ { 624 + if bundles[i].BundleNumber >= bundles[i+1].BundleNumber { 625 + t.Error("bundles not sorted after rebuild") 626 + } 627 + } 628 + }) 629 + } 630 + 631 + // 
==================================================================================== 632 + // CLEAR OPERATION 633 + // ==================================================================================== 634 + 635 + func TestIndexClear(t *testing.T) { 636 + idx := bundleindex.NewIndex("test") 637 + 638 + // Populate 639 + for i := 1; i <= 10; i++ { 640 + idx.AddBundle(createTestMetadata(i)) 641 + } 642 + 643 + if idx.Count() != 10 { 644 + t.Fatal("setup failed") 645 + } 646 + 647 + // Clear 648 + idx.Clear() 649 + 650 + if idx.Count() != 0 { 651 + t.Error("count should be 0 after clear") 652 + } 653 + 654 + if idx.LastBundle != 0 { 655 + t.Error("LastBundle should be 0 after clear") 656 + } 657 + 658 + if idx.TotalSize != 0 { 659 + t.Error("TotalSize should be 0 after clear") 660 + } 661 + 662 + // Should be able to add after clear 663 + idx.AddBundle(createTestMetadata(1)) 664 + 665 + if idx.Count() != 1 { 666 + t.Error("should be able to add after clear") 667 + } 668 + } 669 + 670 + // ==================================================================================== 671 + // CONCURRENCY TESTS 672 + // ==================================================================================== 673 + 674 + func TestIndexConcurrency(t *testing.T) { 675 + t.Run("ConcurrentReads", func(t *testing.T) { 676 + idx := bundleindex.NewIndex("test") 677 + 678 + // Populate 679 + for i := 1; i <= 100; i++ { 680 + idx.AddBundle(createTestMetadata(i)) 681 + } 682 + 683 + // 100 concurrent readers 684 + var wg sync.WaitGroup 685 + errors := make(chan error, 100) 686 + 687 + for i := 0; i < 100; i++ { 688 + wg.Add(1) 689 + go func(id int) { 690 + defer wg.Done() 691 + 692 + // Various read operations 693 + idx.Count() 694 + idx.GetLastBundle() 695 + idx.GetBundles() 696 + idx.FindGaps() 697 + idx.GetStats() 698 + 699 + if _, err := idx.GetBundle(id%100 + 1); err != nil { 700 + errors <- err 701 + } 702 + }(i) 703 + } 704 + 705 + wg.Wait() 706 + close(errors) 707 + 708 + for err := 
range errors { 709 + t.Errorf("concurrent read error: %v", err) 710 + } 711 + }) 712 + 713 + t.Run("ConcurrentReadsDuringSave", func(t *testing.T) { 714 + tmpDir := t.TempDir() 715 + indexPath := filepath.Join(tmpDir, "concurrent.json") 716 + 717 + idx := bundleindex.NewIndex("test") 718 + 719 + for i := 1; i <= 50; i++ { 720 + idx.AddBundle(createTestMetadata(i)) 721 + } 722 + 723 + var wg sync.WaitGroup 724 + 725 + // Saver goroutine 726 + wg.Add(1) 727 + go func() { 728 + defer wg.Done() 729 + for i := 0; i < 10; i++ { 730 + idx.Save(indexPath) 731 + time.Sleep(10 * time.Millisecond) 732 + } 733 + }() 734 + 735 + // Reader goroutines 736 + for i := 0; i < 10; i++ { 737 + wg.Add(1) 738 + go func() { 739 + defer wg.Done() 740 + for j := 0; j < 50; j++ { 741 + idx.Count() 742 + idx.GetBundles() 743 + time.Sleep(5 * time.Millisecond) 744 + } 745 + }() 746 + } 747 + 748 + wg.Wait() 749 + }) 750 + } 751 + 752 + // ==================================================================================== 753 + // REMOTE UPDATE TESTS (FOR CLONING) 754 + // ==================================================================================== 755 + 756 + func TestIndexUpdateFromRemote(t *testing.T) { 757 + 758 + t.Run("UpdateFromRemote_Basic", func(t *testing.T) { 759 + idx := bundleindex.NewIndex("test") 760 + 761 + // Local has bundles 1-3 762 + for i := 1; i <= 3; i++ { 763 + idx.AddBundle(createTestMetadata(i)) 764 + } 765 + 766 + // Remote has bundles 1-5 767 + remoteMeta := make(map[int]*bundleindex.BundleMetadata) 768 + for i := 1; i <= 5; i++ { 769 + remoteMeta[i] = createTestMetadata(i) 770 + } 771 + 772 + bundlesToUpdate := []int{4, 5} 773 + 774 + // Mock file existence (4 and 5 exist) 775 + fileExists := func(bundleNum int) bool { 776 + return bundleNum == 4 || bundleNum == 5 777 + } 778 + 779 + logger := &testLogger{t: &testing.T{}} 780 + 781 + err := idx.UpdateFromRemote(bundlesToUpdate, remoteMeta, fileExists, false, logger) 782 + if err != nil { 783 + 
t.Fatalf("UpdateFromRemote failed: %v", err) 784 + } 785 + 786 + // Should now have 5 bundles 787 + if idx.Count() != 5 { 788 + t.Errorf("expected 5 bundles after update, got %d", idx.Count()) 789 + } 790 + }) 791 + 792 + t.Run("UpdateFromRemote_SkipsMissingFiles", func(t *testing.T) { 793 + idx := bundleindex.NewIndex("test") 794 + 795 + remoteMeta := map[int]*bundleindex.BundleMetadata{ 796 + 1: createTestMetadata(1), 797 + 2: createTestMetadata(2), 798 + } 799 + 800 + bundlesToUpdate := []int{1, 2} 801 + 802 + // Only bundle 1 exists locally 803 + fileExists := func(bundleNum int) bool { 804 + return bundleNum == 1 805 + } 806 + 807 + logger := &testLogger{t: &testing.T{}} 808 + 809 + err := idx.UpdateFromRemote(bundlesToUpdate, remoteMeta, fileExists, false, logger) 810 + if err != nil { 811 + t.Fatalf("UpdateFromRemote failed: %v", err) 812 + } 813 + 814 + // Should only have bundle 1 815 + if idx.Count() != 1 { 816 + t.Errorf("expected 1 bundle, got %d", idx.Count()) 817 + } 818 + 819 + if _, err := idx.GetBundle(2); err == nil { 820 + t.Error("should not have bundle 2 (file missing)") 821 + } 822 + }) 823 + } 824 + 825 + // ==================================================================================== 826 + // HELPER FUNCTIONS 827 + // ==================================================================================== 828 + 829 + func createTestMetadata(bundleNum int) *bundleindex.BundleMetadata { 830 + return &bundleindex.BundleMetadata{ 831 + BundleNumber: bundleNum, 832 + StartTime: time.Now().Add(time.Duration(bundleNum-1) * time.Hour), 833 + EndTime: time.Now().Add(time.Duration(bundleNum) * time.Hour), 834 + OperationCount: types.BUNDLE_SIZE, 835 + DIDCount: 1000, 836 + Hash: fmt.Sprintf("hash%d", bundleNum), 837 + ContentHash: fmt.Sprintf("content%d", bundleNum), 838 + Parent: fmt.Sprintf("parent%d", bundleNum-1), 839 + CompressedHash: fmt.Sprintf("compressed%d", bundleNum), 840 + CompressedSize: int64(1000 * bundleNum), 841 + UncompressedSize: 
int64(5000 * bundleNum), 842 + CreatedAt: time.Now(), 843 + } 844 + }
+908
internal/mempool/mempool_test.go
··· 1 + package mempool_test 2 + 3 + import ( 4 + "fmt" 5 + "os" 6 + "path/filepath" 7 + "sync" 8 + "testing" 9 + "time" 10 + 11 + "tangled.org/atscan.net/plcbundle/internal/mempool" 12 + "tangled.org/atscan.net/plcbundle/internal/plcclient" 13 + "tangled.org/atscan.net/plcbundle/internal/types" 14 + ) 15 + 16 + type testLogger struct { 17 + t *testing.T 18 + } 19 + 20 + func (l *testLogger) Printf(format string, v ...interface{}) { 21 + l.t.Logf(format, v...) 22 + } 23 + 24 + func (l *testLogger) Println(v ...interface{}) { 25 + l.t.Log(v...) 26 + } 27 + 28 + // ==================================================================================== 29 + // CHRONOLOGICAL VALIDATION - MOST CRITICAL 30 + // ==================================================================================== 31 + 32 + func TestMempoolChronologicalStrict(t *testing.T) { 33 + tmpDir := t.TempDir() 34 + logger := &testLogger{t: t} 35 + baseTime := time.Now().Add(-time.Hour) 36 + 37 + t.Run("RejectOutOfOrder", func(t *testing.T) { 38 + minTime := baseTime 39 + m, err := mempool.NewMempool(tmpDir, 1, minTime, logger) 40 + if err != nil { 41 + t.Fatalf("NewMempool failed: %v", err) 42 + } 43 + 44 + // Add operations in order: 1, 2, 4 45 + ops := []plcclient.PLCOperation{ 46 + {CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)}, 47 + {CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)}, 48 + {CID: "cid4", CreatedAt: baseTime.Add(4 * time.Second)}, 49 + } 50 + 51 + _, err = m.Add(ops) 52 + if err != nil { 53 + t.Fatalf("Add failed: %v", err) 54 + } 55 + 56 + // Now try to add operation 3 (out of order) 57 + outOfOrder := []plcclient.PLCOperation{ 58 + {CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second)}, 59 + } 60 + 61 + _, err = m.Add(outOfOrder) 62 + if err == nil { 63 + t.Error("expected chronological validation error, got nil") 64 + } 65 + 66 + if m.Count() != 3 { 67 + t.Errorf("count should still be 3, got %d", m.Count()) 68 + } 69 + }) 70 + 71 + t.Run("RejectBeforeMinTimestamp", 
func(t *testing.T) { 72 + minTime := baseTime.Add(10 * time.Second) 73 + m, err := mempool.NewMempool(tmpDir, 2, minTime, logger) 74 + if err != nil { 75 + t.Fatalf("NewMempool failed: %v", err) 76 + } 77 + 78 + // Try to add operation before min timestamp 79 + tooEarly := []plcclient.PLCOperation{ 80 + {CID: "cid1", CreatedAt: baseTime}, // Before minTime 81 + } 82 + 83 + _, err = m.Add(tooEarly) 84 + if err == nil { 85 + t.Error("expected error for operation before min timestamp") 86 + } 87 + }) 88 + 89 + t.Run("AllowEqualTimestamps", func(t *testing.T) { 90 + minTime := baseTime 91 + m, err := mempool.NewMempool(tmpDir, 3, minTime, logger) 92 + if err != nil { 93 + t.Fatalf("NewMempool failed: %v", err) 94 + } 95 + 96 + // Multiple operations with same timestamp (happens in real PLC data) 97 + sameTime := baseTime.Add(5 * time.Second) 98 + ops := []plcclient.PLCOperation{ 99 + {CID: "cid1", CreatedAt: sameTime}, 100 + {CID: "cid2", CreatedAt: sameTime}, 101 + {CID: "cid3", CreatedAt: sameTime}, 102 + } 103 + 104 + added, err := m.Add(ops) 105 + if err != nil { 106 + t.Fatalf("should allow equal timestamps: %v", err) 107 + } 108 + 109 + if added != 3 { 110 + t.Errorf("expected 3 added, got %d", added) 111 + } 112 + }) 113 + 114 + t.Run("ChronologicalAfterReload", func(t *testing.T) { 115 + minTime := baseTime 116 + m, err := mempool.NewMempool(tmpDir, 4, minTime, logger) 117 + if err != nil { 118 + t.Fatalf("NewMempool failed: %v", err) 119 + } 120 + 121 + // Add some operations 122 + ops1 := []plcclient.PLCOperation{ 123 + {CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)}, 124 + {CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)}, 125 + } 126 + m.Add(ops1) 127 + m.Save() 128 + 129 + // Reload mempool 130 + m2, err := mempool.NewMempool(tmpDir, 4, minTime, logger) 131 + if err != nil { 132 + t.Fatalf("NewMempool reload failed: %v", err) 133 + } 134 + 135 + // Try to add out-of-order operation 136 + outOfOrder := []plcclient.PLCOperation{ 137 + {CID: 
"cid0", CreatedAt: baseTime}, // Before loaded ops
		}

		_, err = m2.Add(outOfOrder)
		if err == nil {
			t.Error("should reject out-of-order after reload")
		}

		// Add valid operation after loaded ones
		validOps := []plcclient.PLCOperation{
			{CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second)},
		}

		added, err := m2.Add(validOps)
		if err != nil {
			t.Fatalf("should accept in-order operation: %v", err)
		}

		if added != 1 {
			t.Error("should have added 1 operation")
		}
	})

	t.Run("StrictIncreasingOrder", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 5, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		// Each operation must be >= previous timestamp (equal is allowed).
		ops := []plcclient.PLCOperation{
			{CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)},
			{CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)},
			{CID: "cid3", CreatedAt: baseTime.Add(2 * time.Second)}, // Equal - OK
			{CID: "cid4", CreatedAt: baseTime.Add(3 * time.Second)},
		}

		added, err := m.Add(ops)
		if err != nil {
			t.Fatalf("should allow non-decreasing timestamps: %v", err)
		}

		if added != 4 {
			t.Errorf("expected 4 added, got %d", added)
		}
	})
}

// ====================================================================================
// DUPLICATE PREVENTION
// ====================================================================================

// TestMempoolDuplicatePrevention verifies that the mempool silently skips
// operations whose CID it has already seen — within a batch, across batches,
// and across a save/reload cycle.
func TestMempoolDuplicatePrevention(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("SameCIDTwice", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 6, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		op := plcclient.PLCOperation{
			CID:       "duplicate_cid",
			DID:       "did:plc:test",
			CreatedAt: baseTime.Add(1 * time.Second),
		}

		// Add first time
		added, err := m.Add([]plcclient.PLCOperation{op})
		if err != nil {
			t.Fatalf("first add failed: %v", err)
		}
		if added != 1 {
			t.Error("first add should succeed")
		}

		// Add same CID again (should be silently skipped)
		added, err = m.Add([]plcclient.PLCOperation{op})
		if err != nil {
			t.Fatalf("duplicate add should not error: %v", err)
		}
		if added != 0 {
			t.Errorf("duplicate should be skipped, but added=%d", added)
		}

		if m.Count() != 1 {
			t.Errorf("count should be 1, got %d", m.Count())
		}
	})

	t.Run("DuplicateAcrossSaveLoad", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 7, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		op := plcclient.PLCOperation{
			CID:       "persistent_cid",
			DID:       "did:plc:test",
			CreatedAt: baseTime.Add(1 * time.Second),
		}

		// Add and save
		if _, err := m.Add([]plcclient.PLCOperation{op}); err != nil {
			t.Fatalf("Add failed: %v", err)
		}
		if err := m.Save(); err != nil {
			t.Fatalf("Save failed: %v", err)
		}

		// Reload
		m2, err := mempool.NewMempool(tmpDir, 7, minTime, logger)
		if err != nil {
			t.Fatalf("reload failed: %v", err)
		}

		// Try to add same operation
		added, err := m2.Add([]plcclient.PLCOperation{op})
		if err != nil {
			t.Fatalf("add after reload failed: %v", err)
		}

		if added != 0 {
			t.Errorf("duplicate should be skipped after reload, added=%d", added)
		}

		if m2.Count() != 1 {
			t.Errorf("count should be 1, got %d", m2.Count())
		}
	})

	t.Run("DuplicatesInBatch", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 8, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		// Batch contains duplicates
		ops := []plcclient.PLCOperation{
			{CID: "cid1", DID: "did:plc:001", CreatedAt: baseTime.Add(1 * time.Second)},
			{CID: "cid2", DID: "did:plc:002", CreatedAt: baseTime.Add(2 * time.Second)},
			{CID: "cid1", DID: "did:plc:001", CreatedAt: baseTime.Add(3 * time.Second)}, // Duplicate CID
		}

		added, err := m.Add(ops)
		if err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		// Should only add 2 (skip duplicate)
		if added != 2 {
			t.Errorf("expected 2 unique operations, added %d", added)
		}

		if m.Count() != 2 {
			t.Errorf("count should be 2, got %d", m.Count())
		}
	})
}

// ====================================================================================
// PERSISTENCE & CORRUPTION HANDLING
// ====================================================================================

// TestMempoolPersistence covers the save/load round-trip, incremental saves,
// corrupted on-disk state, and deletion of the backing file.
func TestMempoolPersistence(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("SaveAndLoad", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 9, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		ops := makeTestOperations(50)
		if _, err := m.Add(ops); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		if err := m.Save(); err != nil {
			t.Fatalf("Save failed: %v", err)
		}

		// Reload
		m2, err := mempool.NewMempool(tmpDir, 9, minTime, logger)
		if err != nil {
			t.Fatalf("reload failed: %v", err)
		}

		if m2.Count() != 50 {
			t.Errorf("after reload, expected 50 ops, got %d", m2.Count())
		}

		// Verify data integrity
		loaded := m2.Peek(50)
		for i := 0; i < 50; i++ {
			if loaded[i].CID != ops[i].CID {
				t.Errorf("op %d CID mismatch after reload", i)
			}
		}
	})

	t.Run("IncrementalSave", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 10, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		// Add 10 ops and save
		ops1 := makeTestOperations(10)
		if _, err := m.Add(ops1); err != nil {
			t.Fatalf("first Add failed: %v", err)
		}
		if err := m.Save(); err != nil {
			t.Fatalf("first Save failed: %v", err)
		}

		// Add 10 more and save.
		// makeTestOperationsFrom takes (start, count), so (10, 10) yields ops 10..19.
		ops2 := makeTestOperationsFrom(10, 10)
		if _, err := m.Add(ops2); err != nil {
			t.Fatalf("second Add failed: %v", err)
		}
		if err := m.Save(); err != nil {
			t.Fatalf("second Save failed: %v", err)
		}

		// Reload - should have all 20
		m2, err := mempool.NewMempool(tmpDir, 10, minTime, logger)
		if err != nil {
			t.Fatalf("reload failed: %v", err)
		}

		if m2.Count() != 20 {
			t.Errorf("expected 20 ops after incremental saves, got %d", m2.Count())
		}
	})

	t.Run("CorruptedMempoolFile", func(t *testing.T) {
		minTime := baseTime
		mempoolFile := filepath.Join(tmpDir, "plc_mempool_000011.jsonl")

		// Write corrupted data
		if err := os.WriteFile(mempoolFile, []byte("{invalid json\n{also bad"), 0644); err != nil {
			t.Fatalf("writing corrupted file failed: %v", err)
		}

		// Should error on load
		if _, err := mempool.NewMempool(tmpDir, 11, minTime, logger); err == nil {
			t.Error("expected error loading corrupted mempool")
		}
	})

	t.Run("DeleteMempool", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 12, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		ops := makeTestOperations(10)
		if _, err := m.Add(ops); err != nil {
			t.Fatalf("Add failed: %v", err)
		}
		if err := m.Save(); err != nil {
			t.Fatalf("Save failed: %v", err)
		}

		// Verify file exists
		mempoolFile := filepath.Join(tmpDir, "plc_mempool_000012.jsonl")
		if _, err := os.Stat(mempoolFile); os.IsNotExist(err) {
			t.Fatal("mempool file should exist after save")
		}

		// Delete
		if err := m.Delete(); err != nil {
			t.Fatalf("Delete failed: %v", err)
		}

		// Verify file gone
		if _, err := os.Stat(mempoolFile); !os.IsNotExist(err) {
			t.Error("mempool file should be deleted")
		}
	})
}

// ====================================================================================
// TAKE OPERATIONS - CRITICAL FOR BUNDLING
// ====================================================================================

// TestMempoolTakeOperations verifies Take: exact counts, over-asking,
// FIFO ordering, and the empty-mempool case.
func TestMempoolTakeOperations(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("TakeExact", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 13, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		if _, err := m.Add(makeTestOperations(100)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		taken, err := m.Take(50)
		if err != nil {
			t.Fatalf("Take failed: %v", err)
		}

		if len(taken) != 50 {
			t.Errorf("expected 50 operations, got %d", len(taken))
		}

		if m.Count() != 50 {
			t.Errorf("expected 50 remaining, got %d", m.Count())
		}
	})

	t.Run("TakeMoreThanAvailable", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 14, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		if _, err := m.Add(makeTestOperations(30)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		// Try to take 100 (only 30 available)
		taken, err := m.Take(100)
		if err != nil {
			t.Fatalf("Take failed: %v", err)
		}

		if len(taken) != 30 {
			t.Errorf("expected 30 operations (all available), got %d", len(taken))
		}

		if m.Count() != 0 {
			t.Errorf("mempool should be empty, got %d", m.Count())
		}
	})

	t.Run("TakePreservesOrder", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 15, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		ops := makeTestOperations(100)
		if _, err := m.Add(ops); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		taken, err := m.Take(50)
		if err != nil {
			t.Fatalf("Take failed: %v", err)
		}

		// Verify first 50 match
		for i := 0; i < 50; i++ {
			if taken[i].CID != ops[i].CID {
				t.Errorf("operation %d mismatch: got %s, want %s", i, taken[i].CID, ops[i].CID)
			}
		}

		// Remaining should be ops[50:100]
		remaining := m.Peek(50)
		for i := 0; i < 50; i++ {
			if remaining[i].CID != ops[50+i].CID {
				t.Errorf("remaining op %d mismatch", i)
			}
		}
	})

	t.Run("TakeFromEmpty", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 16, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		taken, err := m.Take(10)
		if err != nil {
			t.Fatalf("Take from empty failed: %v", err)
		}

		if len(taken) != 0 {
			t.Errorf("expected 0 operations from empty mempool, got %d", len(taken))
		}
	})
}

// ====================================================================================
// VALIDATION TESTS
// ====================================================================================

// TestMempoolValidation exercises Validate on a well-formed mempool.
// The corruption-detection cases are skipped because Add enforces the
// invariants; corrupted state is covered via the file-corruption tests.
func TestMempoolValidation(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("ValidateChronological", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 17, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		if _, err := m.Add(makeTestOperations(100)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		if err := m.Validate(); err != nil {
			t.Errorf("Validate failed on valid mempool: %v", err)
		}
	})

	t.Run("ValidateDetectsMinTimestampViolation", func(t *testing.T) {
		// Add enforces the minimum timestamp, so a violation can only be
		// produced via manually corrupted state; covered by the file
		// corruption tests instead of silently passing here.
		t.Skip("requires manually corrupted state; Add enforces min timestamp")
	})

	t.Run("ValidateDetectsDuplicateCIDs", func(t *testing.T) {
		// Same situation: Add prevents duplicates, so this would need a
		// manually constructed corrupted state.
		t.Skip("requires manually corrupted state; Add prevents duplicate CIDs")
	})
}

// ====================================================================================
// CONCURRENCY TESTS
// ====================================================================================

// TestMempoolConcurrency hammers the mempool with concurrent readers and a
// writer. Run with -race to get full value from these tests.
func TestMempoolConcurrency(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("ConcurrentReads", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 19, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		if _, err := m.Add(makeTestOperations(1000)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		// 100 concurrent readers (t.Errorf is safe from multiple goroutines)
		var wg sync.WaitGroup
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				count := m.Count()
				if count != 1000 {
					t.Errorf("count mismatch: got %d", count)
				}

				peek := m.Peek(10)
				if len(peek) != 10 {
					t.Errorf("peek mismatch: got %d", len(peek))
				}
			}()
		}
		wg.Wait()
	})

	t.Run("ConcurrentAddAndRead", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 20, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		var wg sync.WaitGroup
		errors := make(chan error, 100)

		// Writer goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 10; i++ {
				ops := []plcclient.PLCOperation{
					{CID: fmt.Sprintf("cid%d", i*100), CreatedAt: baseTime.Add(time.Duration(i*100) * time.Second)},
				}
				if _, err := m.Add(ops); err != nil {
					errors <- err
				}
				time.Sleep(10 * time.Millisecond)
			}
		}()

		// Reader goroutines
		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := 0; j < 20; j++ {
					m.Count()
					m.Peek(5)
					time.Sleep(5 * time.Millisecond)
				}
			}()
		}

		wg.Wait()
		close(errors)

		for err := range errors {
			t.Errorf("concurrent operation error: %v", err)
		}
	})
}

// ====================================================================================
// STATS & METADATA TESTS
// ====================================================================================

// TestMempoolStats verifies the Stats map for empty, populated, and
// bundle-ready mempools.
func TestMempoolStats(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("StatsEmpty", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 21, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		stats := m.Stats()

		if stats["count"].(int) != 0 {
			t.Error("empty mempool should have count 0")
		}

		if stats["can_create_bundle"].(bool) {
			t.Error("empty mempool cannot create bundle")
		}

		if stats["target_bundle"].(int) != 21 {
			t.Error("target bundle mismatch")
		}
	})

	t.Run("StatsPopulated", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 22, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		if _, err := m.Add(makeTestOperations(100)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		stats := m.Stats()

		if stats["count"].(int) != 100 {
			t.Error("count mismatch in stats")
		}

		if _, ok := stats["first_time"]; !ok {
			t.Error("stats missing first_time")
		}

		if _, ok := stats["last_time"]; !ok {
			t.Error("stats missing last_time")
		}

		if _, ok := stats["size_bytes"]; !ok {
			t.Error("stats missing size_bytes")
		}

		if stats["did_count"].(int) != 100 {
			t.Error("did_count should match operation count for unique DIDs")
		}
	})

	t.Run("StatsCanCreateBundle", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 23, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		// Add exactly BUNDLE_SIZE operations
		if _, err := m.Add(makeTestOperations(types.BUNDLE_SIZE)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		stats := m.Stats()

		if !stats["can_create_bundle"].(bool) {
			t.Error("should be able to create bundle with BUNDLE_SIZE operations")
		}
	})
}

// ====================================================================================
// DID SEARCH TESTS
// ====================================================================================

// TestMempoolDIDSearch covers FindDIDOperations and FindLatestDIDOperation,
// including nullified-operation handling and the not-found case.
func TestMempoolDIDSearch(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("FindDIDOperations", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 24, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		targetDID := "did:plc:target"

		ops := []plcclient.PLCOperation{
			{DID: "did:plc:other1", CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)},
			{DID: targetDID, CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)},
			{DID: "did:plc:other2", CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second)},
			{DID: targetDID, CID: "cid4", CreatedAt: baseTime.Add(4 * time.Second)},
			{DID: "did:plc:other3", CID: "cid5", CreatedAt: baseTime.Add(5 * time.Second)},
		}

		if _, err := m.Add(ops); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		// Search
		found := m.FindDIDOperations(targetDID)

		if len(found) != 2 {
			t.Errorf("expected 2 operations for %s, got %d", targetDID, len(found))
		}

		if found[0].CID != "cid2" || found[1].CID != "cid4" {
			t.Error("wrong operations returned")
		}
	})

	t.Run("FindLatestDIDOperation", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 25, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		targetDID := "did:plc:target"

		ops := []plcclient.PLCOperation{
			{DID: targetDID, CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second), Nullified: false},
			{DID: targetDID, CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second), Nullified: false},
			{DID: targetDID, CID: "cid3", CreatedAt: baseTime.Add(3 * time.Second), Nullified: true}, // Nullified
		}

		if _, err := m.Add(ops); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		// Should return cid2 (latest non-nullified)
		latest := m.FindLatestDIDOperation(targetDID)

		if latest == nil {
			t.Fatal("expected to find operation, got nil")
		}

		if latest.CID != "cid2" {
			t.Errorf("expected cid2 (latest non-nullified), got %s", latest.CID)
		}
	})

	t.Run("FindLatestDIDOperation_AllNullified", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 26, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		targetDID := "did:plc:target"

		ops := []plcclient.PLCOperation{
			{DID: targetDID, CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second), Nullified: true},
			{DID: targetDID, CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second), Nullified: true},
		}

		if _, err := m.Add(ops); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		if latest := m.FindLatestDIDOperation(targetDID); latest != nil {
			t.Error("should return nil when all operations are nullified")
		}
	})

	t.Run("FindDIDOperations_NotFound", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 27, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		if _, err := m.Add(makeTestOperations(100)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		found := m.FindDIDOperations("did:plc:nonexistent")

		if len(found) != 0 {
			t.Errorf("expected empty result, got %d operations", len(found))
		}
	})
}

// ====================================================================================
// CLEAR OPERATION TESTS
// ====================================================================================

// TestMempoolClear verifies that Clear empties the pool and that new
// operations can be added afterwards.
func TestMempoolClear(t *testing.T) {
	tmpDir := t.TempDir()
	logger := &testLogger{t: t}
	baseTime := time.Now().Add(-time.Hour)

	t.Run("ClearPopulated", func(t *testing.T) {
		minTime := baseTime
		m, err := mempool.NewMempool(tmpDir, 28, minTime, logger)
		if err != nil {
			t.Fatalf("NewMempool failed: %v", err)
		}

		if _, err := m.Add(makeTestOperations(100)); err != nil {
			t.Fatalf("Add failed: %v", err)
		}

		if m.Count() != 100 {
			t.Fatal("setup failed")
		}

		m.Clear()

		if m.Count() != 0 {
			t.Errorf("after clear, count should be 0, got %d", m.Count())
		}

		// Should be able to add new operations
		newOps := []plcclient.PLCOperation{
			{CID: "new1", CreatedAt: baseTime.Add(200 * time.Second)},
		}

		added, err := m.Add(newOps)
		if err != nil {
			t.Fatalf("Add after clear failed: %v", err)
		}

		if added != 1 {
			t.Error("should be able to add after clear")
		}
	})
}

// ====================================================================================
// HELPER FUNCTIONS
// ====================================================================================

// makeTestOperations returns count synthetic operations starting at index 0.
func makeTestOperations(count int) []plcclient.PLCOperation {
	return makeTestOperationsFrom(0, count)
}

// makeTestOperationsFrom returns count synthetic operations whose DIDs, CIDs,
// and timestamps are derived from consecutive indices beginning at start.
// Timestamps are one second apart, anchored one hour in the past.
func makeTestOperationsFrom(start, count int) []plcclient.PLCOperation {
	ops := make([]plcclient.PLCOperation, count)
	baseTime := time.Now().Add(-time.Hour)

	for i := 0; i < count; i++ {
		idx := start + i
		ops[i] = plcclient.PLCOperation{
			DID:       fmt.Sprintf("did:plc:test%06d", idx),
			CID:       fmt.Sprintf("bafy%06d", idx),
			CreatedAt: baseTime.Add(time.Duration(idx) * time.Second),
		}
	}

	return ops
}
+867
internal/storage/storage_test.go
··· 1 + package storage_test 2 + 3 + import ( 4 + "bufio" 5 + "bytes" 6 + "fmt" 7 + "os" 8 + "path/filepath" 9 + "sync" 10 + "testing" 11 + "time" 12 + 13 + "tangled.org/atscan.net/plcbundle/internal/plcclient" 14 + "tangled.org/atscan.net/plcbundle/internal/storage" 15 + ) 16 + 17 + type testLogger struct { 18 + t *testing.T 19 + } 20 + 21 + func (l *testLogger) Printf(format string, v ...interface{}) { 22 + l.t.Logf(format, v...) 23 + } 24 + 25 + func (l *testLogger) Println(v ...interface{}) { 26 + l.t.Log(v...) 27 + } 28 + 29 + // ==================================================================================== 30 + // COMPRESSION TESTS 31 + // ==================================================================================== 32 + 33 + func TestStorageCompression(t *testing.T) { 34 + tmpDir := t.TempDir() 35 + logger := &testLogger{t: t} 36 + ops, err := storage.NewOperations(logger) 37 + if err != nil { 38 + t.Fatalf("NewOperations failed: %v", err) 39 + } 40 + defer ops.Close() 41 + 42 + t.Run("RoundTripCompression", func(t *testing.T) { 43 + tests := []struct { 44 + name string 45 + count int 46 + }{ 47 + {"Empty", 0}, 48 + {"Single", 1}, 49 + {"Small", 10}, 50 + {"Medium", 100}, 51 + {"Large", 1000}, 52 + {"FullBundle", 10000}, 53 + } 54 + 55 + for _, tt := range tests { 56 + t.Run(tt.name, func(t *testing.T) { 57 + if tt.count == 0 { 58 + return // Skip empty for now 59 + } 60 + 61 + original := makeTestOperations(tt.count) 62 + path := filepath.Join(tmpDir, tt.name+".jsonl.zst") 63 + 64 + // Save 65 + _, _, _, _, err := ops.SaveBundle(path, original) 66 + if err != nil { 67 + t.Fatalf("SaveBundle failed: %v", err) 68 + } 69 + 70 + // Load 71 + loaded, err := ops.LoadBundle(path) 72 + if err != nil { 73 + t.Fatalf("LoadBundle failed: %v", err) 74 + } 75 + 76 + // Verify count 77 + if len(loaded) != len(original) { 78 + t.Errorf("count mismatch: got %d, want %d", len(loaded), len(original)) 79 + } 80 + 81 + // Verify each operation 82 + for i := range 
original { 83 + if loaded[i].DID != original[i].DID { 84 + t.Errorf("op %d DID mismatch: got %s, want %s", i, loaded[i].DID, original[i].DID) 85 + } 86 + if loaded[i].CID != original[i].CID { 87 + t.Errorf("op %d CID mismatch: got %s, want %s", i, loaded[i].CID, original[i].CID) 88 + } 89 + if !loaded[i].CreatedAt.Equal(original[i].CreatedAt) { 90 + t.Errorf("op %d timestamp mismatch", i) 91 + } 92 + } 93 + }) 94 + } 95 + }) 96 + 97 + t.Run("CompressionRatio", func(t *testing.T) { 98 + operations := makeTestOperations(10000) 99 + path := filepath.Join(tmpDir, "compression_test.jsonl.zst") 100 + 101 + _, _, uncompSize, compSize, err := ops.SaveBundle(path, operations) 102 + if err != nil { 103 + t.Fatalf("SaveBundle failed: %v", err) 104 + } 105 + 106 + if compSize >= uncompSize { 107 + t.Errorf("compression failed: compressed=%d >= uncompressed=%d", compSize, uncompSize) 108 + } 109 + 110 + ratio := float64(uncompSize) / float64(compSize) 111 + if ratio < 2.0 { 112 + t.Errorf("poor compression ratio: %.2fx (expected > 2.0x)", ratio) 113 + } 114 + 115 + t.Logf("Compression ratio: %.2fx (%d → %d bytes)", ratio, uncompSize, compSize) 116 + }) 117 + 118 + t.Run("CompressedDataIntegrity", func(t *testing.T) { 119 + operations := makeTestOperations(100) 120 + path := filepath.Join(tmpDir, "integrity_test.jsonl.zst") 121 + 122 + contentHash, compHash, _, _, err := ops.SaveBundle(path, operations) 123 + if err != nil { 124 + t.Fatalf("SaveBundle failed: %v", err) 125 + } 126 + 127 + // Recalculate hashes 128 + calcCompHash, _, calcContentHash, _, err := ops.CalculateFileHashes(path) 129 + if err != nil { 130 + t.Fatalf("CalculateFileHashes failed: %v", err) 131 + } 132 + 133 + if calcCompHash != compHash { 134 + t.Errorf("compressed hash mismatch: got %s, want %s", calcCompHash, compHash) 135 + } 136 + 137 + if calcContentHash != contentHash { 138 + t.Errorf("content hash mismatch: got %s, want %s", calcContentHash, contentHash) 139 + } 140 + }) 141 + } 142 + 143 + // 
==================================================================================== 144 + // HASHING TESTS - CRITICAL FOR CHAIN INTEGRITY 145 + // ==================================================================================== 146 + 147 + func TestStorageHashing(t *testing.T) { 148 + logger := &testLogger{t: t} 149 + ops, err := storage.NewOperations(logger) 150 + if err != nil { 151 + t.Fatalf("NewOperations failed: %v", err) 152 + } 153 + defer ops.Close() 154 + 155 + t.Run("HashDeterminism", func(t *testing.T) { 156 + data := []byte("test data for hashing") 157 + 158 + // Calculate hash multiple times 159 + hashes := make([]string, 100) 160 + for i := 0; i < 100; i++ { 161 + hashes[i] = ops.Hash(data) 162 + } 163 + 164 + // All should be identical 165 + firstHash := hashes[0] 166 + for i, h := range hashes { 167 + if h != firstHash { 168 + t.Errorf("hash %d differs: got %s, want %s", i, h, firstHash) 169 + } 170 + } 171 + 172 + // Verify it's actually a valid SHA256 hex (64 chars) 173 + if len(firstHash) != 64 { 174 + t.Errorf("invalid hash length: got %d, want 64", len(firstHash)) 175 + } 176 + }) 177 + 178 + t.Run("ChainHashCalculation", func(t *testing.T) { 179 + contentHash := "abc123def456" 180 + 181 + // Genesis bundle (no parent) 182 + genesisHash := ops.CalculateChainHash("", contentHash) 183 + expectedGenesis := ops.Hash([]byte("plcbundle:genesis:" + contentHash)) 184 + if genesisHash != expectedGenesis { 185 + t.Errorf("genesis hash mismatch: got %s, want %s", genesisHash, expectedGenesis) 186 + } 187 + 188 + // Second bundle (has parent) 189 + parentHash := genesisHash 190 + childHash := ops.CalculateChainHash(parentHash, contentHash) 191 + expectedChild := ops.Hash([]byte(parentHash + ":" + contentHash)) 192 + if childHash != expectedChild { 193 + t.Errorf("child hash mismatch: got %s, want %s", childHash, expectedChild) 194 + } 195 + 196 + // Chain continues 197 + grandchildHash := ops.CalculateChainHash(childHash, contentHash) 198 + 
expectedGrandchild := ops.Hash([]byte(childHash + ":" + contentHash)) 199 + if grandchildHash != expectedGrandchild { 200 + t.Errorf("grandchild hash mismatch") 201 + } 202 + }) 203 + 204 + t.Run("HashSensitivity", func(t *testing.T) { 205 + // Small changes should produce completely different hashes 206 + data1 := []byte("test data") 207 + data2 := []byte("test datb") // Changed one char 208 + data3 := []byte("test data ") // Added space 209 + 210 + hash1 := ops.Hash(data1) 211 + hash2 := ops.Hash(data2) 212 + hash3 := ops.Hash(data3) 213 + 214 + if hash1 == hash2 { 215 + t.Error("different data produced same hash (collision!)") 216 + } 217 + if hash1 == hash3 { 218 + t.Error("different data produced same hash (collision!)") 219 + } 220 + }) 221 + 222 + t.Run("EmptyDataHash", func(t *testing.T) { 223 + hash := ops.Hash([]byte{}) 224 + if len(hash) != 64 { 225 + t.Errorf("empty data hash invalid length: %d", len(hash)) 226 + } 227 + // SHA256 of empty string is known constant 228 + // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 229 + expected := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" 230 + if hash != expected { 231 + t.Errorf("empty data hash mismatch: got %s, want %s", hash, expected) 232 + } 233 + }) 234 + } 235 + 236 + // ==================================================================================== 237 + // CONCURRENCY TESTS - CRITICAL FOR PRODUCTION 238 + // ==================================================================================== 239 + 240 + func TestStorageConcurrency(t *testing.T) { 241 + tmpDir := t.TempDir() 242 + logger := &testLogger{t: t} 243 + ops, err := storage.NewOperations(logger) 244 + if err != nil { 245 + t.Fatalf("NewOperations failed: %v", err) 246 + } 247 + defer ops.Close() 248 + 249 + t.Run("ParallelBundleReads", func(t *testing.T) { 250 + // Create test bundle 251 + operations := makeTestOperations(10000) 252 + path := filepath.Join(tmpDir, "parallel_test.jsonl.zst") 253 + 
_, _, _, _, err := ops.SaveBundle(path, operations) 254 + if err != nil { 255 + t.Fatalf("SaveBundle failed: %v", err) 256 + } 257 + 258 + // Read from 100 goroutines simultaneously 259 + var wg sync.WaitGroup 260 + errors := make(chan error, 100) 261 + 262 + for i := 0; i < 100; i++ { 263 + wg.Add(1) 264 + go func(id int) { 265 + defer wg.Done() 266 + loaded, err := ops.LoadBundle(path) 267 + if err != nil { 268 + errors <- err 269 + return 270 + } 271 + if len(loaded) != 10000 { 272 + errors <- err 273 + } 274 + }(i) 275 + } 276 + 277 + wg.Wait() 278 + close(errors) 279 + 280 + for err := range errors { 281 + t.Errorf("concurrent read error: %v", err) 282 + } 283 + }) 284 + 285 + t.Run("LoadOperationAtPositionConcurrency", func(t *testing.T) { 286 + // Critical test - this is heavily used by DID lookups 287 + operations := makeTestOperations(10000) 288 + path := filepath.Join(tmpDir, "position_test.jsonl.zst") 289 + _, _, _, _, err := ops.SaveBundle(path, operations) 290 + if err != nil { 291 + t.Fatalf("SaveBundle failed: %v", err) 292 + } 293 + 294 + // 200 concurrent position reads 295 + var wg sync.WaitGroup 296 + errors := make(chan error, 200) 297 + 298 + for i := 0; i < 200; i++ { 299 + wg.Add(1) 300 + go func(position int) { 301 + defer wg.Done() 302 + op, err := ops.LoadOperationAtPosition(path, position%10000) 303 + if err != nil { 304 + errors <- err 305 + return 306 + } 307 + if op == nil { 308 + errors <- err 309 + } 310 + }(i) 311 + } 312 + 313 + wg.Wait() 314 + close(errors) 315 + 316 + for err := range errors { 317 + t.Errorf("concurrent position read error: %v", err) 318 + } 319 + }) 320 + 321 + t.Run("ConcurrentHashVerification", func(t *testing.T) { 322 + operations := makeTestOperations(1000) 323 + path := filepath.Join(tmpDir, "verify_test.jsonl.zst") 324 + _, compHash, _, _, err := ops.SaveBundle(path, operations) 325 + if err != nil { 326 + t.Fatalf("SaveBundle failed: %v", err) 327 + } 328 + 329 + var wg sync.WaitGroup 330 + for i := 0; i 
< 50; i++ { 331 + wg.Add(1) 332 + go func() { 333 + defer wg.Done() 334 + valid, _, err := ops.VerifyHash(path, compHash) 335 + if err != nil { 336 + t.Errorf("VerifyHash failed: %v", err) 337 + } 338 + if !valid { 339 + t.Error("hash verification failed") 340 + } 341 + }() 342 + } 343 + wg.Wait() 344 + }) 345 + } 346 + 347 + // ==================================================================================== 348 + // EDGE CASES & ERROR HANDLING 349 + // ==================================================================================== 350 + 351 + func TestStorageEdgeCases(t *testing.T) { 352 + tmpDir := t.TempDir() 353 + logger := &testLogger{t: t} 354 + ops, err := storage.NewOperations(logger) 355 + if err != nil { 356 + t.Fatalf("NewOperations failed: %v", err) 357 + } 358 + defer ops.Close() 359 + 360 + t.Run("CorruptedZstdFile", func(t *testing.T) { 361 + path := filepath.Join(tmpDir, "corrupted.jsonl.zst") 362 + // Write invalid zstd data 363 + os.WriteFile(path, []byte("this is not valid zstd data"), 0644) 364 + 365 + _, err := ops.LoadBundle(path) 366 + if err == nil { 367 + t.Error("expected error loading corrupted file, got nil") 368 + } 369 + }) 370 + 371 + t.Run("TruncatedFile", func(t *testing.T) { 372 + operations := makeTestOperations(100) 373 + path := filepath.Join(tmpDir, "truncated.jsonl.zst") 374 + ops.SaveBundle(path, operations) 375 + 376 + // Read and truncate 377 + data, _ := os.ReadFile(path) 378 + os.WriteFile(path, data[:len(data)/2], 0644) 379 + 380 + _, err := ops.LoadBundle(path) 381 + if err == nil { 382 + t.Error("expected error loading truncated file, got nil") 383 + } 384 + }) 385 + 386 + t.Run("InvalidJSONL", func(t *testing.T) { 387 + path := filepath.Join(tmpDir, "invalid.jsonl.zst") 388 + invalidData := []byte("{invalid json}\n{also invalid}") 389 + 390 + // Manually compress invalid data 391 + operations := makeTestOperations(10) 392 + ops.SaveBundle(path, operations) // Create valid file first 393 + 394 + // Now corrupt 
it with invalid JSON 395 + // This is hard to test properly since SaveBundle enforces valid data 396 + // Better to test ParseJSONL directly 397 + _, err := ops.ParseJSONL(invalidData) 398 + if err == nil { 399 + t.Error("expected error parsing invalid JSONL, got nil") 400 + } 401 + }) 402 + 403 + t.Run("NonExistentFile", func(t *testing.T) { 404 + _, err := ops.LoadBundle("/nonexistent/path/file.jsonl.zst") 405 + if err == nil { 406 + t.Error("expected error loading nonexistent file, got nil") 407 + } 408 + }) 409 + 410 + t.Run("InvalidPosition", func(t *testing.T) { 411 + operations := makeTestOperations(100) 412 + path := filepath.Join(tmpDir, "position_test.jsonl.zst") 413 + ops.SaveBundle(path, operations) 414 + 415 + // Negative position 416 + _, err := ops.LoadOperationAtPosition(path, -1) 417 + if err == nil { 418 + t.Error("expected error for negative position") 419 + } 420 + 421 + // Position beyond file 422 + _, err = ops.LoadOperationAtPosition(path, 10000) 423 + if err == nil { 424 + t.Error("expected error for position beyond file") 425 + } 426 + }) 427 + } 428 + 429 + // ==================================================================================== 430 + // BOUNDARY CONDITIONS - CRITICAL FOR BUNDLE CHAINING 431 + // ==================================================================================== 432 + 433 + func TestStorageBoundaryConditions(t *testing.T) { 434 + logger := &testLogger{t: t} 435 + ops, err := storage.NewOperations(logger) 436 + if err != nil { 437 + t.Fatalf("NewOperations failed: %v", err) 438 + } 439 + defer ops.Close() 440 + 441 + t.Run("GetBoundaryCIDs_SingleOperation", func(t *testing.T) { 442 + baseTime := time.Now() 443 + operations := []plcclient.PLCOperation{ 444 + {CID: "cid1", CreatedAt: baseTime}, 445 + } 446 + 447 + boundaryTime, cids := ops.GetBoundaryCIDs(operations) 448 + 449 + if !boundaryTime.Equal(baseTime) { 450 + t.Error("boundary time mismatch") 451 + } 452 + if len(cids) != 1 { 453 + t.Errorf("expected 
1 boundary CID, got %d", len(cids)) 454 + } 455 + if !cids["cid1"] { 456 + t.Error("expected cid1 in boundary set") 457 + } 458 + }) 459 + 460 + t.Run("GetBoundaryCIDs_MultipleSameTimestamp", func(t *testing.T) { 461 + // CRITICAL: Operations with identical timestamps (happens in real data) 462 + baseTime := time.Now() 463 + operations := []plcclient.PLCOperation{ 464 + {CID: "cid1", CreatedAt: baseTime.Add(-2 * time.Second)}, 465 + {CID: "cid2", CreatedAt: baseTime.Add(-1 * time.Second)}, 466 + {CID: "cid3", CreatedAt: baseTime}, // Last timestamp 467 + {CID: "cid4", CreatedAt: baseTime}, // Same as cid3 468 + {CID: "cid5", CreatedAt: baseTime}, // Same as cid3 469 + } 470 + 471 + boundaryTime, cids := ops.GetBoundaryCIDs(operations) 472 + 473 + if !boundaryTime.Equal(baseTime) { 474 + t.Error("boundary time should be last operation time") 475 + } 476 + 477 + // Should return ALL CIDs with the last timestamp 478 + if len(cids) != 3 { 479 + t.Errorf("expected 3 boundary CIDs, got %d", len(cids)) 480 + } 481 + 482 + for _, expectedCID := range []string{"cid3", "cid4", "cid5"} { 483 + if !cids[expectedCID] { 484 + t.Errorf("expected %s in boundary set", expectedCID) 485 + } 486 + } 487 + 488 + // Earlier CIDs should NOT be in set 489 + if cids["cid1"] || cids["cid2"] { 490 + t.Error("earlier CIDs should not be in boundary set") 491 + } 492 + }) 493 + 494 + t.Run("GetBoundaryCIDs_AllSameTimestamp", func(t *testing.T) { 495 + baseTime := time.Now() 496 + operations := []plcclient.PLCOperation{ 497 + {CID: "cid1", CreatedAt: baseTime}, 498 + {CID: "cid2", CreatedAt: baseTime}, 499 + {CID: "cid3", CreatedAt: baseTime}, 500 + } 501 + 502 + _, cids := ops.GetBoundaryCIDs(operations) 503 + 504 + if len(cids) != 3 { 505 + t.Errorf("expected all 3 CIDs, got %d", len(cids)) 506 + } 507 + }) 508 + 509 + t.Run("GetBoundaryCIDs_EmptyOperations", func(t *testing.T) { 510 + operations := []plcclient.PLCOperation{} 511 + boundaryTime, cids := ops.GetBoundaryCIDs(operations) 512 + 
513 + if !boundaryTime.IsZero() { 514 + t.Error("expected zero time for empty operations") 515 + } 516 + if len(cids) > 0 { 517 + t.Error("expected nil or empty CID set") 518 + } 519 + }) 520 + 521 + t.Run("StripBoundaryDuplicates_ActualDuplication", func(t *testing.T) { 522 + // CRITICAL: This prevents duplicate operations across bundle boundaries 523 + baseTime := time.Now() 524 + boundaryTimestamp := baseTime.Format(time.RFC3339Nano) 525 + 526 + prevBoundaryCIDs := map[string]bool{ 527 + "cid3": true, 528 + "cid4": true, 529 + } 530 + 531 + operations := []plcclient.PLCOperation{ 532 + {CID: "cid3", CreatedAt: baseTime}, // Duplicate - should be stripped 533 + {CID: "cid4", CreatedAt: baseTime}, // Duplicate - should be stripped 534 + {CID: "cid5", CreatedAt: baseTime}, // New - should be kept 535 + {CID: "cid6", CreatedAt: baseTime.Add(1 * time.Second)}, // After boundary - kept 536 + } 537 + 538 + result := ops.StripBoundaryDuplicates(operations, boundaryTimestamp, prevBoundaryCIDs) 539 + 540 + if len(result) != 2 { 541 + t.Errorf("expected 2 operations after stripping, got %d", len(result)) 542 + } 543 + 544 + if result[0].CID != "cid5" { 545 + t.Errorf("expected cid5 first, got %s", result[0].CID) 546 + } 547 + if result[1].CID != "cid6" { 548 + t.Errorf("expected cid6 second, got %s", result[1].CID) 549 + } 550 + }) 551 + 552 + t.Run("StripBoundaryDuplicates_NoDuplicates", func(t *testing.T) { 553 + baseTime := time.Now() 554 + boundaryTimestamp := baseTime.Format(time.RFC3339Nano) 555 + 556 + prevBoundaryCIDs := map[string]bool{ 557 + "old_cid": true, 558 + } 559 + 560 + operations := []plcclient.PLCOperation{ 561 + {CID: "cid1", CreatedAt: baseTime.Add(1 * time.Second)}, 562 + {CID: "cid2", CreatedAt: baseTime.Add(2 * time.Second)}, 563 + } 564 + 565 + result := ops.StripBoundaryDuplicates(operations, boundaryTimestamp, prevBoundaryCIDs) 566 + 567 + if len(result) != 2 { 568 + t.Errorf("expected 2 operations, got %d", len(result)) 569 + } 570 + }) 571 + 
572 + t.Run("StripBoundaryDuplicates_EmptyPrevious", func(t *testing.T) { 573 + baseTime := time.Now() 574 + operations := makeTestOperations(10) 575 + 576 + result := ops.StripBoundaryDuplicates(operations, baseTime.Format(time.RFC3339Nano), nil) 577 + 578 + if len(result) != len(operations) { 579 + t.Error("should not strip anything with no previous boundary CIDs") 580 + } 581 + }) 582 + } 583 + 584 + // ==================================================================================== 585 + // SERIALIZATION TESTS 586 + // ==================================================================================== 587 + 588 + func TestStorageSerialization(t *testing.T) { 589 + logger := &testLogger{t: t} 590 + ops, err := storage.NewOperations(logger) 591 + if err != nil { 592 + t.Fatalf("NewOperations failed: %v", err) 593 + } 594 + defer ops.Close() 595 + 596 + t.Run("SerializeJSONL_PreservesRawJSON", func(t *testing.T) { 597 + rawJSON := []byte(`{"did":"did:plc:test","cid":"bafytest","createdAt":"2024-01-01T00:00:00.000Z"}`) 598 + op := plcclient.PLCOperation{ 599 + DID: "did:plc:test", 600 + CID: "bafytest", 601 + CreatedAt: time.Now(), 602 + RawJSON: rawJSON, 603 + } 604 + 605 + result := ops.SerializeJSONL([]plcclient.PLCOperation{op}) 606 + 607 + // Should use RawJSON directly 608 + if !containsBytes(result, rawJSON) { 609 + t.Error("SerializeJSONL did not preserve RawJSON") 610 + } 611 + }) 612 + 613 + t.Run("SerializeJSONL_MarshalsFallback", func(t *testing.T) { 614 + op := plcclient.PLCOperation{ 615 + DID: "did:plc:test", 616 + CID: "bafytest", 617 + CreatedAt: time.Now(), 618 + // No RawJSON - should marshal 619 + } 620 + 621 + result := ops.SerializeJSONL([]plcclient.PLCOperation{op}) 622 + 623 + if len(result) == 0 { 624 + t.Error("SerializeJSONL returned empty result") 625 + } 626 + 627 + // Should contain the DID 628 + if !containsBytes(result, []byte("did:plc:test")) { 629 + t.Error("serialized data missing DID") 630 + } 631 + }) 632 + 633 + 
t.Run("ParseJSONL_RoundTrip", func(t *testing.T) { 634 + original := makeTestOperations(100) 635 + data := ops.SerializeJSONL(original) 636 + 637 + parsed, err := ops.ParseJSONL(data) 638 + if err != nil { 639 + t.Fatalf("ParseJSONL failed: %v", err) 640 + } 641 + 642 + if len(parsed) != len(original) { 643 + t.Errorf("count mismatch: got %d, want %d", len(parsed), len(original)) 644 + } 645 + 646 + // Verify RawJSON is populated 647 + for i, op := range parsed { 648 + if len(op.RawJSON) == 0 { 649 + t.Errorf("operation %d missing RawJSON", i) 650 + } 651 + } 652 + }) 653 + } 654 + 655 + // ==================================================================================== 656 + // UTILITY FUNCTION TESTS 657 + // ==================================================================================== 658 + 659 + func TestStorageUtilities(t *testing.T) { 660 + tmpDir := t.TempDir() 661 + logger := &testLogger{t: t} 662 + ops, err := storage.NewOperations(logger) 663 + if err != nil { 664 + t.Fatalf("NewOperations failed: %v", err) 665 + } 666 + defer ops.Close() 667 + 668 + t.Run("ExtractUniqueDIDs", func(t *testing.T) { 669 + operations := []plcclient.PLCOperation{ 670 + {DID: "did:plc:aaa"}, 671 + {DID: "did:plc:bbb"}, 672 + {DID: "did:plc:aaa"}, // Duplicate 673 + {DID: "did:plc:ccc"}, 674 + {DID: "did:plc:bbb"}, // Duplicate 675 + {DID: "did:plc:aaa"}, // Duplicate 676 + } 677 + 678 + dids := ops.ExtractUniqueDIDs(operations) 679 + 680 + if len(dids) != 3 { 681 + t.Errorf("expected 3 unique DIDs, got %d", len(dids)) 682 + } 683 + 684 + // Verify all expected DIDs present 685 + didSet := make(map[string]bool) 686 + for _, did := range dids { 687 + didSet[did] = true 688 + } 689 + 690 + for _, expectedDID := range []string{"did:plc:aaa", "did:plc:bbb", "did:plc:ccc"} { 691 + if !didSet[expectedDID] { 692 + t.Errorf("missing expected DID: %s", expectedDID) 693 + } 694 + } 695 + }) 696 + 697 + t.Run("ExtractUniqueDIDs_Empty", func(t *testing.T) { 698 + dids := 
ops.ExtractUniqueDIDs([]plcclient.PLCOperation{}) 699 + if len(dids) != 0 { 700 + t.Error("expected empty result for empty input") 701 + } 702 + }) 703 + 704 + t.Run("FileExists", func(t *testing.T) { 705 + existingFile := filepath.Join(tmpDir, "exists.txt") 706 + os.WriteFile(existingFile, []byte("test"), 0644) 707 + 708 + if !ops.FileExists(existingFile) { 709 + t.Error("FileExists returned false for existing file") 710 + } 711 + 712 + if ops.FileExists(filepath.Join(tmpDir, "nonexistent.txt")) { 713 + t.Error("FileExists returned true for nonexistent file") 714 + } 715 + }) 716 + 717 + t.Run("GetFileSize", func(t *testing.T) { 718 + testFile := filepath.Join(tmpDir, "size_test.txt") 719 + testData := []byte("exactly 12 b") 720 + os.WriteFile(testFile, testData, 0644) 721 + 722 + size, err := ops.GetFileSize(testFile) 723 + if err != nil { 724 + t.Fatalf("GetFileSize failed: %v", err) 725 + } 726 + 727 + if size != int64(len(testData)) { 728 + t.Errorf("size mismatch: got %d, want %d", size, len(testData)) 729 + } 730 + }) 731 + } 732 + 733 + // ==================================================================================== 734 + // STREAMING TESTS 735 + // ==================================================================================== 736 + 737 + func TestStorageStreaming(t *testing.T) { 738 + tmpDir := t.TempDir() 739 + logger := &testLogger{t: t} 740 + ops, err := storage.NewOperations(logger) 741 + if err != nil { 742 + t.Fatalf("NewOperations failed: %v", err) 743 + } 744 + defer ops.Close() 745 + 746 + t.Run("StreamRaw", func(t *testing.T) { 747 + operations := makeTestOperations(100) 748 + path := filepath.Join(tmpDir, "stream_raw.jsonl.zst") 749 + _, _, _, _, err := ops.SaveBundle(path, operations) 750 + if err != nil { 751 + t.Fatalf("SaveBundle failed: %v", err) 752 + } 753 + 754 + reader, err := ops.StreamRaw(path) 755 + if err != nil { 756 + t.Fatalf("StreamRaw failed: %v", err) 757 + } 758 + defer reader.Close() 759 + 760 + // Read all data 
761 + data := make([]byte, 1024*1024) 762 + n, err := reader.Read(data) 763 + if err != nil && err.Error() != "EOF" { 764 + t.Fatalf("Read failed: %v", err) 765 + } 766 + 767 + if n == 0 { 768 + t.Error("StreamRaw returned no data") 769 + } 770 + }) 771 + 772 + t.Run("StreamDecompressed", func(t *testing.T) { 773 + operations := makeTestOperations(100) 774 + path := filepath.Join(tmpDir, "stream_decomp.jsonl.zst") 775 + ops.SaveBundle(path, operations) 776 + 777 + reader, err := ops.StreamDecompressed(path) 778 + if err != nil { 779 + t.Fatalf("StreamDecompressed failed: %v", err) 780 + } 781 + defer reader.Close() 782 + 783 + // Count JSONL lines 784 + scanner := bufio.NewScanner(reader) 785 + lineCount := 0 786 + for scanner.Scan() { 787 + lineCount++ 788 + } 789 + 790 + if lineCount != 100 { 791 + t.Errorf("expected 100 lines, got %d", lineCount) 792 + } 793 + }) 794 + } 795 + 796 + // ==================================================================================== 797 + // PERFORMANCE / BENCHMARK TESTS 798 + // ==================================================================================== 799 + 800 + func BenchmarkStorageOperations(b *testing.B) { 801 + tmpDir := b.TempDir() 802 + logger := &testLogger{t: &testing.T{}} 803 + ops, _ := storage.NewOperations(logger) 804 + defer ops.Close() 805 + 806 + operations := makeTestOperations(10000) 807 + 808 + b.Run("SaveBundle", func(b *testing.B) { 809 + for i := 0; i < b.N; i++ { 810 + path := filepath.Join(tmpDir, fmt.Sprintf("bench_%d.jsonl.zst", i)) 811 + ops.SaveBundle(path, operations) 812 + } 813 + }) 814 + 815 + // Create bundle for read benchmarks 816 + testPath := filepath.Join(tmpDir, "bench_read.jsonl.zst") 817 + ops.SaveBundle(testPath, operations) 818 + 819 + b.Run("LoadBundle", func(b *testing.B) { 820 + for i := 0; i < b.N; i++ { 821 + ops.LoadBundle(testPath) 822 + } 823 + }) 824 + 825 + b.Run("LoadOperationAtPosition", func(b *testing.B) { 826 + for i := 0; i < b.N; i++ { 827 + 
ops.LoadOperationAtPosition(testPath, i%10000) 828 + } 829 + }) 830 + 831 + b.Run("Hash", func(b *testing.B) { 832 + data := ops.SerializeJSONL(operations) 833 + b.ResetTimer() 834 + for i := 0; i < b.N; i++ { 835 + ops.Hash(data) 836 + } 837 + }) 838 + 839 + b.Run("SerializeJSONL", func(b *testing.B) { 840 + for i := 0; i < b.N; i++ { 841 + ops.SerializeJSONL(operations) 842 + } 843 + }) 844 + } 845 + 846 + // ==================================================================================== 847 + // HELPER FUNCTIONS 848 + // ==================================================================================== 849 + 850 + func makeTestOperations(count int) []plcclient.PLCOperation { 851 + ops := make([]plcclient.PLCOperation, count) 852 + baseTime := time.Now().Add(-time.Hour) 853 + 854 + for i := 0; i < count; i++ { 855 + ops[i] = plcclient.PLCOperation{ 856 + DID: fmt.Sprintf("did:plc:test%06d", i), 857 + CID: fmt.Sprintf("bafy%06d", i), 858 + CreatedAt: baseTime.Add(time.Duration(i) * time.Second), 859 + } 860 + } 861 + 862 + return ops 863 + } 864 + 865 + func containsBytes(haystack, needle []byte) bool { 866 + return bytes.Contains(haystack, needle) 867 + }
+742
internal/sync/sync_test.go
··· 1 + package sync_test 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "net/http" 7 + "net/http/httptest" 8 + "sync" 9 + "sync/atomic" 10 + "testing" 11 + "time" 12 + 13 + "github.com/goccy/go-json" 14 + "tangled.org/atscan.net/plcbundle/internal/plcclient" 15 + "tangled.org/atscan.net/plcbundle/internal/storage" 16 + internalsync "tangled.org/atscan.net/plcbundle/internal/sync" 17 + ) 18 + 19 + type testLogger struct { 20 + t *testing.T 21 + } 22 + 23 + func (l *testLogger) Printf(format string, v ...interface{}) { 24 + l.t.Logf(format, v...) 25 + } 26 + 27 + func (l *testLogger) Println(v ...interface{}) { 28 + l.t.Log(v...) 29 + } 30 + 31 + // Mock mempool for testing 32 + type mockMempool struct { 33 + operations []plcclient.PLCOperation 34 + mu sync.Mutex 35 + saveCount int32 36 + } 37 + 38 + func newMockMempool() *mockMempool { 39 + return &mockMempool{ 40 + operations: make([]plcclient.PLCOperation, 0), 41 + } 42 + } 43 + 44 + func (m *mockMempool) Add(ops []plcclient.PLCOperation) (int, error) { 45 + m.mu.Lock() 46 + defer m.mu.Unlock() 47 + 48 + // Build existing CID set (like real mempool does) 49 + existingCIDs := make(map[string]bool) 50 + for _, op := range m.operations { 51 + existingCIDs[op.CID] = true 52 + } 53 + 54 + // Only add new operations (deduplicate by CID) 55 + addedCount := 0 56 + for _, op := range ops { 57 + if !existingCIDs[op.CID] { 58 + m.operations = append(m.operations, op) 59 + existingCIDs[op.CID] = true 60 + addedCount++ 61 + } 62 + } 63 + 64 + return addedCount, nil // ← Return actual added count 65 + } 66 + 67 + func (m *mockMempool) Save() error { 68 + atomic.AddInt32(&m.saveCount, 1) 69 + return nil 70 + } 71 + 72 + func (m *mockMempool) SaveIfNeeded() error { 73 + return m.Save() 74 + } 75 + 76 + func (m *mockMempool) Count() int { 77 + m.mu.Lock() 78 + defer m.mu.Unlock() 79 + return len(m.operations) 80 + } 81 + 82 + func (m *mockMempool) GetLastTime() string { 83 + m.mu.Lock() 84 + defer m.mu.Unlock() 85 + if 
len(m.operations) == 0 { 86 + return "" 87 + } 88 + return m.operations[len(m.operations)-1].CreatedAt.Format(time.RFC3339Nano) 89 + } 90 + 91 + // ==================================================================================== 92 + // FETCHER TESTS - DEDUPLICATION & RETRY LOGIC 93 + // ==================================================================================== 94 + 95 + func TestFetcherDeduplication(t *testing.T) { 96 + t.Run("BoundaryDuplicateHandling", func(t *testing.T) { 97 + // Setup mock server 98 + baseTime := time.Now() 99 + boundaryTime := baseTime.Add(5 * time.Second) 100 + 101 + // Simulate operations at bundle boundary 102 + mockOps := []plcclient.PLCOperation{ 103 + {DID: "did:plc:001", CID: "cid1", CreatedAt: boundaryTime}, 104 + {DID: "did:plc:002", CID: "cid2", CreatedAt: boundaryTime}, 105 + {DID: "did:plc:003", CID: "cid3", CreatedAt: boundaryTime.Add(1 * time.Second)}, 106 + } 107 + 108 + server := createMockPLCServer(t, mockOps) 109 + defer server.Close() 110 + 111 + // Create fetcher 112 + client := plcclient.NewClient(server.URL) 113 + defer client.Close() 114 + 115 + logger := &testLogger{t: t} 116 + ops, _ := storage.NewOperations(logger) 117 + defer ops.Close() 118 + 119 + fetcher := internalsync.NewFetcher(client, ops, logger) 120 + 121 + // Previous bundle had cid1 and cid2 at boundary 122 + prevBoundaryCIDs := map[string]bool{ 123 + "cid1": true, 124 + "cid2": true, 125 + } 126 + 127 + mempool := newMockMempool() 128 + 129 + // Fetch 130 + newOps, fetchCount, err := fetcher.FetchToMempool( 131 + context.Background(), 132 + boundaryTime.Add(-1*time.Second).Format(time.RFC3339Nano), 133 + prevBoundaryCIDs, 134 + 10, 135 + true, // quiet 136 + mempool, 137 + 0, 138 + ) 139 + 140 + if err != nil { 141 + t.Fatalf("FetchToMempool failed: %v", err) 142 + } 143 + 144 + // Should have filtered out cid1 and cid2 (duplicates) 145 + // Only cid3 should be returned 146 + if len(newOps) != 1 { 147 + t.Errorf("expected 1 unique 
operation, got %d", len(newOps)) 148 + } 149 + 150 + if len(newOps) > 0 && newOps[0].CID != "cid3" { 151 + t.Errorf("expected cid3, got %s", newOps[0].CID) 152 + } 153 + 154 + if fetchCount == 0 { 155 + t.Error("expected at least one fetch") 156 + } 157 + }) 158 + 159 + t.Run("ConcurrentFetchDedup", func(t *testing.T) { 160 + baseTime := time.Now() 161 + mockOps := make([]plcclient.PLCOperation, 50) 162 + for i := 0; i < 50; i++ { 163 + mockOps[i] = plcclient.PLCOperation{ 164 + DID: fmt.Sprintf("did:plc:%03d", i), 165 + CID: fmt.Sprintf("cid%03d", i), 166 + CreatedAt: baseTime.Add(time.Duration(i) * time.Second), 167 + } 168 + } 169 + 170 + server := createMockPLCServer(t, mockOps) 171 + defer server.Close() 172 + 173 + client := plcclient.NewClient(server.URL) 174 + defer client.Close() 175 + 176 + logger := &testLogger{t: t} 177 + storageOps, _ := storage.NewOperations(logger) 178 + defer storageOps.Close() 179 + 180 + fetcher := internalsync.NewFetcher(client, storageOps, logger) 181 + mempool := newMockMempool() 182 + 183 + // First fetch 184 + initialCount := mempool.Count() 185 + _, _, err := fetcher.FetchToMempool( 186 + context.Background(), 187 + "", 188 + nil, 189 + 30, 190 + true, 191 + mempool, 192 + 0, 193 + ) 194 + if err != nil { 195 + t.Fatalf("First fetch failed: %v", err) 196 + } 197 + 198 + countAfterFirst := mempool.Count() 199 + addedFirst := countAfterFirst - initialCount 200 + 201 + if addedFirst == 0 { 202 + t.Fatal("first fetch should add operations") 203 + } 204 + 205 + // Second fetch with same cursor - mempool deduplicates 206 + countBeforeSecond := mempool.Count() 207 + _, _, err = fetcher.FetchToMempool( 208 + context.Background(), 209 + "", // Same cursor - fetches same data 210 + nil, 211 + 30, 212 + true, 213 + mempool, 214 + 1, 215 + ) 216 + if err != nil { 217 + t.Fatalf("Second fetch failed: %v", err) 218 + } 219 + 220 + countAfterSecond := mempool.Count() 221 + addedSecond := countAfterSecond - countBeforeSecond 222 + 223 + // 
Mempool's Add() method deduplicates by CID 224 + // So second fetch should add 0 (all duplicates) 225 + if addedSecond != 0 { 226 + t.Errorf("expected 0 new ops in mempool after second fetch (duplicates), got %d", addedSecond) 227 + } 228 + 229 + t.Logf("First fetch: +%d ops, Second fetch: +%d ops (deduped)", addedFirst, addedSecond) 230 + }) 231 + 232 + t.Run("EmptyBoundaryCIDs", func(t *testing.T) { 233 + baseTime := time.Now() 234 + mockOps := []plcclient.PLCOperation{ 235 + {DID: "did:plc:001", CID: "cid1", CreatedAt: baseTime}, 236 + } 237 + 238 + server := createMockPLCServer(t, mockOps) 239 + defer server.Close() 240 + 241 + client := plcclient.NewClient(server.URL) 242 + defer client.Close() 243 + 244 + logger := &testLogger{t: t} 245 + storageOps, _ := storage.NewOperations(logger) 246 + defer storageOps.Close() 247 + 248 + fetcher := internalsync.NewFetcher(client, storageOps, logger) 249 + mempool := newMockMempool() 250 + 251 + // Fetch with no boundary CIDs (genesis bundle) 252 + newOps, _, err := fetcher.FetchToMempool( 253 + context.Background(), 254 + "", 255 + nil, // No previous boundary 256 + 10, 257 + true, 258 + mempool, 259 + 0, 260 + ) 261 + 262 + if err != nil { 263 + t.Fatalf("FetchToMempool failed: %v", err) 264 + } 265 + 266 + if len(newOps) != 1 { 267 + t.Errorf("expected 1 operation, got %d", len(newOps)) 268 + } 269 + }) 270 + } 271 + 272 + func TestFetcherRetry(t *testing.T) { 273 + t.Run("TransientFailures", func(t *testing.T) { 274 + attemptCount := 0 275 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 276 + attemptCount++ 277 + 278 + if attemptCount < 3 { 279 + // Fail first 2 attempts 280 + w.WriteHeader(500) 281 + return 282 + } 283 + 284 + // Succeed on 3rd attempt 285 + w.Header().Set("Content-Type", "application/x-ndjson") 286 + op := plcclient.PLCOperation{ 287 + DID: "did:plc:test", 288 + CID: "cid1", 289 + CreatedAt: time.Now(), 290 + } 291 + json.NewEncoder(w).Encode(op) 292 + 
})) 293 + defer server.Close() 294 + 295 + client := plcclient.NewClient(server.URL) 296 + defer client.Close() 297 + 298 + // Should retry and eventually succeed 299 + _, err := client.Export(context.Background(), plcclient.ExportOptions{Count: 1}) 300 + if err != nil { 301 + t.Fatalf("expected retry to succeed, got error: %v", err) 302 + } 303 + 304 + if attemptCount < 3 { 305 + t.Errorf("expected at least 3 attempts, got %d", attemptCount) 306 + } 307 + }) 308 + 309 + t.Run("RateLimitHandling", func(t *testing.T) { 310 + attemptCount := 0 311 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 312 + attemptCount++ 313 + 314 + if attemptCount == 1 { 315 + // Return 429 with Retry-After 316 + w.Header().Set("Retry-After", "1") 317 + w.WriteHeader(429) 318 + return 319 + } 320 + 321 + // Success 322 + w.Header().Set("Content-Type", "application/x-ndjson") 323 + op := plcclient.PLCOperation{ 324 + DID: "did:plc:test", 325 + CID: "cid1", 326 + CreatedAt: time.Now(), 327 + } 328 + json.NewEncoder(w).Encode(op) 329 + })) 330 + defer server.Close() 331 + 332 + client := plcclient.NewClient(server.URL) 333 + defer client.Close() 334 + 335 + startTime := time.Now() 336 + _, err := client.Export(context.Background(), plcclient.ExportOptions{Count: 1}) 337 + elapsed := time.Since(startTime) 338 + 339 + if err != nil { 340 + t.Fatalf("expected success after rate limit, got: %v", err) 341 + } 342 + 343 + // Should have waited at least 1 second 344 + if elapsed < 1*time.Second { 345 + t.Errorf("expected wait for rate limit, elapsed: %v", elapsed) 346 + } 347 + 348 + if attemptCount != 2 { 349 + t.Errorf("expected 2 attempts, got %d", attemptCount) 350 + } 351 + }) 352 + 353 + t.Run("ContextCancellation", func(t *testing.T) { 354 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 355 + // Slow response 356 + time.Sleep(5 * time.Second) 357 + })) 358 + defer server.Close() 359 + 360 + client 
:= plcclient.NewClient(server.URL) 361 + defer client.Close() 362 + 363 + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) 364 + defer cancel() 365 + 366 + _, err := client.Export(ctx, plcclient.ExportOptions{Count: 1}) 367 + if err == nil { 368 + t.Error("expected timeout error, got nil") 369 + } 370 + }) 371 + } 372 + 373 + func TestFetcherMempoolIntegration(t *testing.T) { 374 + t.Run("AutoSaveAfterFetch", func(t *testing.T) { 375 + baseTime := time.Now() 376 + mockOps := []plcclient.PLCOperation{ 377 + {DID: "did:plc:001", CID: "cid1", CreatedAt: baseTime}, 378 + {DID: "did:plc:002", CID: "cid2", CreatedAt: baseTime.Add(1 * time.Second)}, 379 + } 380 + 381 + server := createMockPLCServer(t, mockOps) 382 + defer server.Close() 383 + 384 + client := plcclient.NewClient(server.URL) 385 + defer client.Close() 386 + 387 + logger := &testLogger{t: t} 388 + storageOps, _ := storage.NewOperations(logger) 389 + defer storageOps.Close() 390 + 391 + fetcher := internalsync.NewFetcher(client, storageOps, logger) 392 + mempool := newMockMempool() 393 + 394 + _, _, err := fetcher.FetchToMempool( 395 + context.Background(), 396 + "", 397 + nil, 398 + 10, 399 + true, 400 + mempool, 401 + 0, 402 + ) 403 + 404 + if err != nil { 405 + t.Fatalf("FetchToMempool failed: %v", err) 406 + } 407 + 408 + // Verify mempool.SaveIfNeeded was called 409 + if mempool.saveCount == 0 { 410 + t.Error("expected mempool to be saved after fetch") 411 + } 412 + }) 413 + } 414 + 415 + // ==================================================================================== 416 + // CLONER TESTS 417 + // ==================================================================================== 418 + 419 + func TestClonerAtomicity(t *testing.T) { 420 + // Note: Cloner tests would need more complex mocking 421 + // Including mock HTTP server, file system operations, etc. 
422 + // This is a template showing what to test 423 + 424 + t.Run("InterruptedClone", func(t *testing.T) { 425 + // TODO: Test context cancellation mid-download 426 + // Verify: 427 + // - .tmp files are cleaned up OR kept for resume 428 + // - Index not updated for incomplete downloads 429 + // - Partial progress can resume with --resume flag 430 + }) 431 + 432 + t.Run("HashVerificationFailure", func(t *testing.T) { 433 + // TODO: Mock server returns file with wrong hash 434 + // Verify: 435 + // - File is deleted (or .tmp is not renamed) 436 + // - Bundle NOT added to index 437 + // - Error returned to user 438 + }) 439 + 440 + t.Run("IndexUpdateTiming", func(t *testing.T) { 441 + // CRITICAL: Index must only update AFTER file write succeeds 442 + // TODO: Implement test that verifies ordering 443 + }) 444 + } 445 + 446 + // ==================================================================================== 447 + // SYNC LOOP TESTS 448 + // ==================================================================================== 449 + 450 + func TestSyncLoopBehavior(t *testing.T) { 451 + t.Run("CatchUpDetection", func(t *testing.T) { 452 + // Mock manager 453 + mockMgr := &mockSyncManager{ 454 + lastBundle: 5, 455 + mempoolCount: 500, 456 + } 457 + 458 + logger := &testLogger{t: t} 459 + config := &internalsync.SyncLoopConfig{ 460 + MaxBundles: 0, 461 + Verbose: false, 462 + Logger: logger, 463 + } 464 + 465 + // First sync should detect "caught up" when no progress 466 + synced, err := internalsync.SyncOnce(context.Background(), mockMgr, config, false) 467 + 468 + if err != nil { 469 + t.Fatalf("SyncOnce failed: %v", err) 470 + } 471 + 472 + // Should return 0 if already caught up 473 + if synced != 0 { 474 + t.Logf("Note: synced %d bundles (manager may not be caught up)", synced) 475 + } 476 + }) 477 + 478 + t.Run("MaxBundlesLimit", func(t *testing.T) { 479 + mockMgr := &mockSyncManager{ 480 + lastBundle: 0, 481 + mempoolCount: 10000, // Always has enough for 
bundle 482 + } 483 + 484 + logger := &testLogger{t: t} 485 + config := &internalsync.SyncLoopConfig{ 486 + MaxBundles: 3, 487 + Verbose: false, 488 + Logger: logger, 489 + } 490 + 491 + ctx := context.Background() 492 + synced, err := internalsync.SyncOnce(ctx, mockMgr, config, false) 493 + 494 + if err != nil { 495 + t.Fatalf("SyncOnce failed: %v", err) 496 + } 497 + 498 + // Should respect max limit 499 + if synced > 3 { 500 + t.Errorf("synced %d bundles, but max was 3", synced) 501 + } 502 + }) 503 + 504 + t.Run("GracefulShutdown", func(t *testing.T) { 505 + mockMgr := &mockSyncManager{ 506 + lastBundle: 0, 507 + mempoolCount: 10000, 508 + fetchDelay: 50 * time.Millisecond, 509 + } 510 + 511 + logger := &testLogger{t: t} 512 + config := &internalsync.SyncLoopConfig{ 513 + Interval: 100 * time.Millisecond, 514 + MaxBundles: 0, 515 + Verbose: false, 516 + Logger: logger, 517 + } 518 + 519 + ctx, cancel := context.WithCancel(context.Background()) 520 + 521 + // Start sync loop in goroutine 522 + done := make(chan error, 1) 523 + go func() { 524 + done <- internalsync.RunSyncLoop(ctx, mockMgr, config) 525 + }() 526 + 527 + // Let it run briefly (should complete at least one cycle) 528 + time.Sleep(250 * time.Millisecond) 529 + 530 + // Cancel context 531 + cancel() 532 + 533 + // Should exit gracefully with context.Canceled error 534 + select { 535 + case err := <-done: 536 + // Expected: context.Canceled or nil 537 + if err != nil && err != context.Canceled { 538 + t.Errorf("unexpected error on shutdown: %v", err) 539 + } 540 + t.Logf("Sync loop stopped cleanly: %v", err) 541 + 542 + case <-time.After(2 * time.Second): 543 + t.Error("sync loop did not stop within timeout after context cancellation") 544 + } 545 + 546 + // NOTE: Mempool saving on shutdown is handled by the caller (commands/server), 547 + // not by the sync loop itself. The sync loop only respects context cancellation. 548 + // 549 + // For mempool save testing, see command-level tests. 
550 + }) 551 + } 552 + 553 + // ==================================================================================== 554 + // BUNDLER TESTS 555 + // ==================================================================================== 556 + 557 + func TestBundlerCreateBundle(t *testing.T) { 558 + logger := &testLogger{t: t} 559 + storageOps, _ := storage.NewOperations(logger) 560 + defer storageOps.Close() 561 + 562 + t.Run("BasicBundleCreation", func(t *testing.T) { 563 + operations := makeTestOperations(10000) 564 + cursor := operations[len(operations)-1].CreatedAt.Format(time.RFC3339Nano) 565 + 566 + bundle := internalsync.CreateBundle(1, operations, cursor, "", storageOps) 567 + 568 + if bundle.BundleNumber != 1 { 569 + t.Errorf("wrong bundle number: got %d, want 1", bundle.BundleNumber) 570 + } 571 + 572 + if len(bundle.Operations) != 10000 { 573 + t.Errorf("wrong operation count: got %d, want 10000", len(bundle.Operations)) 574 + } 575 + 576 + if bundle.DIDCount == 0 { 577 + t.Error("DIDCount should not be zero") 578 + } 579 + 580 + if len(bundle.BoundaryCIDs) == 0 { 581 + t.Error("BoundaryCIDs should not be empty") 582 + } 583 + 584 + if bundle.Cursor != cursor { 585 + t.Error("cursor mismatch") 586 + } 587 + }) 588 + 589 + t.Run("GenesisBundle", func(t *testing.T) { 590 + operations := makeTestOperations(10000) 591 + cursor := operations[len(operations)-1].CreatedAt.Format(time.RFC3339Nano) 592 + 593 + bundle := internalsync.CreateBundle(1, operations, cursor, "", storageOps) 594 + 595 + // Genesis should have empty parent 596 + if bundle.Parent != "" { 597 + t.Errorf("genesis bundle should have empty parent, got %s", bundle.Parent) 598 + } 599 + }) 600 + 601 + t.Run("ChainedBundle", func(t *testing.T) { 602 + operations := makeTestOperations(10000) 603 + cursor := operations[len(operations)-1].CreatedAt.Format(time.RFC3339Nano) 604 + parentHash := "parent_hash_from_bundle_1" 605 + 606 + bundle := internalsync.CreateBundle(2, operations, cursor, parentHash, 
storageOps) 607 + 608 + if bundle.Parent != parentHash { 609 + t.Errorf("parent mismatch: got %s, want %s", bundle.Parent, parentHash) 610 + } 611 + 612 + if bundle.BundleNumber != 2 { 613 + t.Error("bundle number should be 2") 614 + } 615 + }) 616 + 617 + t.Run("BoundaryTimestamps", func(t *testing.T) { 618 + baseTime := time.Now() 619 + 620 + // Create operations where last 5 share same timestamp 621 + operations := makeTestOperations(10000) 622 + for i := 9995; i < 10000; i++ { 623 + operations[i].CreatedAt = baseTime 624 + } 625 + 626 + cursor := baseTime.Format(time.RFC3339Nano) 627 + bundle := internalsync.CreateBundle(1, operations, cursor, "", storageOps) 628 + 629 + // Should capture all 5 CIDs at boundary 630 + if len(bundle.BoundaryCIDs) != 5 { 631 + t.Errorf("expected 5 boundary CIDs, got %d", len(bundle.BoundaryCIDs)) 632 + } 633 + }) 634 + } 635 + 636 + // ==================================================================================== 637 + // MOCK SERVER & HELPERS 638 + // ==================================================================================== 639 + 640 + func createMockPLCServer(_ *testing.T, operations []plcclient.PLCOperation) *httptest.Server { 641 + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 642 + if r.URL.Path != "/export" { 643 + w.WriteHeader(404) 644 + return 645 + } 646 + 647 + w.Header().Set("Content-Type", "application/x-ndjson") 648 + 649 + // Return operations as JSONL 650 + for _, op := range operations { 651 + json.NewEncoder(w).Encode(op) 652 + } 653 + })) 654 + } 655 + 656 + func makeTestOperations(count int) []plcclient.PLCOperation { 657 + ops := make([]plcclient.PLCOperation, count) 658 + baseTime := time.Now().Add(-time.Hour) 659 + 660 + for i := 0; i < count; i++ { 661 + ops[i] = plcclient.PLCOperation{ 662 + DID: fmt.Sprintf("did:plc:test%06d", i), 663 + CID: fmt.Sprintf("bafy%06d", i), 664 + CreatedAt: baseTime.Add(time.Duration(i) * time.Second), 665 + } 666 + 
} 667 + 668 + return ops 669 + } 670 + 671 + // Mock sync manager for testing 672 + type mockSyncManager struct { 673 + lastBundle int 674 + mempoolCount int 675 + fetchDelay time.Duration 676 + mempoolSaveCount int 677 + mu sync.Mutex 678 + } 679 + 680 + func (m *mockSyncManager) GetLastBundleNumber() int { 681 + m.mu.Lock() 682 + defer m.mu.Unlock() 683 + return m.lastBundle 684 + } 685 + 686 + func (m *mockSyncManager) GetMempoolCount() int { 687 + m.mu.Lock() 688 + defer m.mu.Unlock() 689 + return m.mempoolCount 690 + } 691 + 692 + func (m *mockSyncManager) FetchAndSaveNextBundle(ctx context.Context, quiet bool) (int, time.Duration, error) { 693 + m.mu.Lock() 694 + defer m.mu.Unlock() 695 + 696 + if m.fetchDelay > 0 { 697 + time.Sleep(m.fetchDelay) 698 + } 699 + 700 + // Simulate creating bundle if we have enough ops 701 + if m.mempoolCount >= 10000 { 702 + m.lastBundle++ 703 + m.mempoolCount -= 10000 704 + return m.lastBundle, 10 * time.Millisecond, nil 705 + } 706 + 707 + // Not enough ops 708 + return 0, 0, fmt.Errorf("insufficient operations") 709 + } 710 + 711 + func (m *mockSyncManager) SaveMempool() error { 712 + m.mu.Lock() 713 + defer m.mu.Unlock() 714 + m.mempoolSaveCount++ 715 + return nil 716 + } 717 + 718 + func TestMockMempoolDeduplication(t *testing.T) { 719 + m := newMockMempool() 720 + 721 + op1 := plcclient.PLCOperation{ 722 + CID: "duplicate_cid", 723 + DID: "did:plc:test", 724 + CreatedAt: time.Now(), 725 + } 726 + 727 + // Add first time 728 + added, _ := m.Add([]plcclient.PLCOperation{op1}) 729 + if added != 1 { 730 + t.Fatalf("first add should return 1, got %d", added) 731 + } 732 + 733 + // Add same CID again 734 + added, _ = m.Add([]plcclient.PLCOperation{op1}) 735 + if added != 0 { 736 + t.Fatalf("duplicate add should return 0, got %d", added) 737 + } 738 + 739 + if m.Count() != 1 { 740 + t.Fatalf("count should be 1, got %d", m.Count()) 741 + } 742 + }
+218
internal/types/types_test.go
··· 1 + package types_test 2 + 3 + import ( 4 + "bytes" 5 + "fmt" 6 + "testing" 7 + 8 + "tangled.org/atscan.net/plcbundle/internal/types" 9 + ) 10 + 11 + // ==================================================================================== 12 + // CONSTANT VALIDATION TESTS 13 + // ==================================================================================== 14 + 15 + func TestConstants(t *testing.T) { 16 + t.Run("BundleSize", func(t *testing.T) { 17 + if types.BUNDLE_SIZE != 10000 { 18 + t.Errorf("BUNDLE_SIZE = %d, want 10000", types.BUNDLE_SIZE) 19 + } 20 + 21 + // Ensure it's a reasonable size 22 + if types.BUNDLE_SIZE < 1000 { 23 + t.Error("BUNDLE_SIZE too small") 24 + } 25 + 26 + if types.BUNDLE_SIZE > 100000 { 27 + t.Error("BUNDLE_SIZE too large") 28 + } 29 + }) 30 + 31 + t.Run("IndexFile", func(t *testing.T) { 32 + if types.INDEX_FILE != "plc_bundles.json" { 33 + t.Errorf("INDEX_FILE = %s, want plc_bundles.json", types.INDEX_FILE) 34 + } 35 + 36 + // Should be a valid filename 37 + if types.INDEX_FILE == "" { 38 + t.Error("INDEX_FILE should not be empty") 39 + } 40 + 41 + // Should have .json extension 42 + if len(types.INDEX_FILE) < 5 || types.INDEX_FILE[len(types.INDEX_FILE)-5:] != ".json" { 43 + t.Error("INDEX_FILE should have .json extension") 44 + } 45 + }) 46 + 47 + t.Run("IndexVersion", func(t *testing.T) { 48 + if types.INDEX_VERSION != "1.0" { 49 + t.Errorf("INDEX_VERSION = %s, want 1.0", types.INDEX_VERSION) 50 + } 51 + 52 + // Should follow semantic versioning format (at least major.minor) 53 + if len(types.INDEX_VERSION) < 3 { 54 + t.Error("INDEX_VERSION should follow semantic versioning") 55 + } 56 + }) 57 + } 58 + 59 + // ==================================================================================== 60 + // LOGGER INTERFACE COMPLIANCE TESTS 61 + // ==================================================================================== 62 + 63 + func TestLoggerInterface(t *testing.T) { 64 + t.Run("MockLoggerImplementsInterface", 
func(t *testing.T) { 65 + var logger types.Logger = &mockLogger{} 66 + 67 + // Should compile and not panic 68 + logger.Printf("test %s", "message") 69 + logger.Println("test", "message") 70 + }) 71 + 72 + t.Run("BufferedLoggerImplementation", func(t *testing.T) { 73 + buf := &bytes.Buffer{} 74 + logger := &bufferedLogger{buf: buf} 75 + 76 + // Cast to interface 77 + var _ types.Logger = logger 78 + 79 + logger.Printf("formatted %s %d", "message", 42) 80 + logger.Println("plain", "message") 81 + 82 + output := buf.String() 83 + 84 + if !containsString(output, "formatted message 42") { 85 + t.Error("Printf output not captured") 86 + } 87 + 88 + if !containsString(output, "plain message") { 89 + t.Error("Println output not captured") 90 + } 91 + }) 92 + 93 + t.Run("NullLoggerImplementation", func(t *testing.T) { 94 + // Logger that discards all output 95 + logger := &nullLogger{} 96 + 97 + // Should not panic 98 + var _ types.Logger = logger 99 + logger.Printf("test %s", "ignored") 100 + logger.Println("also", "ignored") 101 + }) 102 + 103 + t.Run("MultiLoggerImplementation", func(t *testing.T) { 104 + // Logger that writes to multiple destinations 105 + buf1 := &bytes.Buffer{} 106 + buf2 := &bytes.Buffer{} 107 + 108 + logger := &multiLogger{ 109 + loggers: []types.Logger{ 110 + &bufferedLogger{buf: buf1}, 111 + &bufferedLogger{buf: buf2}, 112 + }, 113 + } 114 + 115 + var _ types.Logger = logger 116 + 117 + logger.Printf("test %s", "message") 118 + 119 + // Both buffers should have the message 120 + if !containsString(buf1.String(), "test message") { 121 + t.Error("first logger didn't receive message") 122 + } 123 + 124 + if !containsString(buf2.String(), "test message") { 125 + t.Error("second logger didn't receive message") 126 + } 127 + }) 128 + } 129 + 130 + // ==================================================================================== 131 + // CONSTANT USAGE IN CALCULATIONS 132 + // 
==================================================================================== 133 + 134 + func TestConstantUsage(t *testing.T) { 135 + t.Run("GlobalPositionCalculation", func(t *testing.T) { 136 + // Global position = bundleNumber * BUNDLE_SIZE + position 137 + bundleNumber := 42 138 + position := 1337 139 + 140 + globalPos := bundleNumber*types.BUNDLE_SIZE + position 141 + expected := 420000 + 1337 142 + 143 + if globalPos != expected { 144 + t.Errorf("global position calculation incorrect: got %d, want %d", globalPos, expected) 145 + } 146 + }) 147 + 148 + t.Run("BundleFromGlobalPosition", func(t *testing.T) { 149 + globalPos := 88410345 150 + 151 + bundleNumber := globalPos / types.BUNDLE_SIZE 152 + position := globalPos % types.BUNDLE_SIZE 153 + 154 + if bundleNumber != 8841 { 155 + t.Errorf("bundle calculation wrong: got %d, want 8841", bundleNumber) 156 + } 157 + 158 + if position != 345 { 159 + t.Errorf("position calculation wrong: got %d, want 345", position) 160 + } 161 + }) 162 + 163 + t.Run("OperationCountPerBundle", func(t *testing.T) { 164 + // Each bundle should have exactly BUNDLE_SIZE operations 165 + bundleCount := 100 166 + totalOps := bundleCount * types.BUNDLE_SIZE 167 + 168 + if totalOps != 1000000 { 169 + t.Errorf("total ops calculation: got %d, want 1000000", totalOps) 170 + } 171 + }) 172 + } 173 + 174 + // ==================================================================================== 175 + // HELPER IMPLEMENTATIONS 176 + // ==================================================================================== 177 + 178 + type mockLogger struct{} 179 + 180 + func (l *mockLogger) Printf(format string, v ...interface{}) {} 181 + func (l *mockLogger) Println(v ...interface{}) {} 182 + 183 + type bufferedLogger struct { 184 + buf *bytes.Buffer 185 + } 186 + 187 + func (l *bufferedLogger) Printf(format string, v ...interface{}) { 188 + fmt.Fprintf(l.buf, format+"\n", v...) 
189 + } 190 + 191 + func (l *bufferedLogger) Println(v ...interface{}) { 192 + fmt.Fprintln(l.buf, v...) 193 + } 194 + 195 + type nullLogger struct{} 196 + 197 + func (l *nullLogger) Printf(format string, v ...interface{}) {} 198 + func (l *nullLogger) Println(v ...interface{}) {} 199 + 200 + type multiLogger struct { 201 + loggers []types.Logger 202 + } 203 + 204 + func (l *multiLogger) Printf(format string, v ...interface{}) { 205 + for _, logger := range l.loggers { 206 + logger.Printf(format, v...) 207 + } 208 + } 209 + 210 + func (l *multiLogger) Println(v ...interface{}) { 211 + for _, logger := range l.loggers { 212 + logger.Println(v...) 213 + } 214 + } 215 + 216 + func containsString(haystack, needle string) bool { 217 + return bytes.Contains([]byte(haystack), []byte(needle)) 218 + }
+32
server/helpers_test.go
··· 1 + // repo/server/helpers_test.go 2 + package server_test 3 + 4 + import ( 5 + "io" 6 + "net/http" 7 + "net/http/httptest" 8 + "testing" 9 + ) 10 + 11 + func TestServerHelperFunctions(t *testing.T) { 12 + // Note: Many helper functions are unexported, so we test them indirectly 13 + 14 + t.Run("FormatNumber_ViaOutput", func(t *testing.T) { 15 + // This tests the formatNumber function indirectly 16 + srv, _, cleanup := setupTestServer(t, false) 17 + defer cleanup() 18 + 19 + ts := httptest.NewServer(srv) 20 + defer ts.Close() 21 + 22 + resp, _ := http.Get(ts.URL + "/") 23 + body, _ := io.ReadAll(resp.Body) 24 + resp.Body.Close() 25 + 26 + // Large numbers should be formatted with commas 27 + // Check if output looks reasonable 28 + if len(body) == 0 { 29 + t.Error("root page is empty") 30 + } 31 + }) 32 + }
+5
server/server.go
// GetStartTime returns the time at which this server instance was started
// (read from s.startTime; used by callers to compute uptime).
func (s *Server) GetStartTime() time.Time {
	return s.startTime
}

// Handler returns the server's root http.Handler as built by createHandler,
// letting tests drive the server through httptest without binding a real
// network listener.
func (s *Server) Handler() http.Handler {
	return s.createHandler()
}
+1068
server/server_test.go
··· 1 + package server_test 2 + 3 + import ( 4 + "bytes" 5 + "context" 6 + "encoding/json" 7 + "fmt" 8 + "io" 9 + "net/http" 10 + "net/http/httptest" 11 + "path/filepath" 12 + "strings" 13 + "sync" 14 + "testing" 15 + "time" 16 + 17 + "github.com/gorilla/websocket" 18 + "tangled.org/atscan.net/plcbundle/bundle" 19 + "tangled.org/atscan.net/plcbundle/internal/bundleindex" 20 + "tangled.org/atscan.net/plcbundle/internal/plcclient" 21 + "tangled.org/atscan.net/plcbundle/internal/storage" 22 + "tangled.org/atscan.net/plcbundle/server" 23 + ) 24 + 25 + type testLogger struct { 26 + t *testing.T 27 + } 28 + 29 + func (l *testLogger) Printf(format string, v ...interface{}) { 30 + l.t.Logf(format, v...) 31 + } 32 + 33 + func (l *testLogger) Println(v ...interface{}) { 34 + l.t.Log(v...) 35 + } 36 + 37 + // ==================================================================================== 38 + // HTTP ENDPOINT TESTS 39 + // ==================================================================================== 40 + 41 + func TestServerHTTPEndpoints(t *testing.T) { 42 + handler, _, cleanup := setupTestServer(t, false) 43 + defer cleanup() 44 + 45 + ts := httptest.NewServer(handler) 46 + defer ts.Close() 47 + 48 + t.Run("RootEndpoint", func(t *testing.T) { 49 + resp, err := http.Get(ts.URL + "/") 50 + if err != nil { 51 + t.Fatalf("GET / failed: %v", err) 52 + } 53 + defer resp.Body.Close() 54 + 55 + if resp.StatusCode != 200 { 56 + t.Errorf("expected 200, got %d", resp.StatusCode) 57 + } 58 + 59 + body, _ := io.ReadAll(resp.Body) 60 + bodyStr := string(body) 61 + 62 + // Should contain welcome message 63 + if !strings.Contains(bodyStr, "plcbundle server") { 64 + t.Error("root page missing title") 65 + } 66 + 67 + // Should show API endpoints 68 + if !strings.Contains(bodyStr, "API Endpoints") { 69 + t.Error("root page missing API documentation") 70 + } 71 + }) 72 + 73 + t.Run("IndexJSON", func(t *testing.T) { 74 + resp, err := http.Get(ts.URL + "/index.json") 75 + if err != 
nil { 76 + t.Fatalf("GET /index.json failed: %v", err) 77 + } 78 + defer resp.Body.Close() 79 + 80 + if resp.StatusCode != 200 { 81 + t.Errorf("expected 200, got %d", resp.StatusCode) 82 + } 83 + 84 + // Should be JSON 85 + contentType := resp.Header.Get("Content-Type") 86 + if !strings.Contains(contentType, "application/json") { 87 + t.Errorf("wrong content type: %s", contentType) 88 + } 89 + 90 + // Parse JSON 91 + var idx bundleindex.Index 92 + if err := json.NewDecoder(resp.Body).Decode(&idx); err != nil { 93 + t.Fatalf("failed to parse index JSON: %v", err) 94 + } 95 + 96 + if idx.Version != "1.0" { 97 + t.Errorf("index version mismatch: got %s", idx.Version) 98 + } 99 + }) 100 + 101 + t.Run("BundleMetadata", func(t *testing.T) { 102 + resp, err := http.Get(ts.URL + "/bundle/1") 103 + if err != nil { 104 + t.Fatalf("GET /bundle/1 failed: %v", err) 105 + } 106 + defer resp.Body.Close() 107 + 108 + if resp.StatusCode != 200 { 109 + t.Errorf("expected 200, got %d", resp.StatusCode) 110 + } 111 + 112 + var meta bundleindex.BundleMetadata 113 + if err := json.NewDecoder(resp.Body).Decode(&meta); err != nil { 114 + t.Fatalf("failed to parse bundle metadata: %v", err) 115 + } 116 + 117 + if meta.BundleNumber != 1 { 118 + t.Error("wrong bundle returned") 119 + } 120 + 121 + // Verify it has the fields we set 122 + if meta.ContentHash == "" { 123 + t.Error("metadata missing content hash") 124 + } 125 + }) 126 + 127 + t.Run("BundleMetadata_NotFound", func(t *testing.T) { 128 + resp, err := http.Get(ts.URL + "/bundle/9999") 129 + if err != nil { 130 + t.Fatalf("GET /bundle/9999 failed: %v", err) 131 + } 132 + defer resp.Body.Close() 133 + 134 + if resp.StatusCode != 404 { 135 + t.Errorf("expected 404 for nonexistent bundle, got %d", resp.StatusCode) 136 + } 137 + }) 138 + 139 + t.Run("BundleMetadata_InvalidNumber", func(t *testing.T) { 140 + resp, err := http.Get(ts.URL + "/bundle/invalid") 141 + if err != nil { 142 + t.Fatalf("GET /bundle/invalid failed: %v", err) 143 + 
} 144 + defer resp.Body.Close() 145 + 146 + if resp.StatusCode != 400 { 147 + t.Errorf("expected 400 for invalid bundle number, got %d", resp.StatusCode) 148 + } 149 + }) 150 + 151 + t.Run("BundleData_Raw", func(t *testing.T) { 152 + resp, err := http.Get(ts.URL + "/data/1") 153 + if err != nil { 154 + t.Fatalf("GET /data/1 failed: %v", err) 155 + } 156 + defer resp.Body.Close() 157 + 158 + if resp.StatusCode != 200 { 159 + // If 500, read error body 160 + if resp.StatusCode == 500 { 161 + body, _ := io.ReadAll(resp.Body) 162 + t.Fatalf("expected 200, got 500. Error: %s", string(body)) 163 + } 164 + t.Errorf("expected 200, got %d", resp.StatusCode) 165 + } 166 + 167 + // Should be zstd compressed 168 + contentType := resp.Header.Get("Content-Type") 169 + if !strings.Contains(contentType, "application/zstd") { 170 + t.Errorf("wrong content type for raw data: %s", contentType) 171 + } 172 + 173 + // Should have content-disposition header 174 + disposition := resp.Header.Get("Content-Disposition") 175 + if !strings.Contains(disposition, "000001.jsonl.zst") { 176 + t.Errorf("wrong disposition header: %s", disposition) 177 + } 178 + 179 + // Should have data 180 + data, _ := io.ReadAll(resp.Body) 181 + if len(data) == 0 { 182 + t.Error("bundle data is empty") 183 + } 184 + 185 + t.Logf("Bundle data size: %d bytes", len(data)) 186 + }) 187 + 188 + t.Run("BundleJSONL_Decompressed", func(t *testing.T) { 189 + resp, err := http.Get(ts.URL + "/jsonl/1") 190 + if err != nil { 191 + t.Fatalf("GET /jsonl/1 failed: %v", err) 192 + } 193 + defer resp.Body.Close() 194 + 195 + if resp.StatusCode != 200 { 196 + t.Errorf("expected 200, got %d", resp.StatusCode) 197 + } 198 + 199 + // Should be JSONL 200 + contentType := resp.Header.Get("Content-Type") 201 + if !strings.Contains(contentType, "application/x-ndjson") { 202 + t.Errorf("wrong content type for JSONL: %s", contentType) 203 + } 204 + 205 + // Count lines 206 + data, _ := io.ReadAll(resp.Body) 207 + lines := bytes.Count(data, 
[]byte("\n")) 208 + 209 + if lines == 0 { 210 + t.Error("JSONL should have lines") 211 + } 212 + }) 213 + 214 + t.Run("StatusEndpoint", func(t *testing.T) { 215 + resp, err := http.Get(ts.URL + "/status") 216 + if err != nil { 217 + t.Fatalf("GET /status failed: %v", err) 218 + } 219 + defer resp.Body.Close() 220 + 221 + if resp.StatusCode != 200 { 222 + t.Errorf("expected 200, got %d", resp.StatusCode) 223 + } 224 + 225 + var status server.StatusResponse 226 + if err := json.NewDecoder(resp.Body).Decode(&status); err != nil { 227 + t.Fatalf("failed to parse status JSON: %v", err) 228 + } 229 + 230 + // Verify structure 231 + if status.Server.Version == "" { 232 + t.Error("status missing server version") 233 + } 234 + 235 + if status.Bundles.Count < 0 { 236 + t.Error("invalid bundle count") 237 + } 238 + 239 + if status.Server.UptimeSeconds < 0 { 240 + t.Error("invalid uptime") 241 + } 242 + }) 243 + } 244 + 245 + // ==================================================================================== 246 + // DID RESOLUTION ENDPOINT TESTS 247 + // ==================================================================================== 248 + 249 + func TestServerDIDResolution(t *testing.T) { 250 + handler, _, cleanup := setupTestServerWithResolver(t) 251 + defer cleanup() 252 + 253 + ts := httptest.NewServer(handler) 254 + defer ts.Close() 255 + 256 + // Use valid did:plc format: "did:plc:" + 24 chars base32 (a-z, 2-7 only) 257 + testDID := "did:plc:abc234def567ghi234jkl456" // Valid format 258 + 259 + t.Run("DIDDocument", func(t *testing.T) { 260 + resp, err := http.Get(ts.URL + "/" + testDID) 261 + if err != nil { 262 + t.Fatalf("GET /%s failed: %v", testDID, err) 263 + } 264 + defer resp.Body.Close() 265 + 266 + // Should be 404 (not in test data) or 500 (no DID index) 267 + // NOT 400 (that means invalid format) 268 + if resp.StatusCode == 400 { 269 + body, _ := io.ReadAll(resp.Body) 270 + t.Fatalf("got 400 (invalid DID format): %s", string(body)) 271 + } 272 + 273 
+ if resp.StatusCode == 500 { 274 + t.Log("Expected 500 (no DID index)") 275 + return 276 + } 277 + 278 + if resp.StatusCode == 404 { 279 + t.Log("Expected 404 (DID not found)") 280 + return 281 + } 282 + 283 + if resp.StatusCode == 200 { 284 + var doc plcclient.DIDDocument 285 + if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil { 286 + t.Fatalf("failed to parse DID document: %v", err) 287 + } 288 + } 289 + }) 290 + 291 + t.Run("DIDData_RawState", func(t *testing.T) { 292 + resp, err := http.Get(ts.URL + "/" + testDID + "/data") 293 + if err != nil { 294 + t.Fatalf("GET /%s/data failed: %v", testDID, err) 295 + } 296 + defer resp.Body.Close() 297 + 298 + // /data endpoint validates format, so 400 is NOT acceptable for valid DID 299 + if resp.StatusCode == 400 { 300 + body, _ := io.ReadAll(resp.Body) 301 + t.Fatalf("got 400 for valid DID format: %s", string(body)) 302 + } 303 + 304 + // 404 or 500 acceptable (no data / no index) 305 + if resp.StatusCode == 500 || resp.StatusCode == 404 { 306 + t.Logf("Expected error (no DID index): status %d", resp.StatusCode) 307 + return 308 + } 309 + 310 + if resp.StatusCode == 200 { 311 + var state plcclient.DIDState 312 + json.NewDecoder(resp.Body).Decode(&state) 313 + } 314 + }) 315 + 316 + t.Run("DIDAuditLog", func(t *testing.T) { 317 + resp, err := http.Get(ts.URL + "/" + testDID + "/log/audit") 318 + if err != nil { 319 + t.Fatalf("request failed: %v", err) 320 + } 321 + defer resp.Body.Close() 322 + 323 + // Should NOT be 400 for valid DID 324 + if resp.StatusCode == 400 { 325 + body, _ := io.ReadAll(resp.Body) 326 + t.Fatalf("got 400 for valid DID format: %s", string(body)) 327 + } 328 + 329 + // 404, 500 acceptable 330 + if resp.StatusCode == 500 || resp.StatusCode == 404 { 331 + t.Logf("Expected error (no DID index): status %d", resp.StatusCode) 332 + return 333 + } 334 + }) 335 + 336 + // Test invalid formats on /data endpoint (which validates properly) 337 + t.Run("InvalidDIDFormat_OnDataEndpoint", func(t 
*testing.T) { 338 + // Test DIDs that START with "did:plc:" but are still invalid 339 + // (routing checks prefix first, so "did:invalid:" returns 404 before validation) 340 + invalidDIDs := []string{ 341 + "did:plc:short", // Too short (< 24 chars) 342 + "did:plc:tooshort2345", // Still too short 343 + "did:plc:contains0189invalidchars456", // Has 0,1,8,9 (invalid in base32) 344 + "did:plc:UPPERCASENOTALLOWED1234", // Has uppercase 345 + "did:plc:has-dashes-not-allowed12", // Has dashes 346 + "did:plc:waytoolonggggggggggggggggg", // Too long (> 24 chars) 347 + } 348 + 349 + for _, invalidDID := range invalidDIDs { 350 + resp, err := http.Get(ts.URL + "/" + invalidDID + "/data") 351 + if err != nil { 352 + t.Fatalf("request to %s failed: %v", invalidDID, err) 353 + } 354 + 355 + body, _ := io.ReadAll(resp.Body) 356 + resp.Body.Close() 357 + 358 + // /data endpoint validates format and should return 400 359 + if resp.StatusCode != 400 { 360 + t.Logf("DID %s: got %d (body: %s)", invalidDID, resp.StatusCode, string(body)) 361 + // Some might also return 500 if they pass initial checks 362 + // but fail deeper validation - that's also acceptable 363 + if resp.StatusCode != 500 { 364 + t.Errorf("DID %s: expected 400 or 500, got %d", invalidDID, resp.StatusCode) 365 + } 366 + } 367 + } 368 + }) 369 + 370 + t.Run("InvalidDIDMethod_Returns404", func(t *testing.T) { 371 + // DIDs with wrong method get 404 from routing (never reach validation) 372 + wrongMethodDIDs := []string{ 373 + "did:invalid:format", 374 + "did:web:example.com", 375 + "did:key:z6MkhaXgBZDvotDkL5257faiztiGiC2QtKLGpbnnEGta2doK", 376 + "notadid", 377 + } 378 + 379 + for _, did := range wrongMethodDIDs { 380 + resp, err := http.Get(ts.URL + "/" + did + "/data") 381 + if err != nil { 382 + t.Fatalf("request failed: %v", err) 383 + } 384 + resp.Body.Close() 385 + 386 + // Should get 404 (not a did:plc: path) 387 + if resp.StatusCode != 404 { 388 + t.Errorf("DID %s: expected 404 from routing, got %d", did, 
resp.StatusCode) 389 + } 390 + } 391 + }) 392 + 393 + t.Run("NotADIDPath", func(t *testing.T) { 394 + resp, err := http.Get(ts.URL + "/notadid") 395 + if err != nil { 396 + t.Fatalf("request failed: %v", err) 397 + } 398 + defer resp.Body.Close() 399 + 400 + if resp.StatusCode != 404 { 401 + t.Errorf("expected 404 for non-DID path, got %d", resp.StatusCode) 402 + } 403 + }) 404 + } 405 + 406 + // ==================================================================================== 407 + // CORS MIDDLEWARE TESTS 408 + // ==================================================================================== 409 + 410 + func TestServerCORS(t *testing.T) { 411 + srv, _, cleanup := setupTestServer(t, false) 412 + defer cleanup() 413 + 414 + ts := httptest.NewServer(srv) 415 + defer ts.Close() 416 + 417 + t.Run("CORS_Headers_GET", func(t *testing.T) { 418 + resp, err := http.Get(ts.URL + "/index.json") 419 + if err != nil { 420 + t.Fatalf("request failed: %v", err) 421 + } 422 + defer resp.Body.Close() 423 + 424 + // Check CORS headers 425 + if resp.Header.Get("Access-Control-Allow-Origin") != "*" { 426 + t.Error("missing or wrong Access-Control-Allow-Origin header") 427 + } 428 + 429 + methods := resp.Header.Get("Access-Control-Allow-Methods") 430 + if !strings.Contains(methods, "GET") { 431 + t.Errorf("Access-Control-Allow-Methods missing GET: %s", methods) 432 + } 433 + }) 434 + 435 + t.Run("CORS_Preflight_OPTIONS", func(t *testing.T) { 436 + req, _ := http.NewRequest("OPTIONS", ts.URL+"/index.json", nil) 437 + req.Header.Set("Access-Control-Request-Method", "GET") 438 + req.Header.Set("Access-Control-Request-Headers", "Content-Type") 439 + 440 + resp, err := http.DefaultClient.Do(req) 441 + if err != nil { 442 + t.Fatalf("OPTIONS request failed: %v", err) 443 + } 444 + defer resp.Body.Close() 445 + 446 + if resp.StatusCode != 204 { 447 + t.Errorf("expected 204 for OPTIONS, got %d", resp.StatusCode) 448 + } 449 + 450 + if resp.Header.Get("Access-Control-Allow-Origin") != 
"*" { 451 + t.Error("CORS headers missing on OPTIONS") 452 + } 453 + 454 + maxAge := resp.Header.Get("Access-Control-Max-Age") 455 + if maxAge != "86400" { 456 + t.Errorf("wrong max-age: %s", maxAge) 457 + } 458 + }) 459 + } 460 + 461 + // ==================================================================================== 462 + // WEBSOCKET TESTS 463 + // ==================================================================================== 464 + 465 + func TestServerWebSocket(t *testing.T) { 466 + srv, _, cleanup := setupTestServer(t, true) // Enable WebSocket 467 + defer cleanup() 468 + 469 + ts := httptest.NewServer(srv) 470 + defer ts.Close() 471 + 472 + wsURL := "ws" + strings.TrimPrefix(ts.URL, "http") + "/ws" 473 + 474 + t.Run("WebSocket_Connect", func(t *testing.T) { 475 + ws, _, err := websocket.DefaultDialer.Dial(wsURL, nil) 476 + if err != nil { 477 + t.Fatalf("WebSocket dial failed: %v", err) 478 + } 479 + defer ws.Close() 480 + 481 + // Should connect successfully 482 + t.Log("WebSocket connected successfully") 483 + }) 484 + 485 + t.Run("WebSocket_ReceiveOperations", func(t *testing.T) { 486 + ws, _, err := websocket.DefaultDialer.Dial(wsURL+"?cursor=0", nil) 487 + if err != nil { 488 + t.Fatalf("WebSocket dial failed: %v", err) 489 + } 490 + defer ws.Close() 491 + 492 + // Set read deadline 493 + ws.SetReadDeadline(time.Now().Add(5 * time.Second)) 494 + 495 + // Read a message (should get operations or timeout) 496 + _, message, err := ws.ReadMessage() 497 + if err != nil { 498 + // Timeout is OK (no operations available) 499 + if !strings.Contains(err.Error(), "timeout") { 500 + t.Logf("Read error (may be OK if no ops): %v", err) 501 + } 502 + return 503 + } 504 + 505 + // If we got a message, verify it's valid JSON 506 + var op plcclient.PLCOperation 507 + if err := json.Unmarshal(message, &op); err != nil { 508 + t.Errorf("received invalid operation JSON: %v", err) 509 + } 510 + 511 + t.Logf("Received operation: %s", op.CID) 512 + }) 513 + 514 + 
t.Run("WebSocket_InvalidCursor", func(t *testing.T) { 515 + resp, err := http.Get(ts.URL + "/ws?cursor=invalid") 516 + if err != nil { 517 + t.Fatalf("request failed: %v", err) 518 + } 519 + defer resp.Body.Close() 520 + 521 + if resp.StatusCode != 400 { 522 + t.Errorf("expected 400 for invalid cursor, got %d", resp.StatusCode) 523 + } 524 + }) 525 + 526 + t.Run("WebSocket_CloseGracefully", func(t *testing.T) { 527 + ws, _, err := websocket.DefaultDialer.Dial(wsURL, nil) 528 + if err != nil { 529 + t.Fatalf("WebSocket dial failed: %v", err) 530 + } 531 + 532 + // Close immediately 533 + err = ws.WriteMessage(websocket.CloseMessage, 534 + websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) 535 + if err != nil { 536 + t.Logf("close message error (may be OK): %v", err) 537 + } 538 + 539 + ws.Close() 540 + t.Log("WebSocket closed gracefully") 541 + }) 542 + } 543 + 544 + // ==================================================================================== 545 + // SYNC MODE TESTS 546 + // ==================================================================================== 547 + 548 + func TestServerSyncMode(t *testing.T) { 549 + srv, _, cleanup := setupTestServer(t, true) 550 + defer cleanup() 551 + 552 + ts := httptest.NewServer(srv) 553 + defer ts.Close() 554 + 555 + t.Run("MempoolEndpoint", func(t *testing.T) { 556 + resp, err := http.Get(ts.URL + "/mempool") 557 + if err != nil { 558 + t.Fatalf("GET /mempool failed: %v", err) 559 + } 560 + defer resp.Body.Close() 561 + 562 + if resp.StatusCode != 200 { 563 + t.Errorf("expected 200, got %d", resp.StatusCode) 564 + } 565 + 566 + // Should be JSONL 567 + contentType := resp.Header.Get("Content-Type") 568 + if !strings.Contains(contentType, "application/x-ndjson") { 569 + t.Errorf("wrong content type: %s", contentType) 570 + } 571 + }) 572 + 573 + t.Run("StatusWithMempool", func(t *testing.T) { 574 + resp, err := http.Get(ts.URL + "/status") 575 + if err != nil { 576 + t.Fatalf("GET /status failed: %v", 
err) 577 + } 578 + defer resp.Body.Close() 579 + 580 + var status server.StatusResponse 581 + if err := json.NewDecoder(resp.Body).Decode(&status); err != nil { 582 + t.Fatalf("failed to parse status: %v", err) 583 + } 584 + 585 + // Sync mode should include mempool stats 586 + if status.Server.SyncMode { 587 + if status.Mempool == nil { 588 + t.Error("sync mode status missing mempool") 589 + } 590 + } 591 + }) 592 + } 593 + 594 + // ==================================================================================== 595 + // CONCURRENT REQUEST TESTS 596 + // ==================================================================================== 597 + 598 + func TestServerConcurrency(t *testing.T) { 599 + srv, _, cleanup := setupTestServer(t, false) 600 + defer cleanup() 601 + 602 + ts := httptest.NewServer(srv) 603 + defer ts.Close() 604 + 605 + t.Run("ConcurrentIndexRequests", func(t *testing.T) { 606 + var wg sync.WaitGroup 607 + errors := make(chan error, 100) 608 + 609 + for i := 0; i < 100; i++ { 610 + wg.Add(1) 611 + go func() { 612 + defer wg.Done() 613 + 614 + resp, err := http.Get(ts.URL + "/index.json") 615 + if err != nil { 616 + errors <- err 617 + return 618 + } 619 + defer resp.Body.Close() 620 + 621 + if resp.StatusCode != 200 { 622 + errors <- fmt.Errorf("status %d", resp.StatusCode) 623 + } 624 + }() 625 + } 626 + 627 + wg.Wait() 628 + close(errors) 629 + 630 + for err := range errors { 631 + t.Errorf("concurrent request error: %v", err) 632 + } 633 + }) 634 + 635 + t.Run("ConcurrentBundleRequests", func(t *testing.T) { 636 + var wg sync.WaitGroup 637 + errors := make(chan error, 50) 638 + 639 + for i := 0; i < 50; i++ { 640 + wg.Add(1) 641 + go func(bundleNum int) { 642 + defer wg.Done() 643 + 644 + resp, err := http.Get(fmt.Sprintf("%s/bundle/%d", ts.URL, bundleNum%3+1)) 645 + if err != nil { 646 + errors <- err 647 + return 648 + } 649 + defer resp.Body.Close() 650 + 651 + if resp.StatusCode != 200 && resp.StatusCode != 404 { 652 + errors <- 
fmt.Errorf("unexpected status %d", resp.StatusCode) 653 + } 654 + }(i) 655 + } 656 + 657 + wg.Wait() 658 + close(errors) 659 + 660 + for err := range errors { 661 + t.Errorf("concurrent request error: %v", err) 662 + } 663 + }) 664 + 665 + t.Run("MixedEndpointConcurrency", func(t *testing.T) { 666 + var wg sync.WaitGroup 667 + 668 + endpoints := []string{ 669 + "/", 670 + "/index.json", 671 + "/bundle/1", 672 + "/data/1", 673 + "/jsonl/1", 674 + "/status", 675 + } 676 + 677 + for i := 0; i < 30; i++ { 678 + wg.Add(1) 679 + go func(id int) { 680 + defer wg.Done() 681 + 682 + endpoint := endpoints[id%len(endpoints)] 683 + resp, err := http.Get(ts.URL + endpoint) 684 + if err != nil { 685 + t.Errorf("request to %s failed: %v", endpoint, err) 686 + return 687 + } 688 + defer resp.Body.Close() 689 + 690 + // Read body to completion 691 + io.ReadAll(resp.Body) 692 + }(i) 693 + } 694 + 695 + wg.Wait() 696 + }) 697 + } 698 + 699 + // ==================================================================================== 700 + // ERROR HANDLING TESTS 701 + // ==================================================================================== 702 + 703 + func TestServerErrorHandling(t *testing.T) { 704 + srv, _, cleanup := setupTestServer(t, false) 705 + defer cleanup() 706 + 707 + ts := httptest.NewServer(srv) 708 + defer ts.Close() 709 + 710 + t.Run("404_NotFound", func(t *testing.T) { 711 + resp, err := http.Get(ts.URL + "/nonexistent") 712 + if err != nil { 713 + t.Fatalf("request failed: %v", err) 714 + } 715 + defer resp.Body.Close() 716 + 717 + if resp.StatusCode != 404 { 718 + t.Errorf("expected 404, got %d", resp.StatusCode) 719 + } 720 + }) 721 + 722 + t.Run("405_MethodNotAllowed", func(t *testing.T) { 723 + // POST to GET-only endpoint 724 + resp, err := http.Post(ts.URL+"/index.json", "application/json", bytes.NewReader([]byte("{}"))) 725 + if err != nil { 726 + t.Fatalf("request failed: %v", err) 727 + } 728 + defer resp.Body.Close() 729 + 730 + if resp.StatusCode 
!= 404 && resp.StatusCode != 405 { 731 + t.Logf("Note: Got status %d (404/405 both acceptable)", resp.StatusCode) 732 + } 733 + }) 734 + 735 + t.Run("LargeRequestHandling", func(t *testing.T) { 736 + // Request very large bundle number 737 + resp, err := http.Get(ts.URL + "/bundle/999999") 738 + if err != nil { 739 + t.Fatalf("request failed: %v", err) 740 + } 741 + defer resp.Body.Close() 742 + 743 + if resp.StatusCode != 404 { 744 + t.Errorf("expected 404 for large bundle number, got %d", resp.StatusCode) 745 + } 746 + }) 747 + } 748 + 749 + // ==================================================================================== 750 + // MIDDLEWARE TESTS 751 + // ==================================================================================== 752 + 753 + func TestServerMiddleware(t *testing.T) { 754 + srv, _, cleanup := setupTestServer(t, false) 755 + defer cleanup() 756 + 757 + ts := httptest.NewServer(srv) 758 + defer ts.Close() 759 + 760 + t.Run("JSON_ContentType", func(t *testing.T) { 761 + resp, err := http.Get(ts.URL + "/index.json") 762 + if err != nil { 763 + t.Fatalf("request failed: %v", err) 764 + } 765 + defer resp.Body.Close() 766 + 767 + contentType := resp.Header.Get("Content-Type") 768 + if !strings.Contains(contentType, "application/json") { 769 + t.Errorf("wrong content type: %s", contentType) 770 + } 771 + }) 772 + 773 + t.Run("CORS_AllowsAllOrigins", func(t *testing.T) { 774 + req, _ := http.NewRequest("GET", ts.URL+"/index.json", nil) 775 + req.Header.Set("Origin", "https://example.com") 776 + 777 + resp, err := http.DefaultClient.Do(req) 778 + if err != nil { 779 + t.Fatalf("request failed: %v", err) 780 + } 781 + defer resp.Body.Close() 782 + 783 + allowOrigin := resp.Header.Get("Access-Control-Allow-Origin") 784 + if allowOrigin != "*" { 785 + t.Errorf("CORS not allowing all origins: %s", allowOrigin) 786 + } 787 + }) 788 + } 789 + 790 + // ==================================================================================== 791 + // 
HELPER FUNCTIONS & FORMATTERS 792 + // ==================================================================================== 793 + 794 + func TestServerHelpers(t *testing.T) { 795 + t.Run("FormatNumber", func(t *testing.T) { 796 + // Note: formatNumber is not exported, so we test indirectly 797 + // through endpoints that use it (like root page) 798 + 799 + srv, _, cleanup := setupTestServer(t, false) 800 + defer cleanup() 801 + 802 + ts := httptest.NewServer(srv) 803 + defer ts.Close() 804 + 805 + resp, _ := http.Get(ts.URL + "/") 806 + body, _ := io.ReadAll(resp.Body) 807 + resp.Body.Close() 808 + 809 + // Should have formatted numbers with commas 810 + // (if there are any large numbers in output) 811 + t.Logf("Root page length: %d bytes", len(body)) 812 + }) 813 + } 814 + 815 + // ==================================================================================== 816 + // MEMORY & PERFORMANCE TESTS 817 + // ==================================================================================== 818 + 819 + func TestServerPerformance(t *testing.T) { 820 + if testing.Short() { 821 + t.Skip("skipping performance test in short mode") 822 + } 823 + 824 + srv, _, cleanup := setupTestServer(t, false) 825 + defer cleanup() 826 + 827 + ts := httptest.NewServer(srv) 828 + defer ts.Close() 829 + 830 + t.Run("MemoryDebugEndpoint", func(t *testing.T) { 831 + resp, err := http.Get(ts.URL + "/debug/memory") 832 + if err != nil { 833 + t.Fatalf("GET /debug/memory failed: %v", err) 834 + } 835 + defer resp.Body.Close() 836 + 837 + if resp.StatusCode != 200 { 838 + t.Errorf("expected 200, got %d", resp.StatusCode) 839 + } 840 + 841 + body, _ := io.ReadAll(resp.Body) 842 + bodyStr := string(body) 843 + 844 + if !strings.Contains(bodyStr, "Memory Stats") { 845 + t.Error("memory debug output missing stats") 846 + } 847 + 848 + if !strings.Contains(bodyStr, "Alloc:") { 849 + t.Error("memory debug missing allocation info") 850 + } 851 + }) 852 + 853 + t.Run("ResponseTime", func(t 
*testing.T) { 854 + // Measure response time for index 855 + start := time.Now() 856 + resp, err := http.Get(ts.URL + "/index.json") 857 + elapsed := time.Since(start) 858 + 859 + if err != nil { 860 + t.Fatalf("request failed: %v", err) 861 + } 862 + resp.Body.Close() 863 + 864 + // Should be fast (< 100ms for index) 865 + if elapsed > 100*time.Millisecond { 866 + t.Logf("Warning: slow response time: %v", elapsed) 867 + } 868 + 869 + t.Logf("Index response time: %v", elapsed) 870 + }) 871 + } 872 + 873 + // ==================================================================================== 874 + // SERVER LIFECYCLE TESTS 875 + // ==================================================================================== 876 + 877 + func TestServerLifecycle(t *testing.T) { 878 + t.Run("StartAndStop", func(t *testing.T) { 879 + mgr, mgrCleanup := setupTestManager(t) 880 + defer mgrCleanup() 881 + 882 + config := &server.Config{ 883 + Addr: "127.0.0.1:0", // Random port 884 + SyncMode: false, 885 + EnableWebSocket: false, 886 + EnableResolver: false, 887 + Version: "test", 888 + } 889 + 890 + srv := server.New(mgr, config) 891 + 892 + // Start in goroutine 893 + errChan := make(chan error, 1) 894 + go func() { 895 + // This will block 896 + errChan <- srv.ListenAndServe() 897 + }() 898 + 899 + // Give it time to start 900 + time.Sleep(100 * time.Millisecond) 901 + 902 + // Shutdown 903 + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 904 + defer cancel() 905 + 906 + if err := srv.Shutdown(ctx); err != nil { 907 + t.Errorf("shutdown failed: %v", err) 908 + } 909 + 910 + // Should exit 911 + select { 912 + case err := <-errChan: 913 + if err != nil && err != http.ErrServerClosed { 914 + t.Errorf("unexpected error: %v", err) 915 + } 916 + case <-time.After(2 * time.Second): 917 + t.Error("server did not stop after shutdown") 918 + } 919 + }) 920 + 921 + t.Run("GetStartTime", func(t *testing.T) { 922 + mgr, cleanup := setupTestManager(t) 923 + defer 
cleanup() 924 + 925 + config := &server.Config{ 926 + Addr: ":0", 927 + Version: "test", 928 + } 929 + 930 + before := time.Now() 931 + srv := server.New(mgr, config) 932 + after := time.Now() 933 + 934 + startTime := srv.GetStartTime() 935 + 936 + if startTime.Before(before) || startTime.After(after) { 937 + t.Error("start time not in expected range") 938 + } 939 + }) 940 + } 941 + 942 + // ==================================================================================== 943 + // SETUP HELPERS 944 + // ==================================================================================== 945 + 946 + func setupTestServer(t *testing.T, enableWebSocket bool) (http.Handler, *server.Server, func()) { 947 + mgr, cleanup := setupTestManager(t) 948 + 949 + config := &server.Config{ 950 + Addr: ":8080", 951 + SyncMode: true, 952 + SyncInterval: 1 * time.Minute, 953 + EnableWebSocket: enableWebSocket, 954 + EnableResolver: false, 955 + Version: "test", 956 + } 957 + 958 + srv := server.New(mgr, config) 959 + 960 + // Get handler from server 961 + handler := srv.Handler() // Use new method 962 + 963 + return handler, srv, cleanup 964 + } 965 + 966 + func setupTestServerWithResolver(t *testing.T) (http.Handler, *server.Server, func()) { 967 + mgr, cleanup := setupTestManager(t) 968 + 969 + config := &server.Config{ 970 + Addr: ":8080", 971 + SyncMode: false, 972 + EnableWebSocket: false, 973 + EnableResolver: true, 974 + Version: "test", 975 + } 976 + 977 + srv := server.New(mgr, config) 978 + handler := srv.Handler() 979 + 980 + return handler, srv, cleanup 981 + } 982 + 983 + func setupTestManager(t *testing.T) (*bundle.Manager, func()) { 984 + tmpDir := t.TempDir() 985 + 986 + config := bundle.DefaultConfig(tmpDir) 987 + config.AutoInit = true 988 + config.VerifyOnLoad = false // Disable verification in tests 989 + 990 + // Create storage operations ONCE and reuse 991 + logger := &testLogger{t: t} 992 + storageOps, err := storage.NewOperations(logger) 993 + if err != nil 
{ 994 + t.Fatalf("failed to create storage operations: %v", err) 995 + } 996 + 997 + mgr, err := bundle.NewManager(config, nil) 998 + if err != nil { 999 + storageOps.Close() 1000 + t.Fatalf("failed to create manager: %v", err) 1001 + } 1002 + 1003 + // Add test bundles with actual files 1004 + for i := 1; i <= 3; i++ { 1005 + // Create actual bundle file FIRST 1006 + path := filepath.Join(tmpDir, fmt.Sprintf("%06d.jsonl.zst", i)) 1007 + ops := makeMinimalTestOperations(10000, i*10000) // Unique ops per bundle 1008 + 1009 + contentHash, compHash, uncompSize, compSize, err := storageOps.SaveBundle(path, ops) 1010 + if err != nil { 1011 + t.Fatalf("failed to save test bundle %d: %v", i, err) 1012 + } 1013 + 1014 + // Create metadata that matches the actual file 1015 + meta := &bundleindex.BundleMetadata{ 1016 + BundleNumber: i, 1017 + StartTime: ops[0].CreatedAt, 1018 + EndTime: ops[len(ops)-1].CreatedAt, 1019 + OperationCount: len(ops), 1020 + DIDCount: len(ops), // All unique in test data 1021 + Hash: fmt.Sprintf("hash%d", i), 1022 + ContentHash: contentHash, // Use actual hash 1023 + CompressedHash: compHash, // Use actual hash 1024 + CompressedSize: compSize, // Use actual size 1025 + UncompressedSize: uncompSize, // Use actual size 1026 + CreatedAt: time.Now(), 1027 + } 1028 + 1029 + mgr.GetIndex().AddBundle(meta) 1030 + } 1031 + 1032 + if err := mgr.SaveIndex(); err != nil { 1033 + t.Fatalf("failed to save index: %v", err) 1034 + } 1035 + 1036 + cleanup := func() { 1037 + storageOps.Close() 1038 + mgr.Close() 1039 + } 1040 + 1041 + return mgr, cleanup 1042 + } 1043 + 1044 + func makeMinimalTestOperations(count int, offset int) []plcclient.PLCOperation { 1045 + ops := make([]plcclient.PLCOperation, count) 1046 + baseTime := time.Now().Add(-time.Hour) 1047 + 1048 + for i := 0; i < count; i++ { 1049 + idx := offset + i 1050 + 1051 + // Create valid base32 DID identifier (24 chars, only a-z and 2-7) 1052 + // Convert index to base32-like string 1053 + identifier := 
fmt.Sprintf("%024d", idx) 1054 + // Replace invalid chars (0,1,8,9) with valid ones 1055 + identifier = strings.ReplaceAll(identifier, "0", "a") 1056 + identifier = strings.ReplaceAll(identifier, "1", "b") 1057 + identifier = strings.ReplaceAll(identifier, "8", "c") 1058 + identifier = strings.ReplaceAll(identifier, "9", "d") 1059 + 1060 + ops[i] = plcclient.PLCOperation{ 1061 + DID: "did:plc:" + identifier, 1062 + CID: fmt.Sprintf("bafytest%012d", idx), 1063 + CreatedAt: baseTime.Add(time.Duration(idx) * time.Second), 1064 + } 1065 + } 1066 + 1067 + return ops 1068 + }
+122
server/types_test.go
··· 1 + package server_test 2 + 3 + import ( 4 + "encoding/json" 5 + "testing" 6 + "time" 7 + 8 + "tangled.org/atscan.net/plcbundle/server" 9 + ) 10 + 11 + func TestServerResponseTypes(t *testing.T) { 12 + t.Run("StatusResponse_JSON", func(t *testing.T) { 13 + response := server.StatusResponse{ 14 + Server: server.ServerStatus{ 15 + Version: "1.0.0", 16 + UptimeSeconds: 3600, 17 + SyncMode: true, 18 + SyncIntervalSeconds: 60, 19 + WebSocketEnabled: true, 20 + Origin: "https://plc.directory", 21 + }, 22 + Bundles: server.BundleStatus{ 23 + Count: 100, 24 + FirstBundle: 1, 25 + LastBundle: 100, 26 + TotalSize: 1024000, 27 + UncompressedSize: 5120000, 28 + CompressionRatio: 5.0, 29 + TotalOperations: 1000000, 30 + AvgOpsPerHour: 10000, 31 + UpdatedAt: time.Now(), 32 + HeadAgeSeconds: 30, 33 + RootHash: "root_hash", 34 + HeadHash: "head_hash", 35 + Gaps: 0, 36 + HasGaps: false, 37 + }, 38 + } 39 + 40 + // Should marshal to JSON 41 + data, err := json.Marshal(response) 42 + if err != nil { 43 + t.Fatalf("failed to marshal StatusResponse: %v", err) 44 + } 45 + 46 + // Should unmarshal back 47 + var decoded server.StatusResponse 48 + if err := json.Unmarshal(data, &decoded); err != nil { 49 + t.Fatalf("failed to unmarshal StatusResponse: %v", err) 50 + } 51 + 52 + // Verify round-trip 53 + if decoded.Server.Version != "1.0.0" { 54 + t.Error("version not preserved") 55 + } 56 + 57 + if decoded.Bundles.Count != 100 { 58 + t.Error("bundle count not preserved") 59 + } 60 + }) 61 + 62 + t.Run("MempoolStatus_JSON", func(t *testing.T) { 63 + status := server.MempoolStatus{ 64 + Count: 500, 65 + TargetBundle: 42, 66 + CanCreateBundle: false, 67 + MinTimestamp: time.Now(), 68 + Validated: true, 69 + ProgressPercent: 5.0, 70 + BundleSize: 10000, 71 + OperationsNeeded: 9500, 72 + FirstTime: time.Now().Add(-time.Hour), 73 + LastTime: time.Now(), 74 + TimespanSeconds: 3600, 75 + LastOpAgeSeconds: 10, 76 + EtaNextBundleSeconds: 1800, 77 + } 78 + 79 + data, err := json.Marshal(status) 
80 + if err != nil { 81 + t.Fatalf("failed to marshal MempoolStatus: %v", err) 82 + } 83 + 84 + var decoded server.MempoolStatus 85 + if err := json.Unmarshal(data, &decoded); err != nil { 86 + t.Fatalf("failed to unmarshal MempoolStatus: %v", err) 87 + } 88 + 89 + if decoded.Count != 500 { 90 + t.Error("count not preserved") 91 + } 92 + 93 + if decoded.ProgressPercent != 5.0 { 94 + t.Error("progress not preserved") 95 + } 96 + }) 97 + 98 + t.Run("BundleStatus_WithGaps", func(t *testing.T) { 99 + status := server.BundleStatus{ 100 + Count: 100, 101 + Gaps: 3, 102 + HasGaps: true, 103 + GapNumbers: []int{5, 23, 67}, 104 + } 105 + 106 + data, err := json.Marshal(status) 107 + if err != nil { 108 + t.Fatalf("marshal failed: %v", err) 109 + } 110 + 111 + var decoded server.BundleStatus 112 + json.Unmarshal(data, &decoded) 113 + 114 + if !decoded.HasGaps { 115 + t.Error("HasGaps flag not preserved") 116 + } 117 + 118 + if len(decoded.GapNumbers) != 3 { 119 + t.Error("gap numbers not preserved") 120 + } 121 + }) 122 + }