package gott

import (
	"io/fs"
	"os"
	"path/filepath"
	"testing"
	"testing/fstest"
)

// TestInMemoryCache tests the LRU cache behavior.
func TestInMemoryCache(t *testing.T) {
	t.Run("basic get and put", func(t *testing.T) {
		cache := newCache(0) // unlimited

		// Put a template
		tmpl := &Template{Nodes: []Node{&TextNode{Text: "hello"}}}
		cache.Put("key1", tmpl)

		// Get it back
		got, ok := cache.Get("key1")
		if !ok {
			t.Fatal("expected to find key1 in cache")
		}
		if got != tmpl {
			t.Error("got different template than stored")
		}
	})

	t.Run("cache miss", func(t *testing.T) {
		cache := newCache(0)
		_, ok := cache.Get("nonexistent")
		if ok {
			t.Error("expected cache miss for nonexistent key")
		}
	})

	t.Run("LRU eviction", func(t *testing.T) {
		cache := newCache(2) // max 2 entries

		tmpl1 := &Template{Nodes: []Node{&TextNode{Text: "1"}}}
		tmpl2 := &Template{Nodes: []Node{&TextNode{Text: "2"}}}
		tmpl3 := &Template{Nodes: []Node{&TextNode{Text: "3"}}}

		cache.Put("key1", tmpl1)
		cache.Put("key2", tmpl2)

		// Verify both are present
		if cache.Len() != 2 {
			t.Errorf("expected 2 entries, got %d", cache.Len())
		}

		// Add a third, should evict key1 (LRU)
		cache.Put("key3", tmpl3)
		if cache.Len() != 2 {
			t.Errorf("expected 2 entries after eviction, got %d", cache.Len())
		}

		// key1 should be gone (LRU)
		_, ok := cache.Get("key1")
		if ok {
			t.Error("key1 should have been evicted")
		}

		// key2 and key3 should still be present
		if _, ok := cache.Get("key2"); !ok {
			t.Error("key2 should still be in cache")
		}
		if _, ok := cache.Get("key3"); !ok {
			t.Error("key3 should still be in cache")
		}
	})

	t.Run("LRU access updates order", func(t *testing.T) {
		cache := newCache(2)

		tmpl1 := &Template{Nodes: []Node{&TextNode{Text: "1"}}}
		tmpl2 := &Template{Nodes: []Node{&TextNode{Text: "2"}}}
		tmpl3 := &Template{Nodes: []Node{&TextNode{Text: "3"}}}

		cache.Put("key1", tmpl1)
		cache.Put("key2", tmpl2)

		// Access key1 to make it recently used
		cache.Get("key1")

		// Add key3, should evict key2 (now LRU)
		cache.Put("key3", tmpl3)

		// key1 should still be present
		if _, ok := cache.Get("key1"); !ok {
			t.Error("key1 should still be in cache after being accessed")
		}

		// key2 should be gone
		if _, ok := cache.Get("key2"); ok {
			t.Error("key2 should have been evicted")
		}
	})

	t.Run("clear cache", func(t *testing.T) {
		cache := newCache(0)
		cache.Put("key1", &Template{})
		cache.Put("key2", &Template{})

		cache.Clear()

		if cache.Len() != 0 {
			t.Errorf("expected empty cache after clear, got %d entries", cache.Len())
		}
		if _, ok := cache.Get("key1"); ok {
			t.Error("key1 should not be in cache after clear")
		}
	})

	t.Run("update existing key", func(t *testing.T) {
		cache := newCache(0)
		tmpl1 := &Template{Nodes: []Node{&TextNode{Text: "1"}}}
		tmpl2 := &Template{Nodes: []Node{&TextNode{Text: "2"}}}

		cache.Put("key1", tmpl1)
		cache.Put("key1", tmpl2) // update

		if cache.Len() != 1 {
			t.Errorf("expected 1 entry after update, got %d", cache.Len())
		}
		got, _ := cache.Get("key1")
		if got != tmpl2 {
			t.Error("expected updated template")
		}
	})
}

// TestDiskCache tests the disk-based cache.
func TestDiskCache(t *testing.T) {
	// t.TempDir gives a unique, auto-cleaned directory per test run,
	// avoiding collisions between concurrent runs and leftover state.
	tmpDir := t.TempDir()

	t.Run("create disk cache", func(t *testing.T) {
		dc, err := newDiskCache(tmpDir)
		if err != nil {
			t.Fatalf("newDiskCache() error = %v", err)
		}
		if dc == nil {
			t.Fatal("expected non-nil diskCache")
		}
	})

	t.Run("put and get", func(t *testing.T) {
		dc, err := newDiskCache(tmpDir)
		if err != nil {
			t.Fatalf("newDiskCache() error = %v", err)
		}

		// Create a template with various node types to test serialization
		tmpl := &Template{
			Position: Position{Line: 1, Column: 1},
			Nodes: []Node{
				&TextNode{Text: "Hello "},
				&OutputStmt{
					Expr: &IdentExpr{Parts: []string{"name"}},
				},
				&IfStmt{
					Condition: &BinaryExpr{
						Op:    TokenEq,
						Left:  &IdentExpr{Parts: []string{"x"}},
						Right: &LiteralExpr{Value: float64(1)},
					},
					Body: []Node{&TextNode{Text: "yes"}},
				},
			},
		}

		err = dc.Put("test-key", tmpl)
		if err != nil {
			t.Fatalf("Put() error = %v", err)
		}

		got, err := dc.Get("test-key")
		if err != nil {
			t.Fatalf("Get() error = %v", err)
		}
		if got == nil {
			t.Fatal("expected non-nil template from disk cache")
		}

		// Verify structure was preserved
		if len(got.Nodes) != 3 {
			t.Errorf("expected 3 nodes, got %d", len(got.Nodes))
		}

		// Check text node
		if textNode, ok := got.Nodes[0].(*TextNode); !ok || textNode.Text != "Hello " {
			t.Error("text node not preserved correctly")
		}

		// Check output stmt with ident
		if outputStmt, ok := got.Nodes[1].(*OutputStmt); !ok {
			t.Error("expected OutputStmt")
		} else if identExpr, ok := outputStmt.Expr.(*IdentExpr); !ok || identExpr.Parts[0] != "name" {
			t.Error("ident expr not preserved correctly")
		}
	})

	t.Run("get nonexistent key", func(t *testing.T) {
		dc, err := newDiskCache(tmpDir)
		if err != nil {
			t.Fatalf("newDiskCache() error = %v", err)
		}
		got, err := dc.Get("nonexistent")
		if err != nil {
			t.Errorf("Get() error = %v", err)
		}
		if got != nil {
			t.Error("expected nil for nonexistent key")
		}
	})

	t.Run("clear disk cache", func(t *testing.T) {
		dc, err := newDiskCache(tmpDir)
		if err != nil {
			t.Fatalf("newDiskCache() error = %v", err)
		}

		// Add some entries
		dc.Put("key1", &Template{})
		dc.Put("key2", &Template{})

		if err := dc.Clear(); err != nil {
			t.Fatalf("Clear() error = %v", err)
		}

		// Verify entries are gone
		if got, _ := dc.Get("key1"); got != nil {
			t.Error("key1 should be cleared")
		}
		if got, _ := dc.Get("key2"); got != nil {
			t.Error("key2 should be cleared")
		}
	})
}

// TestCachingIntegration tests that caching works with the Renderer.
func TestCachingIntegration(t *testing.T) {
	memFS := fstest.MapFS{
		"test.html": &fstest.MapFile{Data: []byte("Hello [% name %]")},
	}

	t.Run("memory cache speeds up processing", func(t *testing.T) {
		r, err := New(&Config{
			IncludePaths: []fs.FS{memFS},
			MaxCacheSize: 10,
		})
		if err != nil {
			t.Fatalf("New() error = %v", err)
		}

		vars := map[string]any{"name": "World"}

		// First call parses and caches
		result1, err := r.ProcessFile("test.html", vars)
		if err != nil {
			t.Fatalf("ProcessFile() error = %v", err)
		}
		if result1 != "Hello World" {
			t.Errorf("got %q, want %q", result1, "Hello World")
		}

		// Verify it's in cache
		if r.cache.Len() != 1 {
			t.Errorf("expected 1 entry in cache, got %d", r.cache.Len())
		}

		// Second call should use cache
		result2, err := r.ProcessFile("test.html", vars)
		if err != nil {
			t.Fatalf("ProcessFile() error = %v", err)
		}
		if result2 != "Hello World" {
			t.Errorf("got %q, want %q", result2, "Hello World")
		}
	})

	t.Run("disk cache persistence", func(t *testing.T) {
		tmpDir := t.TempDir()

		r1, err := New(&Config{
			IncludePaths: []fs.FS{memFS},
			CachePath:    tmpDir,
		})
		if err != nil {
			t.Fatalf("New() error = %v", err)
		}

		// Process to populate cache
		_, err = r1.ProcessFile("test.html", map[string]any{"name": "Test"})
		if err != nil {
			t.Fatalf("ProcessFile() error = %v", err)
		}

		// Create new renderer with same cache path
		r2, err := New(&Config{
			IncludePaths: []fs.FS{memFS},
			CachePath:    tmpDir,
		})
		if err != nil {
			t.Fatalf("New() error = %v", err)
		}

		// Memory cache should be empty in new renderer
		if r2.cache.Len() != 0 {
			t.Errorf("expected empty memory cache in new renderer, got %d", r2.cache.Len())
		}

		// But processing should load from disk cache
		result, err := r2.ProcessFile("test.html", map[string]any{"name": "FromDisk"})
		if err != nil {
			t.Fatalf("ProcessFile() error = %v", err)
		}
		if result != "Hello FromDisk" {
			t.Errorf("got %q, want %q", result, "Hello FromDisk")
		}

		// Now memory cache should have the entry
		if r2.cache.Len() != 1 {
			t.Errorf("expected 1 entry in memory cache after disk load, got %d", r2.cache.Len())
		}
	})

	t.Run("ClearCache clears both caches", func(t *testing.T) {
		tmpDir := t.TempDir()

		r, err := New(&Config{
			IncludePaths: []fs.FS{memFS},
			CachePath:    tmpDir,
		})
		if err != nil {
			t.Fatalf("New() error = %v", err)
		}

		// Populate caches
		if _, err := r.ProcessFile("test.html", map[string]any{"name": "Test"}); err != nil {
			t.Fatalf("ProcessFile() error = %v", err)
		}

		// Clear
		r.ClearCache()

		// Memory cache should be empty
		if r.cache.Len() != 0 {
			t.Errorf("expected empty memory cache after clear, got %d", r.cache.Len())
		}

		// Disk cache should also be empty - verify by checking that
		// the file no longer exists
		entries, _ := os.ReadDir(tmpDir)
		gobCount := 0
		for _, e := range entries {
			if filepath.Ext(e.Name()) == ".gob" {
				gobCount++
			}
		}
		if gobCount != 0 {
			t.Errorf("expected 0 .gob files after clear, got %d", gobCount)
		}
	})

	t.Run("Process caches by content hash", func(t *testing.T) {
		r, err := New(&Config{
			MaxCacheSize: 10,
		})
		if err != nil {
			t.Fatalf("New() error = %v", err)
		}

		template := "Hello [% name %]!"

		// First call
		result1, _ := r.Process(template, map[string]any{"name": "Alice"})
		if result1 != "Hello Alice!" {
			t.Errorf("got %q, want %q", result1, "Hello Alice!")
		}

		// Second call with same template (different vars)
		result2, _ := r.Process(template, map[string]any{"name": "Bob"})
		if result2 != "Hello Bob!" {
			t.Errorf("got %q, want %q", result2, "Hello Bob!")
		}

		// Should only have one cache entry (same template content)
		if r.cache.Len() != 1 {
			t.Errorf("expected 1 entry in cache for same template, got %d", r.cache.Len())
		}

		// Different template should add another entry
		_, _ = r.Process("Different template", nil)
		if r.cache.Len() != 2 {
			t.Errorf("expected 2 entries for different templates, got %d", r.cache.Len())
		}
	})
}

// TestCacheWithIncludes tests that includes also use caching.
func TestCacheWithIncludes(t *testing.T) {
	memFS := fstest.MapFS{
		"main.html":   &fstest.MapFile{Data: []byte("Main: [% INCLUDE header.html %]")},
		"header.html": &fstest.MapFile{Data: []byte("Header: [% title %]")},
	}

	r, err := New(&Config{
		IncludePaths: []fs.FS{memFS},
		MaxCacheSize: 10,
	})
	if err != nil {
		t.Fatalf("New() error = %v", err)
	}

	result, err := r.ProcessFile("main.html", map[string]any{"title": "Test"})
	if err != nil {
		t.Fatalf("ProcessFile() error = %v", err)
	}

	expected := "Main: Header: Test"
	if result != expected {
		t.Errorf("got %q, want %q", result, expected)
	}

	// Both templates should be cached
	if r.cache.Len() != 2 {
		t.Errorf("expected 2 entries in cache (main + header), got %d", r.cache.Len())
	}
}

// TestRendererClose tests that Close stops the SIGHUP handler.
func TestRendererClose(t *testing.T) {
	r, err := New(&Config{
		EnableSIGHUP: true,
	})
	if err != nil {
		t.Fatalf("New() error = %v", err)
	}

	// Close should not panic
	r.Close()

	// Second close should also not panic (idempotent-ish)
	// Note: This will panic with "close of closed channel" if we don't handle it
	// But for simplicity, we document that Close should only be called once
}