···11// Package boltstore provides persistent storage using BoltDB (bbolt).
22// It implements the oauth.ClientAuthStore interface for session persistence
33-// and provides storage for the feed registry.
33+// and provides storage for join requests.
44package boltstore
5566import (
···2020 // BucketAuthRequests stores pending OAuth auth requests keyed by state
2121 BucketAuthRequests = []byte("oauth_auth_requests")
22222323- // BucketFeedRegistry stores registered user DIDs for the community feed
2424- BucketFeedRegistry = []byte("feed_registry")
2525-2626- // BucketModerationHiddenRecords stores AT-URIs of hidden records
2727- BucketModerationHiddenRecords = []byte("moderation_hidden_records")
2828-2929- // BucketModerationBlacklist stores blacklisted user DIDs
3030- BucketModerationBlacklist = []byte("moderation_blacklist")
3131-3232- // BucketModerationReports stores user reports on content
3333- BucketModerationReports = []byte("moderation_reports")
3434-3535- // BucketModerationReportsByURI indexes reports by subject AT-URI
3636- BucketModerationReportsByURI = []byte("moderation_reports_by_uri")
3737-3838- // BucketModerationReportsByDID indexes reports by subject DID
3939- BucketModerationReportsByDID = []byte("moderation_reports_by_did")
4040-4141- // BucketModerationAuditLog stores moderation action audit trail
4242- BucketModerationAuditLog = []byte("moderation_audit_log")
4343-4444- // BucketModerationAutoHideResets stores DID -> timestamp for auto-hide counter resets
4545- BucketModerationAutoHideResets = []byte("moderation_autohide_resets")
4646-4723 // BucketJoinRequests stores PDS account join requests
4824 BucketJoinRequests = []byte("join_requests")
4925)
···11086 buckets := [][]byte{
11187 BucketSessions,
11288 BucketAuthRequests,
113113- BucketFeedRegistry,
114114- BucketModerationHiddenRecords,
115115- BucketModerationBlacklist,
116116- BucketModerationReports,
117117- BucketModerationReportsByURI,
118118- BucketModerationReportsByDID,
119119- BucketModerationAuditLog,
120120- BucketModerationAutoHideResets,
12189 BucketJoinRequests,
12290 }
12391···154122// SessionStore returns an OAuth session store backed by this database.
155123func (s *Store) SessionStore() *SessionStore {
156124 return &SessionStore{db: s.db}
157157-}
158158-159159-// FeedStore returns a feed registry store backed by this database.
160160-func (s *Store) FeedStore() *FeedStore {
161161- return &FeedStore{db: s.db}
162162-}
163163-164164-// ModerationStore returns a moderation store backed by this database.
165165-func (s *Store) ModerationStore() *ModerationStore {
166166- return &ModerationStore{db: s.db}
167125}
168126169127// JoinStore returns a join request store backed by this database.
+378
internal/database/sqlitestore/moderation.go
···11+// Package sqlitestore provides SQLite-backed store implementations.
22+package sqlitestore
import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"arabica/internal/moderation"
)
1313+1414+// ModerationStore implements moderation.Store using SQLite.
1515+// It shares the database connection with the firehose FeedIndex.
1616+type ModerationStore struct {
1717+ db *sql.DB
1818+}
1919+2020+// NewModerationStore creates a ModerationStore backed by the given database.
2121+// The database must already have the moderation schema applied.
2222+func NewModerationStore(db *sql.DB) *ModerationStore {
2323+ return &ModerationStore{db: db}
2424+}
2525+2626+// Ensure ModerationStore implements the interface at compile time.
2727+var _ moderation.Store = (*ModerationStore)(nil)
2828+2929+// ========== Hidden Records ==========
3030+3131+func (s *ModerationStore) HideRecord(ctx context.Context, entry moderation.HiddenRecord) error {
3232+ autoHidden := 0
3333+ if entry.AutoHidden {
3434+ autoHidden = 1
3535+ }
3636+ _, err := s.db.ExecContext(ctx, `
3737+ INSERT INTO moderation_hidden_records (uri, hidden_at, hidden_by, reason, auto_hidden)
3838+ VALUES (?, ?, ?, ?, ?)
3939+ ON CONFLICT(uri) DO UPDATE SET
4040+ hidden_at = excluded.hidden_at,
4141+ hidden_by = excluded.hidden_by,
4242+ reason = excluded.reason,
4343+ auto_hidden = excluded.auto_hidden
4444+ `, entry.ATURI, entry.HiddenAt.Format(time.RFC3339Nano), entry.HiddenBy, entry.Reason, autoHidden)
4545+ if err != nil {
4646+ return fmt.Errorf("hide record: %w", err)
4747+ }
4848+ return nil
4949+}
5050+5151+func (s *ModerationStore) UnhideRecord(ctx context.Context, atURI string) error {
5252+ _, err := s.db.ExecContext(ctx, `DELETE FROM moderation_hidden_records WHERE uri = ?`, atURI)
5353+ return err
5454+}
5555+5656+func (s *ModerationStore) IsRecordHidden(ctx context.Context, atURI string) bool {
5757+ var exists int
5858+ _ = s.db.QueryRowContext(ctx, `SELECT 1 FROM moderation_hidden_records WHERE uri = ?`, atURI).Scan(&exists)
5959+ return exists == 1
6060+}
6161+6262+func (s *ModerationStore) GetHiddenRecord(ctx context.Context, atURI string) (*moderation.HiddenRecord, error) {
6363+ var r moderation.HiddenRecord
6464+ var hiddenAtStr string
6565+ var autoHidden int
6666+ err := s.db.QueryRowContext(ctx, `
6767+ SELECT uri, hidden_at, hidden_by, reason, auto_hidden
6868+ FROM moderation_hidden_records WHERE uri = ?
6969+ `, atURI).Scan(&r.ATURI, &hiddenAtStr, &r.HiddenBy, &r.Reason, &autoHidden)
7070+ if err == sql.ErrNoRows {
7171+ return nil, nil
7272+ }
7373+ if err != nil {
7474+ return nil, err
7575+ }
7676+ r.HiddenAt, _ = time.Parse(time.RFC3339Nano, hiddenAtStr)
7777+ r.AutoHidden = autoHidden == 1
7878+ return &r, nil
7979+}
8080+8181+func (s *ModerationStore) ListHiddenRecords(ctx context.Context) ([]moderation.HiddenRecord, error) {
8282+ rows, err := s.db.QueryContext(ctx, `
8383+ SELECT uri, hidden_at, hidden_by, reason, auto_hidden
8484+ FROM moderation_hidden_records ORDER BY hidden_at DESC
8585+ `)
8686+ if err != nil {
8787+ return nil, err
8888+ }
8989+ defer rows.Close()
9090+9191+ var records []moderation.HiddenRecord
9292+ for rows.Next() {
9393+ var r moderation.HiddenRecord
9494+ var hiddenAtStr string
9595+ var autoHidden int
9696+ if err := rows.Scan(&r.ATURI, &hiddenAtStr, &r.HiddenBy, &r.Reason, &autoHidden); err != nil {
9797+ continue
9898+ }
9999+ r.HiddenAt, _ = time.Parse(time.RFC3339Nano, hiddenAtStr)
100100+ r.AutoHidden = autoHidden == 1
101101+ records = append(records, r)
102102+ }
103103+ return records, rows.Err()
104104+}
105105+106106+// ========== Blacklist ==========
107107+108108+func (s *ModerationStore) BlacklistUser(ctx context.Context, entry moderation.BlacklistedUser) error {
109109+ _, err := s.db.ExecContext(ctx, `
110110+ INSERT INTO moderation_blacklist (did, blacklisted_at, blacklisted_by, reason)
111111+ VALUES (?, ?, ?, ?)
112112+ ON CONFLICT(did) DO UPDATE SET
113113+ blacklisted_at = excluded.blacklisted_at,
114114+ blacklisted_by = excluded.blacklisted_by,
115115+ reason = excluded.reason
116116+ `, entry.DID, entry.BlacklistedAt.Format(time.RFC3339Nano), entry.BlacklistedBy, entry.Reason)
117117+ if err != nil {
118118+ return fmt.Errorf("blacklist user: %w", err)
119119+ }
120120+ return nil
121121+}
122122+123123+func (s *ModerationStore) UnblacklistUser(ctx context.Context, did string) error {
124124+ _, err := s.db.ExecContext(ctx, `DELETE FROM moderation_blacklist WHERE did = ?`, did)
125125+ return err
126126+}
127127+128128+func (s *ModerationStore) IsBlacklisted(ctx context.Context, did string) bool {
129129+ var exists int
130130+ _ = s.db.QueryRowContext(ctx, `SELECT 1 FROM moderation_blacklist WHERE did = ?`, did).Scan(&exists)
131131+ return exists == 1
132132+}
133133+134134+func (s *ModerationStore) GetBlacklistedUser(ctx context.Context, did string) (*moderation.BlacklistedUser, error) {
135135+ var u moderation.BlacklistedUser
136136+ var blacklistedAtStr string
137137+ err := s.db.QueryRowContext(ctx, `
138138+ SELECT did, blacklisted_at, blacklisted_by, reason
139139+ FROM moderation_blacklist WHERE did = ?
140140+ `, did).Scan(&u.DID, &blacklistedAtStr, &u.BlacklistedBy, &u.Reason)
141141+ if err == sql.ErrNoRows {
142142+ return nil, nil
143143+ }
144144+ if err != nil {
145145+ return nil, err
146146+ }
147147+ u.BlacklistedAt, _ = time.Parse(time.RFC3339Nano, blacklistedAtStr)
148148+ return &u, nil
149149+}
150150+151151+func (s *ModerationStore) ListBlacklistedUsers(ctx context.Context) ([]moderation.BlacklistedUser, error) {
152152+ rows, err := s.db.QueryContext(ctx, `
153153+ SELECT did, blacklisted_at, blacklisted_by, reason
154154+ FROM moderation_blacklist ORDER BY blacklisted_at DESC
155155+ `)
156156+ if err != nil {
157157+ return nil, err
158158+ }
159159+ defer rows.Close()
160160+161161+ var users []moderation.BlacklistedUser
162162+ for rows.Next() {
163163+ var u moderation.BlacklistedUser
164164+ var blacklistedAtStr string
165165+ if err := rows.Scan(&u.DID, &blacklistedAtStr, &u.BlacklistedBy, &u.Reason); err != nil {
166166+ continue
167167+ }
168168+ u.BlacklistedAt, _ = time.Parse(time.RFC3339Nano, blacklistedAtStr)
169169+ users = append(users, u)
170170+ }
171171+ return users, rows.Err()
172172+}
173173+174174+// ========== Reports ==========
175175+176176+func (s *ModerationStore) CreateReport(ctx context.Context, report moderation.Report) error {
177177+ _, err := s.db.ExecContext(ctx, `
178178+ INSERT INTO moderation_reports
179179+ (id, subject_uri, subject_did, reporter_did, reason, created_at, status, resolved_by, resolved_at)
180180+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
181181+ `, report.ID, report.SubjectURI, report.SubjectDID, report.ReporterDID, report.Reason,
182182+ report.CreatedAt.Format(time.RFC3339Nano), string(report.Status), report.ResolvedBy, nil)
183183+ if err != nil {
184184+ return fmt.Errorf("create report: %w", err)
185185+ }
186186+ return nil
187187+}
188188+189189+func (s *ModerationStore) GetReport(ctx context.Context, id string) (*moderation.Report, error) {
190190+ var r moderation.Report
191191+ var createdAtStr string
192192+ var resolvedAtStr sql.NullString
193193+ err := s.db.QueryRowContext(ctx, `
194194+ SELECT id, subject_uri, subject_did, reporter_did, reason, created_at, status, resolved_by, resolved_at
195195+ FROM moderation_reports WHERE id = ?
196196+ `, id).Scan(&r.ID, &r.SubjectURI, &r.SubjectDID, &r.ReporterDID, &r.Reason,
197197+ &createdAtStr, &r.Status, &r.ResolvedBy, &resolvedAtStr)
198198+ if err == sql.ErrNoRows {
199199+ return nil, nil
200200+ }
201201+ if err != nil {
202202+ return nil, err
203203+ }
204204+ r.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
205205+ if resolvedAtStr.Valid {
206206+ t, _ := time.Parse(time.RFC3339Nano, resolvedAtStr.String)
207207+ r.ResolvedAt = &t
208208+ }
209209+ return &r, nil
210210+}
211211+212212+func (s *ModerationStore) ListPendingReports(ctx context.Context) ([]moderation.Report, error) {
213213+ return s.listReports(ctx, `WHERE status = 'pending' ORDER BY created_at DESC`)
214214+}
215215+216216+func (s *ModerationStore) ListAllReports(ctx context.Context) ([]moderation.Report, error) {
217217+ return s.listReports(ctx, `ORDER BY created_at DESC`)
218218+}
219219+220220+func (s *ModerationStore) listReports(ctx context.Context, clause string) ([]moderation.Report, error) {
221221+ rows, err := s.db.QueryContext(ctx, `
222222+ SELECT id, subject_uri, subject_did, reporter_did, reason, created_at, status, resolved_by, resolved_at
223223+ FROM moderation_reports `+clause)
224224+ if err != nil {
225225+ return nil, err
226226+ }
227227+ defer rows.Close()
228228+ return scanReports(rows)
229229+}
230230+231231+func scanReports(rows *sql.Rows) ([]moderation.Report, error) {
232232+ var reports []moderation.Report
233233+ for rows.Next() {
234234+ var r moderation.Report
235235+ var createdAtStr string
236236+ var resolvedAtStr sql.NullString
237237+ if err := rows.Scan(&r.ID, &r.SubjectURI, &r.SubjectDID, &r.ReporterDID, &r.Reason,
238238+ &createdAtStr, &r.Status, &r.ResolvedBy, &resolvedAtStr); err != nil {
239239+ continue
240240+ }
241241+ r.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
242242+ if resolvedAtStr.Valid {
243243+ t, _ := time.Parse(time.RFC3339Nano, resolvedAtStr.String)
244244+ r.ResolvedAt = &t
245245+ }
246246+ reports = append(reports, r)
247247+ }
248248+ return reports, rows.Err()
249249+}
250250+251251+func (s *ModerationStore) ResolveReport(ctx context.Context, id string, status moderation.ReportStatus, resolvedBy string) error {
252252+ now := time.Now().Format(time.RFC3339Nano)
253253+ res, err := s.db.ExecContext(ctx, `
254254+ UPDATE moderation_reports SET status = ?, resolved_by = ?, resolved_at = ? WHERE id = ?
255255+ `, string(status), resolvedBy, now, id)
256256+ if err != nil {
257257+ return fmt.Errorf("resolve report: %w", err)
258258+ }
259259+ n, _ := res.RowsAffected()
260260+ if n == 0 {
261261+ return fmt.Errorf("report not found: %s", id)
262262+ }
263263+ return nil
264264+}
265265+266266+func (s *ModerationStore) CountReportsForURI(ctx context.Context, atURI string) (int, error) {
267267+ var count int
268268+ err := s.db.QueryRowContext(ctx, `SELECT COUNT(*) FROM moderation_reports WHERE subject_uri = ?`, atURI).Scan(&count)
269269+ return count, err
270270+}
271271+272272+func (s *ModerationStore) CountReportsForDID(ctx context.Context, did string) (int, error) {
273273+ var count int
274274+ err := s.db.QueryRowContext(ctx, `SELECT COUNT(*) FROM moderation_reports WHERE subject_did = ?`, did).Scan(&count)
275275+ return count, err
276276+}
277277+278278+func (s *ModerationStore) CountReportsForDIDSince(ctx context.Context, did string, since time.Time) (int, error) {
279279+ var count int
280280+ err := s.db.QueryRowContext(ctx, `
281281+ SELECT COUNT(*) FROM moderation_reports WHERE subject_did = ? AND created_at > ?
282282+ `, did, since.Format(time.RFC3339Nano)).Scan(&count)
283283+ return count, err
284284+}
285285+286286+func (s *ModerationStore) HasReportedURI(ctx context.Context, reporterDID, subjectURI string) (bool, error) {
287287+ var exists int
288288+ err := s.db.QueryRowContext(ctx, `
289289+ SELECT 1 FROM moderation_reports WHERE reporter_did = ? AND subject_uri = ? LIMIT 1
290290+ `, reporterDID, subjectURI).Scan(&exists)
291291+ if err == sql.ErrNoRows {
292292+ return false, nil
293293+ }
294294+ return exists == 1, err
295295+}
296296+297297+func (s *ModerationStore) CountReportsFromUserSince(ctx context.Context, reporterDID string, since time.Time) (int, error) {
298298+ var count int
299299+ err := s.db.QueryRowContext(ctx, `
300300+ SELECT COUNT(*) FROM moderation_reports WHERE reporter_did = ? AND created_at > ?
301301+ `, reporterDID, since.Format(time.RFC3339Nano)).Scan(&count)
302302+ return count, err
303303+}
304304+305305+// ========== Audit Log ==========
306306+307307+func (s *ModerationStore) LogAction(ctx context.Context, entry moderation.AuditEntry) error {
308308+ details, err := json.Marshal(entry.Details)
309309+ if err != nil {
310310+ details = []byte("{}")
311311+ }
312312+ autoMod := 0
313313+ if entry.AutoMod {
314314+ autoMod = 1
315315+ }
316316+ _, err = s.db.ExecContext(ctx, `
317317+ INSERT INTO moderation_audit_log (id, action, actor_did, target_uri, reason, details, timestamp, auto_mod)
318318+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)
319319+ `, entry.ID, string(entry.Action), entry.ActorDID, entry.TargetURI, entry.Reason,
320320+ string(details), entry.Timestamp.Format(time.RFC3339Nano), autoMod)
321321+ if err != nil {
322322+ return fmt.Errorf("log action: %w", err)
323323+ }
324324+ return nil
325325+}
326326+327327+func (s *ModerationStore) ListAuditLog(ctx context.Context, limit int) ([]moderation.AuditEntry, error) {
328328+ rows, err := s.db.QueryContext(ctx, `
329329+ SELECT id, action, actor_did, target_uri, reason, details, timestamp, auto_mod
330330+ FROM moderation_audit_log ORDER BY timestamp DESC LIMIT ?
331331+ `, limit)
332332+ if err != nil {
333333+ return nil, err
334334+ }
335335+ defer rows.Close()
336336+337337+ var entries []moderation.AuditEntry
338338+ for rows.Next() {
339339+ var e moderation.AuditEntry
340340+ var timestampStr, detailsStr string
341341+ var autoMod int
342342+ if err := rows.Scan(&e.ID, &e.Action, &e.ActorDID, &e.TargetURI, &e.Reason,
343343+ &detailsStr, ×tampStr, &autoMod); err != nil {
344344+ continue
345345+ }
346346+ e.Timestamp, _ = time.Parse(time.RFC3339Nano, timestampStr)
347347+ e.AutoMod = autoMod == 1
348348+ _ = json.Unmarshal([]byte(detailsStr), &e.Details)
349349+ entries = append(entries, e)
350350+ }
351351+ return entries, rows.Err()
352352+}
353353+354354+// ========== Auto-hide Resets ==========
355355+356356+func (s *ModerationStore) SetAutoHideReset(ctx context.Context, did string, resetAt time.Time) error {
357357+ _, err := s.db.ExecContext(ctx, `
358358+ INSERT INTO moderation_autohide_resets (did, reset_at) VALUES (?, ?)
359359+ ON CONFLICT(did) DO UPDATE SET reset_at = excluded.reset_at
360360+ `, did, resetAt.Format(time.RFC3339Nano))
361361+ if err != nil {
362362+ return fmt.Errorf("set autohide reset: %w", err)
363363+ }
364364+ return nil
365365+}
366366+367367+func (s *ModerationStore) GetAutoHideReset(ctx context.Context, did string) (time.Time, error) {
368368+ var resetAtStr string
369369+ err := s.db.QueryRowContext(ctx, `SELECT reset_at FROM moderation_autohide_resets WHERE did = ?`, did).Scan(&resetAtStr)
370370+ if err == sql.ErrNoRows {
371371+ return time.Time{}, nil
372372+ }
373373+ if err != nil {
374374+ return time.Time{}, err
375375+ }
376376+ t, _ := time.Parse(time.RFC3339Nano, resetAtStr)
377377+ return t, nil
378378+}
+2-2
internal/firehose/config.go
···11// Package firehose provides real-time AT Protocol event consumption via Jetstream.
22-// It indexes Arabica records into a local BoltDB database for fast feed queries.
22+// It indexes Arabica records into a local SQLite database for fast feed queries.
33package firehose
4455import (
···3636 // Compress enables zstd compression (~56% bandwidth reduction)
3737 Compress bool
38383939- // IndexPath is the path to the BoltDB feed index database
3939+ // IndexPath is the path to the SQLite feed index database
4040 IndexPath string
41414242 // ProfileCacheTTL is how long to cache profile data
+479-890
internal/firehose/index.go
···11package firehose
2233import (
44- "bytes"
54 "context"
66- "encoding/binary"
77- "encoding/hex"
55+ "database/sql"
86 "encoding/json"
97 "fmt"
108 "os"
···1917 "arabica/internal/models"
20182119 "github.com/rs/zerolog/log"
2222- bolt "go.etcd.io/bbolt"
2323-)
2424-2525-// Bucket names for the feed index
2626-var (
2727- // BucketRecords stores full record data: {at-uri} -> {IndexedRecord JSON}
2828- BucketRecords = []byte("records")
2929-3030- // BucketByTime stores records by timestamp for chronological queries: {timestamp:at-uri} -> {}
3131- BucketByTime = []byte("by_time")
3232-3333- // BucketByDID stores records by DID for user-specific queries: {did:at-uri} -> {}
3434- BucketByDID = []byte("by_did")
3535-3636- // BucketByCollection stores records by type: {collection:timestamp:at-uri} -> {}
3737- BucketByCollection = []byte("by_collection")
3838-3939- // BucketProfiles stores cached profile data: {did} -> {CachedProfile JSON}
4040- BucketProfiles = []byte("profiles")
4141-4242- // BucketMeta stores metadata like cursor position: {key} -> {value}
4343- BucketMeta = []byte("meta")
4444-4545- // BucketKnownDIDs stores all DIDs we've seen with Arabica records
4646- BucketKnownDIDs = []byte("known_dids")
4747-4848- // BucketBackfilled stores DIDs that have been backfilled: {did} -> {timestamp}
4949- BucketBackfilled = []byte("backfilled")
5050-5151- // BucketLikes stores like mappings: {subject_uri:actor_did} -> {rkey}
5252- BucketLikes = []byte("likes")
5353-5454- // BucketLikeCounts stores aggregated like counts: {subject_uri} -> {uint64 count}
5555- BucketLikeCounts = []byte("like_counts")
5656-5757- // BucketLikesByActor stores likes by actor for lookup: {actor_did:subject_uri} -> {rkey}
5858- BucketLikesByActor = []byte("likes_by_actor")
5959-6060- // BucketComments stores comment data: {subject_uri:timestamp:actor_did} -> {comment JSON}
6161- BucketComments = []byte("comments")
6262-6363- // BucketCommentCounts stores aggregated comment counts: {subject_uri} -> {uint64 count}
6464- BucketCommentCounts = []byte("comment_counts")
6565-6666- // BucketCommentsByActor stores comments by actor for lookup: {actor_did:rkey} -> {subject_uri}
6767- BucketCommentsByActor = []byte("comments_by_actor")
6868-6969- // BucketCommentChildren stores parent-child relationships: {parent_uri:child_rkey} -> {child_actor_did}
7070- BucketCommentChildren = []byte("comment_children")
2020+ _ "modernc.org/sqlite"
7121)
72227323// FeedableRecordTypes are the record types that should appear as feed items.
···8939 Record json.RawMessage `json:"record"`
9040 CID string `json:"cid"`
9141 IndexedAt time.Time `json:"indexed_at"`
9292- CreatedAt time.Time `json:"created_at"` // Parsed from record
4242+ CreatedAt time.Time `json:"created_at"`
9343}
94449545// CachedProfile stores profile data with TTL
···1015110252// FeedIndex provides persistent storage for firehose events
10353type FeedIndex struct {
104104- db *bolt.DB
5454+ db *sql.DB
10555 publicClient *atproto.PublicClient
10656 profileTTL time.Duration
10757···1237312474// FeedQuery specifies filtering, sorting, and pagination for feed queries
12575type FeedQuery struct {
126126- Limit int // Max items to return
127127- Cursor string // Opaque cursor for pagination (base64-encoded time key)
128128- TypeFilter lexicons.RecordType // Filter to a specific record type (empty = all)
129129- Sort FeedSort // Sort order (default: recent)
7676+ Limit int // Max items to return
7777+ Cursor string // Opaque cursor for pagination (created_at|uri)
7878+ TypeFilter lexicons.RecordType // Filter to a specific record type (empty = all)
7979+ Sort FeedSort // Sort order (default: recent)
13080}
1318113282// FeedResult contains feed items plus pagination info
···13585 NextCursor string // Empty if no more results
13686}
13787138138-// NewFeedIndex creates a new feed index backed by BoltDB
8888+const schemaNoTrailingPragma = `
8989+CREATE TABLE IF NOT EXISTS records (
9090+ uri TEXT PRIMARY KEY,
9191+ did TEXT NOT NULL,
9292+ collection TEXT NOT NULL,
9393+ rkey TEXT NOT NULL,
9494+ record TEXT NOT NULL,
9595+ cid TEXT NOT NULL DEFAULT '',
9696+ indexed_at TEXT NOT NULL,
9797+ created_at TEXT NOT NULL
9898+);
9999+CREATE INDEX IF NOT EXISTS idx_records_created ON records(created_at DESC);
100100+CREATE INDEX IF NOT EXISTS idx_records_did ON records(did);
101101+CREATE INDEX IF NOT EXISTS idx_records_coll_created ON records(collection, created_at DESC);
102102+103103+CREATE TABLE IF NOT EXISTS meta (
104104+ key TEXT PRIMARY KEY,
105105+ value BLOB
106106+);
107107+108108+CREATE TABLE IF NOT EXISTS known_dids (did TEXT PRIMARY KEY);
109109+CREATE TABLE IF NOT EXISTS backfilled (did TEXT PRIMARY KEY, backfilled_at TEXT NOT NULL);
110110+111111+CREATE TABLE IF NOT EXISTS profiles (
112112+ did TEXT PRIMARY KEY,
113113+ data TEXT NOT NULL,
114114+ expires_at TEXT NOT NULL
115115+);
116116+117117+CREATE TABLE IF NOT EXISTS likes (
118118+ subject_uri TEXT NOT NULL,
119119+ actor_did TEXT NOT NULL,
120120+ rkey TEXT NOT NULL,
121121+ PRIMARY KEY (subject_uri, actor_did)
122122+);
123123+CREATE INDEX IF NOT EXISTS idx_likes_actor ON likes(actor_did, subject_uri);
124124+125125+CREATE TABLE IF NOT EXISTS comments (
126126+ actor_did TEXT NOT NULL,
127127+ rkey TEXT NOT NULL,
128128+ subject_uri TEXT NOT NULL,
129129+ parent_uri TEXT NOT NULL DEFAULT '',
130130+ parent_rkey TEXT NOT NULL DEFAULT '',
131131+ cid TEXT NOT NULL DEFAULT '',
132132+ text TEXT NOT NULL,
133133+ created_at TEXT NOT NULL,
134134+ PRIMARY KEY (actor_did, rkey)
135135+);
136136+CREATE INDEX IF NOT EXISTS idx_comments_subject ON comments(subject_uri, created_at);
137137+138138+CREATE TABLE IF NOT EXISTS notifications (
139139+ id TEXT NOT NULL,
140140+ target_did TEXT NOT NULL,
141141+ type TEXT NOT NULL,
142142+ actor_did TEXT NOT NULL,
143143+ subject_uri TEXT NOT NULL,
144144+ created_at TEXT NOT NULL
145145+);
146146+CREATE INDEX IF NOT EXISTS idx_notif_target ON notifications(target_did, created_at DESC);
147147+CREATE UNIQUE INDEX IF NOT EXISTS idx_notif_dedup ON notifications(target_did, type, actor_did, subject_uri);
148148+149149+CREATE TABLE IF NOT EXISTS notifications_meta (
150150+ target_did TEXT PRIMARY KEY,
151151+ last_read TEXT NOT NULL
152152+);
153153+154154+CREATE TABLE IF NOT EXISTS moderation_hidden_records (
155155+ uri TEXT PRIMARY KEY,
156156+ hidden_at TEXT NOT NULL,
157157+ hidden_by TEXT NOT NULL,
158158+ reason TEXT NOT NULL DEFAULT '',
159159+ auto_hidden INTEGER NOT NULL DEFAULT 0
160160+);
161161+162162+CREATE TABLE IF NOT EXISTS moderation_blacklist (
163163+ did TEXT PRIMARY KEY,
164164+ blacklisted_at TEXT NOT NULL,
165165+ blacklisted_by TEXT NOT NULL,
166166+ reason TEXT NOT NULL DEFAULT ''
167167+);
168168+169169+CREATE TABLE IF NOT EXISTS moderation_reports (
170170+ id TEXT PRIMARY KEY,
171171+ subject_uri TEXT NOT NULL DEFAULT '',
172172+ subject_did TEXT NOT NULL DEFAULT '',
173173+ reporter_did TEXT NOT NULL,
174174+ reason TEXT NOT NULL,
175175+ created_at TEXT NOT NULL,
176176+ status TEXT NOT NULL DEFAULT 'pending',
177177+ resolved_by TEXT NOT NULL DEFAULT '',
178178+ resolved_at TEXT
179179+);
180180+CREATE INDEX IF NOT EXISTS idx_modreports_uri ON moderation_reports(subject_uri);
181181+CREATE INDEX IF NOT EXISTS idx_modreports_did ON moderation_reports(subject_did);
182182+CREATE INDEX IF NOT EXISTS idx_modreports_reporter ON moderation_reports(reporter_did, created_at);
183183+CREATE INDEX IF NOT EXISTS idx_modreports_status ON moderation_reports(status);
184184+185185+CREATE TABLE IF NOT EXISTS moderation_audit_log (
186186+ id TEXT PRIMARY KEY,
187187+ action TEXT NOT NULL,
188188+ actor_did TEXT NOT NULL,
189189+ target_uri TEXT NOT NULL DEFAULT '',
190190+ reason TEXT NOT NULL DEFAULT '',
191191+ details TEXT NOT NULL DEFAULT '{}',
192192+ timestamp TEXT NOT NULL,
193193+ auto_mod INTEGER NOT NULL DEFAULT 0
194194+);
195195+CREATE INDEX IF NOT EXISTS idx_modaudit_ts ON moderation_audit_log(timestamp DESC);
196196+197197+CREATE TABLE IF NOT EXISTS moderation_autohide_resets (
198198+ did TEXT PRIMARY KEY,
199199+ reset_at TEXT NOT NULL
200200+);
201201+`
202202+203203+// NewFeedIndex creates a new feed index backed by SQLite
139204func NewFeedIndex(path string, profileTTL time.Duration) (*FeedIndex, error) {
140205 if path == "" {
141206 return nil, fmt.Errorf("index path is required")
···149214 }
150215 }
151216152152- db, err := bolt.Open(path, 0600, &bolt.Options{
153153- Timeout: 5 * time.Second,
154154- })
217217+ db, err := sql.Open("sqlite", "file:"+path+"?_pragma=busy_timeout(5000)&_pragma=journal_mode(WAL)&_pragma=synchronous(NORMAL)&_pragma=foreign_keys(ON)&_pragma=temp_store(MEMORY)&_pragma=mmap_size(134217728)&_pragma=cache_size(-65536)")
155218 if err != nil {
156219 return nil, fmt.Errorf("failed to open index database: %w", err)
157220 }
158221159159- // Create buckets
160160- err = db.Update(func(tx *bolt.Tx) error {
161161- buckets := [][]byte{
162162- BucketRecords,
163163- BucketByTime,
164164- BucketByDID,
165165- BucketByCollection,
166166- BucketProfiles,
167167- BucketMeta,
168168- BucketKnownDIDs,
169169- BucketBackfilled,
170170- BucketLikes,
171171- BucketLikeCounts,
172172- BucketLikesByActor,
173173- BucketComments,
174174- BucketCommentCounts,
175175- BucketCommentsByActor,
176176- BucketCommentChildren,
177177- BucketNotifications,
178178- BucketNotificationsMeta,
179179- }
180180- for _, bucket := range buckets {
181181- if _, err := tx.CreateBucketIfNotExists(bucket); err != nil {
182182- return fmt.Errorf("failed to create bucket %s: %w", bucket, err)
183183- }
184184- }
185185- return nil
186186- })
187187- if err != nil {
222222+ // WAL mode allows concurrent reads with a single writer.
223223+ // Allow multiple reader connections but limit to avoid file descriptor exhaustion.
224224+ db.SetMaxOpenConns(4)
225225+ db.SetMaxIdleConns(4)
226226+227227+ // Execute schema (skip PRAGMAs — already set via DSN)
228228+ if _, err := db.Exec(schemaNoTrailingPragma); err != nil {
188229 _ = db.Close()
189189- return nil, err
230230+ return nil, fmt.Errorf("failed to initialize schema: %w", err)
190231 }
191232192233 idx := &FeedIndex{
···199240 return idx, nil
200241}
201242243243+// DB returns the underlying database connection for shared use by other stores.
244244+func (idx *FeedIndex) DB() *sql.DB {
245245+ return idx.db
246246+}
247247+202248// Close closes the index database
203249func (idx *FeedIndex) Close() error {
204250 if idx.db != nil {
···224270// GetCursor returns the last processed cursor (microseconds timestamp)
225271func (idx *FeedIndex) GetCursor() (int64, error) {
226272 var cursor int64
227227- err := idx.db.View(func(tx *bolt.Tx) error {
228228- b := tx.Bucket(BucketMeta)
229229- v := b.Get([]byte("cursor"))
230230- if len(v) == 8 {
231231- cursor = int64(binary.BigEndian.Uint64(v))
232232- }
233233- return nil
234234- })
273273+ err := idx.db.QueryRow(`SELECT value FROM meta WHERE key = 'cursor'`).Scan(&cursor)
274274+ if err == sql.ErrNoRows {
275275+ return 0, nil
276276+ }
235277 return cursor, err
236278}
237279238280// SetCursor stores the cursor position
239281func (idx *FeedIndex) SetCursor(cursor int64) error {
240240- return idx.db.Update(func(tx *bolt.Tx) error {
241241- b := tx.Bucket(BucketMeta)
242242- buf := make([]byte, 8)
243243- binary.BigEndian.PutUint64(buf, uint64(cursor))
244244- return b.Put([]byte("cursor"), buf)
245245- })
282282+ _, err := idx.db.Exec(`INSERT OR REPLACE INTO meta (key, value) VALUES ('cursor', ?)`, cursor)
283283+ return err
246284}
247285248286// UpsertRecord adds or updates a record in the index
···260298 }
261299 }
262300263263- indexed := &IndexedRecord{
264264- URI: uri,
265265- DID: did,
266266- Collection: collection,
267267- RKey: rkey,
268268- Record: record,
269269- CID: cid,
270270- IndexedAt: time.Now(),
271271- CreatedAt: createdAt,
301301+ now := time.Now()
302302+303303+ _, err := idx.db.Exec(`
304304+ INSERT INTO records (uri, did, collection, rkey, record, cid, indexed_at, created_at)
305305+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)
306306+ ON CONFLICT(uri) DO UPDATE SET
307307+ record = excluded.record,
308308+ cid = excluded.cid,
309309+ indexed_at = excluded.indexed_at,
310310+ created_at = excluded.created_at
311311+ `, uri, did, collection, rkey, string(record), cid,
312312+ now.Format(time.RFC3339Nano), createdAt.Format(time.RFC3339Nano))
313313+ if err != nil {
314314+ return fmt.Errorf("failed to upsert record: %w", err)
272315 }
273316274274- data, err := json.Marshal(indexed)
317317+ // Track known DID
318318+ _, err = idx.db.Exec(`INSERT OR IGNORE INTO known_dids (did) VALUES (?)`, did)
275319 if err != nil {
276276- return fmt.Errorf("failed to marshal record: %w", err)
320320+ return fmt.Errorf("failed to track known DID: %w", err)
277321 }
278322279279- return idx.db.Update(func(tx *bolt.Tx) error {
280280- // Store the record
281281- records := tx.Bucket(BucketRecords)
282282- if err := records.Put([]byte(uri), data); err != nil {
283283- return err
284284- }
285285-286286- // Index by time (use createdAt for sorting, not event time)
287287- byTime := tx.Bucket(BucketByTime)
288288- timeKey := makeTimeKey(createdAt, uri)
289289- if err := byTime.Put(timeKey, nil); err != nil {
290290- return err
291291- }
292292-293293- // Index by DID
294294- byDID := tx.Bucket(BucketByDID)
295295- didKey := []byte(did + ":" + uri)
296296- if err := byDID.Put(didKey, nil); err != nil {
297297- return err
298298- }
299299-300300- // Index by collection
301301- byCollection := tx.Bucket(BucketByCollection)
302302- collKey := []byte(collection + ":" + string(timeKey))
303303- if err := byCollection.Put(collKey, nil); err != nil {
304304- return err
305305- }
306306-307307- // Track known DID
308308- knownDIDs := tx.Bucket(BucketKnownDIDs)
309309- if err := knownDIDs.Put([]byte(did), []byte("1")); err != nil {
310310- return err
311311- }
312312-313313- return nil
314314- })
323323+ return nil
315324}
316325317326// DeleteRecord removes a record from the index
318327func (idx *FeedIndex) DeleteRecord(did, collection, rkey string) error {
319328 uri := atproto.BuildATURI(did, collection, rkey)
329329+ _, err := idx.db.Exec(`DELETE FROM records WHERE uri = ?`, uri)
330330+ return err
331331+}
320332321321- return idx.db.Update(func(tx *bolt.Tx) error {
322322- // Get the existing record to find its timestamp
323323- records := tx.Bucket(BucketRecords)
324324- existingData := records.Get([]byte(uri))
325325- if existingData == nil {
326326- // Record doesn't exist, nothing to delete
327327- return nil
328328- }
333333+// GetRecord retrieves a single record by URI
334334+func (idx *FeedIndex) GetRecord(uri string) (*IndexedRecord, error) {
335335+ var rec IndexedRecord
336336+ var recordStr, indexedAtStr, createdAtStr string
329337330330- var existing IndexedRecord
331331- if err := json.Unmarshal(existingData, &existing); err != nil {
332332- // Can't parse, just delete the main record
333333- return records.Delete([]byte(uri))
334334- }
338338+ err := idx.db.QueryRow(`
339339+ SELECT uri, did, collection, rkey, record, cid, indexed_at, created_at
340340+ FROM records WHERE uri = ?
341341+ `, uri).Scan(&rec.URI, &rec.DID, &rec.Collection, &rec.RKey,
342342+ &recordStr, &rec.CID, &indexedAtStr, &createdAtStr)
343343+ if err == sql.ErrNoRows {
344344+ return nil, nil
345345+ }
346346+ if err != nil {
347347+ return nil, err
348348+ }
335349336336- // Delete from records
337337- if err := records.Delete([]byte(uri)); err != nil {
338338- return err
339339- }
340340-341341- // Delete from by_time index
342342- byTime := tx.Bucket(BucketByTime)
343343- timeKey := makeTimeKey(existing.CreatedAt, uri)
344344- if err := byTime.Delete(timeKey); err != nil {
345345- return err
346346- }
350350+ rec.Record = json.RawMessage(recordStr)
351351+ rec.IndexedAt, _ = time.Parse(time.RFC3339Nano, indexedAtStr)
352352+ rec.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
347353348348- // Delete from by_did index
349349- byDID := tx.Bucket(BucketByDID)
350350- didKey := []byte(did + ":" + uri)
351351- if err := byDID.Delete(didKey); err != nil {
352352- return err
353353- }
354354-355355- // Delete from by_collection index
356356- byCollection := tx.Bucket(BucketByCollection)
357357- collKey := []byte(collection + ":" + string(timeKey))
358358- if err := byCollection.Delete(collKey); err != nil {
359359- return err
360360- }
361361-362362- return nil
363363- })
364364-}
365365-366366-// GetRecord retrieves a single record by URI
367367-func (idx *FeedIndex) GetRecord(uri string) (*IndexedRecord, error) {
368368- var record *IndexedRecord
369369- err := idx.db.View(func(tx *bolt.Tx) error {
370370- b := tx.Bucket(BucketRecords)
371371- data := b.Get([]byte(uri))
372372- if data == nil {
373373- return nil
374374- }
375375- record = &IndexedRecord{}
376376- return json.Unmarshal(data, record)
377377- })
378378- return record, err
354354+ return &rec, nil
379355}
380356381357// FeedItem represents an item in the feed (matches feed.FeedItem structure)
···404380405381// GetRecentFeed returns recent feed items from the index
406382func (idx *FeedIndex) GetRecentFeed(ctx context.Context, limit int) ([]*FeedItem, error) {
407407- var records []*IndexedRecord
408408- err := idx.db.View(func(tx *bolt.Tx) error {
409409- byTime := tx.Bucket(BucketByTime)
410410- recordsBucket := tx.Bucket(BucketRecords)
411411-412412- c := byTime.Cursor()
413413-414414- // Iterate in reverse (newest first)
415415- count := 0
416416- for k, _ := c.First(); k != nil && count < limit*2; k, _ = c.Next() {
417417- // Extract URI from key (format: timestamp:uri)
418418- uri := extractURIFromTimeKey(k)
419419- if uri == "" {
420420- continue
421421- }
422422-423423- data := recordsBucket.Get([]byte(uri))
424424- if data == nil {
425425- continue
426426- }
427427-428428- var record IndexedRecord
429429- if err := json.Unmarshal(data, &record); err != nil {
430430- continue
431431- }
432432-433433- records = append(records, &record)
434434- count++
435435- }
436436-437437- return nil
438438- })
439439- if err != nil {
440440- return nil, err
441441- }
442442-443443- // Build lookup maps for reference resolution
444444- recordsByURI := make(map[string]*IndexedRecord)
445445- for _, r := range records {
446446- recordsByURI[r.URI] = r
447447- }
448448-449449- // Also load additional records we might need for references
450450- err = idx.db.View(func(tx *bolt.Tx) error {
451451- recordsBucket := tx.Bucket(BucketRecords)
452452- return recordsBucket.ForEach(func(k, v []byte) error {
453453- uri := string(k)
454454- if _, exists := recordsByURI[uri]; exists {
455455- return nil
456456- }
457457- var record IndexedRecord
458458- if err := json.Unmarshal(v, &record); err != nil {
459459- return nil
460460- }
461461- // Only load beans, roasters, grinders, brewers for reference resolution
462462- switch record.Collection {
463463- case atproto.NSIDBean, atproto.NSIDRoaster, atproto.NSIDGrinder, atproto.NSIDBrewer:
464464- recordsByURI[uri] = &record
465465- }
466466- return nil
467467- })
468468- })
469469- if err != nil {
470470- return nil, err
471471- }
472472-473473- // Convert to FeedItems
474474- items := make([]*FeedItem, 0, len(records))
475475- for _, record := range records {
476476- // Skip likes - they're indexed for like counts but not displayed as feed items
477477- if record.Collection == atproto.NSIDLike {
478478- continue
479479- }
480480-481481- item, err := idx.recordToFeedItem(ctx, record, recordsByURI)
482482- if err != nil {
483483- log.Warn().Err(err).Str("uri", record.URI).Msg("failed to convert record to feed item")
484484- continue
485485- }
486486- if !FeedableRecordTypes[item.RecordType] {
487487- continue
488488- }
489489- items = append(items, item)
490490- }
491491-492492- // Sort by timestamp descending
493493- sort.Slice(items, func(i, j int) bool {
494494- return items[i].Timestamp.After(items[j].Timestamp)
495495- })
496496-497497- // Apply limit
498498- if len(items) > limit {
499499- items = items[:limit]
500500- }
501501-502502- return items, nil
383383+ return idx.getFeedItems(ctx, "", limit, "")
503384}
504385505386// recordTypeToNSID maps a lexicons.RecordType to its NSID collection string
···511392 lexicons.RecordTypeBrewer: atproto.NSIDBrewer,
512393}
513394395395+// feedableCollections is the set of collection NSIDs that appear in the feed
396396+var feedableCollections = func() []string {
397397+ out := make([]string, 0, len(recordTypeToNSID))
398398+ for _, nsid := range recordTypeToNSID {
399399+ out = append(out, nsid)
400400+ }
401401+ return out
402402+}()
403403+514404// GetFeedWithQuery returns feed items matching the given query with cursor-based pagination
515405func (idx *FeedIndex) GetFeedWithQuery(ctx context.Context, q FeedQuery) (*FeedResult, error) {
516406 if q.Limit <= 0 {
···520410 q.Sort = FeedSortRecent
521411 }
522412523523- // For type-filtered queries, use BucketByCollection for efficiency
524524- // For unfiltered queries, use BucketByTime
525525- var records []*IndexedRecord
526526- var lastTimeKey []byte
527527-528528- // Decode cursor if provided
529529- var cursorBytes []byte
530530- if q.Cursor != "" {
531531- var err error
532532- cursorBytes, err = decodeCursor(q.Cursor)
533533- if err != nil {
534534- return nil, fmt.Errorf("invalid cursor: %w", err)
413413+ var collectionFilter string
414414+ if q.TypeFilter != "" {
415415+ nsid, ok := recordTypeToNSID[q.TypeFilter]
416416+ if !ok {
417417+ return nil, fmt.Errorf("unknown record type: %s", q.TypeFilter)
535418 }
419419+ collectionFilter = nsid
536420 }
537421538538- // Fetch more than needed to account for filtering
539539- fetchLimit := q.Limit + 10
540540-541541- err := idx.db.View(func(tx *bolt.Tx) error {
542542- recordsBucket := tx.Bucket(BucketRecords)
422422+ items, err := idx.getFeedItems(ctx, collectionFilter, q.Limit+1, q.Cursor)
423423+ if err != nil {
424424+ return nil, err
425425+ }
543426544544- if q.TypeFilter != "" {
545545- // Use BucketByCollection for filtered queries
546546- nsid, ok := recordTypeToNSID[q.TypeFilter]
547547- if !ok {
548548- return fmt.Errorf("unknown record type: %s", q.TypeFilter)
427427+ // Sort based on query
428428+ if q.Sort == FeedSortPopular {
429429+ sort.Slice(items, func(i, j int) bool {
430430+ scoreI := items[i].LikeCount*3 + items[i].CommentCount*2
431431+ scoreJ := items[j].LikeCount*3 + items[j].CommentCount*2
432432+ if scoreI != scoreJ {
433433+ return scoreI > scoreJ
549434 }
435435+ return items[i].Timestamp.After(items[j].Timestamp)
436436+ })
437437+ }
550438551551- byCollection := tx.Bucket(BucketByCollection)
552552- c := byCollection.Cursor()
439439+ result := &FeedResult{Items: items}
440440+ if len(items) > q.Limit {
441441+ result.Items = items[:q.Limit]
442442+ last := result.Items[q.Limit-1]
443443+ result.NextCursor = last.Timestamp.Format(time.RFC3339Nano) + "|" + last.SubjectURI
444444+ }
553445554554- // Collection keys: {collection}:{inverted_timestamp}:{uri}
555555- prefix := []byte(nsid + ":")
446446+ return result, nil
447447+}
556448557557- var k []byte
558558- if cursorBytes != nil {
559559- // Seek to cursor position (cursor is the full collection key)
560560- k, _ = c.Seek(cursorBytes)
561561- // Skip the cursor key itself (it was the last item of previous page)
562562- if k != nil && string(k) == string(cursorBytes) {
563563- k, _ = c.Next()
564564- }
565565- } else {
566566- k, _ = c.Seek(prefix)
567567- }
449449+// getFeedItems fetches records from SQLite, resolves references, and returns FeedItems.
450450+func (idx *FeedIndex) getFeedItems(ctx context.Context, collectionFilter string, limit int, cursor string) ([]*FeedItem, error) {
451451+ // Build query for feedable records
452452+ var args []any
453453+ query := `SELECT uri, did, collection, rkey, record, cid, indexed_at, created_at FROM records WHERE `
568454569569- count := 0
570570- for ; k != nil && count < fetchLimit; k, _ = c.Next() {
571571- if !bytes.HasPrefix(k, prefix) {
572572- break
573573- }
455455+ if collectionFilter != "" {
456456+ query += `collection = ? `
457457+ args = append(args, collectionFilter)
458458+ } else {
459459+ // Only feedable collections
460460+ placeholders := make([]string, len(feedableCollections))
461461+ for i, c := range feedableCollections {
462462+ placeholders[i] = "?"
463463+ args = append(args, c)
464464+ }
465465+ query += `collection IN (` + strings.Join(placeholders, ",") + `) `
466466+ }
574467575575- // Extract URI from collection key: {collection}:{timestamp_bytes}:{uri}
576576- uri := extractURIFromCollectionKey(k, nsid)
577577- if uri == "" {
578578- continue
579579- }
468468+ // Cursor-based pagination: cursor format is "created_at|uri"
469469+ if cursor != "" {
470470+ parts := strings.SplitN(cursor, "|", 2)
471471+ if len(parts) == 2 {
472472+ query += `AND (created_at < ? OR (created_at = ? AND uri < ?)) `
473473+ args = append(args, parts[0], parts[0], parts[1])
474474+ }
475475+ }
580476581581- data := recordsBucket.Get([]byte(uri))
582582- if data == nil {
583583- continue
584584- }
477477+ query += `ORDER BY created_at DESC LIMIT ?`
478478+ args = append(args, limit)
585479586586- var record IndexedRecord
587587- if err := json.Unmarshal(data, &record); err != nil {
588588- continue
589589- }
590590- records = append(records, &record)
591591- lastTimeKey = make([]byte, len(k))
592592- copy(lastTimeKey, k)
593593- count++
594594- }
595595- } else {
596596- // Use BucketByTime for unfiltered queries
597597- byTime := tx.Bucket(BucketByTime)
598598- c := byTime.Cursor()
480480+ rows, err := idx.db.QueryContext(ctx, query, args...)
481481+ if err != nil {
482482+ return nil, err
483483+ }
484484+ defer rows.Close()
599485600600- var k []byte
601601- if cursorBytes != nil {
602602- k, _ = c.Seek(cursorBytes)
603603- if k != nil && string(k) == string(cursorBytes) {
604604- k, _ = c.Next()
605605- }
606606- } else {
607607- k, _ = c.First()
608608- }
486486+ var records []*IndexedRecord
487487+ refURIs := make(map[string]bool) // URIs we need to resolve
609488610610- count := 0
611611- for ; k != nil && count < fetchLimit; k, _ = c.Next() {
612612- uri := extractURIFromTimeKey(k)
613613- if uri == "" {
614614- continue
615615- }
489489+ for rows.Next() {
490490+ var rec IndexedRecord
491491+ var recordStr, indexedAtStr, createdAtStr string
492492+ if err := rows.Scan(&rec.URI, &rec.DID, &rec.Collection, &rec.RKey,
493493+ &recordStr, &rec.CID, &indexedAtStr, &createdAtStr); err != nil {
494494+ continue
495495+ }
496496+ rec.Record = json.RawMessage(recordStr)
497497+ rec.IndexedAt, _ = time.Parse(time.RFC3339Nano, indexedAtStr)
498498+ rec.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
499499+ records = append(records, &rec)
616500617617- data := recordsBucket.Get([]byte(uri))
618618- if data == nil {
619619- continue
620620- }
621621-622622- var record IndexedRecord
623623- if err := json.Unmarshal(data, &record); err != nil {
624624- continue
625625- }
626626- // Skip non-feedable records (likes, comments) so they don't
627627- // consume slots in the fetch limit, which would cause pagination
628628- // to break when many non-feedable records are intermixed.
629629- if record.Collection == atproto.NSIDLike || record.Collection == atproto.NSIDComment {
630630- continue
501501+ // Collect reference URIs from the record data
502502+ var recordData map[string]any
503503+ if err := json.Unmarshal(rec.Record, &recordData); err == nil {
504504+ for _, key := range []string{"beanRef", "roasterRef", "grinderRef", "brewerRef"} {
505505+ if ref, ok := recordData[key].(string); ok && ref != "" {
506506+ refURIs[ref] = true
631507 }
632632- records = append(records, &record)
633633- lastTimeKey = make([]byte, len(k))
634634- copy(lastTimeKey, k)
635635- count++
636508 }
637509 }
638638-639639- return nil
640640- })
641641- if err != nil {
510510+ }
511511+ if err := rows.Err(); err != nil {
642512 return nil, err
643513 }
644514645645- // Build lookup maps for reference resolution
646646- recordsByURI := make(map[string]*IndexedRecord)
515515+ // Build lookup map starting with the fetched records
516516+ recordsByURI := make(map[string]*IndexedRecord, len(records))
647517 for _, r := range records {
648518 recordsByURI[r.URI] = r
649519 }
650520651651- // Load additional records for reference resolution
652652- err = idx.db.View(func(tx *bolt.Tx) error {
653653- recordsBucket := tx.Bucket(BucketRecords)
654654- return recordsBucket.ForEach(func(k, v []byte) error {
655655- uri := string(k)
656656- if _, exists := recordsByURI[uri]; exists {
657657- return nil
658658- }
659659- var record IndexedRecord
660660- if err := json.Unmarshal(v, &record); err != nil {
661661- return nil
662662- }
663663- switch record.Collection {
664664- case atproto.NSIDBean, atproto.NSIDRoaster, atproto.NSIDGrinder, atproto.NSIDBrewer:
665665- recordsByURI[uri] = &record
521521+ // Fetch referenced records that we don't already have
522522+ var missingURIs []string
523523+ for uri := range refURIs {
524524+ if _, ok := recordsByURI[uri]; !ok {
525525+ missingURIs = append(missingURIs, uri)
526526+ }
527527+ }
528528+529529+ if len(missingURIs) > 0 {
530530+ placeholders := make([]string, len(missingURIs))
531531+ refArgs := make([]any, len(missingURIs))
532532+ for i, uri := range missingURIs {
533533+ placeholders[i] = "?"
534534+ refArgs[i] = uri
535535+ }
536536+ refQuery := `SELECT uri, did, collection, rkey, record, cid, indexed_at, created_at FROM records WHERE uri IN (` + strings.Join(placeholders, ",") + `)`
537537+ refRows, err := idx.db.QueryContext(ctx, refQuery, refArgs...)
538538+ if err == nil {
539539+ defer refRows.Close()
540540+ for refRows.Next() {
541541+ var rec IndexedRecord
542542+ var recordStr, indexedAtStr, createdAtStr string
543543+ if err := refRows.Scan(&rec.URI, &rec.DID, &rec.Collection, &rec.RKey,
544544+ &recordStr, &rec.CID, &indexedAtStr, &createdAtStr); err != nil {
545545+ continue
546546+ }
547547+ rec.Record = json.RawMessage(recordStr)
548548+ rec.IndexedAt, _ = time.Parse(time.RFC3339Nano, indexedAtStr)
549549+ rec.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
550550+ recordsByURI[rec.URI] = &rec
551551+552552+ // If this is a bean, check if it references a roaster we also need
553553+ if rec.Collection == atproto.NSIDBean {
554554+ var beanData map[string]any
555555+ if err := json.Unmarshal(rec.Record, &beanData); err == nil {
556556+ if roasterRef, ok := beanData["roasterRef"].(string); ok && roasterRef != "" {
557557+ if _, ok := recordsByURI[roasterRef]; !ok {
558558+ // Fetch this roaster too
559559+ var rRec IndexedRecord
560560+ var rStr, rIdxAt, rCreAt string
561561+ err := idx.db.QueryRowContext(ctx,
562562+ `SELECT uri, did, collection, rkey, record, cid, indexed_at, created_at FROM records WHERE uri = ?`,
563563+ roasterRef).Scan(&rRec.URI, &rRec.DID, &rRec.Collection, &rRec.RKey,
564564+ &rStr, &rRec.CID, &rIdxAt, &rCreAt)
565565+ if err == nil {
566566+ rRec.Record = json.RawMessage(rStr)
567567+ rRec.IndexedAt, _ = time.Parse(time.RFC3339Nano, rIdxAt)
568568+ rRec.CreatedAt, _ = time.Parse(time.RFC3339Nano, rCreAt)
569569+ recordsByURI[rRec.URI] = &rRec
570570+ }
571571+ }
572572+ }
573573+ }
574574+ }
666575 }
667667- return nil
668668- })
669669- })
670670- if err != nil {
671671- return nil, err
576576+ }
672577 }
673578674579 // Convert to FeedItems
675580 items := make([]*FeedItem, 0, len(records))
676581 for _, record := range records {
677677- if record.Collection == atproto.NSIDLike || record.Collection == atproto.NSIDComment {
678678- continue
679679- }
680680-681582 item, err := idx.recordToFeedItem(ctx, record, recordsByURI)
682583 if err != nil {
683584 log.Warn().Err(err).Str("uri", record.URI).Msg("failed to convert record to feed item")
···689590 items = append(items, item)
690591 }
691592692692- // Sort based on query
693693- switch q.Sort {
694694- case FeedSortPopular:
695695- sort.Slice(items, func(i, j int) bool {
696696- scoreI := items[i].LikeCount*3 + items[i].CommentCount*2
697697- scoreJ := items[j].LikeCount*3 + items[j].CommentCount*2
698698- if scoreI != scoreJ {
699699- return scoreI > scoreJ
700700- }
701701- return items[i].Timestamp.After(items[j].Timestamp)
702702- })
703703- default: // FeedSortRecent
704704- sort.Slice(items, func(i, j int) bool {
705705- return items[i].Timestamp.After(items[j].Timestamp)
706706- })
707707- }
708708-709709- // Build result with cursor
710710- result := &FeedResult{Items: items}
711711-712712- if len(items) > q.Limit {
713713- result.Items = items[:q.Limit]
714714- // Cursor is the last time key we read from the DB
715715- if lastTimeKey != nil {
716716- result.NextCursor = encodeCursor(lastTimeKey)
717717- }
718718- }
719719-720720- return result, nil
721721-}
722722-723723-// extractURIFromCollectionKey extracts the URI from a collection key
724724-// Format: {collection}:{inverted_timestamp_8bytes}:{uri}
725725-func extractURIFromCollectionKey(key []byte, collection string) string {
726726- // prefix is collection + ":"
727727- prefixLen := len(collection) + 1
728728- // Then 8 bytes of timestamp + ":"
729729- minLen := prefixLen + 8 + 1 + 1 // prefix + timestamp + ":" + at least 1 char
730730- if len(key) < minLen {
731731- return ""
732732- }
733733- return string(key[prefixLen+9:])
734734-}
735735-736736-func encodeCursor(key []byte) string {
737737- return hex.EncodeToString(key)
738738-}
739739-740740-func decodeCursor(s string) ([]byte, error) {
741741- return hex.DecodeString(s)
593593+ return items, nil
742594}
743595744596// recordToFeedItem converts an IndexedRecord to a FeedItem
···757609 profile, err := idx.GetProfile(ctx, record.DID)
758610 if err != nil {
759611 log.Warn().Err(err).Str("did", record.DID).Msg("failed to get profile")
760760- // Use a placeholder profile
761612 profile = &atproto.Profile{
762613 DID: record.DID,
763763- Handle: record.DID, // Use DID as handle if we can't resolve
614614+ Handle: record.DID,
764615 }
765616 }
766617 item.Author = profile
···869720 item.Brewer = brewer
870721871722 case atproto.NSIDLike:
872872- // This should never be reached - likes are filtered before calling recordToFeedItem
873723 return nil, fmt.Errorf("unexpected: likes should be filtered before conversion")
874724875725 default:
···896746 idx.profileCacheMu.RUnlock()
897747898748 // Check persistent cache
899899- var cached *CachedProfile
900900- err := idx.db.View(func(tx *bolt.Tx) error {
901901- b := tx.Bucket(BucketProfiles)
902902- data := b.Get([]byte(did))
903903- if data == nil {
904904- return nil
749749+ var dataStr, expiresAtStr string
750750+ err := idx.db.QueryRow(`SELECT data, expires_at FROM profiles WHERE did = ?`, did).Scan(&dataStr, &expiresAtStr)
751751+ if err == nil {
752752+ expiresAt, _ := time.Parse(time.RFC3339Nano, expiresAtStr)
753753+ if time.Now().Before(expiresAt) {
754754+ cached := &CachedProfile{}
755755+ if err := json.Unmarshal([]byte(dataStr), cached); err == nil {
756756+ idx.profileCacheMu.Lock()
757757+ idx.profileCache[did] = cached
758758+ idx.profileCacheMu.Unlock()
759759+ return cached.Profile, nil
760760+ }
905761 }
906906- cached = &CachedProfile{}
907907- return json.Unmarshal(data, cached)
908908- })
909909- if err == nil && cached != nil && time.Now().Before(cached.ExpiresAt) {
910910- // Update in-memory cache
911911- idx.profileCacheMu.Lock()
912912- idx.profileCache[did] = cached
913913- idx.profileCacheMu.Unlock()
914914- return cached.Profile, nil
915762 }
916763917764 // Fetch from API
···922769923770 // Cache the result
924771 now := time.Now()
925925- cached = &CachedProfile{
772772+ cached := &CachedProfile{
926773 Profile: profile,
927774 CachedAt: now,
928775 ExpiresAt: now.Add(idx.profileTTL),
···935782936783 // Persist to database
937784 data, _ := json.Marshal(cached)
938938- _ = idx.db.Update(func(tx *bolt.Tx) error {
939939- b := tx.Bucket(BucketProfiles)
940940- return b.Put([]byte(did), data)
941941- })
785785+ _, _ = idx.db.Exec(`INSERT OR REPLACE INTO profiles (did, data, expires_at) VALUES (?, ?, ?)`,
786786+ did, string(data), cached.ExpiresAt.Format(time.RFC3339Nano))
942787943788 return profile, nil
944789}
945790946791// GetKnownDIDs returns all DIDs that have created Arabica records
947792func (idx *FeedIndex) GetKnownDIDs() ([]string, error) {
793793+ rows, err := idx.db.Query(`SELECT did FROM known_dids`)
794794+ if err != nil {
795795+ return nil, err
796796+ }
797797+ defer rows.Close()
798798+948799 var dids []string
949949- err := idx.db.View(func(tx *bolt.Tx) error {
950950- b := tx.Bucket(BucketKnownDIDs)
951951- return b.ForEach(func(k, v []byte) error {
952952- dids = append(dids, string(k))
953953- return nil
954954- })
955955- })
956956- return dids, err
800800+ for rows.Next() {
801801+ var did string
802802+ if err := rows.Scan(&did); err != nil {
803803+ continue
804804+ }
805805+ dids = append(dids, did)
806806+ }
807807+ return dids, rows.Err()
808808+}
809809+810810+// ListRecordsByCollection returns all indexed records for a given collection.
811811+func (idx *FeedIndex) ListRecordsByCollection(collection string) ([]IndexedRecord, error) {
812812+ rows, err := idx.db.Query(`
813813+ SELECT uri, did, collection, rkey, record, cid, indexed_at, created_at
814814+ FROM records WHERE collection = ? ORDER BY created_at DESC
815815+ `, collection)
816816+ if err != nil {
817817+ return nil, err
818818+ }
819819+ defer rows.Close()
820820+821821+ var records []IndexedRecord
822822+ for rows.Next() {
823823+ var rec IndexedRecord
824824+ var recordStr, indexedAtStr, createdAtStr string
825825+ if err := rows.Scan(&rec.URI, &rec.DID, &rec.Collection, &rec.RKey,
826826+ &recordStr, &rec.CID, &indexedAtStr, &createdAtStr); err != nil {
827827+ continue
828828+ }
829829+ rec.Record = json.RawMessage(recordStr)
830830+ rec.IndexedAt, _ = time.Parse(time.RFC3339Nano, indexedAtStr)
831831+ rec.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
832832+ records = append(records, rec)
833833+ }
834834+ return records, rows.Err()
957835}
958836959837// RecordCount returns the total number of indexed records
960838func (idx *FeedIndex) RecordCount() int {
961839 var count int
962962- _ = idx.db.View(func(tx *bolt.Tx) error {
963963- b := tx.Bucket(BucketRecords)
964964- count = b.Stats().KeyN
965965- return nil
966966- })
840840+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM records`).Scan(&count)
967841 return count
968842}
969843970844// KnownDIDCount returns the number of unique DIDs in the index
971845func (idx *FeedIndex) KnownDIDCount() int {
972846 var count int
973973- _ = idx.db.View(func(tx *bolt.Tx) error {
974974- b := tx.Bucket(BucketKnownDIDs)
975975- count = b.Stats().KeyN
976976- return nil
977977- })
847847+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM known_dids`).Scan(&count)
978848 return count
979849}
980850981851// TotalLikeCount returns the total number of likes indexed
982852func (idx *FeedIndex) TotalLikeCount() int {
983853 var count int
984984- _ = idx.db.View(func(tx *bolt.Tx) error {
985985- b := tx.Bucket(BucketLikes)
986986- count = b.Stats().KeyN
987987- return nil
988988- })
854854+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM likes`).Scan(&count)
989855 return count
990856}
991857992858// TotalCommentCount returns the total number of comments indexed
993859func (idx *FeedIndex) TotalCommentCount() int {
994860 var count int
995995- _ = idx.db.View(func(tx *bolt.Tx) error {
996996- b := tx.Bucket(BucketCommentsByActor)
997997- count = b.Stats().KeyN
998998- return nil
999999- })
861861+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM comments`).Scan(&count)
1000862 return count
1001863}
10028641003865// RecordCountByCollection returns a breakdown of record counts by collection type
1004866func (idx *FeedIndex) RecordCountByCollection() map[string]int {
1005867 counts := make(map[string]int)
10061006- _ = idx.db.View(func(tx *bolt.Tx) error {
10071007- records := tx.Bucket(BucketRecords)
10081008- return records.ForEach(func(k, v []byte) error {
10091009- var record IndexedRecord
10101010- if err := json.Unmarshal(v, &record); err != nil {
10111011- return nil
10121012- }
10131013- counts[record.Collection]++
10141014- return nil
10151015- })
10161016- })
868868+ rows, err := idx.db.Query(`SELECT collection, COUNT(*) FROM records GROUP BY collection`)
869869+ if err != nil {
870870+ return counts
871871+ }
872872+ defer rows.Close()
873873+ for rows.Next() {
874874+ var collection string
875875+ var count int
876876+ if err := rows.Scan(&collection, &count); err == nil {
877877+ counts[collection] = count
878878+ }
879879+ }
1017880 return counts
10181018-}
10191019-10201020-// Helper functions
10211021-10221022-func makeTimeKey(t time.Time, uri string) []byte {
10231023- // Format: inverted timestamp (for reverse chronological order) + ":" + uri
10241024- // Use nanoseconds for uniqueness
10251025- inverted := ^uint64(t.UnixNano())
10261026- buf := make([]byte, 8)
10271027- binary.BigEndian.PutUint64(buf, inverted)
10281028- return append(buf, []byte(":"+uri)...)
10291029-}
10301030-10311031-func extractURIFromTimeKey(key []byte) string {
10321032- if len(key) < 10 { // 8 bytes timestamp + ":" + at least 1 char
10331033- return ""
10341034- }
10351035- // Skip 8 bytes timestamp + 1 byte ":"
10361036- return string(key[9:])
1037881}
10388821039883func formatTimeAgo(t time.Time) string {
···10779211078922// IsBackfilled checks if a DID has already been backfilled
1079923func (idx *FeedIndex) IsBackfilled(did string) bool {
10801080- var exists bool
10811081- _ = idx.db.View(func(tx *bolt.Tx) error {
10821082- b := tx.Bucket(BucketBackfilled)
10831083- exists = b.Get([]byte(did)) != nil
10841084- return nil
10851085- })
10861086- return exists
924924+ var exists int
925925+ err := idx.db.QueryRow(`SELECT 1 FROM backfilled WHERE did = ?`, did).Scan(&exists)
926926+ return err == nil
1087927}
10889281089929// MarkBackfilled marks a DID as backfilled with current timestamp
1090930func (idx *FeedIndex) MarkBackfilled(did string) error {
10911091- return idx.db.Update(func(tx *bolt.Tx) error {
10921092- b := tx.Bucket(BucketBackfilled)
10931093- timestamp := []byte(time.Now().Format(time.RFC3339))
10941094- return b.Put([]byte(did), timestamp)
10951095- })
931931+ _, err := idx.db.Exec(`INSERT OR IGNORE INTO backfilled (did, backfilled_at) VALUES (?, ?)`,
932932+ did, time.Now().Format(time.RFC3339))
933933+ return err
1096934}
10979351098936// BackfillUser fetches all existing records for a DID and adds them to the index
10991099-// Returns early if the DID has already been backfilled
1100937func (idx *FeedIndex) BackfillUser(ctx context.Context, did string) error {
11011101- // Check if already backfilled
1102938 if idx.IsBackfilled(did) {
1103939 log.Debug().Str("did", did).Msg("DID already backfilled, skipping")
1104940 return nil
···1115951 }
11169521117953 for _, record := range records.Records {
11181118- // Extract rkey from URI
1119954 parts := strings.Split(record.URI, "/")
1120955 if len(parts) < 3 {
1121956 continue
···1133968 }
1134969 recordCount++
113597011361136- // Index likes and comments into their specialized buckets
1137971 switch collection {
1138972 case atproto.NSIDLike:
1139973 if subject, ok := record.Value["subject"].(map[string]interface{}); ok {
···11701004 }
11711005 }
1172100611731173- // Mark as backfilled
11741007 if err := idx.MarkBackfilled(did); err != nil {
11751008 log.Warn().Err(err).Str("did", did).Msg("failed to mark DID as backfilled")
11761009 }
···1183101611841017// UpsertLike adds or updates a like in the index
11851018func (idx *FeedIndex) UpsertLike(actorDID, rkey, subjectURI string) error {
11861186- return idx.db.Update(func(tx *bolt.Tx) error {
11871187- likes := tx.Bucket(BucketLikes)
11881188- likeCounts := tx.Bucket(BucketLikeCounts)
11891189- likesByActor := tx.Bucket(BucketLikesByActor)
11901190-11911191- // Key format: {subject_uri}:{actor_did}
11921192- likeKey := []byte(subjectURI + ":" + actorDID)
11931193-11941194- // Check if this like already exists
11951195- existingRKey := likes.Get(likeKey)
11961196- if existingRKey != nil {
11971197- // Already exists, nothing to do
11981198- return nil
11991199- }
12001200-12011201- // Store the like mapping
12021202- if err := likes.Put(likeKey, []byte(rkey)); err != nil {
12031203- return err
12041204- }
12051205-12061206- // Store by actor for reverse lookup
12071207- actorKey := []byte(actorDID + ":" + subjectURI)
12081208- if err := likesByActor.Put(actorKey, []byte(rkey)); err != nil {
12091209- return err
12101210- }
12111211-12121212- // Increment the like count
12131213- countKey := []byte(subjectURI)
12141214- currentCount := uint64(0)
12151215- if countData := likeCounts.Get(countKey); len(countData) == 8 {
12161216- currentCount = binary.BigEndian.Uint64(countData)
12171217- }
12181218- currentCount++
12191219- countBuf := make([]byte, 8)
12201220- binary.BigEndian.PutUint64(countBuf, currentCount)
12211221- return likeCounts.Put(countKey, countBuf)
12221222- })
10191019+ _, err := idx.db.Exec(`INSERT OR IGNORE INTO likes (subject_uri, actor_did, rkey) VALUES (?, ?, ?)`,
10201020+ subjectURI, actorDID, rkey)
10211021+ return err
12231022}
1224102312251024// DeleteLike removes a like from the index
12261025func (idx *FeedIndex) DeleteLike(actorDID, subjectURI string) error {
12271227- return idx.db.Update(func(tx *bolt.Tx) error {
12281228- likes := tx.Bucket(BucketLikes)
12291229- likeCounts := tx.Bucket(BucketLikeCounts)
12301230- likesByActor := tx.Bucket(BucketLikesByActor)
12311231-12321232- // Key format: {subject_uri}:{actor_did}
12331233- likeKey := []byte(subjectURI + ":" + actorDID)
12341234-12351235- // Check if like exists
12361236- if likes.Get(likeKey) == nil {
12371237- // Doesn't exist, nothing to do
12381238- return nil
12391239- }
12401240-12411241- // Delete the like mapping
12421242- if err := likes.Delete(likeKey); err != nil {
12431243- return err
12441244- }
12451245-12461246- // Delete by actor lookup
12471247- actorKey := []byte(actorDID + ":" + subjectURI)
12481248- if err := likesByActor.Delete(actorKey); err != nil {
12491249- return err
12501250- }
12511251-12521252- // Decrement the like count
12531253- countKey := []byte(subjectURI)
12541254- currentCount := uint64(0)
12551255- if countData := likeCounts.Get(countKey); len(countData) == 8 {
12561256- currentCount = binary.BigEndian.Uint64(countData)
12571257- }
12581258- if currentCount > 0 {
12591259- currentCount--
12601260- }
12611261- if currentCount == 0 {
12621262- return likeCounts.Delete(countKey)
12631263- }
12641264- countBuf := make([]byte, 8)
12651265- binary.BigEndian.PutUint64(countBuf, currentCount)
12661266- return likeCounts.Put(countKey, countBuf)
12671267- })
10261026+ _, err := idx.db.Exec(`DELETE FROM likes WHERE subject_uri = ? AND actor_did = ?`,
10271027+ subjectURI, actorDID)
10281028+ return err
12681029}
1269103012701031// GetLikeCount returns the number of likes for a record
12711032func (idx *FeedIndex) GetLikeCount(subjectURI string) int {
12721272- var count uint64
12731273- _ = idx.db.View(func(tx *bolt.Tx) error {
12741274- likeCounts := tx.Bucket(BucketLikeCounts)
12751275- countData := likeCounts.Get([]byte(subjectURI))
12761276- if len(countData) == 8 {
12771277- count = binary.BigEndian.Uint64(countData)
12781278- }
12791279- return nil
12801280- })
12811281- return int(count)
10331033+ var count int
10341034+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM likes WHERE subject_uri = ?`, subjectURI).Scan(&count)
10351035+ return count
12821036}
1283103712841038// HasUserLiked checks if a user has liked a specific record
12851039func (idx *FeedIndex) HasUserLiked(actorDID, subjectURI string) bool {
12861286- var exists bool
12871287- _ = idx.db.View(func(tx *bolt.Tx) error {
12881288- likesByActor := tx.Bucket(BucketLikesByActor)
12891289- actorKey := []byte(actorDID + ":" + subjectURI)
12901290- exists = likesByActor.Get(actorKey) != nil
12911291- return nil
12921292- })
12931293- return exists
10401040+ var exists int
10411041+ err := idx.db.QueryRow(`SELECT 1 FROM likes WHERE actor_did = ? AND subject_uri = ? LIMIT 1`,
10421042+ actorDID, subjectURI).Scan(&exists)
10431043+ return err == nil
12941044}
1295104512961046// GetUserLikeRKey returns the rkey of a user's like for a specific record, or empty string if not found
12971047func (idx *FeedIndex) GetUserLikeRKey(actorDID, subjectURI string) string {
12981048 var rkey string
12991299- _ = idx.db.View(func(tx *bolt.Tx) error {
13001300- likesByActor := tx.Bucket(BucketLikesByActor)
13011301- actorKey := []byte(actorDID + ":" + subjectURI)
13021302- if data := likesByActor.Get(actorKey); data != nil {
13031303- rkey = string(data)
13041304- }
13051305- return nil
13061306- })
10491049+ err := idx.db.QueryRow(`SELECT rkey FROM likes WHERE actor_did = ? AND subject_uri = ?`,
10501050+ actorDID, subjectURI).Scan(&rkey)
10511051+ if err != nil {
10521052+ return ""
10531053+ }
13071054 return rkey
13081055}
13091056···1332107913331080// UpsertComment adds or updates a comment in the index
13341081func (idx *FeedIndex) UpsertComment(actorDID, rkey, subjectURI, parentURI, cid, text string, createdAt time.Time) error {
13351335- return idx.db.Update(func(tx *bolt.Tx) error {
13361336- comments := tx.Bucket(BucketComments)
13371337- commentCounts := tx.Bucket(BucketCommentCounts)
13381338- commentsByActor := tx.Bucket(BucketCommentsByActor)
13391339- commentChildren := tx.Bucket(BucketCommentChildren)
13401340-13411341- // Key format: {subject_uri}:{timestamp}:{actor_did}:{rkey}
13421342- // Using timestamp for chronological ordering
13431343- commentKey := []byte(subjectURI + ":" + createdAt.Format(time.RFC3339Nano) + ":" + actorDID + ":" + rkey)
13441344-13451345- // Check if this comment already exists (by actor key)
13461346- actorKey := []byte(actorDID + ":" + rkey)
13471347- existingSubject := commentsByActor.Get(actorKey)
13481348- isNew := existingSubject == nil
13491349-13501350- // If the comment already exists, delete the old entry from BucketComments
13511351- // to prevent duplicates (the key includes timestamp which may differ between calls)
13521352- if !isNew {
13531353- oldPrefix := []byte(string(existingSubject) + ":")
13541354- suffix := ":" + actorDID + ":" + rkey
13551355- cur := comments.Cursor()
13561356- for k, _ := cur.Seek(oldPrefix); k != nil && strings.HasPrefix(string(k), string(oldPrefix)); k, _ = cur.Next() {
13571357- if strings.HasSuffix(string(k), suffix) {
13581358- _ = comments.Delete(k)
13591359- break
13601360- }
13611361- }
13621362- }
13631363-13641364- // Extract parent rkey from parent URI if present
13651365- var parentRKey string
13661366- if parentURI != "" {
13671367- parts := strings.Split(parentURI, "/")
13681368- if len(parts) > 0 {
13691369- parentRKey = parts[len(parts)-1]
13701370- }
13711371- }
13721372-13731373- // Store comment data as JSON
13741374- commentData := IndexedComment{
13751375- RKey: rkey,
13761376- SubjectURI: subjectURI,
13771377- Text: text,
13781378- ActorDID: actorDID,
13791379- CreatedAt: createdAt,
13801380- ParentURI: parentURI,
13811381- ParentRKey: parentRKey,
13821382- CID: cid,
13831383- }
13841384- commentJSON, err := json.Marshal(commentData)
13851385- if err != nil {
13861386- return fmt.Errorf("failed to marshal comment: %w", err)
13871387- }
13881388-13891389- // Store comment
13901390- if err := comments.Put(commentKey, commentJSON); err != nil {
13911391- return fmt.Errorf("failed to store comment: %w", err)
13921392- }
13931393-13941394- // Store actor lookup
13951395- if err := commentsByActor.Put(actorKey, []byte(subjectURI)); err != nil {
13961396- return fmt.Errorf("failed to store comment by actor: %w", err)
13971397- }
13981398-13991399- // Store parent-child relationship if this is a reply
14001400- if parentURI != "" {
14011401- childKey := []byte(parentURI + ":" + rkey)
14021402- if err := commentChildren.Put(childKey, []byte(actorDID)); err != nil {
14031403- return fmt.Errorf("failed to store comment child: %w", err)
14041404- }
14051405- }
14061406-14071407- // Increment count only if this is a new comment
14081408- if isNew {
14091409- countKey := []byte(subjectURI)
14101410- var count uint64
14111411- if countData := commentCounts.Get(countKey); len(countData) == 8 {
14121412- count = binary.BigEndian.Uint64(countData)
14131413- }
14141414- count++
14151415- countBytes := make([]byte, 8)
14161416- binary.BigEndian.PutUint64(countBytes, count)
14171417- if err := commentCounts.Put(countKey, countBytes); err != nil {
14181418- return fmt.Errorf("failed to update comment count: %w", err)
14191419- }
10821082+ // Extract parent rkey from parent URI if present
10831083+ var parentRKey string
10841084+ if parentURI != "" {
10851085+ parts := strings.Split(parentURI, "/")
10861086+ if len(parts) > 0 {
10871087+ parentRKey = parts[len(parts)-1]
14201088 }
10891089+ }
1421109014221422- return nil
14231423- })
10911091+ _, err := idx.db.Exec(`
10921092+ INSERT INTO comments (actor_did, rkey, subject_uri, parent_uri, parent_rkey, cid, text, created_at)
10931093+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)
10941094+ ON CONFLICT(actor_did, rkey) DO UPDATE SET
10951095+ subject_uri = excluded.subject_uri,
10961096+ parent_uri = excluded.parent_uri,
10971097+ parent_rkey = excluded.parent_rkey,
10981098+ cid = excluded.cid,
10991099+ text = excluded.text,
11001100+ created_at = excluded.created_at
11011101+ `, actorDID, rkey, subjectURI, parentURI, parentRKey, cid, text, createdAt.Format(time.RFC3339Nano))
11021102+ return err
14241103}
1425110414261105// DeleteComment removes a comment from the index
14271106func (idx *FeedIndex) DeleteComment(actorDID, rkey, subjectURI string) error {
14281428- return idx.db.Update(func(tx *bolt.Tx) error {
14291429- comments := tx.Bucket(BucketComments)
14301430- commentCounts := tx.Bucket(BucketCommentCounts)
14311431- commentsByActor := tx.Bucket(BucketCommentsByActor)
14321432- commentChildren := tx.Bucket(BucketCommentChildren)
14331433-14341434- actorKey := []byte(actorDID + ":" + rkey)
14351435-14361436- // Get subject URI from the actor index, or use the provided one
14371437- existingSubject := commentsByActor.Get(actorKey)
14381438- if existingSubject != nil && subjectURI == "" {
14391439- subjectURI = string(existingSubject)
14401440- }
14411441-14421442- // Find and delete the comment from BucketComments
14431443- var parentURI string
14441444- suffix := ":" + actorDID + ":" + rkey
14451445-14461446- if subjectURI != "" {
14471447- // Fast path: we know the subject URI, scan only that prefix
14481448- prefix := []byte(subjectURI + ":")
14491449- c := comments.Cursor()
14501450- for k, v := c.Seek(prefix); k != nil && strings.HasPrefix(string(k), string(prefix)); k, v = c.Next() {
14511451- if strings.HasSuffix(string(k), suffix) {
14521452- var comment IndexedComment
14531453- if err := json.Unmarshal(v, &comment); err == nil {
14541454- parentURI = comment.ParentURI
14551455- }
14561456- if err := comments.Delete(k); err != nil {
14571457- return fmt.Errorf("failed to delete comment: %w", err)
14581458- }
14591459- break
14601460- }
14611461- }
14621462- } else {
14631463- // Slow path: scan all comments to find this actor+rkey
14641464- c := comments.Cursor()
14651465- for k, v := c.First(); k != nil; k, v = c.Next() {
14661466- if strings.HasSuffix(string(k), suffix) {
14671467- var comment IndexedComment
14681468- if err := json.Unmarshal(v, &comment); err == nil {
14691469- parentURI = comment.ParentURI
14701470- subjectURI = comment.SubjectURI
14711471- }
14721472- if err := comments.Delete(k); err != nil {
14731473- return fmt.Errorf("failed to delete comment: %w", err)
14741474- }
14751475- break
14761476- }
14771477- }
14781478- }
14791479-14801480- // Delete actor lookup
14811481- if existingSubject != nil {
14821482- if err := commentsByActor.Delete(actorKey); err != nil {
14831483- return fmt.Errorf("failed to delete comment by actor: %w", err)
14841484- }
14851485- }
14861486-14871487- // Delete parent-child relationship if this was a reply
14881488- if parentURI != "" {
14891489- childKey := []byte(parentURI + ":" + rkey)
14901490- if err := commentChildren.Delete(childKey); err != nil {
14911491- return fmt.Errorf("failed to delete comment child: %w", err)
14921492- }
14931493- }
14941494-14951495- // Decrement count
14961496- countKey := []byte(subjectURI)
14971497- var count uint64
14981498- if countData := commentCounts.Get(countKey); len(countData) == 8 {
14991499- count = binary.BigEndian.Uint64(countData)
15001500- }
15011501- if count > 0 {
15021502- count--
15031503- }
15041504- countBytes := make([]byte, 8)
15051505- binary.BigEndian.PutUint64(countBytes, count)
15061506- if err := commentCounts.Put(countKey, countBytes); err != nil {
15071507- return fmt.Errorf("failed to update comment count: %w", err)
15081508- }
15091509-15101510- return nil
15111511- })
11071107+ _, err := idx.db.Exec(`DELETE FROM comments WHERE actor_did = ? AND rkey = ?`, actorDID, rkey)
11081108+ return err
15121109}
1513111015141111// GetCommentCount returns the number of comments on a record
15151112func (idx *FeedIndex) GetCommentCount(subjectURI string) int {
15161516- var count uint64
15171517- _ = idx.db.View(func(tx *bolt.Tx) error {
15181518- commentCounts := tx.Bucket(BucketCommentCounts)
15191519- countData := commentCounts.Get([]byte(subjectURI))
15201520- if len(countData) == 8 {
15211521- count = binary.BigEndian.Uint64(countData)
15221522- }
15231523- return nil
15241524- })
15251525- return int(count)
11131113+ var count int
11141114+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM comments WHERE subject_uri = ?`, subjectURI).Scan(&count)
11151115+ return count
15261116}
1527111715281118// GetCommentsForSubject returns all comments for a specific record, ordered by creation time
15291529-// This returns a flat list of comments without threading
15301119func (idx *FeedIndex) GetCommentsForSubject(ctx context.Context, subjectURI string, limit int, viewerDID string) []IndexedComment {
15311531- var comments []IndexedComment
15321532- _ = idx.db.View(func(tx *bolt.Tx) error {
15331533- bucket := tx.Bucket(BucketComments)
15341534- prefix := []byte(subjectURI + ":")
15351535- c := bucket.Cursor()
11201120+ query := `SELECT actor_did, rkey, subject_uri, parent_uri, parent_rkey, cid, text, created_at
11211121+ FROM comments WHERE subject_uri = ? ORDER BY created_at`
11221122+ var args []any
11231123+ args = append(args, subjectURI)
11241124+ if limit > 0 {
11251125+ query += ` LIMIT ?`
11261126+ args = append(args, limit)
11271127+ }
1536112815371537- for k, v := c.Seek(prefix); k != nil && strings.HasPrefix(string(k), string(prefix)); k, v = c.Next() {
15381538- var comment IndexedComment
15391539- if err := json.Unmarshal(v, &comment); err != nil {
15401540- continue
15411541- }
15421542- comments = append(comments, comment)
15431543- if limit > 0 && len(comments) >= limit {
15441544- break
15451545- }
15461546- }
11291129+ rows, err := idx.db.QueryContext(ctx, query, args...)
11301130+ if err != nil {
15471131 return nil
15481548- })
11321132+ }
11331133+ defer rows.Close()
11341134+11351135+ var comments []IndexedComment
11361136+ for rows.Next() {
11371137+ var c IndexedComment
11381138+ var createdAtStr string
11391139+ if err := rows.Scan(&c.ActorDID, &c.RKey, &c.SubjectURI, &c.ParentURI, &c.ParentRKey,
11401140+ &c.CID, &c.Text, &createdAtStr); err != nil {
11411141+ continue
11421142+ }
11431143+ c.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
11441144+ comments = append(comments, c)
11451145+ }
1549114615501147 // Populate profile and like info for each comment
15511148 for i := range comments {
15521149 profile, err := idx.GetProfile(ctx, comments[i].ActorDID)
15531150 if err != nil {
15541554- // Use DID as fallback handle
15551151 comments[i].Handle = comments[i].ActorDID
15561152 } else {
15571153 comments[i].Handle = profile.Handle
···15701166}
1571116715721168// GetThreadedCommentsForSubject returns comments for a record in threaded order with depth
15731573-// Comments are returned in depth-first order (parent followed by children)
15741574-// Visual depth is capped at 2 levels for display purposes
15751169func (idx *FeedIndex) GetThreadedCommentsForSubject(ctx context.Context, subjectURI string, limit int, viewerDID string) []IndexedComment {
15761576- // First get all comments for this subject
15771577- allComments := idx.GetCommentsForSubject(ctx, subjectURI, 0, viewerDID) // Get all, we'll limit after threading
11701170+ allComments := idx.GetCommentsForSubject(ctx, subjectURI, 0, viewerDID)
1578117115791172 if len(allComments) == 0 {
15801173 return nil
···15931186 for i := range allComments {
15941187 comment := &allComments[i]
15951188 if comment.ParentRKey == "" {
15961596- // Top-level comment
15971189 topLevel = append(topLevel, comment)
15981190 } else {
15991599- // Reply - add to parent's children
16001191 childrenMap[comment.ParentRKey] = append(childrenMap[comment.ParentRKey], comment)
16011192 }
16021193 }
···16201211 if limit > 0 && len(result) >= limit {
16211212 return
16221213 }
16231623- // Cap visual depth at 2 for display
16241214 visualDepth := depth
16251215 if visualDepth > 2 {
16261216 visualDepth = 2
···16281218 comment.Depth = visualDepth
16291219 result = append(result, *comment)
1630122016311631- // Add children (if any)
16321221 if children, ok := childrenMap[comment.RKey]; ok {
16331222 for _, child := range children {
16341223 flatten(child, depth+1)
+75-147
internal/firehose/notifications.go
···11package firehose
2233import (
44- "encoding/json"
54 "fmt"
65 "strings"
76 "time"
···98 "arabica/internal/models"
1091110 "github.com/rs/zerolog/log"
1212- bolt "go.etcd.io/bbolt"
1313-)
1414-1515-// Bucket names for notifications
1616-var (
1717- // BucketNotifications stores notifications: {target_did}:{inverted_timestamp}:{id} -> {Notification JSON}
1818- BucketNotifications = []byte("notifications")
1919-2020- // BucketNotificationsMeta stores per-user metadata: {target_did}:last_read -> {timestamp RFC3339}
2121- BucketNotificationsMeta = []byte("notifications_meta")
2211)
23122413// CreateNotification stores a notification for the target user.
2525-// Deduplicates by (type + actorDID + subjectURI) to prevent duplicates from backfills.
1414+// Deduplicates by (type + actorDID + subjectURI) via unique index.
2615// Self-notifications (actorDID == targetDID) are silently skipped.
2716func (idx *FeedIndex) CreateNotification(targetDID string, notif models.Notification) error {
2817 if targetDID == "" || targetDID == notif.ActorDID {
2918 return nil // skip self-notifications
3019 }
31203232- return idx.db.Update(func(tx *bolt.Tx) error {
3333- b := tx.Bucket(BucketNotifications)
3434-3535- // Deduplication: scan for existing notification with same type+actor+subject
3636- prefix := []byte(targetDID + ":")
3737- c := b.Cursor()
3838- for k, v := c.Seek(prefix); k != nil && strings.HasPrefix(string(k), string(prefix)); k, v = c.Next() {
3939- var existing models.Notification
4040- if err := json.Unmarshal(v, &existing); err != nil {
4141- continue
4242- }
4343- if existing.Type == notif.Type && existing.ActorDID == notif.ActorDID && existing.SubjectURI == notif.SubjectURI {
4444- return nil // duplicate, skip
4545- }
4646- }
4747-4848- // Generate ID from timestamp
4949- if notif.ID == "" {
5050- notif.ID = fmt.Sprintf("%d", notif.CreatedAt.UnixNano())
5151- }
5252-5353- data, err := json.Marshal(notif)
5454- if err != nil {
5555- return fmt.Errorf("failed to marshal notification: %w", err)
5656- }
2121+ // Generate ID from timestamp
2222+ if notif.ID == "" {
2323+ notif.ID = fmt.Sprintf("%d", notif.CreatedAt.UnixNano())
2424+ }
57255858- // Key: {target_did}:{inverted_timestamp}:{id} for reverse chronological order
5959- inverted := ^uint64(notif.CreatedAt.UnixNano())
6060- key := fmt.Sprintf("%s:%016x:%s", targetDID, inverted, notif.ID)
6161- return b.Put([]byte(key), data)
6262- })
2626+ // INSERT OR IGNORE deduplicates via the unique index on (target_did, type, actor_did, subject_uri)
2727+ _, err := idx.db.Exec(`
2828+ INSERT OR IGNORE INTO notifications (id, target_did, type, actor_did, subject_uri, created_at)
2929+ VALUES (?, ?, ?, ?, ?, ?)
3030+ `, notif.ID, targetDID, string(notif.Type), notif.ActorDID, notif.SubjectURI,
3131+ notif.CreatedAt.Format(time.RFC3339Nano))
3232+ return err
6333}
64346535// GetNotifications returns notifications for a user, newest first.
6636// Uses cursor-based pagination. Returns notifications, next cursor, and error.
6737func (idx *FeedIndex) GetNotifications(targetDID string, limit int, cursor string) ([]models.Notification, string, error) {
6868- var notifications []models.Notification
6969- var nextCursor string
7070-7138 if limit <= 0 {
7239 limit = 20
7340 }
74417575- // Get last_read timestamp for marking read status
7642 lastRead := idx.getLastRead(targetDID)
77437878- err := idx.db.View(func(tx *bolt.Tx) error {
7979- b := tx.Bucket(BucketNotifications)
8080- c := b.Cursor()
4444+ var args []any
4545+ query := `SELECT id, type, actor_did, subject_uri, created_at
4646+ FROM notifications WHERE target_did = ?`
4747+ args = append(args, targetDID)
81488282- prefix := []byte(targetDID + ":")
8383- var k, v []byte
4949+ if cursor != "" {
5050+ query += ` AND created_at < ?`
5151+ args = append(args, cursor)
5252+ }
84538585- if cursor != "" {
8686- // Seek to cursor position, then advance past it
8787- k, v = c.Seek([]byte(cursor))
8888- if k != nil && string(k) == cursor {
8989- k, v = c.Next()
9090- }
9191- } else {
9292- k, v = c.Seek(prefix)
9393- }
5454+ query += ` ORDER BY created_at DESC LIMIT ?`
5555+ // Fetch one extra to determine if there's a next page
5656+ args = append(args, limit+1)
94579595- var lastKey []byte
9696- count := 0
9797- for ; k != nil && strings.HasPrefix(string(k), string(prefix)); k, v = c.Next() {
9898- if count >= limit {
9999- // There are more items beyond our limit
100100- nextCursor = string(lastKey)
101101- break
102102- }
5858+ rows, err := idx.db.Query(query, args...)
5959+ if err != nil {
6060+ return nil, "", err
6161+ }
6262+ defer rows.Close()
10363104104- var notif models.Notification
105105- if err := json.Unmarshal(v, ¬if); err != nil {
106106- continue
107107- }
108108-109109- // Determine read status based on last_read timestamp
110110- if !lastRead.IsZero() && !notif.CreatedAt.After(lastRead) {
111111- notif.Read = true
112112- }
6464+ var notifications []models.Notification
6565+ for rows.Next() {
6666+ var notif models.Notification
6767+ var typeStr, createdAtStr string
6868+ if err := rows.Scan(¬if.ID, &typeStr, ¬if.ActorDID, ¬if.SubjectURI, &createdAtStr); err != nil {
6969+ continue
7070+ }
7171+ notif.Type = models.NotificationType(typeStr)
7272+ notif.CreatedAt, _ = time.Parse(time.RFC3339Nano, createdAtStr)
11373114114- notifications = append(notifications, notif)
115115- lastKey = make([]byte, len(k))
116116- copy(lastKey, k)
117117- count++
7474+ if !lastRead.IsZero() && !notif.CreatedAt.After(lastRead) {
7575+ notif.Read = true
11876 }
11977120120- return nil
121121- })
7878+ notifications = append(notifications, notif)
7979+ }
12280123123- return notifications, nextCursor, err
8181+ var nextCursor string
8282+ if len(notifications) > limit {
8383+ // There are more results
8484+ last := notifications[limit-1]
8585+ nextCursor = last.CreatedAt.Format(time.RFC3339Nano)
8686+ notifications = notifications[:limit]
8787+ }
8888+8989+ return notifications, nextCursor, rows.Err()
12490}
1259112692// GetUnreadCount returns the number of unread notifications for a user.
···13298 lastRead := idx.getLastRead(targetDID)
13399134100 var count int
135135- _ = idx.db.View(func(tx *bolt.Tx) error {
136136- b := tx.Bucket(BucketNotifications)
137137- c := b.Cursor()
138138-139139- prefix := []byte(targetDID + ":")
140140- for k, v := c.Seek(prefix); k != nil && strings.HasPrefix(string(k), string(prefix)); k, v = c.Next() {
141141- var notif models.Notification
142142- if err := json.Unmarshal(v, ¬if); err != nil {
143143- continue
144144- }
145145- // If no last_read set, all are unread
146146- if lastRead.IsZero() || notif.CreatedAt.After(lastRead) {
147147- count++
148148- } else {
149149- // Since keys are in reverse chronological order,
150150- // once we hit a read notification, all remaining are also read
151151- break
152152- }
153153- }
154154- return nil
155155- })
101101+ if lastRead.IsZero() {
102102+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM notifications WHERE target_did = ?`, targetDID).Scan(&count)
103103+ } else {
104104+ _ = idx.db.QueryRow(`SELECT COUNT(*) FROM notifications WHERE target_did = ? AND created_at > ?`,
105105+ targetDID, lastRead.Format(time.RFC3339Nano)).Scan(&count)
106106+ }
156107157108 return count
158109}
159110160111// MarkAllRead updates the last_read timestamp to now for the user.
161112func (idx *FeedIndex) MarkAllRead(targetDID string) error {
162162- return idx.db.Update(func(tx *bolt.Tx) error {
163163- b := tx.Bucket(BucketNotificationsMeta)
164164- key := []byte(targetDID + ":last_read")
165165- return b.Put(key, []byte(time.Now().Format(time.RFC3339Nano)))
166166- })
113113+ _, err := idx.db.Exec(`INSERT OR REPLACE INTO notifications_meta (target_did, last_read) VALUES (?, ?)`,
114114+ targetDID, time.Now().Format(time.RFC3339Nano))
115115+ return err
167116}
168117169118// getLastRead returns the last_read timestamp for a user.
170119func (idx *FeedIndex) getLastRead(targetDID string) time.Time {
171171- var lastRead time.Time
172172- _ = idx.db.View(func(tx *bolt.Tx) error {
173173- b := tx.Bucket(BucketNotificationsMeta)
174174- v := b.Get([]byte(targetDID + ":last_read"))
175175- if v != nil {
176176- if t, err := time.Parse(time.RFC3339Nano, string(v)); err == nil {
177177- lastRead = t
178178- }
179179- }
180180- return nil
181181- })
182182- return lastRead
120120+ var lastReadStr string
121121+ err := idx.db.QueryRow(`SELECT last_read FROM notifications_meta WHERE target_did = ?`, targetDID).Scan(&lastReadStr)
122122+ if err != nil {
123123+ return time.Time{}
124124+ }
125125+ t, _ := time.Parse(time.RFC3339Nano, lastReadStr)
126126+ return t
183127}
184128185129// parseTargetDID extracts the DID from an AT-URI (at://did:plc:xxx/collection/rkey)
···206150 return
207151 }
208152209209- err := idx.db.Update(func(tx *bolt.Tx) error {
210210- b := tx.Bucket(BucketNotifications)
211211- prefix := []byte(targetDID + ":")
212212- c := b.Cursor()
213213- for k, v := c.Seek(prefix); k != nil && strings.HasPrefix(string(k), string(prefix)); k, v = c.Next() {
214214- var existing models.Notification
215215- if err := json.Unmarshal(v, &existing); err != nil {
216216- continue
217217- }
218218- if existing.Type == notifType && existing.ActorDID == actorDID && existing.SubjectURI == subjectURI {
219219- return b.Delete(k)
220220- }
221221- }
222222- return nil
223223- })
153153+ _, err := idx.db.Exec(`
154154+ DELETE FROM notifications
155155+ WHERE target_did = ? AND type = ? AND actor_did = ? AND subject_uri = ?
156156+ `, targetDID, string(notifType), actorDID, subjectURI)
224157 if err != nil {
225158 log.Warn().Err(err).Str("target", targetDID).Str("actor", actorDID).Msg("failed to delete notification")
226159 }
···251184// Returns empty string if not found.
252185func (idx *FeedIndex) GetCommentSubjectURI(actorDID, rkey string) string {
253186 var subjectURI string
254254- _ = idx.db.View(func(tx *bolt.Tx) error {
255255- b := tx.Bucket(BucketCommentsByActor)
256256- v := b.Get([]byte(actorDID + ":" + rkey))
257257- if v != nil {
258258- subjectURI = string(v)
259259- }
260260- return nil
261261- })
187187+ err := idx.db.QueryRow(`SELECT subject_uri FROM comments WHERE actor_did = ? AND rkey = ?`,
188188+ actorDID, rkey).Scan(&subjectURI)
189189+ if err != nil {
190190+ return ""
191191+ }
262192 return subjectURI
263193}
264194···301231 }
302232303233 // If this is a reply, also notify the parent comment's author.
304304- // We store the brew's subjectURI (not the parent comment URI) so the
305305- // notification links directly to the brew page with comments.
306234 if parentURI != "" {
307235 parentAuthorDID := parseTargetDID(parentURI)
308236 if parentAuthorDID != "" && parentAuthorDID != actorDID && parentAuthorDID != targetDID {
-220
internal/firehose/suggestions.go
···11-package firehose
22-33-import (
44- "encoding/json"
55- "sort"
66- "strings"
77-88- "arabica/internal/atproto"
99-1010- bolt "go.etcd.io/bbolt"
1111-)
1212-1313-// EntitySuggestion represents a suggestion for auto-completing an entity
1414-type EntitySuggestion struct {
1515- Name string `json:"name"`
1616- SourceURI string `json:"source_uri"`
1717- Fields map[string]string `json:"fields"`
1818- Count int `json:"count"`
1919-}
2020-2121-// entityFieldConfig defines which fields to extract and search for each entity type
2222-type entityFieldConfig struct {
2323- allFields []string
2424- searchFields []string
2525- nameField string
2626-}
2727-2828-var entityConfigs = map[string]entityFieldConfig{
2929- atproto.NSIDRoaster: {
3030- allFields: []string{"name", "location", "website"},
3131- searchFields: []string{"name", "location", "website"},
3232- nameField: "name",
3333- },
3434- atproto.NSIDGrinder: {
3535- allFields: []string{"name", "grinderType", "burrType"},
3636- searchFields: []string{"name", "grinderType", "burrType"},
3737- nameField: "name",
3838- },
3939- atproto.NSIDBrewer: {
4040- allFields: []string{"name", "brewerType", "description"},
4141- searchFields: []string{"name", "brewerType"},
4242- nameField: "name",
4343- },
4444- atproto.NSIDBean: {
4545- allFields: []string{"name", "origin", "roastLevel", "process"},
4646- searchFields: []string{"name", "origin", "roastLevel"},
4747- nameField: "name",
4848- },
4949-}
5050-5151-// SearchEntitySuggestions searches indexed records for entity suggestions matching a query.
5252-// It scans BucketByCollection for the given collection, matches against searchable fields,
5353-// deduplicates by normalized name, and returns results sorted by popularity.
5454-func (idx *FeedIndex) SearchEntitySuggestions(collection, query string, limit int) ([]EntitySuggestion, error) {
5555- if limit <= 0 {
5656- limit = 10
5757- }
5858-5959- config, ok := entityConfigs[collection]
6060- if !ok {
6161- return nil, nil
6262- }
6363-6464- queryLower := strings.ToLower(strings.TrimSpace(query))
6565- if len(queryLower) < 2 {
6666- return nil, nil
6767- }
6868-6969- // dedupKey -> aggregated suggestion
7070- type candidate struct {
7171- suggestion EntitySuggestion
7272- fieldCount int // number of non-empty fields (to pick best representative)
7373- dids map[string]struct{}
7474- }
7575- candidates := make(map[string]*candidate)
7676-7777- err := idx.db.View(func(tx *bolt.Tx) error {
7878- byCollection := tx.Bucket(BucketByCollection)
7979- recordsBucket := tx.Bucket(BucketRecords)
8080-8181- prefix := []byte(collection + ":")
8282- c := byCollection.Cursor()
8383-8484- for k, _ := c.Seek(prefix); k != nil; k, _ = c.Next() {
8585- if !hasPrefix(k, prefix) {
8686- break
8787- }
8888-8989- // Extract URI from collection key
9090- uri := extractURIFromCollectionKey(k, collection)
9191- if uri == "" {
9292- continue
9393- }
9494-9595- data := recordsBucket.Get([]byte(uri))
9696- if data == nil {
9797- continue
9898- }
9999-100100- var indexed IndexedRecord
101101- if err := json.Unmarshal(data, &indexed); err != nil {
102102- continue
103103- }
104104-105105- var recordData map[string]interface{}
106106- if err := json.Unmarshal(indexed.Record, &recordData); err != nil {
107107- continue
108108- }
109109-110110- // Extract fields
111111- fields := make(map[string]string)
112112- for _, f := range config.allFields {
113113- if v, ok := recordData[f].(string); ok && v != "" {
114114- fields[f] = v
115115- }
116116- }
117117-118118- name := fields[config.nameField]
119119- if name == "" {
120120- continue
121121- }
122122-123123- // Check if any searchable field matches the query
124124- matched := false
125125- for _, sf := range config.searchFields {
126126- val := strings.ToLower(fields[sf])
127127- if val == "" {
128128- continue
129129- }
130130- if strings.HasPrefix(val, queryLower) || strings.Contains(val, queryLower) {
131131- matched = true
132132- break
133133- }
134134- }
135135- if !matched {
136136- continue
137137- }
138138-139139- // Deduplicate by normalized name
140140- normalizedName := strings.ToLower(strings.TrimSpace(name))
141141-142142- if existing, ok := candidates[normalizedName]; ok {
143143- existing.dids[indexed.DID] = struct{}{}
144144- // Keep the record with more complete fields
145145- nonEmpty := 0
146146- for _, v := range fields {
147147- if v != "" {
148148- nonEmpty++
149149- }
150150- }
151151- if nonEmpty > existing.fieldCount {
152152- existing.suggestion.Name = name
153153- existing.suggestion.Fields = fields
154154- existing.suggestion.SourceURI = indexed.URI
155155- existing.fieldCount = nonEmpty
156156- }
157157- } else {
158158- nonEmpty := 0
159159- for _, v := range fields {
160160- if v != "" {
161161- nonEmpty++
162162- }
163163- }
164164- candidates[normalizedName] = &candidate{
165165- suggestion: EntitySuggestion{
166166- Name: name,
167167- SourceURI: indexed.URI,
168168- Fields: fields,
169169- },
170170- fieldCount: nonEmpty,
171171- dids: map[string]struct{}{indexed.DID: {}},
172172- }
173173- }
174174- }
175175-176176- return nil
177177- })
178178- if err != nil {
179179- return nil, err
180180- }
181181-182182- // Build results with counts
183183- results := make([]EntitySuggestion, 0, len(candidates))
184184- for _, c := range candidates {
185185- c.suggestion.Count = len(c.dids)
186186- results = append(results, c.suggestion)
187187- }
188188-189189- // Sort: prefix matches first, then by count desc, then alphabetically
190190- sort.Slice(results, func(i, j int) bool {
191191- iPrefix := strings.HasPrefix(strings.ToLower(results[i].Name), queryLower)
192192- jPrefix := strings.HasPrefix(strings.ToLower(results[j].Name), queryLower)
193193- if iPrefix != jPrefix {
194194- return iPrefix
195195- }
196196- if results[i].Count != results[j].Count {
197197- return results[i].Count > results[j].Count
198198- }
199199- return strings.ToLower(results[i].Name) < strings.ToLower(results[j].Name)
200200- })
201201-202202- if len(results) > limit {
203203- results = results[:limit]
204204- }
205205-206206- return results, nil
207207-}
208208-209209-// hasPrefix checks if a byte slice starts with a prefix (avoids import of bytes)
210210-func hasPrefix(s, prefix []byte) bool {
211211- if len(s) < len(prefix) {
212212- return false
213213- }
214214- for i, b := range prefix {
215215- if s[i] != b {
216216- return false
217217- }
218218- }
219219- return true
220220-}