···2525)
26262727func TestImportV2(t *testing.T) {
2828- c, err := rpchttp.New("http://localhost:26100", "/websocket")
2828+ c, err := rpchttp.New("http://localhost:26657", "/websocket")
2929 require.NoError(t, err)
30303131 ctx := t.Context()
···4141 var wg sync.WaitGroup
4242 noMoreNewEntries := atomic.Bool{}
4343 wg.Go(func() {
4444- for entry := range iterateOverExport(ctx, "2023-10-10T00:00:00.000Z") {
4444+ for entry := range iterateOverExport(ctx, 0) {
4545 if totalAwaiting.Size() > 5000 {
4646 for totalAwaiting.Size() > 1000 {
4747 time.Sleep(1 * time.Second)
···184184 wg.Wait()
185185}
186186187187-func iterateOverExport(ctx context.Context, startAt string) iter.Seq[didplc.LogEntry] {
187187+func iterateOverExport(ctx context.Context, startAt uint64) iter.Seq[didplc.LogEntry] {
188188 return func(yield func(didplc.LogEntry) bool) {
189189 const batchSize = 1000
190190 baseURL := didplc.DefaultDirectoryURL + "/export"
191191 client := &http.Client{Timeout: 30 * time.Second}
192192-193193- // The /export seems to sometimes return outright duplicated entries :weary:
194194- seenCIDs := map[string]struct{}{}
195192196193 after := startAt
197194 for {
···204201205202 q := req.URL.Query()
206203 q.Add("count", fmt.Sprint(batchSize))
207207- if after != "" {
208208- q.Add("after", after)
209209- }
204204+ q.Add("after", fmt.Sprint(after))
210205 req.URL.RawQuery = q.Encode()
211206212207 resp, err := client.Do(req)
···219214 return // Non-200 status code
220215 }
221216222222- entries := make([]didplc.LogEntry, 0, batchSize)
217217+ type logEntryWithSeq struct {
218218+ didplc.LogEntry
219219+ Seq uint64 `json:"seq"`
220220+ }
221221+222222+ entries := make([]logEntryWithSeq, 0, batchSize)
223223224224 // Read response body
225225 s := bufio.NewScanner(resp.Body)
226226 receivedEntries := 0
227227 for s.Scan() {
228228- var entry didplc.LogEntry
228228+ var entry logEntryWithSeq
229229 if err := json.Unmarshal(s.Bytes(), &entry); err != nil {
230230 return // Failed to decode JSON
231231 }
232232- if _, present := seenCIDs[entry.CID]; !present {
233233- entries = append(entries, entry)
234234- seenCIDs[entry.CID] = struct{}{}
235235- }
232232+ entries = append(entries, entry)
236233 receivedEntries++
237234 }
238235 if s.Err() != nil {
···244241 }
245242246243 // Process each entry
247247- var lastCreatedAt string
248244 for _, entry := range entries {
249249- lastCreatedAt = entry.CreatedAt
250250- if !yield(entry) {
245245+ after = entry.Seq
246246+ if !yield(entry.LogEntry) {
251247 return
252248 }
253249 }
···255251 if receivedEntries < batchSize {
256252 return
257253 }
258258-259259- after = lastCreatedAt
260260-261261- // Small delay to be respectful to the API
262262- time.Sleep(100 * time.Millisecond)
263254 }
264255 }
265256}
+66-28
plc/impl.go
···1313 "github.com/samber/lo"
1414 "github.com/samber/mo"
1515 "tangled.org/gbl08ma/didplcbft/store"
1616+ "tangled.org/gbl08ma/didplcbft/types"
1617)
17181819type TreeProvider interface {
···4344 plc.mu.Lock()
4445 defer plc.mu.Unlock()
45464646- timestamp := syntax.Datetime(at.Format(store.ActualAtprotoDatetimeLayout))
4747+ timestamp := syntax.Datetime(at.Format(types.ActualAtprotoDatetimeLayout))
47484849 // TODO set true to false only while importing old ops
4950 _, err := plc.validator.Validate(atHeight, timestamp, did, opBytes, true)
···5859 plc.mu.Lock()
5960 defer plc.mu.Unlock()
60616161- timestamp := syntax.Datetime(t.Format(store.ActualAtprotoDatetimeLayout))
6262+ timestamp := syntax.Datetime(t.Format(types.ActualAtprotoDatetimeLayout))
62636364 // TODO set true to false only while importing old ops
6465 effects, err := plc.validator.Validate(WorkingTreeVersion, timestamp, did, opBytes, true)
···9798 newCID := newEntry.CID
9899 newPrev := newEntry.Operation.AsOperation().PrevCIDStr()
99100100100- // TODO avoid redundant CreatedAt formating and parsing by using a specialized LogEntry type internally (i.e. between us and the store)
101101 newCreatedAtDT, err := syntax.ParseDatetime(newEntry.CreatedAt)
102102 if err != nil {
103103 return stacktrace.Propagate(err, "")
···106106107107 mustFullyReplaceHistory := false
108108 for _, entry := range l {
109109- existingCreatedAt, err := syntax.ParseDatetime(entry.CreatedAt)
110110- if err != nil {
111111- return stacktrace.Propagate(err, "")
112112- }
113113- if existingCreatedAt.Time().After(newCreatedAt) {
109109+ if entry.CreatedAt.After(newCreatedAt) {
114110 // We're trying to import an operation whose timestamp precedes one of the timestamps for operations we already know about
115111 // We'll need to discard all known history and import it anew using the authoritative source data (same as when dealing with sequence forks)
116112 mustFullyReplaceHistory = true
117113 break
118114 }
119115120120- if entry.CID == newCID {
116116+ if entry.CID.String() == newCID && entry.Nullified == newEntry.Nullified {
121117 // If an operation with the same CID already exists -> easy-ish
122118123119 // this operation is already present, there is nothing to do
···127123 }
128124 }
129125130130- if len(l) == 0 || (!mustFullyReplaceHistory && l[len(l)-1].CID == newPrev) {
126126+ if len(l) == 0 || (!mustFullyReplaceHistory && l[len(l)-1].CID.String() == newPrev) {
131127 // If DID doesn't exist at all -> easy
132128 // If prev matches CID of latest operation, and resulting timestamp sequence monotonically increases -> easy
133129 err = store.Tree.StoreOperation(tree, newEntry, mo.None[int]())
···166162 return didplc.Doc{}, stacktrace.Propagate(ErrDIDNotFound, "")
167163 }
168164169169- opEnum := l[len(l)-1].Operation
170170- if opEnum.Tombstone != nil {
171171- return didplc.Doc{}, stacktrace.Propagate(ErrDIDGone, "")
165165+ // find most recent operation that isn't nullified (during authoritative import, the latest operation might be nullified)
166166+ for i := len(l) - 1; i >= 0; i-- {
167167+ opEnum := l[i].Operation
168168+ if !l[i].Nullified {
169169+ if opEnum.Tombstone != nil {
170170+ return didplc.Doc{}, stacktrace.Propagate(ErrDIDGone, "")
171171+ }
172172+ return opEnum.AsOperation().Doc(did)
173173+ }
172174 }
173173- return opEnum.AsOperation().Doc(did)
175175+	// in the worst case all operations are somehow nullified and the loop ends without finding a usable operation
176176+ // that _shouldn't_ be possible (right?) but if it does happen, let's just behave as if the DID was tombstoned
177177+ return didplc.Doc{}, stacktrace.Propagate(ErrDIDGone, "")
174178}
175179176180func (plc *plcImpl) OperationLog(ctx context.Context, atHeight TreeVersion, did string) ([]didplc.OpEnum, error) {
···195199 return nil, stacktrace.Propagate(ErrDIDNotFound, "")
196200 }
197201198198- return lo.Map(l, func(logEntry didplc.LogEntry, _ int) didplc.OpEnum {
202202+ l = lo.Filter(l, func(logEntry types.SequencedLogEntry, _ int) bool {
203203+ return !logEntry.Nullified
204204+ })
205205+206206+ return lo.Map(l, func(logEntry types.SequencedLogEntry, _ int) didplc.OpEnum {
199207 return logEntry.Operation
200208 }), nil
201209}
···221229 return nil, stacktrace.Propagate(ErrDIDNotFound, "")
222230 }
223231224224- return l, nil
232232+ // if the latest operations are nullified (happens while authoritative import is in progress), just pretend we don't have them yet,
233233+ // since a properly functioning PLC implementation could never have the latest operation for a DID be nullified
234234+ dropAfterIdx := len(l) - 1
235235+ for ; dropAfterIdx >= 0; dropAfterIdx-- {
236236+ if !l[dropAfterIdx].Nullified {
237237+ break
238238+ }
239239+ }
240240+ l = l[0 : dropAfterIdx+1]
241241+242242+ return lo.Map(l, func(logEntry types.SequencedLogEntry, _ int) didplc.LogEntry {
243243+ return logEntry.ToDIDPLCLogEntry()
244244+ }), nil
225245}
226246227247func (plc *plcImpl) LastOperation(ctx context.Context, atHeight TreeVersion, did string) (didplc.OpEnum, error) {
228228- // GetLastOp - /:did/log/last - latest op from audit log which isn't nullified (isn't the latest op guaranteed to not be nullified?)
248248+ // GetLastOp - /:did/log/last - latest op from audit log which isn't nullified
229249 // if missing -> returns ErrDIDNotFound
230250 // if tombstone -> returns tombstone op
231251 plc.mu.Lock()
···245265 return didplc.OpEnum{}, stacktrace.Propagate(ErrDIDNotFound, "")
246266 }
247267248248- return l[len(l)-1].Operation, nil
268268+ // find most recent operation that isn't nullified (during authoritative import, the latest operation might be nullified)
269269+ for i := len(l) - 1; i >= 0; i-- {
270270+ opEnum := l[i].Operation
271271+ if !l[i].Nullified {
272272+ return opEnum, nil
273273+ }
274274+ }
275275+	// in the worst case all operations are somehow nullified and the loop ends without finding a usable operation
276276+ // that _shouldn't_ be possible (right?) but if it does happen, let's just behave as if the DID did not exist
277277+ return didplc.OpEnum{}, stacktrace.Propagate(ErrDIDNotFound, "")
249278}
250279251280func (plc *plcImpl) Data(ctx context.Context, atHeight TreeVersion, did string) (didplc.RegularOp, error) {
···269298 return didplc.RegularOp{}, stacktrace.Propagate(ErrDIDNotFound, "")
270299 }
271300272272- opEnum := l[len(l)-1].Operation
273273- if opEnum.Tombstone != nil {
274274- return didplc.RegularOp{}, stacktrace.Propagate(ErrDIDGone, "")
301301+ // find most recent operation that isn't nullified (during authoritative import, the latest operation might be nullified)
302302+ for i := len(l) - 1; i >= 0; i-- {
303303+ opEnum := l[i].Operation
304304+ if !l[i].Nullified {
305305+ if opEnum.Tombstone != nil {
306306+ return didplc.RegularOp{}, stacktrace.Propagate(ErrDIDGone, "")
307307+ }
308308+ if opEnum.Regular != nil {
309309+ return *opEnum.Regular, nil
310310+ }
311311+ return *modernizeOp(opEnum.Legacy), nil
312312+ }
275313 }
276276- if opEnum.Regular != nil {
277277- return *opEnum.Regular, nil
278278- }
279279- return *modernizeOp(opEnum.Legacy), nil
314314+	// in the worst case all operations are somehow nullified and the loop ends without finding a usable operation
315315+ // that _shouldn't_ be possible (right?) but if it does happen, let's just behave as if the DID was tombstoned
316316+ return didplc.RegularOp{}, stacktrace.Propagate(ErrDIDGone, "")
317317+280318}
281319282282-func (plc *plcImpl) Export(ctx context.Context, atHeight TreeVersion, after time.Time, count int) ([]didplc.LogEntry, error) {
320320+func (plc *plcImpl) Export(ctx context.Context, atHeight TreeVersion, after uint64, count int) ([]types.SequencedLogEntry, error) {
283321 plc.mu.Lock()
284322 defer plc.mu.Unlock()
285323···296334 plc *plcImpl
297335}
298336299299-func (a *inMemoryAuditLogFetcher) AuditLogReverseIterator(atHeight TreeVersion, did string, retErr *error) iter.Seq2[int, didplc.LogEntry] {
337337+func (a *inMemoryAuditLogFetcher) AuditLogReverseIterator(atHeight TreeVersion, did string, retErr *error) iter.Seq2[int, types.SequencedLogEntry] {
300338 tree, err := a.plc.treeProvider.ImmutableTree(atHeight)
301339 if err != nil {
302340 *retErr = stacktrace.Propagate(err, "")
303303- return func(yield func(int, didplc.LogEntry) bool) {}
341341+ return func(yield func(int, types.SequencedLogEntry) bool) {}
304342 }
305343306344 return store.Tree.AuditLogReverseIterator(tree, did, retErr)
+9-22
plc/operation_validator.go
···1111 "github.com/did-method-plc/go-didplc"
1212 "github.com/palantir/stacktrace"
1313 "github.com/samber/mo"
1414+ "tangled.org/gbl08ma/didplcbft/types"
1415)
15161617type AuditLogFetcher interface {
1718 // AuditLogReverseIterator should return an iterator over the list of log entries for the specified DID, in reverse
1818- AuditLogReverseIterator(atHeight TreeVersion, did string, err *error) iter.Seq2[int, didplc.LogEntry]
1919+ AuditLogReverseIterator(atHeight TreeVersion, did string, err *error) iter.Seq2[int, types.SequencedLogEntry]
1920}
20212122type V0OperationValidator struct {
···74757576 proposedPrev := op.PrevCIDStr()
76777777- partialLog := make(map[int]didplc.LogEntry)
7878+ partialLog := make(map[int]types.SequencedLogEntry)
7879 mostRecentOpIndex := -1
7980 indexOfPrev := -1
8081 var iteratorErr error
···8889 }
8990 }
90919191- if entry.CID == proposedPrev {
9292+ if entry.CID.String() == proposedPrev {
9293 indexOfPrev = entryIdx
9394 break
9495 }
···9899 return OperationEffects{}, stacktrace.Propagate(iteratorErr, "")
99100 }
100101101101- nullifiedEntries := []didplc.LogEntry{}
102102+ nullifiedEntries := []types.SequencedLogEntry{}
102103 nullifiedEntriesStartingIndex := mo.None[int]()
103104104105 if mostRecentOpIndex < 0 {
···125126126127 // timestamps must increase monotonically
127128 mostRecentOp := partialLog[mostRecentOpIndex]
128128- mostRecentCreatedAt, err := syntax.ParseDatetime(mostRecentOp.CreatedAt)
129129- if err != nil {
130130- return OperationEffects{}, stacktrace.Propagate(err, "reached invalid internal state")
131131- }
132132- if !timestamp.Time().After(mostRecentCreatedAt.Time()) {
129129+ if !timestamp.Time().After(mostRecentOp.CreatedAt) {
133130 return OperationEffects{}, stacktrace.Propagate(ErrInvalidOperationSequence, "")
134131 }
135132···156153 }
157154158155 // recovery key gets a 72hr window to do historical re-writes
159159- firstNullifiedCreatedAt, err := syntax.ParseDatetime(nullifiedEntries[0].CreatedAt)
160160- if err != nil {
161161- return OperationEffects{}, stacktrace.Propagate(err, "reached invalid internal state")
162162- }
163163- if timestamp.Time().Sub(firstNullifiedCreatedAt.Time()) > 72*time.Hour {
156156+ if timestamp.Time().Sub(nullifiedEntries[0].CreatedAt) > 72*time.Hour {
164157 return OperationEffects{}, stacktrace.Propagate(ErrRecoveryWindowExpired, "")
165158 }
166159 } else {
···230223 for _, entry := range v.auditLogFetcher.AuditLogReverseIterator(atHeight, did, &err) {
231224 if entry.Nullified {
232225 // The typescript implementation operates over a `ops` array which doesn't include nullified ops
233233- // (With recovery ops also skipping rate limits, doesn't this leave the PLC vulnerable to the spam of constant recovery operations?)
226226+ // (With recovery ops also skipping rate limits, doesn't this leave the PLC vulnerable to the spam of constant recovery operations? TODO investigate)
234227 continue
235228 }
236236- // Parse the CreatedAt timestamp string
237237- // The CreatedAt field is stored as a string in ISO 8601 format
238238- opDatetime, err := syntax.ParseDatetime(entry.CreatedAt)
239239- if err != nil {
240240- return stacktrace.Propagate(err, "")
241241- }
242242- opTime := opDatetime.Time()
229229+ opTime := entry.CreatedAt
243230244231 if opTime.Before(weekAgo) {
245232 // operations are always ordered by timestamp, and we're iterating from newest to oldest
···1616 "github.com/samber/lo"
1717 "github.com/stretchr/testify/require"
1818 "tangled.org/gbl08ma/didplcbft/plc"
1919+ "tangled.org/gbl08ma/didplcbft/types"
1920)
20212122func TestPLC(t *testing.T) {
···190191 doc, err = testPLC.Resolve(ctx, plc.SpecificTreeVersion(origVersion+4), testDID)
191192 require.NoError(t, err)
192193193193- export, err := testPLC.Export(ctx, plc.CommittedTreeVersion, time.Time{}, 1000)
194194+ export, err := testPLC.Export(ctx, plc.CommittedTreeVersion, 0, 1000)
194195 require.NoError(t, err)
195196 require.Len(t, export, 3)
196197197198 require.Equal(t, "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q", export[0].Operation.AsOperation().CID().String())
198198- require.Equal(t, "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q", export[0].CID)
199199+ require.Equal(t, "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q", export[0].CID.String())
199200 require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[1].Operation.AsOperation().CID().String())
200200- require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[1].CID)
201201+ require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[1].CID.String())
201202 require.Equal(t, "bafyreigyzl2esgnk7nvav5myvgywbshdmatzthc73iiar7tyeq3xjt47m4", export[2].Operation.AsOperation().CID().String())
202202- require.Equal(t, "bafyreigyzl2esgnk7nvav5myvgywbshdmatzthc73iiar7tyeq3xjt47m4", export[2].CID)
203203+ require.Equal(t, "bafyreigyzl2esgnk7nvav5myvgywbshdmatzthc73iiar7tyeq3xjt47m4", export[2].CID.String())
203204204204- // the after parameter is exclusive, we should just get the second successful operation
205205- export, err = testPLC.Export(ctx, plc.CommittedTreeVersion, operations[1].ApplyAt.Time(), 1)
205205+	// the after parameter is exclusive; with a limit of 1, we should get just the second successful operation
206206+ export, err = testPLC.Export(ctx, plc.CommittedTreeVersion, export[0].Seq, 1)
206207 require.NoError(t, err)
207208 require.Len(t, export, 1)
208208- require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[0].CID)
209209+ require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[0].CID.String())
209210}
210211211212func TestPLCFromRemoteOperations(t *testing.T) {
···291292 }
292293 }
293294294294- export, err := testPLC.Export(ctx, plc.CommittedTreeVersion, time.Time{}, 0)
295295+ export, err := testPLC.Export(ctx, plc.CommittedTreeVersion, 0, 0)
295296 require.NoError(t, err)
296296- require.Len(t, export, 96)
297297+ require.Len(t, export, 100)
297298298299 // ensure entries are sorted correctly
299299- last := time.Time{}
300300+ last := uint64(0)
300301 for _, entry := range export {
301301- et, err := syntax.ParseDatetime(entry.CreatedAt)
302302- require.NoError(t, err)
303303- require.True(t, et.Time().After(last))
304304- last = et.Time()
302302+ require.True(t, entry.Seq > last)
303303+ last = entry.Seq
305304 }
306305}
307306···412411 require.NoError(t, err)
413412414413 seenCIDs := map[string]struct{}{}
415415- for entry := range iterateOverExport(ctx, "") {
414414+ for entry := range iterateOverExport(ctx, 0) {
416415 err := testPLC.ImportOperationFromAuthoritativeSource(ctx, entry, func() ([]didplc.LogEntry, error) {
417416 e, err := client.AuditLog(ctx, entry.DID)
418417 return e, stacktrace.Propagate(err, "")
···420419 require.NoError(t, err)
421420422421 seenCIDs[entry.CID] = struct{}{}
423423- if len(seenCIDs) == 4000 {
422422+ if len(seenCIDs) == 10000 {
424423 break
425424 }
426425 }
···428427 _, _, err = tree.SaveVersion()
429428 require.NoError(t, err)
430429431431- exportedEntries, err := testPLC.Export(ctx, plc.CommittedTreeVersion, time.Time{}, len(seenCIDs)+1)
430430+ exportedEntries, err := testPLC.Export(ctx, plc.CommittedTreeVersion, 0, len(seenCIDs)+1)
432431 require.NoError(t, err)
433432434433 require.Len(t, exportedEntries, len(seenCIDs))
435434436435 for _, exportedEntry := range exportedEntries {
437437- delete(seenCIDs, exportedEntry.CID)
436436+ delete(seenCIDs, exportedEntry.CID.String())
438437 }
439438 require.Empty(t, seenCIDs)
440439}
441440442442-func iterateOverExport(ctx context.Context, startAt string) iter.Seq[didplc.LogEntry] {
441441+func TestImportOperationWithNullification(t *testing.T) {
442442+ var client didplc.Client
443443+444444+ ctx := t.Context()
445445+446446+ testFn := func(toImport []didplc.LogEntry, mutate func(didplc.LogEntry) didplc.LogEntry) ([]types.SequencedLogEntry, []didplc.LogEntry) {
447447+ treeProvider := NewTestTreeProvider()
448448+ testPLC := plc.NewPLC(treeProvider)
449449+450450+ tree, err := treeProvider.MutableTree()
451451+ require.NoError(t, err)
452452+ _, _, err = tree.SaveVersion()
453453+ require.NoError(t, err)
454454+455455+ for _, entry := range toImport {
456456+ entry = mutate(entry)
457457+ err := testPLC.ImportOperationFromAuthoritativeSource(ctx, entry, func() ([]didplc.LogEntry, error) {
458458+ e, err := client.AuditLog(ctx, entry.DID)
459459+ return e, stacktrace.Propagate(err, "")
460460+ })
461461+ require.NoError(t, err)
462462+ }
463463+464464+ _, _, err = tree.SaveVersion()
465465+ require.NoError(t, err)
466466+467467+ exportedEntries, err := testPLC.Export(ctx, plc.CommittedTreeVersion, 0, len(toImport)+1)
468468+ require.NoError(t, err)
469469+470470+ require.Len(t, exportedEntries, len(toImport))
471471+472472+ auditLog, err := testPLC.AuditLog(ctx, plc.CommittedTreeVersion, "did:plc:pkmfz5soq2swsvbhvjekb36g")
473473+ require.NoError(t, err)
474474+475475+ return exportedEntries, auditLog
476476+ }
477477+478478+ toImport, err := client.AuditLog(ctx, "did:plc:pkmfz5soq2swsvbhvjekb36g")
479479+ require.NoError(t, err)
480480+481481+ exportedEntries, auditLog := testFn(toImport, func(le didplc.LogEntry) didplc.LogEntry { return le })
482482+ require.Len(t, auditLog, len(toImport))
483483+484484+ for i, entry := range exportedEntries {
485485+ require.Equal(t, uint64(i+1), entry.Seq)
486486+ require.Equal(t, toImport[i].CID, entry.CID.String())
487487+ require.Equal(t, toImport[i].CID, auditLog[i].CID)
488488+ require.Equal(t, toImport[i].CreatedAt, entry.CreatedAt.Format(types.ActualAtprotoDatetimeLayout))
489489+ require.Equal(t, toImport[i].CreatedAt, auditLog[i].CreatedAt)
490490+ require.Equal(t, toImport[i].Nullified, entry.Nullified)
491491+ require.Equal(t, toImport[i].Nullified, auditLog[i].Nullified)
492492+ }
493493+494494+ // ensure auditLog never returns nullified entries as the last entries
495495+ exportedEntries, auditLog = testFn(toImport[0:5], func(le didplc.LogEntry) didplc.LogEntry { return le })
496496+497497+ require.Len(t, exportedEntries, 5)
498498+ require.Len(t, auditLog, 1)
499499+ require.False(t, auditLog[0].Nullified)
500500+	require.Equal(t, "bafyreid2tbopmtuguvuvij5kjcqo7rv7yvqza37uvfcvk5zdxyo57xlfdi", auditLog[0].CID)
501501+502502+ // now pretend that at the time of import, no operations were nullified
503503+ exportedEntries, auditLog = testFn(toImport, func(le didplc.LogEntry) didplc.LogEntry {
504504+ le.Nullified = false
505505+ return le
506506+ })
507507+ require.Len(t, auditLog, len(toImport))
508508+509509+ for i, entry := range exportedEntries {
510510+ if i < 1 {
511511+ require.Equal(t, uint64(i+1), entry.Seq)
512512+ } else {
513513+ require.Equal(t, uint64(i+5), entry.Seq)
514514+ }
515515+ require.Equal(t, toImport[i].CID, entry.CID.String())
516516+ require.Equal(t, toImport[i].CID, auditLog[i].CID)
517517+ require.Equal(t, toImport[i].CreatedAt, entry.CreatedAt.Format(types.ActualAtprotoDatetimeLayout))
518518+ require.Equal(t, toImport[i].CreatedAt, auditLog[i].CreatedAt)
519519+ require.Equal(t, toImport[i].Nullified, entry.Nullified)
520520+ require.Equal(t, toImport[i].Nullified, auditLog[i].Nullified)
521521+ }
522522+523523+ // now manipulate the timestamp on the first operation just to see the first operation get rewritten
524524+ exportedEntries, auditLog = testFn(toImport, func(le didplc.LogEntry) didplc.LogEntry {
525525+ if le.CID == "bafyreid2tbopmtuguvuvij5kjcqo7rv7yvqza37uvfcvk5zdxyo57xlfdi" {
526526+ // this should cause mustFullyReplaceHistory to become true
527527+ le.CreatedAt = syntax.DatetimeNow().String()
528528+ }
529529+ return le
530530+ })
531531+ require.Len(t, auditLog, len(toImport))
532532+533533+ for i, entry := range exportedEntries {
534534+ require.Equal(t, uint64(i+2), entry.Seq)
535535+ require.Equal(t, toImport[i].CID, entry.CID.String())
536536+ require.Equal(t, toImport[i].CID, auditLog[i].CID)
537537+ require.Equal(t, toImport[i].CreatedAt, entry.CreatedAt.Format(types.ActualAtprotoDatetimeLayout))
538538+ require.Equal(t, toImport[i].CreatedAt, auditLog[i].CreatedAt)
539539+ require.Equal(t, toImport[i].Nullified, entry.Nullified)
540540+ require.Equal(t, toImport[i].Nullified, auditLog[i].Nullified)
541541+ }
542542+}
543543+544544+func iterateOverExport(ctx context.Context, startAt uint64) iter.Seq[didplc.LogEntry] {
443545 return func(yield func(didplc.LogEntry) bool) {
444546 const batchSize = 1000
445547 baseURL := didplc.DefaultDirectoryURL + "/export"
446548 client := &http.Client{Timeout: 30 * time.Second}
447447-448448- // The /export seems to sometimes return outright duplicated entries :weary:
449449- seenCIDs := map[string]struct{}{}
450549451550 after := startAt
452551 for {
···459558460559 q := req.URL.Query()
461560 q.Add("count", fmt.Sprint(batchSize))
462462- if after != "" {
463463- q.Add("after", after)
464464- }
561561+ q.Add("after", fmt.Sprint(after))
465562 req.URL.RawQuery = q.Encode()
466563467564 resp, err := client.Do(req)
···474571 return // Non-200 status code
475572 }
476573477477- entries := make([]didplc.LogEntry, 0, batchSize)
574574+ type logEntryWithSeq struct {
575575+ didplc.LogEntry
576576+ Seq uint64 `json:"seq"`
577577+ }
578578+579579+ entries := make([]logEntryWithSeq, 0, batchSize)
478580479581 // Read response body
480582 s := bufio.NewScanner(resp.Body)
481583 receivedEntries := 0
482584 for s.Scan() {
483483- var entry didplc.LogEntry
585585+ var entry logEntryWithSeq
484586 if err := json.Unmarshal(s.Bytes(), &entry); err != nil {
485587 return // Failed to decode JSON
486588 }
487487- if _, present := seenCIDs[entry.CID]; !present {
488488- entries = append(entries, entry)
489489- seenCIDs[entry.CID] = struct{}{}
490490- }
589589+ entries = append(entries, entry)
491590 receivedEntries++
492591 }
493592 if s.Err() != nil {
···499598 }
500599501600 // Process each entry
502502- var lastCreatedAt string
503601 for _, entry := range entries {
504504- lastCreatedAt = entry.CreatedAt
505505- if !yield(entry) {
602602+ after = entry.Seq
603603+ if !yield(entry.LogEntry) {
506604 return
507605 }
508606 }
···510608 if receivedEntries < batchSize {
511609 return
512610 }
513513-514514- after = lastCreatedAt
515611 }
516612 }
517613}
+158-108
store/tree.go
···44 "encoding/base32"
55 "encoding/binary"
66 "iter"
77+ "math"
78 "slices"
89 "strings"
910 "time"
···1718 "github.com/polydawn/refmt/obj/atlas"
1819 "github.com/samber/lo"
1920 "github.com/samber/mo"
2121+ "tangled.org/gbl08ma/didplcbft/types"
2022)
21232222-// ActualAtprotoDatetimeLayout is the format for CreatedAt timestamps
2323-// AtprotoDatetimeLayout as defined by github.com/bluesky-social/indigo/atproto/syntax omits trailing zeros in the milliseconds
2424-// This doesn't match how the official plc.directory implementation formats them, so we define that format here with trailing zeros included
2525-const ActualAtprotoDatetimeLayout = "2006-01-02T15:04:05.000Z"
2626-2724var Tree PLCTreeStore = &TreeStore{}
28252926type PLCTreeStore interface {
3030- AuditLog(tree ReadOnlyTree, did string, withProof bool) ([]didplc.LogEntry, *ics23.CommitmentProof, error)
3131- AuditLogReverseIterator(tree ReadOnlyTree, did string, err *error) iter.Seq2[int, didplc.LogEntry]
3232- ExportOperations(tree ReadOnlyTree, after time.Time, count int) ([]didplc.LogEntry, error) // passing a count of zero means unlimited
2727+ AuditLog(tree ReadOnlyTree, did string, withProof bool) ([]types.SequencedLogEntry, *ics23.CommitmentProof, error)
2828+ AuditLogReverseIterator(tree ReadOnlyTree, did string, err *error) iter.Seq2[int, types.SequencedLogEntry]
2929+ ExportOperations(tree ReadOnlyTree, after uint64, count int) ([]types.SequencedLogEntry, error) // passing a count of zero means unlimited
3330 StoreOperation(tree *iavl.MutableTree, entry didplc.LogEntry, nullifyWithIndexEqualOrGreaterThan mo.Option[int]) error
3431 ReplaceHistory(tree *iavl.MutableTree, history []didplc.LogEntry) error
3532}
···3936// TreeStore exists just to groups methods nicely
4037type TreeStore struct{}
41384242-func (t *TreeStore) AuditLog(tree ReadOnlyTree, did string, withProof bool) ([]didplc.LogEntry, *ics23.CommitmentProof, error) {
3939+func (t *TreeStore) AuditLog(tree ReadOnlyTree, did string, withProof bool) ([]types.SequencedLogEntry, *ics23.CommitmentProof, error) {
4340 proofs := []*ics23.CommitmentProof{}
44414542 didBytes, err := didToBytes(did)
···6158 return nil, nil, stacktrace.Propagate(err, "")
6259 }
6360 operationKeys = make([][]byte, 0, len(logOperations)/8)
6464- for ts := range slices.Chunk(logOperations, 8) {
6565- operationKeys = append(operationKeys, timestampBytesToDIDOperationKey(ts, didBytes))
6161+ for seqBytes := range slices.Chunk(logOperations, 8) {
6262+ operationKeys = append(operationKeys, sequenceBytesToOperationKey(seqBytes))
6663 }
6764 }
6865···7471 proofs = append(proofs, proof)
7572 }
76737777- logEntries := make([]didplc.LogEntry, 0, len(operationKeys))
7474+ logEntries := make([]types.SequencedLogEntry, 0, len(operationKeys))
7875 for _, opKey := range operationKeys {
7976 operationValue, err := tree.Get(opKey)
8077 if err != nil {
···8986 proofs = append(proofs, proof)
9087 }
91889292- nullified, operation, err := unmarshalOperationValue(operationValue)
8989+ logEntry, err := unmarshalLogEntry(opKey, operationValue)
9390 if err != nil {
9491 return nil, nil, stacktrace.Propagate(err, "")
9592 }
96939797- timestamp, actualDID, err := unmarshalOperationKey(opKey)
9898- if err != nil {
9999- return nil, nil, stacktrace.Propagate(err, "")
100100- }
101101-102102- logEntries = append(logEntries, didplc.LogEntry{
103103- DID: actualDID,
104104- Operation: operation,
105105- CID: operation.AsOperation().CID().String(),
106106- Nullified: nullified,
107107- CreatedAt: timestamp.Format(ActualAtprotoDatetimeLayout),
108108- })
9494+ logEntries = append(logEntries, logEntry)
10995 }
1109611197 var combinedProof *ics23.CommitmentProof
···118104 return logEntries, combinedProof, nil
119105}
120106121121-func (t *TreeStore) AuditLogReverseIterator(tree ReadOnlyTree, did string, retErr *error) iter.Seq2[int, didplc.LogEntry] {
122122- return func(yield func(int, didplc.LogEntry) bool) {
107107+func (t *TreeStore) AuditLogReverseIterator(tree ReadOnlyTree, did string, retErr *error) iter.Seq2[int, types.SequencedLogEntry] {
108108+ return func(yield func(int, types.SequencedLogEntry) bool) {
123109 didBytes, err := didToBytes(did)
124110 if err != nil {
125111 *retErr = stacktrace.Propagate(err, "")
···142128 return
143129 }
144130 operationKeys = make([][]byte, 0, len(logOperations)/8)
145145- for ts := range slices.Chunk(logOperations, 8) {
146146- operationKeys = append(operationKeys, timestampBytesToDIDOperationKey(ts, didBytes))
131131+ for seqBytes := range slices.Chunk(logOperations, 8) {
132132+ operationKeys = append(operationKeys, sequenceBytesToOperationKey(seqBytes))
147133 }
148134 }
149135···155141 return
156142 }
157143158158- nullified, operation, err := unmarshalOperationValue(operationValue)
144144+ logEntry, err := unmarshalLogEntry(opKey, operationValue)
159145 if err != nil {
160146 *retErr = stacktrace.Propagate(err, "")
161147 return
162148 }
163149164164- timestamp, actualDID, err := unmarshalOperationKey(opKey)
165165- if err != nil {
166166- *retErr = stacktrace.Propagate(err, "")
167167- return
168168- }
169169-170170- if !yield(i, didplc.LogEntry{
171171- DID: actualDID,
172172- Operation: operation,
173173- CID: operation.AsOperation().CID().String(),
174174- Nullified: nullified,
175175- CreatedAt: timestamp.Format(ActualAtprotoDatetimeLayout),
176176- }) {
150150+ if !yield(i, logEntry) {
177151 return
178152 }
179153 }
180154 }
181155}
182156183183-func (t *TreeStore) ExportOperations(tree ReadOnlyTree, after time.Time, count int) ([]didplc.LogEntry, error) {
157157+func (t *TreeStore) ExportOperations(tree ReadOnlyTree, after uint64, count int) ([]types.SequencedLogEntry, error) {
184158 // as the name suggests, after is an exclusive lower bound, but our iterators use inclusive lower bounds
185185- start := after.Add(1 * time.Nanosecond)
186186- startKey := marshalOperationKey(start, make([]byte, 15))
187187- if after.UnixNano() < 0 {
188188- // our storage format doesn't deal well with negative unix timestamps,
189189- // but that's fine because we don't have operations created that far back. assume we just want to iterate from the start
190190- copy(startKey[1:8], make([]byte, 8))
191191- }
159159+ start := after + 1
160160+ startKey := marshalOperationKey(start)
161161+ endKey := maxOperationKey
192162193193- entries := make([]didplc.LogEntry, 0, count)
163163+ entries := make([]types.SequencedLogEntry, 0, count)
194164 var iterErr error
195195- tree.IterateRange(startKey, nil, true, func(operationKey, operationValue []byte) bool {
196196- nullified, operation, err := unmarshalOperationValue(operationValue)
165165+ tree.IterateRange(startKey, endKey, true, func(operationKey, operationValue []byte) bool {
166166+ logEntry, err := unmarshalLogEntry(operationKey, operationValue)
197167 if err != nil {
198168 iterErr = stacktrace.Propagate(err, "")
199169 return true
200170 }
201171202202- timestamp, actualDID, err := unmarshalOperationKey(operationKey)
203203- if err != nil {
204204- iterErr = stacktrace.Propagate(err, "")
205205- return true
206206- }
207207-208208- entries = append(entries, didplc.LogEntry{
209209- DID: actualDID,
210210- Operation: operation,
211211- CID: operation.AsOperation().CID().String(),
212212- Nullified: nullified,
213213- CreatedAt: timestamp.Format(ActualAtprotoDatetimeLayout),
214214- })
172172+ entries = append(entries, logEntry)
215173 return len(entries) == count // this condition being checked here also makes it so that a count of zero means unlimited
216174 })
217175 if iterErr != nil {
···237195 operationKeys = [][]byte{}
238196 } else {
239197 operationKeys = make([][]byte, 0, len(logOperations)/8)
240240- for ts := range slices.Chunk(logOperations, 8) {
241241- operationKeys = append(operationKeys, timestampBytesToDIDOperationKey(ts, didBytes))
198198+ for seqBytes := range slices.Chunk(logOperations, 8) {
199199+ operationKeys = append(operationKeys, sequenceBytesToOperationKey(seqBytes))
242200 }
243201 }
244202···262220 return stacktrace.Propagate(err, "invalid CreatedAt")
263221 }
264222223223+ seq, err := getNextSeqID(tree)
224224+ if err != nil {
225225+ return stacktrace.Propagate(err, "")
226226+ }
227227+265228 operation := entry.Operation.AsOperation()
266266- opKey := marshalOperationKey(opDatetime.Time(), didBytes)
267267- opValue := marshalOperationValue(entry.Nullified, operation)
229229+ opKey := marshalOperationKey(seq)
230230+ opValue := marshalOperationValue(entry.Nullified, didBytes, opDatetime.Time(), operation)
268231269232 _, err = tree.Set(opKey, opValue)
270233 if err != nil {
···280243 return nil
281244}
282245283283-func (t *TreeStore) ReplaceHistory(tree *iavl.MutableTree, history []didplc.LogEntry) error {
284284- if len(history) == 0 {
246246+func (t *TreeStore) ReplaceHistory(tree *iavl.MutableTree, remoteHistory []didplc.LogEntry) error {
247247+ if len(remoteHistory) == 0 {
285248 // for now this isn't needed, if it's needed in the future we'll have to accept a DID as argument on this function
286249 return stacktrace.NewError("can't replace with empty history")
287250 }
288251289289- did := history[0].DID
252252+ did := remoteHistory[0].DID
290253291254 didBytes, err := didToBytes(did)
292255 if err != nil {
···295258296259 logKey := marshalDIDLogKey(didBytes)
297260298298- // identify keys of existing operations for this DID (if any)
299299- var prevOpKeys [][]byte
300300- logOperations, err := tree.Get(logKey)
261261+ localHistory, _, err := t.AuditLog(tree, did, false)
301262 if err != nil {
302263 return stacktrace.Propagate(err, "")
303264 }
304304- prevOpKeys = make([][]byte, 0, len(logOperations)/8)
305305- for ts := range slices.Chunk(logOperations, 8) {
306306- prevOpKeys = append(prevOpKeys, timestampBytesToDIDOperationKey(ts, didBytes))
265265+266266+ // if the first operations are equal to what we already have, keep them untouched to minimize the turmoil
267267+ keepLocalBeforeIdx := 0
268268+ for i, localEntry := range localHistory {
269269+ if i >= len(remoteHistory) {
270270+ break
271271+ }
272272+ remoteEntry := remoteHistory[i]
273273+274274+ // stop looping once we find a difference
275275+ // we trust that the authoritative source computes CIDs properly (i.e. that two operations having the same CID are indeed equal)
276276+ if localEntry.Nullified != remoteEntry.Nullified || localEntry.CID.String() != remoteEntry.CID {
277277+ break
278278+ }
279279+280280+ remoteDatetime, err := syntax.ParseDatetime(remoteEntry.CreatedAt)
281281+ if err != nil {
282282+ return stacktrace.Propagate(err, "invalid CreatedAt")
283283+ }
284284+285285+ if !localEntry.CreatedAt.Equal(remoteDatetime.Time()) {
286286+ break
287287+ }
288288+289289+ keepLocalBeforeIdx++
307290 }
308291309309- // remove existing operations for this DID (if any)
310310- for _, key := range prevOpKeys {
292292+ // all replaced/added operations get new sequence IDs.
293293+ // Get the highest sequence ID before removing any keys to ensure the sequence IDs actually change
294294+ seq, err := getNextSeqID(tree)
295295+ if err != nil {
296296+ return stacktrace.Propagate(err, "")
297297+ }
298298+299299+ // remove existing conflicting operations for this DID (if any)
300300+ logOperations, err := tree.Get(logKey)
301301+ if err != nil {
302302+ return stacktrace.Propagate(err, "")
303303+ }
304304+ logOperationsToDelete := logOperations[8*keepLocalBeforeIdx:]
305305+ for seqBytes := range slices.Chunk(logOperationsToDelete, 8) {
306306+ key := sequenceBytesToOperationKey(seqBytes)
307307+311308 _, _, err = tree.Remove(key)
312309 if err != nil {
313310 return stacktrace.Propagate(err, "")
314311 }
315312 }
316313317317- // add new list of operations
318318- logOperations = make([]byte, 0, len(history)*8)
319319- for _, entry := range history {
314314+ // add just the operations past the point they weren't kept
315315+ remoteHistory = remoteHistory[keepLocalBeforeIdx:]
316316+317317+ // keep the operations log up until the point we've kept the history
318318+ // clone just to make sure we avoid issues since we got this slice from the tree, it is not meant to be modified
319319+ logOperations = slices.Clone(logOperations[0 : 8*keepLocalBeforeIdx])
320320+321321+ for _, entry := range remoteHistory {
320322 opDatetime, err := syntax.ParseDatetime(entry.CreatedAt)
321323 if err != nil {
322324 return stacktrace.Propagate(err, "invalid CreatedAt")
323325 }
324326325327 operation := entry.Operation.AsOperation()
326326- opKey := marshalOperationKey(opDatetime.Time(), didBytes)
327327- opValue := marshalOperationValue(entry.Nullified, operation)
328328+ opKey := marshalOperationKey(seq)
329329+ seq++
330330+ opValue := marshalOperationValue(entry.Nullified, didBytes, opDatetime.Time(), operation)
328331329332 _, err = tree.Set(opKey, opValue)
330333 if err != nil {
···344347 return nil
345348}
// minOperationKey and maxOperationKey bound the 'o'-prefixed operation key
// space; they are used as the range when scanning all stored operations
// (e.g. to find the highest assigned sequence ID).
var minOperationKey = marshalOperationKey(0)

// NOTE(review): the upper bound is math.MaxInt64 rather than math.MaxUint64
// even though sequence IDs are uint64. Sequence IDs are assigned incrementally
// from zero, so the difference is unreachable in practice — confirm intent.
var maxOperationKey = marshalOperationKey(math.MaxInt64)
352352+353353+func getNextSeqID(tree *iavl.MutableTree) (uint64, error) {
354354+ seq := uint64(0)
355355+ var err error
356356+ tree.IterateRange(minOperationKey, maxOperationKey, false, func(key, value []byte) bool {
357357+ seq, err = unmarshalOperationKey(key)
358358+ return true
359359+ })
360360+361361+ return seq + 1, stacktrace.Propagate(err, "")
362362+}
363363+347364func didToBytes(did string) ([]byte, error) {
348365 if !strings.HasPrefix(did, "did:plc:") {
349366 return nil, stacktrace.NewError("invalid did:plc")
···379396 return key
380397}
// sequenceBytesToOperationKey builds an operation storage key from an already
// big-endian-encoded 8-byte sequence number, prefixing it with the 'o' marker.
func sequenceBytesToOperationKey(sequenceBytes []byte) []byte {
	key := make([]byte, 9)
	key[0] = 'o'
	copy(key[1:], sequenceBytes)
	return key
}
// marshalOperationKey builds the storage key for an operation: the byte 'o'
// followed by the sequence number encoded as 8 big-endian bytes, so that
// lexicographic key order matches sequence order.
func marshalOperationKey(sequence uint64) []byte {
	return binary.BigEndian.AppendUint64([]byte{'o'}, sequence)
}
// unmarshalOperationKey extracts the sequence number from an operation storage
// key ('o' prefix byte followed by an 8-byte big-endian sequence number).
// Returns an error for malformed (too short) keys instead of panicking.
func unmarshalOperationKey(key []byte) (uint64, error) {
	if len(key) < 9 {
		return 0, fmt.Errorf("malformed operation key: %d bytes, want at least 9", len(key))
	}
	return binary.BigEndian.Uint64(key[1:9]), nil
}
407418408408-func marshalOperationValue(nullified bool, operation didplc.Operation) []byte {
409409- o := []byte{lo.Ternary[byte](nullified, 1, 0)}
410410- o = append(o, operation.SignedCBORBytes()...)
419419+func marshalOperationValue(nullified bool, didBytes []byte, createdAt time.Time, operation didplc.Operation) []byte {
420420+ opAsBytes := operation.SignedCBORBytes()
421421+ o := make([]byte, 1+15+8+len(opAsBytes))
422422+423423+ o[0] = lo.Ternary[byte](nullified, 1, 0)
424424+425425+ copy(o[1:16], didBytes)
426426+427427+ ts := uint64(createdAt.Truncate(1 * time.Millisecond).UTC().UnixNano())
428428+ binary.BigEndian.PutUint64(o[16:24], ts)
429429+ copy(o[24:], opAsBytes)
430430+411431 return o
412432}
413433414414-func unmarshalOperationValue(value []byte) (bool, didplc.OpEnum, error) {
434434+func unmarshalOperationValue(value []byte) (bool, string, time.Time, didplc.OpEnum, error) {
415435 nullified := value[0] != 0
436436+437437+ did, err := bytesToDID(value[1:16])
438438+ if err != nil {
439439+ return false, "", time.Time{}, didplc.OpEnum{}, stacktrace.Propagate(err, "")
440440+ }
441441+442442+ createdAtUnixNano := binary.BigEndian.Uint64(value[16:24])
443443+ createdAt := time.Unix(0, int64(createdAtUnixNano)).UTC()
444444+416445 var opEnum didplc.OpEnum
417417- err := cbornode.DecodeInto(value[1:], &opEnum)
446446+ err = cbornode.DecodeInto(value[24:], &opEnum)
447447+ if err != nil {
448448+ return false, "", time.Time{}, didplc.OpEnum{}, stacktrace.Propagate(err, "")
449449+ }
450450+ return nullified, did, createdAt, opEnum, nil
451451+}
452452+453453+func unmarshalLogEntry(operationKey, operationValue []byte) (types.SequencedLogEntry, error) {
454454+ nullified, actualDID, timestamp, operation, err := unmarshalOperationValue(operationValue)
455455+ if err != nil {
456456+ return types.SequencedLogEntry{}, stacktrace.Propagate(err, "")
457457+ }
458458+459459+ seq, err := unmarshalOperationKey(operationKey)
418460 if err != nil {
419419- return false, didplc.OpEnum{}, stacktrace.Propagate(err, "")
461461+ return types.SequencedLogEntry{}, stacktrace.Propagate(err, "")
420462 }
421421- return nullified, opEnum, nil
463463+464464+ return types.SequencedLogEntry{
465465+ Seq: seq,
466466+ DID: actualDID,
467467+ Operation: operation,
468468+ CID: operation.AsOperation().CID(),
469469+ Nullified: nullified,
470470+ CreatedAt: timestamp,
471471+ }, nil
422472}
423473424474func init() {
+32
types/log_entry.go
···11+package types
22+33+import (
44+ "time"
55+66+ "github.com/did-method-plc/go-didplc"
77+ "github.com/ipfs/go-cid"
88+)
// SequencedLogEntry is a locally stored PLC operation log entry, extended with
// the storage sequence number that totally orders operations across all DIDs.
type SequencedLogEntry struct {
	Seq       uint64        // storage sequence ID (big-endian-encoded in the operation key)
	DID       string        // the did:plc identifier this operation belongs to
	Operation didplc.OpEnum // the decoded PLC operation
	CID       cid.Cid       // CID of the signed operation
	Nullified bool          // whether the operation is marked nullified in the log
	CreatedAt time.Time     // operation creation time (stored with millisecond precision)
}
1818+1919+func (l SequencedLogEntry) ToDIDPLCLogEntry() didplc.LogEntry {
2020+ return didplc.LogEntry{
2121+ DID: l.DID,
2222+ Operation: l.Operation,
2323+ CID: l.CID.String(),
2424+ Nullified: l.Nullified,
2525+ CreatedAt: l.CreatedAt.Format(ActualAtprotoDatetimeLayout),
2626+ }
2727+}
// ActualAtprotoDatetimeLayout is the time.Format layout for CreatedAt timestamps.
// AtprotoDatetimeLayout as defined by github.com/bluesky-social/indigo/atproto/syntax
// omits trailing zeros in the milliseconds; that doesn't match how the official
// plc.directory implementation formats timestamps, so this layout keeps the
// trailing zeros (fixed three-digit milliseconds, UTC "Z" suffix).
const ActualAtprotoDatetimeLayout = "2006-01-02T15:04:05.000Z"