···1111 dbm "github.com/cometbft/cometbft-db"
1212 abcitypes "github.com/cometbft/cometbft/abci/types"
1313 "github.com/cosmos/iavl"
1414+ "github.com/klauspost/compress/zstd"
1415 "github.com/palantir/stacktrace"
1516 "github.com/samber/lo"
1617 "tangled.org/gbl08ma.com/didplcbft/dbadapter"
1818+ "tangled.org/gbl08ma.com/didplcbft/dbadapter/zstddict"
1719 "tangled.org/gbl08ma.com/didplcbft/plc"
1820 "tangled.org/gbl08ma.com/didplcbft/store"
1921 "tangled.org/gbl08ma.com/didplcbft/transaction"
···4244// store and plc must be able to share transaction objects
4345func NewDIDPLCApplication(treeDB dbm.DB, indexDB dbm.DB, clearData func(), snapshotDirectory string) (*DIDPLCApplication, *transaction.Factory, plc.PLC, func(), error) {
4446 mkTree := func() *iavl.MutableTree {
4545- return iavl.NewMutableTree(dbadapter.Adapt(treeDB), 500000, false, iavl.NewNopLogger(), iavl.AsyncPruningOption(false))
4747+ // Using SpeedDefault appears to cause the processing time for ExecuteOperation to double on average
4848+ // Using SpeedBetterCompression appears to cause the processing time to double again
4949+ // By using SpeedFastest we seem to give up on like 5% size reduction, it's not worth using the slower speeds
5050+ return iavl.NewMutableTree(dbadapter.AdaptWithCompression(treeDB, zstd.SpeedFastest, zstddict.PLCZstdDict), 500000, false, iavl.NewNopLogger(), iavl.AsyncPruningOption(false))
4651 }
47524853 tree := mkTree()
+102-7
dbadapter/adapter.go
···44 "cosmossdk.io/core/store"
55 dbm "github.com/cometbft/cometbft-db"
66 iavldbm "github.com/cosmos/iavl/db"
77+ "github.com/klauspost/compress/zstd"
88+ "github.com/palantir/stacktrace"
79)
// AdaptedDB adapts a cometbft-db DB to the iavl db interface, optionally
// compressing stored values transparently with zstd (see compressValue /
// decompressValue for the on-disk format).
type AdaptedDB struct {
	underlying dbm.DB

	// these two may be nil when not compressing:
	zstdEncoder *zstd.Encoder
	zstdDecoder *zstd.Decoder
}
12181319func Adapt(underlying dbm.DB) *AdaptedDB {
···1622 }
1723}
// AdaptWithCompression wraps underlying like Adapt, additionally compressing
// stored values with zstd at the given encoder level, using dict as a shared
// compression dictionary. Data written this way can only be read back with
// the same dictionary.
func AdaptWithCompression(underlying dbm.DB, level zstd.EncoderLevel, dict []byte) *AdaptedDB {
	// NOTE(review): both constructor errors are discarded. zstd.NewWriter /
	// NewReader only fail on invalid options, but a bad level or dict here
	// would silently produce nil (de)compressors — confirm inputs are always
	// trusted, compile-time values.
	zstdEncoder, _ := zstd.NewWriter(nil, zstd.WithEncoderDict(dict), zstd.WithEncoderLevel(level))
	zstdDecoder, _ := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))

	return &AdaptedDB{
		underlying:  underlying,
		zstdEncoder: zstdEncoder,
		zstdDecoder: zstdDecoder,
	}
}
3535+1936var _ iavldbm.DB = (*AdaptedDB)(nil)
20372138// Close implements [iavldbm.DB].
···25422643// Get implements [iavldbm.DB].
2744func (b *AdaptedDB) Get(key []byte) ([]byte, error) {
2828- return b.underlying.Get(key)
4545+ v, err := b.underlying.Get(key)
4646+ if err != nil {
4747+ return nil, stacktrace.Propagate(err, "")
4848+ }
4949+ v, err = decompressValue(b.zstdDecoder, v)
5050+ return v, stacktrace.Propagate(err, "")
2951}
30523153// Has implements [iavldbm.DB].
···35573658// AdaptedIterator adapts badger.Iterator to store.Iterator
// AdaptedIterator adapts dbm.Iterator to store.Iterator, decompressing values
// on access. zstdDecoder may be nil when the parent DB does not compress.
type AdaptedIterator struct {
	zstdDecoder    *zstd.Decoder
	underlying     dbm.Iterator
	calledNextOnce bool
}
···6083}
// Value returns the decompressed value at the current iterator position.
func (i *AdaptedIterator) Value() []byte {
	// NOTE(review): the decompression error is discarded because this
	// interface gives Value no way to report it; a corrupt entry surfaces as
	// a nil/partial value here. Consider stashing the error so Error() can
	// return it.
	v, _ := decompressValue(i.zstdDecoder, i.underlying.Value())
	return v
}
65896690func (i *AdaptedIterator) Error() error {
···77101 if err != nil {
78102 return nil, err
79103 }
8080- return &AdaptedIterator{underlying: i}, nil
104104+ return &AdaptedIterator{underlying: i, zstdDecoder: b.zstdDecoder}, nil
81105}
8210683107// ReverseIterator implements [iavldbm.DB].
···86110 if err != nil {
87111 return nil, err
88112 }
8989- return &AdaptedIterator{underlying: i}, nil
113113+ return &AdaptedIterator{underlying: i, zstdDecoder: b.zstdDecoder}, nil
90114}
9111592116// NewBatch implements [db.DB].
93117func (b *AdaptedDB) NewBatch() store.Batch {
9494- return &AdaptedBatch{b.underlying.NewBatch()}
118118+ return &AdaptedBatch{
119119+ underlying: b.underlying.NewBatch(),
120120+ zstdEncoder: b.zstdEncoder,
121121+ }
95122}
// NewBatchWithSize implements [db.DB].
// NOTE(review): the size hint is dropped and a plain batch is returned —
// presumably the underlying cometbft-db batch cannot be pre-sized; confirm.
func (b *AdaptedDB) NewBatchWithSize(int) store.Batch {
	return b.NewBatch()
}
// AdaptedBatch adapts dbm.Batch to store.Batch, compressing values on Set.
type AdaptedBatch struct {
	underlying  dbm.Batch
	zstdEncoder *zstd.Encoder // nil when not compressing
}
// Close implements [store.Batch], releasing the underlying batch's resources.
func (a *AdaptedBatch) Close() error {
	return a.underlying.Close()
}
// Delete implements [store.Batch]. Keys are never compressed, so the key is
// passed through to the underlying batch unchanged.
func (a *AdaptedBatch) Delete(key []byte) error {
	return a.underlying.Delete(key)
}
143143+144144+// Set implements [store.Batch].
145145+func (a *AdaptedBatch) Set(key []byte, value []byte) error {
146146+ v := compressValue(a.zstdEncoder, value)
147147+ return stacktrace.Propagate(a.underlying.Set(key, v), "")
148148+}
// Write implements [store.Batch], flushing the buffered operations to the
// underlying store.
func (a *AdaptedBatch) Write() error {
	return a.underlying.Write()
}
// WriteSync implements [store.Batch]; like Write, but with whatever
// synchronous-durability guarantee the underlying dbm.Batch provides.
func (a *AdaptedBatch) WriteSync() error {
	return a.underlying.WriteSync()
}
// GetByteSize implements [store.Batch].
// NOTE(review): always reports 0 — the underlying dbm.Batch exposes no size,
// so any caller using this as a flush threshold gets no signal; confirm that
// is acceptable.
func (a *AdaptedBatch) GetByteSize() (int, error) {
	return 0, nil
}
164164+165165+func compressValue(encoder *zstd.Encoder, value []byte) []byte {
166166+ if encoder == nil {
167167+ return value
168168+ }
169169+ if len(value) < 192 {
170170+ // this is probably a inner node of the iavl tree and we don't gain anything from compressing those 50-ish byte values
171171+ return prepend(value, 0x00)
172172+ }
173173+ buf := make([]byte, 0, len(value)+5) // a bit of an extra buffer because, rarely, the value increases in size and this way we save on one reallocation
174174+ return prepend(encoder.EncodeAll(value, buf), 0x01)
175175+}
176176+177177+func decompressValue(decoder *zstd.Decoder, value []byte) ([]byte, error) {
178178+ if decoder == nil || len(value) == 0 {
179179+ return value, nil
180180+ } else if value[0] == 0x00 {
181181+ return value[1:], nil
182182+ }
183183+ // passing a nil output buffer to DecodeAll means it'll optimistically start by allocating len(value)*2
184184+ // but we observe compression ratios better than 50% frequently, so we allocate a slice ourselves with cap len(value)*3
185185+ value, err := decoder.DecodeAll(value[1:], make([]byte, 0, len(value)*3))
186186+ return value, stacktrace.Propagate(err, "")
187187+}
// prepend returns s with v inserted at index 0 — a single-element special
// case of slices.Insert. When s has spare capacity its backing array is
// reused (and therefore mutated); otherwise a new array of exactly len(s)+1
// elements is allocated.
func prepend[S ~[]E, E any](s S, v E) S {
	if len(s) >= cap(s) {
		// No room to grow in place: allocate once and copy behind the marker.
		grown := make(S, len(s)+1)
		grown[0] = v
		copy(grown[1:], s)
		return grown
	}
	// Extend into the spare capacity and shift everything right by one;
	// copy has memmove semantics, so the overlapping ranges are fine.
	grown := s[:len(s)+1]
	copy(grown[1:], grown)
	grown[0] = v
	return grown
}