···11+package abciapp
22+33+import (
44+ "bytes"
55+ "context"
66+ "slices"
77+ "time"
88+99+ abcitypes "github.com/cometbft/cometbft/abci/types"
1010+ "github.com/palantir/stacktrace"
1111+)
1212+1313+// InitChain implements [types.Application].
1414+func (d *DIDPLCApplication) InitChain(context.Context, *abcitypes.RequestInitChain) (*abcitypes.ResponseInitChain, error) {
1515+ // TODO
1616+ return &abcitypes.ResponseInitChain{}, nil
1717+}
1818+1919+// PrepareProposal implements [types.Application].
2020+func (d *DIDPLCApplication) PrepareProposal(ctx context.Context, req *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) {
2121+ defer d.tree.Rollback()
2222+2323+ st := time.Now()
2424+ acceptedTx := make([][]byte, 0, len(req.Txs))
2525+ toProcess := req.Txs
2626+ for {
2727+ toTryNext := [][]byte{}
2828+ for _, tx := range toProcess {
2929+ result, err := processTx(ctx, d.plc, tx, req.Time, true)
3030+ if err != nil {
3131+ return nil, stacktrace.Propagate(err, "")
3232+ }
3333+3434+ if result.Code == 0 {
3535+ acceptedTx = append(acceptedTx, tx)
3636+ } else {
3737+ // if a transaction is invalid, it _might_ be because it depends on a transaction that's further up in the list
3838+ // process it after all the others
3939+ toTryNext = append(toTryNext, tx)
4040+ }
4141+ }
4242+ if len(toProcess) == len(toTryNext) {
4343+ // we made no progress in this iteration - all transactions left to process fail to do so
4444+ // so they can't be depending on anything that would be included in this block, at this point
4545+ // just continue while dropping the transactions that would never succeed in this block
4646+ break
4747+ }
4848+ if time.Since(st) > 800*time.Millisecond {
4949+ // this is taking too long, just continue with what's already in acceptedTx
5050+ break
5151+ }
5252+ toProcess = toTryNext
5353+ }
5454+5555+ return &abcitypes.ResponsePrepareProposal{Txs: acceptedTx}, nil
5656+}
5757+5858+// ProcessProposal implements [types.Application].
func (d *DIDPLCApplication) ProcessProposal(ctx context.Context, req *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) {
	// do not rollback tree in this method, in case the changes can be reused in FinalizeBlock
	if req.Height != d.tree.WorkingVersion() {
		// our tree went out of sync, this should never happen
		return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_REJECT}, nil
	}

	// if we return early, ensure we don't use incomplete results where we haven't voted ACCEPT
	d.lastProcessedProposalHash = nil
	d.lastProcessedProposalExecTxResults = nil
	defer func() {
		if d.lastProcessedProposalHash == nil {
			// we didn't vote ACCEPT
			// we could rollback only eventually on FinalizeBlock, but why wait - rollback now for safety
			d.tree.Rollback()
		}
	}()

	txResults := make([]*abcitypes.ExecTxResult, len(req.Txs))
	for i, tx := range req.Txs {
		result, err := processTx(ctx, d.plc, tx, req.Time, true)
		if err != nil {
			// internal error (not a transaction validity problem) - abort entirely
			return nil, stacktrace.Propagate(err, "")
		}
		// apply the speculative state changes to the working tree
		// (they only become durable on Commit)
		for _, c := range result.TreeChanges {
			_, err := d.tree.Set(c.Key, c.Value)
			if err != nil {
				return nil, stacktrace.Propagate(err, "")
			}
		}
		// when preparing a proposal, invalid transactions should have been discarded
		// so, if something doesn't succeed now, something has gone wrong and we should not vote in agreement of the proposal
		if result.Code != 0 {
			return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_REJECT}, nil
		}

		txResults[i] = &abcitypes.ExecTxResult{
			Code:      result.Code,
			Data:      result.Data,
			Log:       result.Log,
			Info:      result.Info,
			GasWanted: result.GasWanted,
			GasUsed:   result.GasUsed,
			Events:    result.Events,
			Codespace: result.Codespace,
		}
	}

	// remember the outcome so FinalizeBlock can reuse it if this exact proposal is decided
	d.lastProcessedProposalHash = slices.Clone(req.Hash)
	d.lastProcessedProposalExecTxResults = txResults

	return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}, nil
}
112112+113113+// ExtendVote implements [types.Application].
114114+func (d *DIDPLCApplication) ExtendVote(context.Context, *abcitypes.RequestExtendVote) (*abcitypes.ResponseExtendVote, error) {
115115+ // TODO
116116+ return &abcitypes.ResponseExtendVote{}, nil
117117+}
118118+119119+// VerifyVoteExtension implements [types.Application].
120120+func (d *DIDPLCApplication) VerifyVoteExtension(context.Context, *abcitypes.RequestVerifyVoteExtension) (*abcitypes.ResponseVerifyVoteExtension, error) {
121121+ // TODO
122122+ return &abcitypes.ResponseVerifyVoteExtension{}, nil
123123+}
124124+125125+// FinalizeBlock implements [types.Application].
func (d *DIDPLCApplication) FinalizeBlock(ctx context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) {
	if bytes.Equal(req.Hash, d.lastProcessedProposalHash) && d.lastProcessedProposalExecTxResults != nil {
		// the block that was decided was the one we processed in ProcessProposal, and ProcessProposal processed successfully
		// reuse the uncommitted results
		return &abcitypes.ResponseFinalizeBlock{
			TxResults: d.lastProcessedProposalExecTxResults,
			AppHash:   d.tree.WorkingHash(),
		}, nil
	}
	// a block other than the one we processed in ProcessProposal was decided
	// discard the current modified state, and process the decided block
	d.tree.Rollback()

	txResults := make([]*abcitypes.ExecTxResult, len(req.Txs))
	for i, tx := range req.Txs {
		result, err := processTx(ctx, d.plc, tx, req.Time, true)
		if err != nil {
			// internal error (not a transaction validity problem) - abort
			return nil, stacktrace.Propagate(err, "")
		}
		// apply the resulting state changes to the working tree
		// (they only become durable on Commit)
		for _, c := range result.TreeChanges {
			_, err := d.tree.Set(c.Key, c.Value)
			if err != nil {
				return nil, stacktrace.Propagate(err, "")
			}
		}
		txResults[i] = &abcitypes.ExecTxResult{
			Code:      result.Code,
			Data:      result.Data,
			Log:       result.Log,
			Info:      result.Info,
			GasWanted: result.GasWanted,
			GasUsed:   result.GasUsed,
			Events:    result.Events,
			Codespace: result.Codespace,
		}
	}

	return &abcitypes.ResponseFinalizeBlock{
		TxResults: txResults,
		AppHash:   d.tree.WorkingHash(),
	}, nil
}
168168+169169+// Commit implements [types.Application].
170170+func (d *DIDPLCApplication) Commit(context.Context, *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) {
171171+ _, _, err := d.tree.SaveVersion()
172172+ if err != nil {
173173+ return nil, stacktrace.Propagate(err, "")
174174+ }
175175+176176+ // TODO(later) consider whether we can set some RetainHeight in the response
177177+ return &abcitypes.ResponseCommit{}, nil
178178+}
···11+package abciapp
22+33+import (
44+ "context"
55+ "encoding/json"
66+ "time"
77+88+ "github.com/did-method-plc/go-didplc"
99+ "github.com/ipfs/go-cid"
1010+ cbornode "github.com/ipfs/go-ipld-cbor"
1111+ "github.com/palantir/stacktrace"
1212+ "tangled.org/gbl08ma/didplcbft/plc"
1313+)
// CreatePlcOpArguments is the payload of a TransactionActionCreatePlcOp transaction.
type CreatePlcOpArguments struct {
	// DID is the identifier the operation applies to.
	DID string `json:"did" refmt:"did"`
	// Operation is the PLC operation to record.
	// NOTE(review): unlike DID, this field carries no json tag - presumably only
	// the refmt/CBOR encoding matters here; confirm the asymmetry is intentional.
	Operation *didplc.OpEnum `refmt:"operation"`
}
1919+2020+func (CreatePlcOpArguments) ForAction() TransactionAction {
2121+ return TransactionActionCreatePlcOp
2222+}
func init() {
	// Register the CBOR codecs for this transaction type so the generic
	// transaction (un)marshalling helpers can handle it.
	cbornode.RegisterCborType(CreatePlcOpArguments{})
	cbornode.RegisterCborType(Transaction[CreatePlcOpArguments]{})
}
2828+2929+func processCreatePlcOpTx(ctx context.Context, p plc.PLC, txBytes []byte, atTime time.Time, execute bool) (*processResult, error) {
3030+ tx, err := UnmarshalTransaction[CreatePlcOpArguments](txBytes)
3131+ if err != nil {
3232+ return &processResult{
3333+ Code: 4000,
3434+ Info: err.Error(),
3535+ }, nil
3636+ }
3737+3838+ // sadly didplc is really designed to unmarshal JSON, not CBOR
3939+ // so JSON ends up being the lingua franca for operations inside our PLC implementation too
4040+ // we also can't instance didplc.Operations directly from the CBOR unmarshaller (the MakeUnmarshalTransformFunc thing)
4141+ // because the interface makes us lose data (it is not powerful enough to detect the type of a transaction, for instance)
4242+ // so our PLC internals end up depending on OpEnum, too
4343+ // the decision to use CBOR for the entire thing at the blockchain transaction level is:
4444+ // - to make transactions more compact
4545+ // - to have more of a canonical format for them (we specifically use the stable CBOR format already used by the PLC for signing)
4646+4747+ // there is one advantage to this approach: by ensuring we first unmarshal the operations into strongly defined types
4848+ // (e.g. the OpEnum struct of the didplc package)
4949+ // we avoid accepting malformed data like what happened in https://github.com/did-method-plc/did-method-plc/issues/71
5050+ opBytes, err := json.Marshal(tx.Arguments.Operation)
5151+ if err != nil {
5252+ return nil, stacktrace.Propagate(err, "internal error")
5353+ }
5454+5555+ var cid cid.Cid
5656+ if execute {
5757+ cid, err = p.ExecuteOperation(ctx, atTime, tx.Arguments.DID, opBytes)
5858+ } else {
5959+ err = p.ValidateOperation(ctx, plc.CommittedTreeVersion, atTime, tx.Arguments.DID, opBytes)
6060+ }
6161+ if err != nil {
6262+ if code, ok := plc.InvalidOperationErrorCode(err); ok {
6363+ return &processResult{
6464+ Code: code,
6565+ Info: err.Error(),
6666+ }, nil
6767+ }
6868+ return nil, stacktrace.Propagate(err, "internal error")
6969+ }
7070+7171+ return &processResult{
7272+ TreeChanges: []treeChange{{
7373+ Key: []byte(tx.Arguments.DID),
7474+ Value: cid.Bytes(),
7575+ }},
7676+ Code: 0,
7777+ }, nil
7878+}
+407
badgeradapter/adapter.go
···11+package badgeradapter
import (
	"bytes"
	"errors"
	"slices"

	"cosmossdk.io/core/store"
	"github.com/cosmos/iavl/db"
	"github.com/palantir/stacktrace"

	badger "github.com/dgraph-io/badger/v4"
)
// BadgerAdapter exposes a badger database through the iavl db.DB interface,
// namespacing every key under a fixed prefix so multiple adapters can share
// one badger instance.
type BadgerAdapter struct {
	badgerDB  *badger.DB // underlying badger instance
	keyPrefix []byte     // prepended to every key before it reaches badger
}
1818+1919+func AdaptBadger(badgerDB *badger.DB, keyPrefix []byte) *BadgerAdapter {
2020+ return &BadgerAdapter{
2121+ badgerDB: badgerDB,
2222+ keyPrefix: keyPrefix,
2323+ }
2424+}
2525+2626+var _ db.DB = (*BadgerAdapter)(nil)
2727+2828+// prefixKey adds the keyPrefix to the given key
2929+func (b *BadgerAdapter) prefixKey(key []byte) []byte {
3030+ result := make([]byte, 0, len(b.keyPrefix)+len(key))
3131+ result = append(result, b.keyPrefix...)
3232+ result = append(result, key...)
3333+ return result
3434+}
3535+3636+// Close implements [db.DB].
3737+func (b *BadgerAdapter) Close() error {
3838+ return b.badgerDB.Close()
3939+}
4040+4141+// Get implements [db.DB].
4242+func (b *BadgerAdapter) Get(key []byte) ([]byte, error) {
4343+ prefixedKey := b.prefixKey(key)
4444+4545+ var value []byte
4646+ err := b.badgerDB.View(func(txn *badger.Txn) error {
4747+ item, err := txn.Get(prefixedKey)
4848+ if err != nil {
4949+ return err
5050+ }
5151+ value, err = item.ValueCopy(nil)
5252+ return err
5353+ })
5454+5555+ if err == badger.ErrKeyNotFound {
5656+ return nil, nil
5757+ }
5858+ if err != nil {
5959+ return nil, stacktrace.Propagate(err, "failed to get key from badger")
6060+ }
6161+6262+ return value, nil
6363+}
6464+6565+// Has implements [db.DB].
6666+func (b *BadgerAdapter) Has(key []byte) (bool, error) {
6767+ prefixedKey := b.prefixKey(key)
6868+6969+ var has bool
7070+ err := b.badgerDB.View(func(txn *badger.Txn) error {
7171+ _, err := txn.Get(prefixedKey)
7272+ if err == badger.ErrKeyNotFound {
7373+ has = false
7474+ return nil
7575+ }
7676+ if err != nil {
7777+ return err
7878+ }
7979+ has = true
8080+ return nil
8181+ })
8282+8383+ if err != nil {
8484+ return false, stacktrace.Propagate(err, "failed to check key existence in badger")
8585+ }
8686+8787+ return has, nil
8888+}
8989+9090+// BadgerIterator adapts badger.Iterator to store.Iterator
// BadgerIterator adapts badger.Iterator to store.Iterator
type BadgerIterator struct {
	badgerIter *badger.Iterator // underlying badger iterator, positioned on prefixed keys
	txn        *badger.Txn      // read-only transaction keeping badgerIter alive until Close
	start      []byte           // inclusive lower bound, without keyPrefix (nil = unbounded)
	end        []byte           // exclusive upper bound, without keyPrefix (nil = unbounded)
	reverse    bool             // true if this is a reverse iterator
	valid      bool             // cached validity; Valid() re-checks bounds on top of this
	keyPrefix  []byte           // prefix every stored key carries; stripped before keys are returned
}
100100+101101+// hasPrefix checks if a prefixed key actually has the expected keyPrefix
102102+func (i *BadgerIterator) hasPrefix(prefixedKey []byte) bool {
103103+ return len(prefixedKey) >= len(i.keyPrefix) && bytes.Equal(prefixedKey[:len(i.keyPrefix)], i.keyPrefix)
104104+}
105105+106106+// stripPrefix removes the keyPrefix from a prefixed key
107107+func (i *BadgerIterator) stripPrefix(prefixedKey []byte) []byte {
108108+ if len(prefixedKey) < len(i.keyPrefix) {
109109+ return prefixedKey // Shouldn't happen, but defensive programming
110110+ }
111111+ stripped := make([]byte, len(prefixedKey)-len(i.keyPrefix))
112112+ copy(stripped, prefixedKey[len(i.keyPrefix):])
113113+ return stripped
114114+}
115115+116116+func (i *BadgerIterator) Domain() (start, end []byte) {
117117+ // Return copies to ensure they're safe for modification
118118+ startCopy := make([]byte, len(i.start))
119119+ endCopy := make([]byte, len(i.end))
120120+ copy(startCopy, i.start)
121121+ copy(endCopy, i.end)
122122+ return startCopy, endCopy
123123+}
124124+125125+func (i *BadgerIterator) Valid() bool {
126126+ if !i.valid || !i.badgerIter.Valid() {
127127+ return false
128128+ }
129129+130130+ // Ensure the current key has the correct keyPrefix
131131+ // If not, skip to the next valid key
132132+ item := i.badgerIter.Item()
133133+ prefixedKey := item.Key()
134134+ if !i.hasPrefix(prefixedKey) {
135135+ // We've gone out of the bounds of "our" prefixes
136136+ return false
137137+ }
138138+139139+ // For forward iteration, check if we've reached the end (end is exclusive)
140140+ if i.end != nil && !i.reverse {
141141+ currentKey := i.stripPrefix(prefixedKey)
142142+ // If current key >= end key, we're done
143143+ if bytes.Compare(currentKey, i.end) >= 0 {
144144+ return false
145145+ }
146146+ }
147147+148148+ // For reverse iteration, check if we've gone below the start (start is inclusive)
149149+ if i.start != nil && i.reverse {
150150+ currentKey := i.stripPrefix(prefixedKey)
151151+ // If current key < start key, we're done
152152+ if bytes.Compare(currentKey, i.start) < 0 {
153153+ return false
154154+ }
155155+ }
156156+157157+ return true
158158+}
159159+160160+func (i *BadgerIterator) Next() {
161161+ if !i.valid {
162162+ panic("iterator is not valid")
163163+ }
164164+ i.badgerIter.Next()
165165+166166+ // Check if the badger iterator is still valid
167167+ if !i.badgerIter.Valid() {
168168+ i.valid = false
169169+ return
170170+ }
171171+172172+ item := i.badgerIter.Item()
173173+ prefixedKey := item.Key()
174174+ if !i.hasPrefix(prefixedKey) {
175175+ // We've gone out of the bounds of "our" prefixes
176176+ i.valid = false
177177+ return
178178+ }
179179+180180+ // For forward iteration, check if we've reached the end (end is exclusive)
181181+ if i.end != nil && !i.reverse {
182182+ currentKey := i.stripPrefix(prefixedKey)
183183+ // If current key >= end key, we're done
184184+ if bytes.Compare(currentKey, i.end) >= 0 {
185185+ i.valid = false
186186+ return
187187+ }
188188+ }
189189+190190+ // For reverse iteration, check if we've gone below the start (start is inclusive)
191191+ if i.start != nil && i.reverse {
192192+ currentKey := i.stripPrefix(prefixedKey)
193193+ // If current key < start key, we're done
194194+ if bytes.Compare(currentKey, i.start) < 0 {
195195+ i.valid = false
196196+ return
197197+ }
198198+ }
199199+200200+ i.valid = true
201201+}
202202+203203+func (i *BadgerIterator) Key() []byte {
204204+ if !i.valid {
205205+ panic("iterator is not valid")
206206+ }
207207+ item := i.badgerIter.Item()
208208+ return i.stripPrefix(item.Key())
209209+}
210210+211211+func (i *BadgerIterator) Value() []byte {
212212+ if !i.valid {
213213+ panic("iterator is not valid")
214214+ }
215215+ item := i.badgerIter.Item()
216216+ value, err := item.ValueCopy(nil)
217217+ if err != nil {
218218+ panic("failed to copy value: " + err.Error())
219219+ }
220220+ return value
221221+}
222222+223223+func (i *BadgerIterator) Error() error {
224224+ // Badger iterator doesn't have a separate error method
225225+ // Errors are typically caught during iteration setup
226226+ return nil
227227+}
228228+229229+func (i *BadgerIterator) Close() error {
230230+ // Close the badger iterator first - this is critical to avoid panics
231231+ if i.badgerIter != nil {
232232+ i.badgerIter.Close()
233233+ }
234234+235235+ // Mark as invalid
236236+ i.valid = false
237237+238238+ // Discard the transaction to release resources
239239+ if i.txn != nil {
240240+ i.txn.Discard()
241241+ i.txn = nil
242242+ }
243243+244244+ return nil
245245+}
246246+247247+// Iterator implements [db.DB].
func (b *BadgerAdapter) Iterator(start []byte, end []byte) (store.Iterator, error) {
	// Create a read-only transaction to hold the iterator
	txn := b.badgerDB.NewTransaction(false)

	// Create prefixed version of start
	prefixedStart := b.prefixKey(start)

	opts := badger.IteratorOptions{
		PrefetchValues: true,
		Reverse:        false,
		AllVersions:    false,
	}
	badgerIter := txn.NewIterator(opts)

	// position at the first key >= start in prefixed key space; the prefix and
	// the exclusive end bound are enforced lazily by BadgerIterator.Valid/Next
	badgerIter.Seek(prefixedStart)

	iterator := &BadgerIterator{
		badgerIter: badgerIter,
		txn:        txn,
		start:      start, // Store original start/end for Domain() method
		end:        end,
		reverse:    false, // This is a forward iterator
		valid:      badgerIter.Valid(),
		keyPrefix:  b.keyPrefix,
	}

	return iterator, nil
}
276276+277277+// incrementSlice assumes that the first byte of b is not 0xff
func incrementSlice(b []byte) {
	// add one to the big-endian number in b, carrying into earlier bytes on
	// overflow; the caller guarantees the first byte is not 0xff, so the
	// carry always terminates
	for i := len(b) - 1; i >= 0; i-- {
		b[i]++
		if b[i] != 0 {
			// no carry left to propagate
			return
		}
	}
}
286286+287287+// ReverseIterator implements [db.DB].
288288+func (b *BadgerAdapter) ReverseIterator(start []byte, end []byte) (store.Iterator, error) {
289289+ // Create a read-only transaction to hold the iterator
290290+ txn := b.badgerDB.NewTransaction(false)
291291+292292+ opts := badger.IteratorOptions{
293293+ PrefetchValues: true,
294294+ Reverse: true, // This enables reverse iteration
295295+ AllVersions: false,
296296+ }
297297+ badgerIter := txn.NewIterator(opts)
298298+299299+ prefixedEnd := b.prefixKey(end)
300300+ incrementedEnd := slices.Clone(prefixedEnd)
301301+ incrementSlice(incrementedEnd) // Badger's Seek is inclusive but in these iterators end is exclusive (except if nil)
302302+303303+ badgerIter.Seek(incrementedEnd)
304304+ // if end is nil, then Badger might be (depending on whether end matches an existing key)
305305+ // already giving us the key we want and there's no need to skip
306306+ if end != nil && badgerIter.Valid() && bytes.Equal(badgerIter.Item().Key(), prefixedEnd) {
307307+ badgerIter.Next()
308308+ }
309309+310310+ iterator := &BadgerIterator{
311311+ badgerIter: badgerIter,
312312+ txn: txn,
313313+ start: start,
314314+ end: end,
315315+ reverse: true, // This is a reverse iterator
316316+ valid: badgerIter.Valid(),
317317+ keyPrefix: b.keyPrefix,
318318+ }
319319+320320+ return iterator, nil
321321+}
322322+323323+// BadgerBatch implements store.Batch
324324+// BadgerBatch writes are atomic up until the point where they'd exceed the badger max transaction size,
325325+// at which point they are split into multiple non-atomic writes
type BadgerBatch struct {
	wb        *badger.WriteBatch // accumulates writes until Write/WriteSync flushes them
	closed    bool               // set once the batch has been written or cancelled
	keyPrefix []byte             // prepended to every key, mirroring BadgerAdapter
}
331331+332332+func (b *BadgerBatch) Set(key, value []byte) error {
333333+ if b.closed {
334334+ return stacktrace.NewError("batch has been written or closed")
335335+ }
336336+ if len(key) == 0 {
337337+ return stacktrace.NewError("key cannot be empty")
338338+ }
339339+ if value == nil {
340340+ return stacktrace.NewError("value cannot be nil")
341341+ }
342342+343343+ prefixedKey := make([]byte, 0, len(b.keyPrefix)+len(key))
344344+ prefixedKey = append(prefixedKey, b.keyPrefix...)
345345+ prefixedKey = append(prefixedKey, key...)
346346+347347+ err := b.wb.Set(prefixedKey, value)
348348+ return stacktrace.Propagate(err, "failed to set key in batch")
349349+}
350350+351351+func (b *BadgerBatch) Delete(key []byte) error {
352352+ if b.closed {
353353+ return stacktrace.NewError("batch has been written or closed")
354354+ }
355355+ if len(key) == 0 {
356356+ return stacktrace.NewError("key cannot be empty")
357357+ }
358358+359359+ prefixedKey := make([]byte, 0, len(b.keyPrefix)+len(key))
360360+ prefixedKey = append(prefixedKey, b.keyPrefix...)
361361+ prefixedKey = append(prefixedKey, key...)
362362+363363+ err := b.wb.Delete(prefixedKey)
364364+ return stacktrace.Propagate(err, "failed to delete key in batch")
365365+}
366366+367367+func (b *BadgerBatch) Write() error {
368368+ if b.closed {
369369+ return stacktrace.NewError("batch has been written or closed")
370370+ }
371371+ b.closed = true
372372+ err := b.wb.Flush()
373373+ return stacktrace.Propagate(err, "failed to write batch")
374374+}
375375+376376+func (b *BadgerBatch) WriteSync() error {
377377+ // Badger doesn't have separate WriteSync, so we just use Write
378378+ return b.Write()
379379+}
380380+381381+func (b *BadgerBatch) Close() error {
382382+ if !b.closed {
383383+ b.wb.Cancel()
384384+ b.closed = true
385385+ }
386386+ return nil
387387+}
388388+389389+func (b *BadgerBatch) GetByteSize() (int, error) {
390390+ // Badger doesn't provide byte size tracking for batches
391391+ // Return 0 as a placeholder
392392+ return 0, nil
393393+}
394394+395395+// NewBatch implements [db.DB].
396396+func (b *BadgerAdapter) NewBatch() store.Batch {
397397+ return &BadgerBatch{
398398+ wb: b.badgerDB.NewWriteBatch(),
399399+ keyPrefix: b.keyPrefix,
400400+ }
401401+}
402402+403403+// NewBatchWithSize implements [db.DB].
404404+func (b *BadgerAdapter) NewBatchWithSize(size int) store.Batch {
405405+ // Badger doesn't support pre-allocated batch sizes, so we just create a regular batch
406406+ return b.NewBatch()
407407+}
+454
badgeradapter/adapter_test.go
···11+package badgeradapter
22+33+import (
44+ "testing"
55+66+ badger "github.com/dgraph-io/badger/v4"
77+ "github.com/stretchr/testify/require"
88+)
// TestBadgerAdapter_KeyPrefixStripping verifies that the adapter stores keys
// WITH its prefix in badger but exposes them WITHOUT the prefix through Get
// and Iterator.
func TestBadgerAdapter_KeyPrefixStripping(t *testing.T) {
	// Create a temporary badger database
	opts := badger.DefaultOptions("").WithInMemory(true)
	db, err := badger.Open(opts)
	require.NoError(t, err)
	defer db.Close()

	// Create adapter with a specific key prefix
	keyPrefix := []byte("test:")
	adapter := AdaptBadger(db, keyPrefix)

	// Write some test data
	batch := adapter.NewBatch()
	err = batch.Set([]byte("key1"), []byte("value1"))
	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	// Verify that the underlying badger database stores the key WITH the prefix
	var foundPrefixedKey bool
	err = db.View(func(txn *badger.Txn) error {
		opts := badger.IteratorOptions{
			PrefetchValues: true,
			Reverse:        false,
			AllVersions:    false,
		}
		iter := txn.NewIterator(opts)
		defer iter.Close()

		for iter.Seek([]byte("test:")); iter.Valid(); iter.Next() {
			item := iter.Item()
			key := item.KeyCopy(nil)
			if string(key) == "test:key1" {
				foundPrefixedKey = true
				value, err := item.ValueCopy(nil)
				require.NoError(t, err)
				require.Equal(t, []byte("value1"), value)
				break
			}
		}
		return nil
	})
	require.NoError(t, err)
	require.True(t, foundPrefixedKey, "Expected to find prefixed key 'test:key1' in underlying badger database")

	// Test Get operation - should work with unprefixed key
	value, err := adapter.Get([]byte("key1"))
	require.NoError(t, err)
	require.Equal(t, []byte("value1"), value)

	// Test iterator - should iterate over prefixed keys but return unprefixed keys
	iter, err := adapter.Iterator([]byte("key1"), []byte("key2"))
	require.NoError(t, err)
	defer iter.Close()

	require.True(t, iter.Valid())
	returnedKey := iter.Key()
	returnedValue := iter.Value()

	// The returned key should NOT have the prefix
	require.Equal(t, []byte("key1"), returnedKey)
	require.Equal(t, []byte("value1"), returnedValue)

	// key1 was the only key in [key1, key2), so the iterator must now be exhausted
	iter.Next()
	require.False(t, iter.Valid())
}
// TestBadgerAdapter_ReverseIteratorPrefixStripping verifies that the reverse
// iterator walks keys from high to low, strips the adapter prefix from
// returned keys, and honors the exclusive end / inclusive start bounds.
func TestBadgerAdapter_ReverseIteratorPrefixStripping(t *testing.T) {
	// Create a temporary badger database
	opts := badger.DefaultOptions("").WithInMemory(true)
	db, err := badger.Open(opts)
	require.NoError(t, err)
	defer db.Close()

	// Create adapter with a specific key prefix
	keyPrefix := []byte("prefix:")
	adapter := AdaptBadger(db, keyPrefix)

	// Write multiple test data entries
	batch := adapter.NewBatch()
	for i := 1; i <= 3; i++ {
		key := []byte("key" + string(rune('0'+i)))
		value := []byte("value" + string(rune('0'+i)))
		err = batch.Set(key, value)
		require.NoError(t, err)
	}
	err = batch.Write()
	require.NoError(t, err)

	// Verify that the underlying badger database stores the keys WITH the prefix
	var foundPrefixedKeys []string
	err = db.View(func(txn *badger.Txn) error {
		opts := badger.IteratorOptions{
			PrefetchValues: true,
			Reverse:        false,
			AllVersions:    false,
		}
		iter := txn.NewIterator(opts)
		defer iter.Close()

		for iter.Seek([]byte("prefix:")); iter.Valid(); iter.Next() {
			item := iter.Item()
			key := item.KeyCopy(nil)
			keyStr := string(key)
			if len(keyStr) > len("prefix:") && keyStr[:len("prefix:")] == "prefix:" {
				foundPrefixedKeys = append(foundPrefixedKeys, keyStr)
			}
		}
		return nil
	})
	require.NoError(t, err)
	require.Len(t, foundPrefixedKeys, 3, "Expected to find 3 prefixed keys in underlying badger database")
	require.Contains(t, foundPrefixedKeys, "prefix:key1")
	require.Contains(t, foundPrefixedKeys, "prefix:key2")
	require.Contains(t, foundPrefixedKeys, "prefix:key3")

	// Test reverse iterator - should iterate over prefixed keys but return unprefixed keys
	iter, err := adapter.ReverseIterator([]byte("key1"), []byte("key4"))
	require.NoError(t, err)
	defer iter.Close()

	// Should start with the last key in range
	require.True(t, iter.Valid())
	returnedKey := iter.Key()
	returnedValue := iter.Value()

	// The returned key should NOT have the prefix
	require.Equal(t, []byte("key3"), returnedKey)
	require.Equal(t, []byte("value3"), returnedValue)

	// Move to previous key
	iter.Next()
	require.True(t, iter.Valid())
	returnedKey = iter.Key()
	returnedValue = iter.Value()
	require.Equal(t, []byte("key2"), returnedKey)
	require.Equal(t, []byte("value2"), returnedValue)

	// Move to previous key again
	iter.Next()
	require.True(t, iter.Valid())
	returnedKey = iter.Key()
	returnedValue = iter.Value()
	require.Equal(t, []byte("key1"), returnedKey)
	require.Equal(t, []byte("value1"), returnedValue)

	// Should be at the beginning of range
	iter.Next()
	require.False(t, iter.Valid())
}
// TestBadgerAdapter_IteratorRespectsEnd verifies that the forward iterator
// treats end as exclusive, and that nil start/end mean "from the first key" /
// "through the last key" respectively.
func TestBadgerAdapter_IteratorRespectsEnd(t *testing.T) {
	// Create a temporary badger database
	opts := badger.DefaultOptions("").WithInMemory(true)
	db, err := badger.Open(opts)
	require.NoError(t, err)
	defer db.Close()

	// Create adapter with a specific key prefix
	keyPrefix := []byte("test:")
	adapter := AdaptBadger(db, keyPrefix)

	// Write test data
	batch := adapter.NewBatch()
	data := map[string]string{
		"apple":      "fruit1",
		"banana":     "fruit2",
		"cherry":     "fruit3",
		"date":       "fruit4",
		"elderberry": "fruit5",
	}
	for key, value := range data {
		err = batch.Set([]byte(key), []byte(value))
		require.NoError(t, err)
	}
	err = batch.Write()
	require.NoError(t, err)

	// Test forward iteration with end boundary
	// NOTE: iter is reassigned below; each deferred Close captures the iterator
	// current at the time of the defer statement, so all three get closed.
	iter, err := adapter.Iterator([]byte("apple"), []byte("cherry"))
	require.NoError(t, err)
	defer iter.Close()

	// Should include "apple" and "banana" but stop before "cherry"
	require.True(t, iter.Valid())
	require.Equal(t, []byte("apple"), iter.Key())
	require.Equal(t, []byte("fruit1"), iter.Value())

	iter.Next()
	require.True(t, iter.Valid())
	require.Equal(t, []byte("banana"), iter.Key())
	require.Equal(t, []byte("fruit2"), iter.Value())

	// Next should stop before "cherry" since end is exclusive
	iter.Next()
	require.False(t, iter.Valid(), "Iterator should be invalid after reaching end boundary")

	// Test forward iteration with nil end (should iterate to the end)
	iter, err = adapter.Iterator([]byte("apple"), nil)
	require.NoError(t, err)
	defer iter.Close()

	count := 0
	for iter.Valid() {
		count++
		iter.Next()
	}
	require.Equal(t, 5, count, "Should iterate over all 5 keys when end is nil")

	// Test forward iteration with start = nil (should start from first key)
	iter, err = adapter.Iterator(nil, []byte("cherry"))
	require.NoError(t, err)
	defer iter.Close()

	count = 0
	for iter.Valid() {
		count++
		iter.Next()
	}
	require.Equal(t, 2, count, "Should iterate over 2 keys (apple, banana) before cherry")
}
// TestBadgerAdapter_ReverseIteratorRespectsStart verifies that reverse
// iteration treats start as inclusive and end as exclusive, and that nil
// bounds extend the range to the first/last key.
func TestBadgerAdapter_ReverseIteratorRespectsStart(t *testing.T) {
	// Create a temporary badger database
	opts := badger.DefaultOptions("").WithInMemory(true)
	db, err := badger.Open(opts)
	require.NoError(t, err)
	defer db.Close()

	// Create adapter with a specific key prefix
	keyPrefix := []byte("test:")
	adapter := AdaptBadger(db, keyPrefix)

	// Write test data
	batch := adapter.NewBatch()
	data := map[string]string{
		"apple":      "fruit1",
		"banana":     "fruit2",
		"cherry":     "fruit3",
		"date":       "fruit4",
		"elderberry": "fruit5",
	}
	for key, value := range data {
		err = batch.Set([]byte(key), []byte(value))
		require.NoError(t, err)
	}
	err = batch.Write()
	require.NoError(t, err)

	// Test reverse iteration with start boundary
	// NOTE: iter is reassigned below; each deferred Close captures the iterator
	// current at the time of the defer statement, so all three get closed.
	iter, err := adapter.ReverseIterator([]byte("banana"), []byte("elderberry"))
	require.NoError(t, err)
	defer iter.Close()

	// Should start from "date" and go backwards to "banana" (inclusive)
	require.True(t, iter.Valid())
	require.Equal(t, []byte("date"), iter.Key())
	require.Equal(t, []byte("fruit4"), iter.Value())

	iter.Next()
	require.True(t, iter.Valid())
	require.Equal(t, []byte("cherry"), iter.Key())
	require.Equal(t, []byte("fruit3"), iter.Value())

	iter.Next()
	require.True(t, iter.Valid())
	require.Equal(t, []byte("banana"), iter.Key())
	require.Equal(t, []byte("fruit2"), iter.Value())

	// Next should stop since we've reached the start boundary (inclusive)
	iter.Next()
	require.False(t, iter.Valid(), "Iterator should be invalid after reaching start boundary")

	// Test reverse iteration with nil start (should go to the beginning)
	iter, err = adapter.ReverseIterator(nil, []byte("cherry"))
	require.NoError(t, err)
	defer iter.Close()

	count := 0
	for iter.Valid() {
		count++
		iter.Next()
	}
	require.Equal(t, 2, count, "Should iterate over 2 keys (banana, apple) before cherry")

	// Test reverse iteration with nil end (should start from the last key)
	iter, err = adapter.ReverseIterator([]byte("banana"), nil)
	require.NoError(t, err)
	defer iter.Close()

	count = 0
	for iter.Valid() {
		count++
		iter.Next()
	}
	require.Equal(t, 4, count, "Should iterate over 4 keys (elderberry, date, cherry, banana) when end is nil")
}
// TestBadgerAdapter_IteratorRespectsKeyPrefix verifies that an adapter built
// with a key prefix only surfaces keys under that prefix — reported with the
// prefix stripped — in both iteration directions and with every combination
// of nil/non-nil bounds, and that an adapter with an empty prefix sees every
// key in the database verbatim.
func TestBadgerAdapter_IteratorRespectsKeyPrefix(t *testing.T) {
	// Create a temporary badger database
	opts := badger.DefaultOptions("").WithInMemory(true)
	db, err := badger.Open(opts)
	require.NoError(t, err)
	defer db.Close()

	// Create adapter with a specific key prefix
	keyPrefix := []byte("table1:")
	adapter := AdaptBadger(db, keyPrefix)

	// Write test data directly to badger (bypassing the adapter) with
	// different prefixes to simulate multiple "tables" sharing one database.
	err = db.Update(func(txn *badger.Txn) error {
		// Write keys with the correct prefix (what the adapter should see)
		err := txn.Set([]byte("table1:apple"), []byte("fruit1"))
		require.NoError(t, err)
		err = txn.Set([]byte("table1:banana"), []byte("fruit2"))
		require.NoError(t, err)
		err = txn.Set([]byte("table1:cherry"), []byte("fruit3"))
		require.NoError(t, err)

		// Write keys with a different prefix (what the adapter should NOT see)
		err = txn.Set([]byte("table2:apple"), []byte("other1"))
		require.NoError(t, err)
		err = txn.Set([]byte("table2:date"), []byte("other2"))
		require.NoError(t, err)

		// Write keys with no prefix (what the adapter should NOT see)
		err = txn.Set([]byte("apple"), []byte("raw1"))
		require.NoError(t, err)
		err = txn.Set([]byte("zebra"), []byte("raw2"))
		require.NoError(t, err)

		return nil
	})
	require.NoError(t, err)

	// Test forward iteration - should only see keys with "table1:" prefix
	iter, err := adapter.Iterator(nil, nil)
	require.NoError(t, err)
	// Each defer below captures the iterator value current at that point,
	// so every iterator opened in this test is closed at function exit.
	defer iter.Close()

	var keys []string
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// Should only see the 3 keys with the correct prefix, stripped of the prefix
	require.Equal(t, []string{"apple", "banana", "cherry"}, keys)

	// Test forward iteration with range - should only see keys with "table1:" prefix in range
	iter, err = adapter.Iterator([]byte("banana"), []byte("cherry"))
	require.NoError(t, err)
	defer iter.Close()

	keys = nil
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// Should only see "banana" (inclusive) but not "cherry" (exclusive)
	require.Equal(t, []string{"banana"}, keys)

	// Test reverse iteration - should only see keys with "table1:" prefix
	iter, err = adapter.ReverseIterator(nil, nil)
	require.NoError(t, err)
	defer iter.Close()

	keys = nil
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// Should see the 3 keys in reverse order, stripped of the prefix
	require.Equal(t, []string{"cherry", "banana", "apple"}, keys)

	// Test reverse iteration with range - should only see keys with "table1:" prefix in range
	iter, err = adapter.ReverseIterator([]byte("apple"), []byte("cherry"))
	require.NoError(t, err)
	defer iter.Close()

	keys = nil
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// Should see keys from cherry (exclusive) down to apple (inclusive)
	require.Equal(t, []string{"banana", "apple"}, keys)

	// Test reverse iteration with wider range - should only see keys with "table1:" prefix in range
	iter, err = adapter.ReverseIterator([]byte("apple"), []byte("zzz"))
	require.NoError(t, err)
	defer iter.Close()

	keys = nil
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// "zzz" lies past every "table1:" key, so the scan starts at "cherry"
	// (the largest in-prefix key) and runs down to "apple" (inclusive).
	require.Equal(t, []string{"cherry", "banana", "apple"}, keys)

	// An adapter without key prefix should be able to iterate over all keys
	adapter = AdaptBadger(db, []byte{})

	iter, err = adapter.ReverseIterator(nil, nil)
	require.NoError(t, err)
	defer iter.Close()

	keys = nil
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// Should see all keys in reverse order, regardless of prefix
	require.Len(t, keys, 7)

	iter, err = adapter.ReverseIterator([]byte("table2:date"), []byte("zebra"))
	require.NoError(t, err)
	defer iter.Close()

	keys = nil
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// "zebra" is the exclusive end bound, so only "table2:date" is in range.
	require.Equal(t, []string{"table2:date"}, keys)

	iter, err = adapter.ReverseIterator([]byte("table2:date"), []byte("zzz"))
	require.NoError(t, err)
	defer iter.Close()

	keys = nil
	for iter.Valid() {
		keys = append(keys, string(iter.Key()))
		iter.Next()
	}

	// With the end bound past "zebra", both it and "table2:date" are yielded.
	require.Equal(t, []string{"zebra", "table2:date"}, keys)
}
#!/bin/sh

# start-testnet.sh — launch an already-generated local testnet, preserving
# each node's existing data directory.
#
# Usage: ./start-testnet.sh [number_of_nodes]   (defaults to 4)

# Default to 4 nodes if no argument provided
NUM_NODES="${1:-4}"

# Validate input: digits only; the range check below rejects 0.
if ! echo "$NUM_NODES" | grep -qE '^[0-9]+$'; then
    echo "Error: Number of nodes must be a positive integer"
    echo "Usage: $0 [number_of_nodes]"
    echo "Example: $0 7"
    exit 1
fi

if [ "$NUM_NODES" -lt 1 ]; then
    echo "Error: Number of nodes must be at least 1"
    exit 1
fi

echo "Starting testnet with $NUM_NODES nodes (preserving existing data)..."

# Require an existing testnet generated by startfresh-testnet.sh.
if [ ! -d "testnet/node0" ]; then
    echo "Error: No existing testnet found. Run ./startfresh-testnet.sh first to create the testnet."
    exit 1
fi

# Count contiguous node directories (node0, node1, ...).
existing_nodes=0
for i in $(seq 0 99); do
    if [ -d "testnet/node$i" ]; then
        existing_nodes=$((existing_nodes + 1))
    else
        break
    fi
done

if [ "$NUM_NODES" -gt "$existing_nodes" ]; then
    echo "Error: Requested $NUM_NODES nodes but only $existing_nodes nodes exist in testnet/"
    echo "Run ./startfresh-testnet.sh $NUM_NODES to create additional nodes."
    exit 1
fi

echo "Found $existing_nodes existing nodes, starting first $NUM_NODES nodes..."

# Build the binary if it is missing. Abort on build failure instead of
# trying (and failing) to start every node with a nonexistent binary.
if [ ! -f "./didplcbft" ]; then
    echo "Binary not found, building didplcbft..."
    if ! go build -trimpath; then
        echo "Error: failed to build didplcbft"
        exit 1
    fi
fi

# Space-separated list of background node process IDs.
pids=""

# Kill all node processes, remove the log fifos, and exit.
cleanup() {
    # Reset the traps first: the `exit` below would otherwise re-trigger
    # the EXIT trap and run cleanup a second time.
    trap - INT TERM EXIT

    echo ""
    echo "Shutting down all nodes..."

    # Kill all background processes
    for pid in $pids; do
        if kill -0 "$pid" 2>/dev/null; then
            echo "  Stopping node process $pid..."
            kill "$pid" 2>/dev/null
        fi
    done

    # Clean up temporary fifo files
    for i in $(seq 0 99); do
        rm -f "/tmp/didplcbft-node$i-stdout" "/tmp/didplcbft-node$i-stderr" 2>/dev/null
    done

    # Wait for all processes to terminate
    wait $pids 2>/dev/null

    echo "All nodes stopped."
    exit 0
}

# Set up signal traps
trap cleanup INT TERM EXIT

# Launch all nodes in parallel
echo "Launching $NUM_NODES nodes in parallel..."

for i in $(seq 0 $((NUM_NODES - 1))); do
    if [ -d "testnet/node$i" ]; then
        echo "  Starting node$i..."
        # Per-node fifos let us tag every line of output with its origin.
        mkfifo "/tmp/didplcbft-node$i-stdout" 2>/dev/null || true
        mkfifo "/tmp/didplcbft-node$i-stderr" 2>/dev/null || true

        # Start sed processes to prefix output
        sed "s/^/[node$i-stdout] /" < "/tmp/didplcbft-node$i-stdout" &
        sed "s/^/[node$i-stderr] /" < "/tmp/didplcbft-node$i-stderr" &

        # Start the didplcbft process with redirected output
        ./didplcbft --data-dir "testnet/node$i" > "/tmp/didplcbft-node$i-stdout" 2> "/tmp/didplcbft-node$i-stderr" &
        pid=$!
        pids="$pids $pid"
        echo "    PID: $pid"
    else
        echo "  Warning: node$i directory not found, skipping..."
    fi
done

echo ""
echo "All $NUM_NODES nodes are now running."
echo "Press Ctrl+C to stop all nodes."
echo ""

# Wait for all background processes (intentionally unquoted: word-split pids)
wait $pids

# If we reach here, all processes have terminated normally
echo "All nodes have terminated."
+135
startfresh-testnet.sh
#!/bin/sh

# startfresh-testnet.sh — regenerate a local testnet from scratch and start
# every node except the last one, which is left for manual bringup so state
# sync can be exercised against the already-running nodes.
#
# Usage: ./startfresh-testnet.sh [number_of_nodes]   (defaults to 4)

# Default to 4 nodes if no argument provided
NUM_NODES="${1:-4}"

# Validate input: digits only; the range check below rejects 0.
if ! echo "$NUM_NODES" | grep -qE '^[0-9]+$'; then
    echo "Error: Number of nodes must be a positive integer"
    echo "Usage: $0 [number_of_nodes]"
    echo "Example: $0 7"
    exit 1
fi

if [ "$NUM_NODES" -lt 1 ]; then
    echo "Error: Number of nodes must be at least 1"
    exit 1
fi

echo "Setting up testnet with $NUM_NODES nodes..."

# Build the binary; abort on failure rather than configuring and launching
# a testnet we cannot run.
echo "Building didplcbft binary..."
if ! go build -trimpath; then
    echo "Error: failed to build didplcbft"
    exit 1
fi

# Clean up existing testnet data. -f keeps a fresh checkout (no node dirs
# yet) from producing a spurious rm error.
echo "Cleaning up existing testnet data..."
rm -rf testnet/node*

# Generate testnet with specified number of nodes: NUM_NODES-1 validators
# plus 1 non-validator (the manually-started node). Abort if generation
# fails, since every step below depends on the generated configs.
echo "Generating testnet configuration for $NUM_NODES nodes..."
go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.19 testnet --v "$((NUM_NODES - 1))" --n 1 --starting-ip-address 127.67.67.1 --config ./testnet/baseconfig.toml --o ./testnet || exit 1

# Adjust RPC and P2P listen addresses for each node
echo "Configuring RPC and P2P addresses for $NUM_NODES nodes..."

for i in $(seq 0 $((NUM_NODES - 1))); do
    # Calculate RPC port (starting from 26100)
    rpc_port=$((26100 + i))

    # Calculate P2P IP address (127.67.67.1 + node_index)
    p2p_ip="127.67.67.$((1 + i))"

    echo "  Configuring node$i (RPC: $rpc_port, P2P: $p2p_ip:26656)"

    # Adjust RPC listen address
    sed -i "s|^laddr = \"tcp://127.0.0.1:26657\"\$|laddr = \"tcp://127.0.0.1:$rpc_port\"|g" "testnet/node$i/config/config.toml"

    # Adjust P2P listen address
    sed -i "s|^laddr = \"tcp://0.0.0.0:26656\"\$|laddr = \"tcp://$p2p_ip:26656\"|g" "testnet/node$i/config/config.toml"
done

# Configure rpc_servers for the last node (the one that will be started manually)
last_node=$((NUM_NODES - 1))
echo "Configuring rpc_servers for node$last_node..."

# Build comma-separated list of RPC addresses for automatically started nodes
rpc_servers_list=""
for i in $(seq 0 $((NUM_NODES - 2))); do
    if [ -n "$rpc_servers_list" ]; then
        rpc_servers_list="$rpc_servers_list,"
    fi
    rpc_port=$((26100 + i))
    rpc_servers_list="${rpc_servers_list}tcp://127.0.0.1:$rpc_port"
done

# Replace empty rpc_servers configuration in the last node's config
echo "  Setting rpc_servers = \"$rpc_servers_list\" for node$last_node"
sed -i "s|^rpc_servers = \"\"\$|rpc_servers = \"$rpc_servers_list\"|g" "testnet/node$last_node/config/config.toml"

# Enable state sync for the last node (the one that will be started manually)
echo "  Enabling state sync for node$last_node"
sed -i '/\[statesync\]/,/enable = false/s/enable = false/enable = true/' "testnet/node$last_node/config/config.toml"

# Space-separated list of background node process IDs.
pids=""

# Kill all node processes, remove the log fifos, and exit.
cleanup() {
    # Reset the traps first: the `exit` below would otherwise re-trigger
    # the EXIT trap and run cleanup a second time.
    trap - INT TERM EXIT

    echo ""
    echo "Shutting down all nodes..."

    # Kill all background processes
    for pid in $pids; do
        if kill -0 "$pid" 2>/dev/null; then
            echo "  Stopping node process $pid..."
            kill "$pid" 2>/dev/null
        fi
    done

    # Clean up temporary fifo files
    for i in $(seq 0 99); do
        rm -f "/tmp/didplcbft-node$i-stdout" "/tmp/didplcbft-node$i-stderr" 2>/dev/null
    done

    # Wait for all processes to terminate
    wait $pids 2>/dev/null

    echo "All nodes stopped."
    exit 0
}

# Set up signal traps
trap cleanup INT TERM EXIT

# Launch all nodes except the last one (for testing later bringup)
nodes_to_start=$((NUM_NODES - 1))
echo "Launching $nodes_to_start nodes in parallel..."

for i in $(seq 0 $((nodes_to_start - 1))); do
    echo "  Starting node$i..."
    # Per-node fifos let us tag every line of output with its origin.
    mkfifo "/tmp/didplcbft-node$i-stdout" 2>/dev/null || true
    mkfifo "/tmp/didplcbft-node$i-stderr" 2>/dev/null || true

    # Start sed processes to prefix output
    sed "s/^/[node$i-stdout] /" < "/tmp/didplcbft-node$i-stdout" &
    sed "s/^/[node$i-stderr] /" < "/tmp/didplcbft-node$i-stderr" &

    # Start the didplcbft process with redirected output
    ./didplcbft --data-dir "testnet/node$i" > "/tmp/didplcbft-node$i-stdout" 2> "/tmp/didplcbft-node$i-stderr" &
    pid=$!
    pids="$pids $pid"
    echo "    PID: $pid"
done

echo ""
echo "All $nodes_to_start nodes are now running."
echo "Note: Node $((NUM_NODES - 1)) is not started and can be launched later for testing."
echo "Press Ctrl+C to stop all running nodes."
echo ""

# Wait for all background processes (intentionally unquoted: word-split pids)
wait $pids

# If we reach here, all processes have terminated normally
echo "All nodes have terminated."