A very experimental PLC implementation which uses BFT consensus for decentralization

More work on range challenges

gbl08ma.com 34c54a43 1fd7d6f9

verified
+584 -134
+6 -2
abciapp/app.go
··· 21 21 "tangled.org/gbl08ma.com/didplcbft/plc" 22 22 "tangled.org/gbl08ma.com/didplcbft/store" 23 23 "tangled.org/gbl08ma.com/didplcbft/transaction" 24 + "tangled.org/gbl08ma.com/didplcbft/types" 24 25 ) 25 26 26 27 type DIDPLCApplication struct { ··· 30 31 indexDB dbm.DB 31 32 tree *iavl.MutableTree 32 33 fullyClearApplicationData func() error 34 + 35 + mempoolSubmitter types.MempoolSubmitter 33 36 34 37 validatorPubKey crypto.PubKey 35 38 validatorPrivKey crypto.PrivKey ··· 52 55 } 53 56 54 57 // store and plc must be able to share transaction objects 55 - func NewDIDPLCApplication(pv *privval.FilePV, treeDB dbm.DB, indexDB transaction.ExtendedDB, clearData func(), snapshotDirectory, didBloomFilterPath string) (*DIDPLCApplication, *transaction.Factory, plc.PLC, func(), error) { 58 + func NewDIDPLCApplication(pv *privval.FilePV, treeDB dbm.DB, indexDB transaction.ExtendedDB, clearData func(), snapshotDirectory, didBloomFilterPath string, mempoolSubmitter types.MempoolSubmitter) (*DIDPLCApplication, *transaction.Factory, plc.PLC, func(), error) { 56 59 mkTree := func() *iavl.MutableTree { 57 60 // Using SpeedDefault appears to cause the processing time for ExecuteOperation to double on average 58 61 // Using SpeedBetterCompression appears to cause the processing time to double again ··· 80 83 runnerContext: runnerContext, 81 84 tree: tree, 82 85 indexDB: indexDB, 86 + mempoolSubmitter: mempoolSubmitter, 83 87 snapshotDirectory: snapshotDirectory, 84 88 aocsByPLC: make(map[string]*authoritativeOperationsCache), 85 89 } ··· 201 205 return stacktrace.Propagate(err, "") 202 206 } 203 207 204 - d.rangeChallengeCoordinator, err = newRangeChallengeCoordinator(d.runnerContext, d.txFactory, blockStore, d.validatorPubKey, d.validatorPrivKey) 208 + d.rangeChallengeCoordinator, err = newRangeChallengeCoordinator(d.runnerContext, d.txFactory, blockStore, d.mempoolSubmitter, d.validatorPubKey, d.validatorPrivKey) 205 209 if err != nil { 206 210 return 
stacktrace.Propagate(err, "") 207 211 }
+1 -1
abciapp/app_test.go
··· 22 22 } 23 23 24 24 func TestCheckTx(t *testing.T) { 25 - app, _, _, cleanup, err := abciapp.NewDIDPLCApplication(nil, dbm.NewMemDB(), memDBWrapper{dbm.NewMemDB()}, nil, "", "") 25 + app, _, _, cleanup, err := abciapp.NewDIDPLCApplication(nil, dbm.NewMemDB(), memDBWrapper{dbm.NewMemDB()}, nil, "", "", nil) 26 26 require.NoError(t, err) 27 27 t.Cleanup(cleanup) 28 28
+6 -3
abciapp/block_challenge.go
··· 203 203 } 204 204 205 205 func (c *blockChallengeCoordinator) verifyBlockChallengeProof(height int64, validatorAddress []byte, proofBytes []byte) (bool, error) { 206 - tx := c.txFactory.ReadCommitted() 207 - if tx.Height() != height-1 { 208 - return false, stacktrace.NewError("challenge being verified for unexpected height %d, expected %d", height, tx.Height()+1) 206 + // timestamp shouldn't matter for this 207 + // it is however important that we read the tree exactly as it was on the height prior to the one where the proof was supposedly generated 208 + // this is because operations can change over time (nullification) and also the returned data for the highest operation indexes will be different 209 + tx, err := c.txFactory.ReadHeight(time.Time{}, height-1) 210 + if err != nil { 211 + return false, stacktrace.Propagate(err, "") 209 212 } 210 213 211 214 sharedPart, err := c.fetchOrBuildBlockChallengeCircuitAssignmentShared(tx, height)
+216 -40
abciapp/range_challenge.go
··· 1 1 package abciapp 2 2 3 3 import ( 4 - "bytes" 5 4 "context" 6 5 "encoding/binary" 6 + "errors" 7 7 "math/big" 8 8 "slices" 9 + "time" 9 10 10 11 "github.com/Yiling-J/theine-go" 11 12 "github.com/cometbft/cometbft/crypto" ··· 13 14 "github.com/cosmos/iavl" 14 15 "github.com/cosmos/iavl/db" 15 16 ics23 "github.com/cosmos/ics23/go" 17 + cbornode "github.com/ipfs/go-ipld-cbor" 16 18 "github.com/palantir/stacktrace" 19 + "github.com/samber/mo" 17 20 "tangled.org/gbl08ma.com/didplcbft/store" 18 21 "tangled.org/gbl08ma.com/didplcbft/transaction" 22 + "tangled.org/gbl08ma.com/didplcbft/types" 19 23 ) 20 24 21 25 type rangeChallengeCoordinator struct { ··· 27 31 validatorAddress []byte 28 32 txFactory *transaction.Factory 29 33 nodeBlockStore *bftstore.BlockStore 34 + mempoolSubmitter types.MempoolSubmitter 30 35 31 - treeCache *theine.LoadingCache[treeCacheKey, cachedTree] 36 + treeCache *theine.LoadingCache[treeCacheKey, cachedTree] 37 + cachedNextProofFromHeight mo.Option[int64] 32 38 } 33 39 34 - func newRangeChallengeCoordinator(runnerContext context.Context, txFactory *transaction.Factory, blockStore *bftstore.BlockStore, pubKey crypto.PubKey, privKey crypto.PrivKey) (*rangeChallengeCoordinator, error) { 40 + func newRangeChallengeCoordinator(runnerContext context.Context, txFactory *transaction.Factory, blockStore *bftstore.BlockStore, mempoolSubmitter types.MempoolSubmitter, pubKey crypto.PubKey, privKey crypto.PrivKey) (*rangeChallengeCoordinator, error) { 35 41 c := &rangeChallengeCoordinator{ 36 42 txFactory: txFactory, 37 43 runnerContext: runnerContext, 38 44 nodeBlockStore: blockStore, 45 + mempoolSubmitter: mempoolSubmitter, 39 46 isConfiguredToBeValidator: pubKey != nil, 40 47 validatorPubKey: pubKey, 41 48 validatorPrivKey: privKey, ··· 63 70 root []byte 64 71 } 65 72 66 - type rangeChallengeProof struct { 67 - treeRoot []byte // the tree root we commit to. 
must be the same between the "commit proof" and the "confirmation proof" 68 - membershipProof *ics23.CommitmentProof 69 - } 73 + func (c *rangeChallengeCoordinator) getOrFetchNextProofFromHeight(tx transaction.Read) (int64, error) { 74 + if !c.isConfiguredToBeValidator { 75 + return 0, stacktrace.NewError("not configured to be a validator") 76 + } 77 + if completion, hasCache := c.cachedNextProofFromHeight.Get(); hasCache { 78 + return completion, nil 79 + } 70 80 71 - func (c *rangeChallengeCoordinator) computeRangeChallengeProof(ctx context.Context, tx transaction.Read, startHeight, endHeight, proveHeight int64) (rangeChallengeProof, error) { 72 - ctx = context.WithValue(ctx, contextTxKey{}, tx) 73 - ct, err := c.treeCache.Get(ctx, treeCacheKey{ 74 - startHeight: startHeight, 75 - endHeight: endHeight, 76 - }) 81 + completion, err := store.Consensus.ValidatorRangeChallengeCompletion(tx, c.validatorAddress) 77 82 if err != nil { 78 - return rangeChallengeProof{}, stacktrace.Propagate(err, "") 83 + if !errors.Is(err, store.ErrNoRecentChallengeCompletion) { 84 + return 0, stacktrace.Propagate(err, "") 85 + } 86 + completion = 0 79 87 } 80 88 81 - proofKey := binary.BigEndian.AppendUint64(nil, uint64(proveHeight)) 82 - 83 - membershipProof, err := ct.tree.GetMembershipProof(proofKey) 89 + minProvenBlock := int64(0) 90 + for proofHeight := range store.Consensus.BlockChalengeProofsIterator(tx, 0, &err) { 91 + minProvenBlock = int64(proofHeight) 92 + break 93 + } 84 94 if err != nil { 85 - return rangeChallengeProof{}, stacktrace.Propagate(err, "") 95 + return 0, stacktrace.Propagate(err, "") 86 96 } 87 97 88 - return rangeChallengeProof{ 89 - treeRoot: ct.root, 90 - membershipProof: membershipProof, 91 - }, nil 98 + minProvable := max(minProvenBlock, int64(completion+1)) 99 + 100 + c.cachedNextProofFromHeight = mo.Some(minProvable) 101 + return minProvable, nil 92 102 } 93 103 94 - func verifyMembershipOfRangeChallengeProofs(proofs ...rangeChallengeProof) (bool, error) 
{ 95 - if len(proofs) < 1 { 96 - return false, stacktrace.NewError("insufficient proofs") 104 + func (c *rangeChallengeCoordinator) onNewBlock(ctx context.Context, newBlockHeight int64) error { 105 + if !c.isConfiguredToBeValidator { 106 + return nil 97 107 } 98 108 99 - treeRoot := proofs[0].treeRoot 109 + // TODO if ABCI is just replaying old blocks -> do nothing 110 + // TODO wait until block is actually committed 111 + // ^ solution to both of these problems: don't call this method from the ABCI app, instead, call it from an "outside subscriber" that listens for truly new blocks 100 112 101 - for _, proof := range proofs { 102 - if !bytes.Equal(proof.treeRoot, treeRoot) { 103 - // did not commit to the same root in all proofs 104 - return false, nil 113 + tx := c.txFactory.ReadCommitted() 114 + if tx.Height() < newBlockHeight { 115 + // the ABCI app is probably still catching up to new blocks 116 + return nil 117 + } 118 + 119 + shouldCommitToChallenge := false 120 + shouldCompleteChallenge := false 121 + 122 + fromHeight, toHeight, provenHeight, includedOnHeight, _, err := store.Consensus.ValidatorRangeChallengeCommitment(tx, c.validatorAddress) 123 + if errors.Is(err, store.ErrNoActiveChallengeCommitment) { 124 + nextFromHeight, err := c.getOrFetchNextProofFromHeight(tx) 125 + if err != nil { 126 + return stacktrace.Propagate(err, "") 105 127 } 128 + shouldCommitToChallenge = nextFromHeight+CommitToChallengeMinRange+50 <= newBlockHeight 129 + } else if err != nil { 130 + return stacktrace.Propagate(err, "") 131 + } else { 132 + commitmentBlockMeta := c.nodeBlockStore.LoadBlockMeta(int64(includedOnHeight)) 133 + commitmentExpired := false 134 + if commitmentBlockMeta != nil { 135 + commitmentExpired = uint64(newBlockHeight) >= includedOnHeight+CompleteChallengeMaxAgeInBlocks || 136 + time.Since(commitmentBlockMeta.Header.Time) >= CompleteChallengeMaxAge-1*time.Second 106 137 107 - // just use the key and value claimed in the proof as those should have been 
validated previously 108 - if exist := proof.membershipProof.GetExist(); exist != nil { 109 - if !ics23.VerifyMembership(ics23.IavlSpec, treeRoot, proof.membershipProof, exist.Key, exist.Value) { 110 - return false, nil 111 - } 112 - } else { 113 - return false, stacktrace.NewError("proof is not an existence proof") 138 + // shouldCompleteChallenge if not too many blocks have passed AND enough blocks have passed 139 + shouldCompleteChallenge = !commitmentExpired && includedOnHeight+1 <= uint64(newBlockHeight) 140 + } 141 + 142 + if !shouldCompleteChallenge { 143 + shouldCommitToChallenge = commitmentExpired 114 144 } 115 145 } 116 146 117 - return true, nil 147 + var transactionBytes []byte 148 + if shouldCompleteChallenge { 149 + transactionBytes, err = c.createCompleteChallengeTx(ctx, tx, int64(fromHeight), int64(toHeight), int64(provenHeight), int64(includedOnHeight)) 150 + if err != nil { 151 + return stacktrace.Propagate(err, "") 152 + } 153 + } else if shouldCommitToChallenge { 154 + transactionBytes, err = c.createCommitToChallengeTx(ctx, tx, newBlockHeight) 155 + if err != nil { 156 + return stacktrace.Propagate(err, "") 157 + } 158 + } 159 + 160 + _, err = c.mempoolSubmitter.BroadcastTxCommit(ctx, transactionBytes) 161 + if err != nil { 162 + return stacktrace.Propagate(err, "") 163 + } 164 + c.cachedNextProofFromHeight = mo.None[int64]() 165 + return nil 118 166 } 119 167 120 - func computeHeightToProveInRange(lastCommitHash, validatorAddress []byte, fromHeight, toHeight int64) uint64 { 168 + func (c *rangeChallengeCoordinator) createCommitToChallengeTx(ctx context.Context, tx transaction.Read, toHeight int64) ([]byte, error) { 169 + if !c.isConfiguredToBeValidator { 170 + return nil, stacktrace.NewError("not configured to be validator") 171 + } 172 + 173 + fromHeight, err := c.getOrFetchNextProofFromHeight(tx) 174 + if err != nil { 175 + return nil, stacktrace.Propagate(err, "") 176 + } 177 + 178 + if toHeight < fromHeight+CommitToChallengeMinRange { 179 
+ return nil, stacktrace.NewError("insufficient blocks passed") 180 + } 181 + 182 + if toHeight > fromHeight+CommitToChallengeMaxRange { 183 + fromHeight = toHeight - CommitToChallengeMaxRange 184 + } 185 + 186 + pubKeyArg, err := MarshalPubKeyForArguments(c.validatorPubKey) 187 + if err != nil { 188 + return nil, stacktrace.Propagate(err, "") 189 + } 190 + 191 + toHeightBlockMeta := c.nodeBlockStore.LoadBlockMeta(toHeight) 192 + if toHeightBlockMeta == nil { 193 + return nil, stacktrace.NewError("block not found at height") 194 + } 195 + 196 + if time.Since(toHeightBlockMeta.Header.Time) > CommitToChallengeMaxAge { 197 + return nil, stacktrace.NewError("too much time passed since block at height") 198 + } 199 + 200 + proveHeight := computeHeightToProveInRange(toHeightBlockMeta.Header.LastCommitHash, c.validatorAddress, int64(fromHeight), int64(toHeight), mo.None[int64]()) 201 + 202 + commitToRoot, membershipProof, err := c.computeRangeChallengeProof(ctx, tx, fromHeight, toHeight, proveHeight) 203 + if err != nil { 204 + return nil, stacktrace.Propagate(err, "") 205 + } 206 + 207 + proofBytes, err := membershipProof.Marshal() 208 + if err != nil { 209 + return nil, stacktrace.Propagate(err, "") 210 + } 211 + 212 + transaction := Transaction[CommitToChallengeArguments]{ 213 + Action: TransactionActionCommitToChallenge, 214 + Arguments: CommitToChallengeArguments{ 215 + ValidatorPubKey: pubKeyArg, 216 + FromHeight: int64(fromHeight), 217 + ToHeight: int64(toHeight), 218 + Root: commitToRoot, 219 + Proof: proofBytes, 220 + }, 221 + } 222 + 223 + transaction, err = SignTransaction(c.validatorPrivKey, transaction) 224 + if err != nil { 225 + return nil, stacktrace.Propagate(err, "") 226 + } 227 + 228 + out, err := cbornode.DumpObject(transaction) 229 + if err != nil { 230 + return nil, stacktrace.Propagate(err, "") 231 + } 232 + return out, nil 233 + } 234 + 235 + func (c *rangeChallengeCoordinator) createCompleteChallengeTx(ctx context.Context, tx transaction.Read, 
fromHeight, toHeight, prevProvenHeight, commitmentIncludedOnHeight int64) ([]byte, error) { 236 + if !c.isConfiguredToBeValidator { 237 + return nil, stacktrace.NewError("not configured to be validator") 238 + } 239 + 240 + nextBlockMeta := c.nodeBlockStore.LoadBlockMeta(commitmentIncludedOnHeight + 1) 241 + if nextBlockMeta == nil { 242 + return nil, stacktrace.NewError("block not found at height") 243 + } 244 + 245 + proveHeight := computeHeightToProveInRange(nextBlockMeta.Header.LastCommitHash, c.validatorAddress, int64(fromHeight), int64(toHeight), mo.Some(prevProvenHeight)) 246 + 247 + _, membershipProof, err := c.computeRangeChallengeProof(ctx, tx, fromHeight, toHeight, proveHeight) 248 + if err != nil { 249 + return nil, stacktrace.Propagate(err, "") 250 + } 251 + 252 + proofBytes, err := membershipProof.Marshal() 253 + if err != nil { 254 + return nil, stacktrace.Propagate(err, "") 255 + } 256 + 257 + transaction := Transaction[CompleteChallengeArguments]{ 258 + Action: TransactionActionCompleteChallenge, 259 + Arguments: CompleteChallengeArguments{ 260 + Validator: c.validatorAddress, 261 + Proof: proofBytes, 262 + }, 263 + } 264 + 265 + out, err := cbornode.DumpObject(transaction) 266 + if err != nil { 267 + return nil, stacktrace.Propagate(err, "") 268 + } 269 + return out, nil 270 + } 271 + 272 + func (c *rangeChallengeCoordinator) computeRangeChallengeProof(ctx context.Context, tx transaction.Read, startHeight, endHeight, proveHeight int64) ([]byte, *ics23.CommitmentProof, error) { 273 + ctx = context.WithValue(ctx, contextTxKey{}, tx) 274 + ct, err := c.treeCache.Get(ctx, treeCacheKey{ 275 + startHeight: startHeight, 276 + endHeight: endHeight, 277 + }) 278 + if err != nil { 279 + return nil, nil, stacktrace.Propagate(err, "") 280 + } 281 + 282 + proofKey := binary.BigEndian.AppendUint64(nil, uint64(proveHeight)) 283 + 284 + membershipProof, err := ct.tree.GetMembershipProof(proofKey) 285 + if err != nil { 286 + return nil, nil, 
stacktrace.Propagate(err, "") 287 + } 288 + 289 + return ct.root, membershipProof, nil 290 + } 291 + 292 + func computeHeightToProveInRange(lastCommitHash, validatorAddress []byte, fromHeight, toHeight int64, avoidHeight mo.Option[int64]) int64 { 121 293 lastCommitHashBigInt := new(big.Int).SetBytes(lastCommitHash) 122 294 validatorBigInt := new(big.Int).SetBytes(validatorAddress) 123 295 seed := new(big.Int).Xor(lastCommitHashBigInt, validatorBigInt) ··· 126 298 127 299 randOffset := new(big.Int).Mod(seed, big.NewInt(numBlocks)) 128 300 129 - return uint64(fromHeight) + randOffset.Uint64() 301 + candidateHeight := fromHeight + int64(randOffset.Uint64()) 302 + if h, ok := avoidHeight.Get(); ok && candidateHeight == h { 303 + candidateHeight = (candidateHeight + 1) % int64(numBlocks) 304 + } 305 + return candidateHeight 130 306 } 131 307 132 308 func (c *rangeChallengeCoordinator) proofTreeLoader(ctx context.Context, cacheKey treeCacheKey) (theine.Loaded[cachedTree], error) {
+4 -4
abciapp/snapshots.go
··· 382 382 } 383 383 384 384 func exportIndexEntries(indexDB dbm.DB, treeVersion int64, w io.Writer) (int64, error) { 385 - didLogKeyStart := make([]byte, store.DIDLogKeySize) 386 - didLogKeyStart[0] = store.DIDLogKeyPrefix 387 - didLogKeyEnd := slices.Repeat([]byte{0xff}, store.DIDLogKeySize) 388 - didLogKeyEnd[0] = store.DIDLogKeyPrefix 385 + didLogKeyStart := make([]byte, store.IndexDIDLogKeyLength) 386 + didLogKeyStart[0] = store.IndexDIDLogKeyPrefix 387 + didLogKeyEnd := slices.Repeat([]byte{0xff}, store.IndexDIDLogKeyLength) 388 + didLogKeyEnd[0] = store.IndexDIDLogKeyPrefix 389 389 390 390 iterator, err := indexDB.Iterator(didLogKeyStart, didLogKeyEnd) 391 391 if err != nil {
+156 -25
abciapp/tx_challenge.go
··· 3 3 import ( 4 4 "context" 5 5 "encoding/binary" 6 + "errors" 6 7 "time" 7 8 8 9 ics23 "github.com/cosmos/ics23/go" 9 10 cbornode "github.com/ipfs/go-ipld-cbor" 10 11 "github.com/palantir/stacktrace" 11 12 "github.com/samber/lo" 13 + "github.com/samber/mo" 14 + "tangled.org/gbl08ma.com/didplcbft/store" 12 15 ) 13 16 14 17 var TransactionActionCommitToChallenge = registerTransactionAction[CommitToChallengeArguments]("CommitToChallenge", processCommitToChallengeTx) ··· 19 22 20 23 var CommitToChallengeMaxAge = lo.Must(time.ParseDuration("10s")) 21 24 25 + const CompleteChallengeMaxAgeInBlocks = 3 26 + 27 + var CompleteChallengeMaxAge = lo.Must(time.ParseDuration("10s")) 28 + 22 29 type CommitToChallengeArguments struct { 23 30 ValidatorPubKey PubKeyInArguments `json:"validator" refmt:"validator"` 24 31 FromHeight int64 `json:"fromHeight" refmt:"fromHeight"` 25 32 26 - // ToHeight should not be more than N blocks behind the block where this transaction is included 27 - // (i.e. this transaction "expires" once ToHeight is more than N blocks old, and shouldn't be included after that) 33 + // ToHeight should not be more than CommitToChallengeMaxAgeInBlocks behind the block where this transaction is included 34 + // (i.e. 
this transaction "expires" once ToHeight is more than CommitToChallengeMaxAgeInBlocks blocks old, and shouldn't be included after that) 28 35 ToHeight int64 `json:"toHeight" refmt:"toHeight"` 29 36 Root []byte `json:"root" refmt:"root"` 30 37 Proof []byte `json:"proof" refmt:"proof"` ··· 62 69 63 70 validatorAddress := validatorPubKey.Address() 64 71 65 - if tx.Arguments.ToHeight < tx.Arguments.FromHeight { 72 + if tx.Arguments.ToHeight < tx.Arguments.FromHeight || tx.Arguments.ToHeight >= deps.workingHeight { 66 73 return &processResult{ 67 74 Code: 4201, 68 75 Info: "invalid challenge range", ··· 105 112 }, nil 106 113 } 107 114 115 + currentCompletion, err := store.Consensus.ValidatorRangeChallengeCompletion(deps.readTx, validatorAddress) 116 + if err == nil { 117 + if tx.Arguments.FromHeight <= int64(currentCompletion) { 118 + return &processResult{ 119 + Code: 4207, 120 + Info: "challenge range overlaps already completed challenge", 121 + }, nil 122 + } 123 + } else if !errors.Is(err, store.ErrNoRecentChallengeCompletion) { 124 + return nil, stacktrace.Propagate(err, "") 125 + } 126 + 127 + if len(tx.Arguments.Root) != 32 { 128 + return &processResult{ 129 + Code: 4208, 130 + Info: "invalid root", 131 + }, nil 132 + } 133 + 108 134 proof := new(ics23.CommitmentProof) 109 135 err = proof.Unmarshal(tx.Arguments.Proof) 110 136 if err != nil || proof.GetExist() == nil { 111 137 return &processResult{ 112 - Code: 4207, 138 + Code: 4209, 113 139 Info: "invalid proof", 114 140 }, nil 115 141 } 116 142 existenceProof := proof.GetExist() 117 143 118 - proofHeight := binary.BigEndian.Uint64(existenceProof.Key) 144 + proofHeight := int64(binary.BigEndian.Uint64(existenceProof.Key)) 119 145 120 146 expectedProofHeight := computeHeightToProveInRange( 121 147 toHeightBlockMeta.Header.LastCommitHash.Bytes(), 122 148 validatorAddress.Bytes(), 123 149 tx.Arguments.FromHeight, 124 - tx.Arguments.ToHeight) 150 + tx.Arguments.ToHeight, 151 + mo.None[int64]()) 125 152 126 153 if 
proofHeight != expectedProofHeight { 127 154 return &processResult{ 128 - Code: 4208, 155 + Code: 4210, 129 156 Info: "invalid proof", 130 157 }, nil 131 158 } ··· 136 163 } 137 164 if !blockProofValid { 138 165 return &processResult{ 139 - Code: 4209, 166 + Code: 4211, 140 167 Info: "invalid proof", 141 168 }, nil 142 169 } 143 170 144 - rangeProof := rangeChallengeProof{ 145 - treeRoot: tx.Arguments.Root, 146 - membershipProof: proof, 147 - } 148 - 149 - rangeProofValid, err := verifyMembershipOfRangeChallengeProofs(rangeProof) 150 - if err != nil { 151 - return nil, stacktrace.Propagate(err, "") 152 - } 153 - if !rangeProofValid { 171 + if !ics23.VerifyMembership(ics23.IavlSpec, tx.Arguments.Root, proof, existenceProof.Key, existenceProof.Value) { 154 172 return &processResult{ 155 - Code: 4210, 173 + Code: 4212, 156 174 Info: "invalid proof", 157 175 }, nil 158 176 } 159 177 160 178 if writeTx, ok := deps.writeTx.Get(); ok { 161 - // TODO set challenge commit state for this validator in tree 162 - _ = writeTx 163 - return nil, stacktrace.NewError("not implemented") // TODO 179 + err = store.Consensus.SetValidatorRangeChallengeCommitment( 180 + writeTx, 181 + validatorAddress, 182 + uint64(tx.Arguments.FromHeight), 183 + uint64(tx.Arguments.ToHeight), 184 + uint64(proofHeight), 185 + uint64(deps.workingHeight), 186 + tx.Arguments.Root) 187 + if err != nil { 188 + return nil, stacktrace.Propagate(err, "") 189 + } 164 190 } 165 191 166 192 return &processResult{ ··· 171 197 var TransactionActionCompleteChallenge = registerTransactionAction[CompleteChallengeArguments]("CompleteChallenge", processCompleteChallengeTx) 172 198 173 199 type CompleteChallengeArguments struct { 200 + // This transaction is not signed. 
It is a no-op if it isn't valid and we don't really care if an entity is able to complete a challenge on behalf of another validator 201 + // (that would be quite an achievement, on the level of a validator being able to find a collision for the committed root in order to fake not doing all the work) 174 202 Validator []byte `json:"validator" refmt:"validator"` 175 203 176 204 // this shall be a membership proof on the same tree the validator previously committed to, 177 205 // for the key deterministically-randomly determined by the last_commit_hash of the block _after_ the one 178 206 // where the most recent CommitToChallenge transaction for this validator got included 179 - // this transaction must be included within N blocks after the inclusion of that CommitToChallenge transaction 180 - // TODO determine N 207 + // this transaction must be included within CompleteChallengeMaxAgeInBlocks blocks after the inclusion of that CommitToChallenge transaction 181 208 Proof []byte `json:"proof" refmt:"proof"` 182 209 } 183 210 ··· 191 218 } 192 219 193 220 func processCompleteChallengeTx(ctx context.Context, deps TransactionProcessorDependencies, txBytes []byte) (*processResult, error) { 194 - return nil, stacktrace.NewError("not implemented") // TODO 221 + tx, err := UnmarshalTransaction[CompleteChallengeArguments](txBytes) 222 + if err != nil { 223 + return &processResult{ 224 + Code: 4000, 225 + Info: err.Error(), 226 + }, nil 227 + } 228 + 229 + fromHeight, toHeight, provenHeight, includedOnHeight, committedTreeRoot, err := store.Consensus.ValidatorRangeChallengeCommitment(deps.readTx, tx.Arguments.Validator) 230 + if err != nil { 231 + if errors.Is(err, store.ErrNoActiveChallengeCommitment) { 232 + return &processResult{ 233 + Code: 4300, 234 + Info: "validator is not committed to a challenge", 235 + }, nil 236 + } 237 + return nil, stacktrace.Propagate(err, "") 238 + } 239 + 240 + if int64(includedOnHeight)+CompleteChallengeMaxAgeInBlocks < deps.workingHeight { 
241 + // validator must commit to a new challenge 242 + return &processResult{ 243 + Code: 4301, 244 + Info: "outdated challenge commitment", 245 + }, nil 246 + } 247 + 248 + includedHeightBlockMeta := deps.blockStore.LoadBlockMeta(int64(includedOnHeight)) 249 + if includedHeightBlockMeta == nil { 250 + // this shouldn't happen unless the prover is submitting the completion on the same block as the commitment 251 + return &processResult{ 252 + Code: 4302, 253 + Info: "premature challenge completion", 254 + }, nil 255 + } 256 + 257 + if time.Since(includedHeightBlockMeta.Header.Time) > CompleteChallengeMaxAge { 258 + // validator must commit to a new challenge 259 + return &processResult{ 260 + Code: 4303, 261 + Info: "outdated challenge commitment", 262 + }, nil 263 + } 264 + 265 + proof := new(ics23.CommitmentProof) 266 + err = proof.Unmarshal(tx.Arguments.Proof) 267 + if err != nil || proof.GetExist() == nil { 268 + return &processResult{ 269 + Code: 4304, 270 + Info: "invalid proof", 271 + }, nil 272 + } 273 + existenceProof := proof.GetExist() 274 + 275 + proofHeight := int64(binary.BigEndian.Uint64(existenceProof.Key)) 276 + 277 + expectedProofHeight := computeHeightToProveInRange( 278 + includedHeightBlockMeta.Header.LastCommitHash.Bytes(), 279 + tx.Arguments.Validator, 280 + int64(fromHeight), 281 + int64(toHeight), 282 + mo.Some(int64(provenHeight))) 283 + 284 + if proofHeight != expectedProofHeight { 285 + return &processResult{ 286 + Code: 4305, 287 + Info: "incorrect key proven", 288 + }, nil 289 + } 290 + 291 + blockProofValid, err := deps.blockChallengeCoordinator.verifyBlockChallengeProof(int64(proofHeight), tx.Arguments.Validator, existenceProof.Value) 292 + if err != nil { 293 + return nil, stacktrace.Propagate(err, "") 294 + } 295 + if !blockProofValid { 296 + return &processResult{ 297 + Code: 4306, 298 + Info: "invalid proof", 299 + }, nil 300 + } 301 + 302 + if !ics23.VerifyMembership(ics23.IavlSpec, committedTreeRoot, proof, existenceProof.Key, 
existenceProof.Value) { 303 + return &processResult{ 304 + Code: 4307, 305 + Info: "invalid proof", 306 + }, nil 307 + } 308 + 309 + if writeTx, ok := deps.writeTx.Get(); ok { 310 + err = store.Consensus.ClearValidatorRangeChallengeCommitment(writeTx, tx.Arguments.Validator) 311 + if err != nil { 312 + return nil, stacktrace.Propagate(err, "") 313 + } 314 + 315 + err = store.Consensus.SetValidatorRangeChallengeCompletion(writeTx, tx.Arguments.Validator, toHeight) 316 + if err != nil { 317 + return nil, stacktrace.Propagate(err, "") 318 + } 319 + 320 + // TODO effects on validator reputation 321 + } 322 + 323 + return &processResult{ 324 + Code: 0, 325 + }, nil 195 326 }
+27 -18
httpapi/broadcast.go tx_submitter.go
··· 1 - package httpapi 1 + package main 2 2 3 3 import ( 4 4 "context" 5 - "time" 6 5 7 6 abci "github.com/cometbft/cometbft/abci/types" 8 7 mempl "github.com/cometbft/cometbft/mempool" 9 8 "github.com/cometbft/cometbft/node" 10 9 "github.com/cometbft/cometbft/rpc/core" 11 - ctypes "github.com/cometbft/cometbft/rpc/core/types" 12 - "github.com/cometbft/cometbft/types" 10 + coretypes "github.com/cometbft/cometbft/rpc/core/types" 11 + cmttypes "github.com/cometbft/cometbft/types" 12 + "github.com/google/uuid" 13 13 "github.com/palantir/stacktrace" 14 + "tangled.org/gbl08ma.com/didplcbft/types" 14 15 ) 15 16 16 - func broadcastTxCommit(ctx context.Context, node *node.Node, subscriber string, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { 17 - eventBus := node.EventBus() 18 - mempool := node.Mempool() 17 + type txSubmitter struct { 18 + node *node.Node 19 + } 20 + 21 + var _ types.MempoolSubmitter = (*txSubmitter)(nil) 22 + 23 + // BroadcastTxCommit implements [types.MempoolSubmitter]. 24 + func (t *txSubmitter) BroadcastTxCommit(ctx context.Context, tx cmttypes.Tx) (*coretypes.ResultBroadcastTxCommit, error) { 25 + uuid, err := uuid.NewRandom() 26 + if err != nil { 27 + return nil, stacktrace.Propagate(err, "") 28 + } 29 + subscriber := uuid.String() 30 + eventBus := t.node.EventBus() 31 + mempool := t.node.Mempool() 19 32 // Subscribe to tx being committed in block. 
20 33 subCtx, cancel := context.WithTimeout(ctx, core.SubscribeTimeout) 21 34 defer cancel() 22 - q := types.EventQueryTxFor(tx) 35 + q := cmttypes.EventQueryTxFor(tx) 23 36 txSub, err := eventBus.Subscribe(subCtx, subscriber, q) 24 37 if err != nil { 25 38 return nil, stacktrace.Propagate(err, "failed to subscribe to tx") ··· 45 58 return nil, stacktrace.Propagate(ctx.Err(), "broadcast confirmation not received") 46 59 case checkTxRes := <-checkTxResCh: 47 60 if checkTxRes.Code != abci.CodeTypeOK { 48 - return &ctypes.ResultBroadcastTxCommit{ 61 + return &coretypes.ResultBroadcastTxCommit{ 49 62 CheckTx: *checkTxRes, 50 63 TxResult: abci.ExecTxResult{}, 51 64 Hash: tx.Hash(), ··· 54 67 55 68 // Wait for the tx to be included in a block or timeout. 56 69 select { 70 + case <-ctx.Done(): 71 + return nil, stacktrace.Propagate(ctx.Err(), "inclusion confirmation not received") 57 72 case msg := <-txSub.Out(): // The tx was included in a block. 58 - txResultEvent := msg.Data().(types.EventDataTx) 59 - return &ctypes.ResultBroadcastTxCommit{ 73 + txResultEvent := msg.Data().(cmttypes.EventDataTx) 74 + return &coretypes.ResultBroadcastTxCommit{ 60 75 CheckTx: *checkTxRes, 61 76 TxResult: txResultEvent.Result, 62 77 Hash: tx.Hash(), ··· 67 82 if err == nil { 68 83 err = stacktrace.NewError("CometBFT exited") 69 84 } 70 - return &ctypes.ResultBroadcastTxCommit{ 85 + return &coretypes.ResultBroadcastTxCommit{ 71 86 CheckTx: *checkTxRes, 72 87 TxResult: abci.ExecTxResult{}, 73 88 Hash: tx.Hash(), 74 89 }, stacktrace.Propagate(err, "txSub was canceled") 75 - case <-time.After(node.Config().RPC.TimeoutBroadcastTxCommit): 76 - return &ctypes.ResultBroadcastTxCommit{ 77 - CheckTx: *checkTxRes, 78 - TxResult: abci.ExecTxResult{}, 79 - Hash: tx.Hash(), 80 - }, stacktrace.NewError("timed out waiting for tx to be included in a block") 81 90 } 82 91 } 83 92 }
+19 -25
httpapi/server.go
··· 17 17 "time" 18 18 19 19 "github.com/bluesky-social/indigo/atproto/atcrypto" 20 - "github.com/cometbft/cometbft/node" 21 20 "github.com/did-method-plc/go-didplc" 22 - "github.com/google/uuid" 23 21 cbornode "github.com/ipfs/go-ipld-cbor" 24 22 "github.com/palantir/stacktrace" 25 23 "github.com/rs/cors" ··· 28 26 "tangled.org/gbl08ma.com/didplcbft/abciapp" 29 27 "tangled.org/gbl08ma.com/didplcbft/plc" 30 28 "tangled.org/gbl08ma.com/didplcbft/transaction" 29 + "tangled.org/gbl08ma.com/didplcbft/types" 31 30 ) 32 31 33 32 // Server represents the HTTP server for the PLC directory. 34 33 type Server struct { 35 - txFactory *transaction.Factory 36 - plc plc.ReadPLC 37 - router *http.ServeMux 38 - node *node.Node 39 - srv http.Server 40 - handlerTimeout time.Duration 41 - proto string 42 - addr string 34 + txFactory *transaction.Factory 35 + plc plc.ReadPLC 36 + router *http.ServeMux 37 + mempoolSubmitter types.MempoolSubmitter 38 + srv http.Server 39 + handlerTimeout time.Duration 40 + proto string 41 + addr string 43 42 44 43 started atomic.Bool 45 44 exitDone sync.WaitGroup 46 45 } 47 46 48 47 // NewServer creates a new instance of the Server. 
49 - func NewServer(txFactory *transaction.Factory, plc plc.ReadPLC, node *node.Node, listenAddr string, handlerTimeout time.Duration) (*Server, error) { 48 + func NewServer(txFactory *transaction.Factory, plc plc.ReadPLC, mempoolSubmitter types.MempoolSubmitter, listenAddr string, handlerTimeout time.Duration) (*Server, error) { 50 49 s := &Server{ 51 - txFactory: txFactory, 52 - plc: plc, 53 - router: http.NewServeMux(), 54 - node: node, 55 - srv: http.Server{Addr: listenAddr}, 56 - handlerTimeout: handlerTimeout, 50 + txFactory: txFactory, 51 + plc: plc, 52 + router: http.NewServeMux(), 53 + mempoolSubmitter: mempoolSubmitter, 54 + srv: http.Server{Addr: listenAddr}, 55 + handlerTimeout: handlerTimeout, 57 56 } 58 57 s.setupRoutes() 59 58 ··· 199 198 return 200 199 } 201 200 202 - if s.node == nil { 201 + if s.mempoolSubmitter == nil { 203 202 // Validate only 204 203 // Marshal the operation to JSON bytes for validation 205 204 opBytes, err := json.Marshal(op) ··· 216 215 return 217 216 } 218 217 219 - uuid, err := uuid.NewRandom() 220 - if handlePLCError(w, err, "") { 221 - return 222 - } 223 - 224 218 tx := abciapp.Transaction[abciapp.CreatePlcOpArguments]{ 225 219 Action: abciapp.TransactionActionCreatePlcOp, 226 220 Arguments: abciapp.CreatePlcOpArguments{ ··· 234 228 return 235 229 } 236 230 237 - // broadcastTxCommit will wait for inclusion, up until the TimeoutBroadcastTxCommit configured for the node, or until the context deadline expires 231 + // broadcastTxCommit will wait for inclusion until the context deadline expires 238 232 // in practice we expect operations to be included in about one second 239 - result, err := broadcastTxCommit(r.Context(), s.node, uuid.String(), txBytes) 233 + result, err := s.mempoolSubmitter.BroadcastTxCommit(r.Context(), txBytes) 240 234 // TODO more robust error handling 241 235 if handlePLCError(w, err, "") { 242 236 return
+6 -2
main.go
··· 90 90 os.Remove(didBloomFilterPath) 91 91 } 92 92 93 + mempoolSubmitter := &txSubmitter{} 94 + 93 95 pv := privval.LoadFilePV( 94 96 config.PrivValidatorKeyFile(), 95 97 config.PrivValidatorStateFile(), 96 98 ) 97 99 98 - app, txFactory, plc, cleanup, err := abciapp.NewDIDPLCApplication(pv, treeDB, indexDB, recreateDatabases, filepath.Join(homeDir, "snapshots"), didBloomFilterPath) 100 + app, txFactory, plc, cleanup, err := abciapp.NewDIDPLCApplication(pv, treeDB, indexDB, recreateDatabases, filepath.Join(homeDir, "snapshots"), didBloomFilterPath, mempoolSubmitter) 99 101 if err != nil { 100 102 log.Fatalf("failed to create DIDPLC application: %v", err) 101 103 } ··· 128 130 log.Fatalf("Creating node: %v", err) 129 131 } 130 132 133 + mempoolSubmitter.node = node 134 + 131 135 err = app.FinishInitializing(node.BlockStore()) 132 136 if err != nil { 133 137 log.Fatalf("Finishing ABCI app initialization: %v", err) ··· 143 147 }() 144 148 145 149 if config.PLC.ListenAddress != "" { 146 - plcAPIServer, err := httpapi.NewServer(txFactory, plc, node, config.PLC.ListenAddress, 30*time.Second) 150 + plcAPIServer, err := httpapi.NewServer(txFactory, plc, mempoolSubmitter, config.PLC.ListenAddress, 30*time.Second) 147 151 if err != nil { 148 152 log.Fatalf("Creating PLC API server: %v", err) 149 153 }
+130 -14
store/consensus.go
··· 5 5 _ "embed" 6 6 "encoding/base32" 7 7 "encoding/binary" 8 + "errors" 8 9 "iter" 9 10 "math" 10 11 "slices" ··· 24 25 "tangled.org/gbl08ma.com/didplcbft/types" 25 26 ) 26 27 28 + const ( 29 + TreeOperationKeyPrefix = 'o' 30 + TreeOperationKeyLength = 1 + 8 31 + TreeRangeChallengeCommitmentKeyPrefix = 'C' 32 + TreeRangeChallengeKeyLength = 1 + 20 33 + TreeChallengeCompletionKeyPrefix = 'p' 34 + TreeChallengeCompletionKeyLength = 1 + 20 35 + 36 + IndexBlockChallengeKeyPrefix = 'c' 37 + IndexBlockChallengeKeyLength = 1 + 8 38 + IndexDIDLogKeyPrefix = 'l' 39 + IndexDIDLogKeyLength = 1 + 15 + 8 40 + 41 + TreeAuthoritativePLCKey = "aPLCURL" 42 + TreeAuthoritativeImportProgressKey = "aImportProgress" 43 + ) 44 + 27 45 var Consensus ConsensusStore = &consensusStore{} 46 + 47 + var ErrNoActiveChallengeCommitment = errors.New("the validator is currently not committed to a challenge") 48 + var ErrNoRecentChallengeCompletion = errors.New("the validator has not completed a range challenge recently") 28 49 29 50 // ConsensusStore manages all information that is directly or indirectly protected by consensus 30 51 type ConsensusStore interface { ··· 44 65 45 66 AuthoritativeImportProgress(tx transaction.Read) (uint64, error) 46 67 SetAuthoritativeImportProgress(tx transaction.Write, nextCursor uint64) error 68 + 69 + ValidatorRangeChallengeCommitment(tx transaction.Read, validatorAddress []byte) (fromHeight, toHeight, provenHeight, includedOnHeight uint64, treeRoot []byte, err error) 70 + SetValidatorRangeChallengeCommitment(tx transaction.Write, validatorAddress []byte, fromHeight, toHeight, provenHeight, includedOnHeight uint64, treeRoot []byte) error 71 + ClearValidatorRangeChallengeCommitment(tx transaction.Write, validatorAddress []byte) error 72 + 73 + ValidatorRangeChallengeCompletion(tx transaction.Read, validatorAddress []byte) (uint64, error) 74 + SetValidatorRangeChallengeCompletion(tx transaction.Write, validatorAddress []byte, completedToHeight uint64) error 47 
75 48 76 BlockChallengeProof(tx transaction.Read, height uint64) ([]byte, error) 49 77 BlockChalengeProofsIterator(tx transaction.Read, afterHeight uint64, retErr *error) iter.Seq2[uint64, []byte] // afterHeight is exclusive for consistency with OperationsIterator ··· 201 229 func (t *consensusStore) ExportOperations(ctx context.Context, tx transaction.Read, after uint64, count int) ([]types.SequencedLogEntry, error) { 202 230 entries := make([]types.SequencedLogEntry, 0, count) 203 231 var iterErr error 204 - for logEntry, _ := range t.OperationsIterator(tx, after, &iterErr) { 232 + for logEntry := range t.OperationsIterator(tx, after, &iterErr) { 205 233 entries = append(entries, logEntry) 206 234 207 235 // this condition being checked here also makes it so that a count of zero means unlimited ··· 431 459 return did, nil 432 460 } 433 461 434 - const DIDLogKeySize = 1 + 15 + 8 435 - const DIDLogKeyPrefix = 'l' 436 - 437 462 func marshalDIDLogKey(didBytes []byte, sequence uint64) []byte { 438 - key := make([]byte, DIDLogKeySize) 439 - key[0] = DIDLogKeyPrefix 463 + key := make([]byte, IndexDIDLogKeyLength) 464 + key[0] = IndexDIDLogKeyPrefix 440 465 copy(key[1:16], didBytes) 441 466 binary.BigEndian.PutUint64(key[16:], sequence) 442 467 return key ··· 461 486 } 462 487 463 488 func marshalOperationKey(sequence uint64) []byte { 464 - key := make([]byte, 1+8) 465 - key[0] = 'o' 489 + key := make([]byte, TreeOperationKeyLength) 490 + key[0] = TreeOperationKeyPrefix 466 491 467 492 binary.BigEndian.PutUint64(key[1:], sequence) 468 493 ··· 588 613 } 589 614 590 615 func (t *consensusStore) AuthoritativePLC(tx transaction.Read) (string, error) { 591 - url, err := tx.Tree().Get([]byte("aPLCURL")) 616 + url, err := tx.Tree().Get([]byte(TreeAuthoritativePLCKey)) 592 617 if err != nil { 593 618 return "", stacktrace.Propagate(err, "") 594 619 } ··· 599 624 } 600 625 601 626 func (t *consensusStore) SetAuthoritativePLC(tx transaction.Write, url string) error { 602 - _, err 
:= tx.Tree().Set([]byte("aPLCURL"), []byte(url)) 627 + _, err := tx.Tree().Set([]byte(TreeAuthoritativePLCKey), []byte(url)) 603 628 return stacktrace.Propagate(err, "") 604 629 } 605 630 606 631 func (t *consensusStore) AuthoritativeImportProgress(tx transaction.Read) (uint64, error) { 607 - progBytes, err := tx.Tree().Get([]byte("aImportProgress")) 632 + progBytes, err := tx.Tree().Get([]byte(TreeAuthoritativeImportProgressKey)) 608 633 if err != nil { 609 634 return 0, stacktrace.Propagate(err, "") 610 635 } ··· 618 643 value := make([]byte, 8) 619 644 binary.BigEndian.PutUint64(value, nextCursor) 620 645 621 - _, err := tx.Tree().Set([]byte("aImportProgress"), value) 646 + _, err := tx.Tree().Set([]byte(TreeAuthoritativeImportProgressKey), value) 647 + return stacktrace.Propagate(err, "") 648 + } 649 + 650 + func marshalRangeChallengeCommitmentKey(validatorAddress []byte) []byte { 651 + key := make([]byte, TreeRangeChallengeKeyLength) 652 + key[0] = TreeRangeChallengeCommitmentKeyPrefix 653 + copy(key[1:], validatorAddress) 654 + return key 655 + } 656 + 657 + func marshalRangeChallengeCommitmentValue(fromHeight, toHeight, provenHeight, includedOnHeight uint64, treeRoot []byte) []byte { 658 + value := make([]byte, 8+8+8+8+24) // four uint64 heights + first 24 bytes of the tree root 659 + binary.BigEndian.PutUint64(value, fromHeight) 660 + binary.BigEndian.PutUint64(value[8:], toHeight) 661 + binary.BigEndian.PutUint64(value[16:], provenHeight) 662 + binary.BigEndian.PutUint64(value[24:], includedOnHeight) 663 + copy(value[32:], treeRoot[0:24]) // NOTE(review): persists only the first 24 bytes of treeRoot and panics if it is shorter — confirm truncation is intended 664 + return value 665 + } 666 + 667 + func unmarshalRangeChallengeCommitmentValue(value []byte) (fromHeight, toHeight, provenHeight, includedOnHeight uint64, treeRoot []byte) { 668 + fromHeight = binary.BigEndian.Uint64(value) 669 + toHeight = binary.BigEndian.Uint64(value[8:]) 670 + provenHeight = binary.BigEndian.Uint64(value[16:]) 671 + includedOnHeight = binary.BigEndian.Uint64(value[24:]) 672 + treeRoot = slices.Clone(value[32:]) // 24 bytes, as stored by marshalRangeChallengeCommitmentValue 673 + return 674 + } 675 + 676 + // 
ValidatorRangeChallengeCommitment implements [ConsensusStore]. 677 + func (t *consensusStore) ValidatorRangeChallengeCommitment(tx transaction.Read, validatorAddress []byte) (fromHeight, toHeight, provenHeight, includedOnHeight uint64, treeRoot []byte, err error) { 678 + key := marshalRangeChallengeCommitmentKey(validatorAddress) 679 + value, err := tx.Tree().Get(key) 680 + if err != nil { 681 + return 0, 0, 0, 0, nil, stacktrace.Propagate(err, "") 682 + } 683 + if value == nil { 684 + return 0, 0, 0, 0, nil, stacktrace.Propagate(ErrNoActiveChallengeCommitment, "") 685 + } 686 + fromHeight, toHeight, provenHeight, includedOnHeight, treeRoot = unmarshalRangeChallengeCommitmentValue(value) 687 + return 688 + } 689 + 690 + // SetValidatorRangeChallengeCommitment implements [ConsensusStore]. 691 + func (t *consensusStore) SetValidatorRangeChallengeCommitment(tx transaction.Write, validatorAddress []byte, fromHeight, toHeight, provenHeight, includedOnHeight uint64, treeRoot []byte) error { 692 + key := marshalRangeChallengeCommitmentKey(validatorAddress) 693 + value := marshalRangeChallengeCommitmentValue(fromHeight, toHeight, provenHeight, includedOnHeight, treeRoot) 694 + // this may overwrite sometimes (e.g. if a previous commitment has expired and the validator needs to submit a new one) 695 + _, err := tx.Tree().Set(key, value) 696 + return stacktrace.Propagate(err, "") 697 + } 698 + 699 + // ClearValidatorRangeChallengeCommitment implements [ConsensusStore]. 
700 + func (t *consensusStore) ClearValidatorRangeChallengeCommitment(tx transaction.Write, validatorAddress []byte) error { 701 + _, removed, err := tx.Tree().Remove(marshalRangeChallengeCommitmentKey(validatorAddress)) 702 + if err != nil { 703 + return stacktrace.Propagate(err, "") 704 + } 705 + if !removed { 706 + // we are only expecting to call this after completing a range challenge, so we're expecting that the key would still exist 707 + // (we would have needed it to validate the challenge completion transaction) 708 + return stacktrace.NewError("validator did not have a range challenge commitment") 709 + } 710 + return nil 711 + } 712 + 713 + func marshalRangeChallengeCompletionKey(validatorAddress []byte) []byte { 714 + key := make([]byte, TreeChallengeCompletionKeyLength) 715 + key[0] = TreeChallengeCompletionKeyPrefix 716 + copy(key[1:], validatorAddress) 717 + return key 718 + } 719 + 720 + // ValidatorRangeChallengeCompletion implements [ConsensusStore]. 721 + func (t *consensusStore) ValidatorRangeChallengeCompletion(tx transaction.Read, validatorAddress []byte) (uint64, error) { 722 + key := marshalRangeChallengeCompletionKey(validatorAddress) 723 + value, err := tx.Tree().Get(key) 724 + if err != nil { 725 + return 0, stacktrace.Propagate(err, "") 726 + } 727 + if value == nil { 728 + return 0, stacktrace.Propagate(ErrNoRecentChallengeCompletion, "") 729 + } 730 + return binary.BigEndian.Uint64(value), nil 731 + } 732 + 733 + // SetValidatorRangeChallengeCompletion implements [ConsensusStore]. 
734 + func (t *consensusStore) SetValidatorRangeChallengeCompletion(tx transaction.Write, validatorAddress []byte, completedToHeight uint64) error { 735 + key := marshalRangeChallengeCompletionKey(validatorAddress) 736 + value := binary.BigEndian.AppendUint64(nil, completedToHeight) 737 + _, err := tx.Tree().Set(key, value) 622 738 return stacktrace.Propagate(err, "") 623 739 } 624 740 625 741 func marshalBlockChallengeProofKey(block uint64) []byte { 626 - key := make([]byte, 1+8) 627 - key[0] = 'c' 742 + key := make([]byte, IndexBlockChallengeKeyLength) 743 + key[0] = IndexBlockChallengeKeyPrefix 628 744 629 745 binary.BigEndian.PutUint64(key[1:], block) 630 746
+1
transaction/iavl_adapter.go
··· 17 17 type UnifiedTree interface { 18 18 ReadTree 19 19 Set(key, value []byte) (bool, error) 20 + Remove(key []byte) ([]byte, bool, error) 20 21 } 21 22 22 23 type immutableToReadOnlyTree struct {
+12
types/tx.go
··· 1 + package types 2 + 3 + import ( 4 + "context" 5 + 6 + ctypes "github.com/cometbft/cometbft/rpc/core/types" 7 + "github.com/cometbft/cometbft/types" 8 + ) 9 + 10 + type MempoolSubmitter interface { 11 + BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) 12 + }