A very experimental PLC implementation that uses BFT consensus for decentralization.

Allow passing callback for arbitrary changes in ChangeAllNonZeroValidatorReputations

gbl08ma.com d5732b12 ca2f1ec2

verified
+35 -10
+15 -7
store/consensus.go
··· 83 84 ValidatorReputation(tx transaction.Read, validatorAddress []byte) (uint64, error) 85 ChangeValidatorReputation(tx transaction.Write, validatorAddress []byte, change int64) error 86 - ChangeAllNonZeroValidatorReputations(tx transaction.Write, change int64) error 87 } 88 89 var _ ConsensusStore = (*consensusStore)(nil) ··· 877 var maxReputationKey = marshalValidatorReputationKey(slices.Repeat([]byte{0xff}, 20)) 878 879 // ChangeAllNonZeroValidatorReputations implements [ConsensusStore]. 880 - func (t *consensusStore) ChangeAllNonZeroValidatorReputations(tx transaction.Write, change int64) error { 881 // we are not allowed to make updates to the tree while an iterator is active 882 // process validators in batches of 100 to avoid loading too many key-value pairs into memory 883 const batchSize = 100 ··· 890 value []byte 891 } 892 893 - changeInt := big.NewInt(change) 894 - 895 batch := func() ([]kv, bool, error) { 896 toSet := make([]kv, 0, batchSize) 897 itr, err := tx.Tree().Iterator(startingKey, maxReputationKey, true) ··· 904 itr.Next() 905 } 906 907 for i := 0; itr.Valid() && i < batchSize; i++ { 908 reputation := new(big.Int).SetBytes(itr.Value()) 909 - reputation.Add(reputation, changeInt) 910 911 if reputation.Sign() <= 0 { 912 toSet = append(toSet, kv{ 913 - key: slices.Clone(itr.Key()), 914 value: nil, 915 }) 916 } else { 917 toSet = append(toSet, kv{ 918 - key: slices.Clone(itr.Key()), 919 value: reputation.Bytes(), 920 }) 921 }
··· 83 84 ValidatorReputation(tx transaction.Read, validatorAddress []byte) (uint64, error) 85 ChangeValidatorReputation(tx transaction.Write, validatorAddress []byte, change int64) error 86 + ChangeAllNonZeroValidatorReputations(tx transaction.Write, changer func(validatorAddress []byte, reputation uint64) (uint64, error)) error 87 } 88 89 var _ ConsensusStore = (*consensusStore)(nil) ··· 877 var maxReputationKey = marshalValidatorReputationKey(slices.Repeat([]byte{0xff}, 20)) 878 879 // ChangeAllNonZeroValidatorReputations implements [ConsensusStore]. 880 + func (t *consensusStore) ChangeAllNonZeroValidatorReputations(tx transaction.Write, changer func(validatorAddress []byte, reputation uint64) (uint64, error)) error { 881 // we are not allowed to make updates to the tree while an iterator is active 882 // process validators in batches of 100 to avoid loading too many key-value pairs into memory 883 const batchSize = 100 ··· 890 value []byte 891 } 892 893 batch := func() ([]kv, bool, error) { 894 toSet := make([]kv, 0, batchSize) 895 itr, err := tx.Tree().Iterator(startingKey, maxReputationKey, true) ··· 902 itr.Next() 903 } 904 905 + validatorAddrTmp := make([]byte, 20) 906 + 907 for i := 0; itr.Valid() && i < batchSize; i++ { 908 reputation := new(big.Int).SetBytes(itr.Value()) 909 + 910 + keyCopy := slices.Clone(itr.Key()) 911 + copy(validatorAddrTmp, keyCopy[1:21]) 912 + 913 + newValue, err := changer(validatorAddrTmp, reputation.Uint64()) 914 + if err != nil { 915 + return nil, false, stacktrace.Propagate(err) 916 + } 917 + reputation.SetUint64(newValue) 918 919 if reputation.Sign() <= 0 { 920 toSet = append(toSet, kv{ 921 + key: keyCopy, 922 value: nil, 923 }) 924 } else { 925 toSet = append(toSet, kv{ 926 + key: keyCopy, 927 value: reputation.Bytes(), 928 }) 929 }
+20 -3
store/store_test.go
··· 5 "testing" 6 "time" 7 8 "github.com/stretchr/testify/require" 9 "tangled.org/gbl08ma.com/didplcbft/store" 10 "tangled.org/gbl08ma.com/didplcbft/testutil" ··· 16 tx, err := txFactory.ReadWorking(time.Now()).Upgrade() 17 require.NoError(t, err) 18 19 - err = store.Consensus.ChangeAllNonZeroValidatorReputations(tx, 100) 20 require.NoError(t, err) 21 22 validators := make([][]byte, 10) ··· 38 require.Equal(t, uint64(3*10*i), rep) 39 } 40 41 - err = store.Consensus.ChangeAllNonZeroValidatorReputations(tx, 100) 42 require.NoError(t, err) 43 44 for i := range validators { 45 rep, err := store.Consensus.ValidatorReputation(tx.Downgrade(), validators[i]) 46 require.NoError(t, err) ··· 52 } 53 } 54 55 - err = store.Consensus.ChangeAllNonZeroValidatorReputations(tx, -1000) 56 require.NoError(t, err) 57 58 for i := range validators {
··· 5 "testing" 6 "time" 7 8 + "github.com/gbl08ma/stacktrace" 9 "github.com/stretchr/testify/require" 10 "tangled.org/gbl08ma.com/didplcbft/store" 11 "tangled.org/gbl08ma.com/didplcbft/testutil" ··· 17 tx, err := txFactory.ReadWorking(time.Now()).Upgrade() 18 require.NoError(t, err) 19 20 + err = store.Consensus.ChangeAllNonZeroValidatorReputations(tx, func(validatorAddress []byte, reputation uint64) (uint64, error) { 21 + return 0, stacktrace.NewError("should not be called once") 22 + }) 23 require.NoError(t, err) 24 25 validators := make([][]byte, 10) ··· 41 require.Equal(t, uint64(3*10*i), rep) 42 } 43 44 + err = store.Consensus.ChangeAllNonZeroValidatorReputations(tx, func(validatorAddress []byte, reputation uint64) (uint64, error) { 45 + require.Contains(t, validators, validatorAddress) 46 + return reputation + 100, nil 47 + }) 48 require.NoError(t, err) 49 50 + err = store.Consensus.ChangeAllNonZeroValidatorReputations(tx, func(validatorAddress []byte, reputation uint64) (uint64, error) { 51 + return 0, stacktrace.NewError("should propagate error") 52 + }) 53 + require.Error(t, err) 54 + 55 for i := range validators { 56 rep, err := store.Consensus.ValidatorReputation(tx.Downgrade(), validators[i]) 57 require.NoError(t, err) ··· 63 } 64 } 65 66 + err = store.Consensus.ChangeAllNonZeroValidatorReputations(tx, func(validatorAddress []byte, reputation uint64) (uint64, error) { 67 + require.Contains(t, validators, validatorAddress) 68 + if reputation < 1000 { 69 + return 0, nil 70 + } 71 + return reputation - 1000, nil 72 + }) 73 require.NoError(t, err) 74 75 for i := range validators {