diff --git a/Makefile b/Makefile index b6823e665a..60606cf4c6 100644 --- a/Makefile +++ b/Makefile @@ -293,6 +293,9 @@ $(GOPATH1)/bin/%: test: build $(GOTESTCOMMAND) $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 1h -coverprofile=coverage.txt -covermode=atomic +testc: + echo $(UNIT_TEST_SOURCES) | xargs -P8 -n1 go test -c + benchcheck: build $(GOTESTCOMMAND) $(GOTAGS) -race $(UNIT_TEST_SOURCES) -run ^NOTHING -bench Benchmark -benchtime 1x -timeout 1h diff --git a/agreement/gossip/networkFull_test.go b/agreement/gossip/networkFull_test.go index d2971a17c5..fa0133d1a7 100644 --- a/agreement/gossip/networkFull_test.go +++ b/agreement/gossip/networkFull_test.go @@ -103,7 +103,7 @@ func spinNetwork(t *testing.T, nodesCount int, cfg config.Local) ([]*networkImpl break } } - log.Infof("network established, %d nodes connected in %s", nodesCount, time.Now().Sub(start).String()) + log.Infof("network established, %d nodes connected in %s", nodesCount, time.Since(start).String()) return networkImpls, msgCounters } diff --git a/agreement/selector.go b/agreement/selector.go index 2d0f980ac3..1496027bd6 100644 --- a/agreement/selector.go +++ b/agreement/selector.go @@ -51,7 +51,13 @@ func (sel selector) CommitteeSize(proto config.ConsensusParams) uint64 { // looking at online stake (and status and key material). It is exported so that // AVM can provide opcodes that return the same data. func BalanceRound(r basics.Round, cparams config.ConsensusParams) basics.Round { - return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback)) + return r.SubSaturate(BalanceLookback(cparams)) +} + +// BalanceLookback is how far back agreement looks when considering balances for +// voting stake. 
+func BalanceLookback(cparams config.ConsensusParams) basics.Round { + return basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback) } func seedRound(r basics.Round, cparams config.ConsensusParams) basics.Round { diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index c7a8a9a4cf..fd99bcc612 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -88,7 +88,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro } else { return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } - downloadDuration = time.Now().Sub(blockDownloadStartTime) + downloadDuration = time.Since(blockDownloadStartTime) block, cert, err := processBlockBytes(fetchedBuf, round, address) if err != nil { return nil, nil, time.Duration(0), err diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index c1b534a722..9387918881 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -221,8 +221,7 @@ func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound } reportInfof(infoTxPending, txid, stat.LastRound) - // WaitForRound waits until round "stat.LastRound+1" is committed - stat, err = client.WaitForRound(stat.LastRound) + stat, err = client.WaitForRound(stat.LastRound + 1) if err != nil { return model.PendingTransactionResponse{}, fmt.Errorf(errorRequestFail, err) } diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go index 2474081a0e..00b3a96727 100644 --- a/cmd/loadgenerator/main.go +++ b/cmd/loadgenerator/main.go @@ -200,22 +200,23 @@ func waitForRound(restClient client.RestClient, cfg config, spendingRound bool) time.Sleep(1 * time.Second) continue } - if isSpendRound(cfg, nodeStatus.LastRound) == spendingRound { + lastRound := nodeStatus.LastRound + if isSpendRound(cfg, lastRound) == spendingRound { // time to send transactions. 
return } if spendingRound { - fmt.Printf("Last round %d, waiting for spending round %d\n", nodeStatus.LastRound, nextSpendRound(cfg, nodeStatus.LastRound)) + fmt.Printf("Last round %d, waiting for spending round %d\n", lastRound, nextSpendRound(cfg, nodeStatus.LastRound)) } for { // wait for the next round. - nodeStatus, err = restClient.WaitForBlock(basics.Round(nodeStatus.LastRound)) + err = restClient.WaitForRoundWithTimeout(lastRound + 1) if err != nil { fmt.Fprintf(os.Stderr, "unable to wait for next round node status : %v", err) - time.Sleep(1 * time.Second) break } - if isSpendRound(cfg, nodeStatus.LastRound) == spendingRound { + lastRound++ + if isSpendRound(cfg, lastRound) == spendingRound { // time to send transactions. return } diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go index d495fbb328..16f28fd904 100644 --- a/cmd/tealdbg/localLedger.go +++ b/cmd/tealdbg/localLedger.go @@ -359,6 +359,10 @@ func (l *localLedger) LookupAgreement(rnd basics.Round, addr basics.Address) (ba }, nil } +func (l *localLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, nil +} + func (l *localLedger) OnlineCirculation(rnd basics.Round, voteRound basics.Round) (basics.MicroAlgos, error) { // A constant is fine for tealdbg return basics.Algos(1_000_000_000), nil // 1B diff --git a/config/consensus.go b/config/consensus.go index 7e111ecc89..b153848230 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -544,6 +544,9 @@ type ConsensusParams struct { // occur, extra funds need to be put into the FeeSink. The bonus amount // decays exponentially. Bonus BonusPlan + + // Heartbeat support + Heartbeat bool } // ProposerPayoutRules puts several related consensus parameters in one place. The same @@ -603,7 +606,7 @@ type ProposerPayoutRules struct { // // BaseAmount: 0, DecayInterval: XXX // -// by using a zero baseAmount, the amount not affected. 
+// by using a zero baseAmount, the amount is not affected. // For a bigger change, we'd use a plan like: // // BaseRound: , BaseAmount: , DecayInterval: @@ -1519,7 +1522,7 @@ func initConsensusProtocols() { vFuture.EnableLogicSigSizePooling = true vFuture.Payouts.Enabled = true - vFuture.Payouts.Percent = 75 + vFuture.Payouts.Percent = 50 vFuture.Payouts.GoOnlineFee = 2_000_000 // 2 algos vFuture.Payouts.MinBalance = 30_000_000_000 // 30,000 algos vFuture.Payouts.MaxBalance = 70_000_000_000_000 // 70M algos @@ -1530,7 +1533,9 @@ func initConsensusProtocols() { vFuture.Bonus.BaseAmount = 10_000_000 // 10 Algos // 2.9 sec rounds gives about 10.8M rounds per year. - vFuture.Bonus.DecayInterval = 250_000 // .99^(10.8/0.25) ~ .648. So 35% decay per year + vFuture.Bonus.DecayInterval = 1_000_000 // .99^(10.8M/1M) ~ .897. So ~10% decay per year + + vFuture.Heartbeat = true Consensus[protocol.ConsensusFuture] = vFuture diff --git a/config/consensus_test.go b/config/consensus_test.go index c0d079cdf0..6bc8d45c45 100644 --- a/config/consensus_test.go +++ b/config/consensus_test.go @@ -37,6 +37,11 @@ func TestConsensusParams(t *testing.T) { if params.ApplyData && params.PaysetCommit == PaysetCommitUnsupported { t.Errorf("Protocol %s: ApplyData with PaysetCommitUnsupported", proto) } + + // To figure out challenges, nodes must be able to lookup headers up to two GracePeriods back + if 2*params.Payouts.ChallengeGracePeriod > params.MaxTxnLife+params.DeeperBlockHeaderHistory { + t.Errorf("Protocol %s: Grace period is too long", proto) + } } } diff --git a/crypto/msgp_gen.go b/crypto/msgp_gen.go index ab5bdceb88..fc279029a0 100644 --- a/crypto/msgp_gen.go +++ b/crypto/msgp_gen.go @@ -111,6 +111,16 @@ import ( // |-----> MsgIsZero // |-----> HashTypeMaxSize() // +// HeartbeatProof +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) 
MsgIsZero +// |-----> HeartbeatProofMaxSize() +// // MasterDerivationKey // |-----> (*) MarshalMsg // |-----> (*) CanMarshalMsg @@ -1169,6 +1179,232 @@ func HashTypeMaxSize() (s int) { return } +// MarshalMsg implements msgp.Marshaler +func (z *HeartbeatProof) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0006Len := uint32(5) + var zb0006Mask uint8 /* 6 bits */ + if (*z).PK == (ed25519PublicKey{}) { + zb0006Len-- + zb0006Mask |= 0x2 + } + if (*z).PK1Sig == (ed25519Signature{}) { + zb0006Len-- + zb0006Mask |= 0x4 + } + if (*z).PK2 == (ed25519PublicKey{}) { + zb0006Len-- + zb0006Mask |= 0x8 + } + if (*z).PK2Sig == (ed25519Signature{}) { + zb0006Len-- + zb0006Mask |= 0x10 + } + if (*z).Sig == (ed25519Signature{}) { + zb0006Len-- + zb0006Mask |= 0x20 + } + // variable map header, size zb0006Len + o = append(o, 0x80|uint8(zb0006Len)) + if zb0006Len != 0 { + if (zb0006Mask & 0x2) == 0 { // if not empty + // string "p" + o = append(o, 0xa1, 0x70) + o = msgp.AppendBytes(o, ((*z).PK)[:]) + } + if (zb0006Mask & 0x4) == 0 { // if not empty + // string "p1s" + o = append(o, 0xa3, 0x70, 0x31, 0x73) + o = msgp.AppendBytes(o, ((*z).PK1Sig)[:]) + } + if (zb0006Mask & 0x8) == 0 { // if not empty + // string "p2" + o = append(o, 0xa2, 0x70, 0x32) + o = msgp.AppendBytes(o, ((*z).PK2)[:]) + } + if (zb0006Mask & 0x10) == 0 { // if not empty + // string "p2s" + o = append(o, 0xa3, 0x70, 0x32, 0x73) + o = msgp.AppendBytes(o, ((*z).PK2Sig)[:]) + } + if (zb0006Mask & 0x20) == 0 { // if not empty + // string "s" + o = append(o, 0xa1, 0x73) + o = msgp.AppendBytes(o, ((*z).Sig)[:]) + } + } + return +} + +func (_ *HeartbeatProof) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatProof) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *HeartbeatProof) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} 
+ return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0006 int + var zb0007 bool + zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sig") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK2") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK1Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK1Sig") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK2Sig") + return + } + } + if zb0006 > 0 { + err = msgp.ErrTooManyArrayFields(zb0006) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0007 { + (*z) = HeartbeatProof{} + } + for zb0006 > 0 { + zb0006-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "s": + bts, err = msgp.ReadExactBytes(bts, ((*z).Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "Sig") + return + } + case "p": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK)[:]) + if err != nil { + err = msgp.WrapError(err, "PK") + return + } + case "p2": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2)[:]) + if err != nil { + err = msgp.WrapError(err, "PK2") + return + 
} + case "p1s": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK1Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "PK1Sig") + return + } + case "p2s": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "PK2Sig") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *HeartbeatProof) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *HeartbeatProof) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatProof) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *HeartbeatProof) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 2 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + return +} + +// MsgIsZero returns whether this is a zero value +func (z *HeartbeatProof) MsgIsZero() bool { + return ((*z).Sig == (ed25519Signature{})) && ((*z).PK == (ed25519PublicKey{})) && ((*z).PK2 == (ed25519PublicKey{})) && ((*z).PK1Sig == (ed25519Signature{})) && ((*z).PK2Sig == (ed25519Signature{})) +} + +// MaxSize returns a maximum valid message size for this message type +func HeartbeatProofMaxSize() (s int) { + s = 1 + 2 + // Calculating size of array: z.Sig + s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) + s += 2 + // Calculating size of array: z.PK + s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) + s += 3 + // Calculating size of array: z.PK2 + s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) + s += 4 + // Calculating size of array: z.PK1Sig + s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) + s += 4 + // Calculating size of array: z.PK2Sig + s += 
msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) + return +} + // MarshalMsg implements msgp.Marshaler func (z *MasterDerivationKey) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) diff --git a/crypto/msgp_gen_test.go b/crypto/msgp_gen_test.go index b3fb95150b..0105a58f1d 100644 --- a/crypto/msgp_gen_test.go +++ b/crypto/msgp_gen_test.go @@ -434,6 +434,66 @@ func BenchmarkUnmarshalHashFactory(b *testing.B) { } } +func TestMarshalUnmarshalHeartbeatProof(t *testing.T) { + partitiontest.PartitionTest(t) + v := HeartbeatProof{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingHeartbeatProof(t *testing.T) { + protocol.RunEncodingTest(t, &HeartbeatProof{}) +} + +func BenchmarkMarshalMsgHeartbeatProof(b *testing.B) { + v := HeartbeatProof{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHeartbeatProof(b *testing.B) { + v := HeartbeatProof{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHeartbeatProof(b *testing.B) { + v := HeartbeatProof{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalMasterDerivationKey(t *testing.T) { partitiontest.PartitionTest(t) v := MasterDerivationKey{} diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go index d05ccaa961..aba2385f0f 100644 --- a/crypto/onetimesig.go 
+++ b/crypto/onetimesig.go @@ -57,6 +57,56 @@ type OneTimeSignature struct { PK2Sig ed25519Signature `codec:"p2s"` } +// A HeartbeatProof is functionally equivalent to a OneTimeSignature, but it has +// been cleaned up for use as a transaction field in heartbeat transactions. +type HeartbeatProof struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // Sig is a signature of msg under the key PK. + Sig ed25519Signature `codec:"s"` + PK ed25519PublicKey `codec:"p"` + + // PK2 is used to verify a two-level ephemeral signature. + PK2 ed25519PublicKey `codec:"p2"` + // PK1Sig is a signature of OneTimeSignatureSubkeyOffsetID(PK, Batch, Offset) under the key PK2. + PK1Sig ed25519Signature `codec:"p1s"` + // PK2Sig is a signature of OneTimeSignatureSubkeyBatchID(PK2, Batch) under the master key (OneTimeSignatureVerifier). + PK2Sig ed25519Signature `codec:"p2s"` +} + +// ToOneTimeSignature converts a HeartbeatProof to a OneTimeSignature. +func (hbp HeartbeatProof) ToOneTimeSignature() OneTimeSignature { + return OneTimeSignature{ + Sig: hbp.Sig, + PK: hbp.PK, + PK2: hbp.PK2, + PK1Sig: hbp.PK1Sig, + PK2Sig: hbp.PK2Sig, + } +} + +// ToHeartbeatProof converts a OneTimeSignature to a HeartbeatProof. +func (ots OneTimeSignature) ToHeartbeatProof() HeartbeatProof { + return HeartbeatProof{ + Sig: ots.Sig, + PK: ots.PK, + PK2: ots.PK2, + PK1Sig: ots.PK1Sig, + PK2Sig: ots.PK2Sig, + } +} + +// BatchPrep enqueues the necessary checks into the batch. The caller must call +// batchVerifier.verify() to verify it. 
+func (hbp HeartbeatProof) BatchPrep(voteID OneTimeSignatureVerifier, id OneTimeSignatureIdentifier, msg Hashable, batchVerifier BatchVerifier) { + offsetID := OneTimeSignatureSubkeyOffsetID{SubKeyPK: hbp.PK, Batch: id.Batch, Offset: id.Offset} + batchID := OneTimeSignatureSubkeyBatchID{SubKeyPK: hbp.PK2, Batch: id.Batch} + batchVerifier.EnqueueSignature(PublicKey(voteID), batchID, Signature(hbp.PK2Sig)) + batchVerifier.EnqueueSignature(PublicKey(batchID.SubKeyPK), offsetID, Signature(hbp.PK1Sig)) + batchVerifier.EnqueueSignature(PublicKey(offsetID.SubKeyPK), msg, Signature(hbp.Sig)) + +} + // A OneTimeSignatureSubkeyBatchID identifies an ephemeralSubkey of a batch // for the purposes of signing it with the top-level master key. type OneTimeSignatureSubkeyBatchID struct { diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index e4990fe779..51cd801803 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -245,7 +245,7 @@ }, "/v2/accounts/{address}": { "get": { - "description": "Given a specific account public key, this call returns the accounts status, balance and spendable amounts", + "description": "Given a specific account public key, this call returns the account's status, balance and spendable amounts", "tags": [ "public", "nonparticipating" diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index c2c39a5372..34832fcda3 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -2999,7 +2999,7 @@ }, "/v2/accounts/{address}": { "get": { - "description": "Given a specific account public key, this call returns the accounts status, balance and spendable amounts", + "description": "Given a specific account public key, this call returns the account's status, balance and spendable amounts", "operationId": "AccountInformation", "parameters": [ { diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go index 
c349d3ecbf..6a44a32eaa 100644 --- a/daemon/algod/api/client/restClient.go +++ b/daemon/algod/api/client/restClient.go @@ -26,6 +26,7 @@ import ( "net/http" "net/url" "strings" + "time" "github.com/google/go-querystring/query" @@ -39,6 +40,8 @@ import ( "github.com/algorand/go-algorand/ledger/eval" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/rpcs" + "github.com/algorand/go-algorand/test/e2e-go/globals" ) const ( @@ -283,12 +286,56 @@ func (client RestClient) Status() (response model.NodeStatusResponse, err error) return } -// WaitForBlock returns the node status after waiting for the given round. -func (client RestClient) WaitForBlock(round basics.Round) (response model.NodeStatusResponse, err error) { +// WaitForBlockAfter returns the node status after trying to wait for the given +// round+1. This REST API has the documented misfeatures of returning after 1 +// minute, regardless of whether the given block has been reached. +func (client RestClient) WaitForBlockAfter(round basics.Round) (response model.NodeStatusResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d/", round), nil) return } +// WaitForRound returns the node status after waiting for the given round. It +// waits no more than waitTime in TOTAL, and returns an error if the round has +// not been reached. 
+func (client RestClient) WaitForRound(round uint64, waitTime time.Duration) (status model.NodeStatusResponse, err error) { + timeout := time.After(waitTime) + for { + status, err = client.Status() + if err != nil { + return + } + + if status.LastRound >= round { + return + } + select { + case <-timeout: + return model.NodeStatusResponse{}, fmt.Errorf("timeout waiting for round %v with last round = %v", round, status.LastRound) + case <-time.After(200 * time.Millisecond): + } + } +} + +const singleRoundMaxTime = globals.MaxTimePerRound * 40 + +// WaitForRoundWithTimeout waits for a given round to be reached. As it +// waits, it returns early with an error if the wait time for any round exceeds +// singleRoundMaxTime so we can alert when we're getting "hung" waiting. +func (client RestClient) WaitForRoundWithTimeout(roundToWaitFor uint64) error { + status, err := client.Status() + if err != nil { + return err + } + + for lastRound := status.LastRound; lastRound < roundToWaitFor; lastRound = status.LastRound { + status, err = client.WaitForRound(lastRound+1, singleRoundMaxTime) + if err != nil { + return fmt.Errorf("client.WaitForRound took too long between round %d and %d", lastRound, lastRound+1) + } + } + return nil +} + // HealthCheck does a health check on the potentially running node, // returning an error if the API is down func (client RestClient) HealthCheck() error { @@ -301,14 +348,6 @@ func (client RestClient) ReadyCheck() error { return client.get(nil, "/ready", nil) } -// StatusAfterBlock waits for a block to occur then returns the StatusResponse after that block -// blocks on the node end -// Not supported -func (client RestClient) StatusAfterBlock(blockNum uint64) (response model.NodeStatusResponse, err error) { - err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d", blockNum), nil) - return -} - type pendingTransactionsParams struct { Max uint64 `url:"max"` Format string `url:"format"` @@ -557,6 +596,16 @@ func (client 
RestClient) RawBlock(round uint64) (response []byte, err error) { return } +// EncodedBlockCert takes a round and returns its parsed block and certificate +func (client RestClient) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) { + resp, err := client.RawBlock(round) + if err != nil { + return + } + err = protocol.Decode(resp, &blockCert) + return +} + // Shutdown requests the node to shut itself down func (client RestClient) Shutdown() (err error) { response := 1 diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go index d3924eaf1d..25b3365f4a 100644 --- a/daemon/algod/api/server/v2/dryrun.go +++ b/daemon/algod/api/server/v2/dryrun.go @@ -329,6 +329,10 @@ func (dl *dryrunLedger) LookupAgreement(rnd basics.Round, addr basics.Address) ( }, nil } +func (dl *dryrunLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, nil +} + func (dl *dryrunLedger) OnlineCirculation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) { // dryrun doesn't support setting the global online stake, so we'll just return a constant return basics.Algos(1_000_000_000), nil // 1B diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index 600972e9bb..6f44c1afad 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -985,101 +985,101 @@ var swaggerSpec = []string{ "1jihBfsJzN7gXXkmsK+lQWqKJxGWlOWjJ/6n/9efsEkqlvXw/teR6880WmhdqCdHRxcXF5Pwk6M5pv4n", "WpTp4sjPg90GG/rKm5MqRt/G4eCO1tZj3FRHCsf47O3z0zNy/OZkUhPM6MnoweTB5KFrbc1pwUZPRl/h", "T3h6FrjvR1hf80i50vlHVa7Wx3HnWVHYwvrmkaNR99cCaI4FdswfS9CSpf6RBJpt3P/VBZ3PQU4we8P+", - "tHp05LWRow+ucsJHA1jUbWjrrAfFtX0gYlFOc5b6GmVMWfuxDbBXYXNZa1gv1ZhMbfthH8PLM4xQssUI", - 
"VNiC+yQzeLafn9S8zndQRrfy6MlvkWpWPvHDN/YNY86CaLT/c/rzayIkcbeiNzQ9r5JefJZTndkVJjmZ", - "Lyee7P9dgtzUZOkY5nikqu7gwMul4T0ue2ap5kWzsGutjMWMRR1c+5kNNQXnoapzUvM7tAwGkNTc23Dk", - "B8m37z98/Y+PowGAYNEdBdjn8Q+a539Y6xqsMbC2FXgz7guJGtd1M/CDeifHaMiqngaf1+8066H/wQWH", - "P/q2wQEW3Qea5+ZFwSG2B++xEyESCx7VRw8eeP7ktP8AuiN3poJZBrUAsM6FahRPEpcYqMvH7KO3VWlM", - "SQt7Fo99+PCmqNw79qWJYVePD7jQZgHPKy+3PVxn0d/TjEiXvoxLefjFLuWE21BQI4+s3Pw4Hn39Be/N", - "CTc8h+YE3wza/HYFzS/8nIsL7t80OlO5XFK5QY1IV7yw3ZeGzhX6VJFF2rMdVF/j89H7j71S7yiMeTz6", - "0CidlF1JJlonS6Or0w4xeUf1cU4cyyaluR/uHhcFhnyeVs+Pi8J2DccwAmAo/WDNlFb3JuSH8OuGb8RC", - "Yl0jjZwA30Tbt+ZuuMqDdpxRod0oSnArvz+t/D5u2khYBlyzGUN9PQZM4xRshakTrHRVAdrNEQpKJO0b", - "D12Vx3aqReJarw0cwzXhP1xfwQGVUexM72M3yJ2M+hZ3PbjrU5MCeCuNqW5qeDOs2VfarSRJQ2RcI+P+", - "wpW+VzQ3dBIst9XR5uTZrTL4t1IGq4qcc6udFcUB1EOfuLHrlaMPrsrkIbRGvB4P0hfDm3fwbRB7f7fF", - "ce5NyHH7ncuxFVelc6cmaN671QE/Bx3Qljndpf05Ov6kel+Y9rVPFlZDYTG/D/r4C1f0/sbI6tXsDKS7", - "dbpLsM+OvuaY9bWx1b+knuaQdquh/a01tKp29pV0tDD09chVIQg0tisZ+NoGPKYrTaxZPz3gbFhuBPPx", - "7REe12H+hsXY+GUXuazG/vKIjlp7r7SbNe5cLbsq1g8Q3mG/35w826VdfUGmoMFtkCNSIL43181Lo56J", - "tzfjmRjGmx4/eHxzEIS78Fpo8gKl+DVzyGtlaXGy2peFbeNIR1Ox3sWVeIstVQXqzKFt8KiqDuk4eG7e", - "tvEfdzHjt9k4696EfO9erauAuIz2uTCMymeKUTm3HxleZ5BB7vg/n+D4dybkBeY/ajXGMDZMrMAXGddP", - "Hj766rF7RdILGyXWfm/6zeMnx999514rJOMaQwbsPafzutLyyQLyXLgPnIzojmsePPnP//rvyWRyZydb", - "FevvN69tp93PhbeOYxUPKwLo260vfJNit3XXAXkn6m7Ew/+9WEelgFjfSqFPJoUM9v8S0mfaJCN3Ea2M", - "nY1ePAeURvaY7COPxk7+YBJHJUwm5LVwbdHKnEpbHwZL6CoyL6mkXANkE0+pmIGnbCG7NGdYOkASBXIF", - "MlGsKlVdSqiKmBQSVhh9Xxd5bUCwm9FjjO5ny+Rf0XWQNj+txLQWbslo9lzSNcE+H5oo0GNbQW1NvvuO", - "PBjXt5c8NwMkFWJizHVJ16MbtPpVxDa0LNAzhx0hd4f+4thDLEi19lPVl6yvGn93zv3Fau6W3N3GHohz", - "7u34qR07oR3BNR/bakGwip3GasiqLIp8U9fBNVqeV6HiLM7MMNQ48Bn7CHaapqOX0DZ6bw/xrRHgSqyk", - "TVB7sg3MZ1VHH/BeHvKMzrnFfLy/l7s08B1JsfTOI0FmoNOFSwVuoT7CnqRLR+znTUvG2dJA+WB87VoN", - "7mK3/nHY+zmjNgF/SHuxIEsTHXggI0T8M/6H5lhTj81saXff8MNXM0TXlKuOXTVctZdv24LZhfz7jOGC", - 
"NhrI7obyaT15VyFDtBzC/3mL4P0Q3GGOz121A3u83CL+CkkB/iqZkNeiTki3N6i/pOvxOiX7dS/oteBg", - "fexG87W0eOtOrdQOwzgsUnwlEnt/qdtrXVYFOfIVfLbqIT+al3boIkOkN1YD+hJF+I/ROkcNKWPWNtlZ", - "ZqEebQhzNi/afghhIZTJp7zFfBJ++hlebT4Fx7oZFoOH1PMZpxbwAzMdq2DtZDs+r/zqjMee0WtnPeO/", - "1A3tOhhptfPXobBHma19drDLxue0gi6jvkkxcavF32rxt1r8pUSs5RLXK2Sxgp6d6ajw5Q775O1L83LA", - "iWxRwcGSV4sq1hsipfvIFHLB5+rz1Pe30UccLxE6sYUibe+yzvonf0MF+alrLKZdbQ9XrlExngJRYgko", - "JI3m47o+WAj/cXMQaraEjIgSa04GNSQ+sQr/9YOvbm76U5ArlgI5g2UhJJUs35BfeNVA7Cr8ThHq9jx0", - "uUaYA+MY0tEs65mGNQivwATFfEsIi3MO14WJlb1DiFKDtCVpW30iWYdJx5yuyDBemqkPcHfJxfxLs5l4", - "rA/tpPCU5jmia1ckBw48KBUoz+1+wpJpXfdlCqUreU7TRbW34/qCVnXP9Q08xq2Szziya6Vq6+koMPus", - "gQSrCVwCIGEmsC0iSMCWSFMgyzLXrMib31TtpbHdXiTW19Jm2Knn5JlfnY2AErN66Db9+nYfbvCJmds9", - "wpm5sIujEpB3Vw6MVgfHSQNo23jS5zgF7QJd00NXTZjJVnnnOkC1KIDK+mNL+XcLCYkbQtIVSEXxsLYW", - "de/WHvZ52MPWrp/AZ2INiwYCXZXXX14UNVKVPug1yz7u1suDkvx7quSMByp5yC7sWbu8Lr7b6HXWmvHk", - "WZgNKqqilV5B6AHFoGjPhOj/NRoYZ4CV0MTMGTtLbgH1daSdxupSNcVsXCVDmBuumD0h7/h9ohbUtzlw", - "fz76+pseO5yZx5V/7Vri6oHMYzvMkICJW+NipXFU+H1y07u93yaORyxbR3q58AzWQfuw6uiE8vCOIgXd", - "+LTJTjnjIt7SoLqYhsMuwYgptWDFzZfNV5pN431DvLvrFDstnq35Cf++8nra2u5Gayg+Rbn08UhLgAwK", - "vdjZRQHfqncTXD8FplznO1vrfkzYBCa2FHzdoTSbgxNMlORAZ1WrUSGGJMsHfMYQmqeKAOvhQoZo0lH6", - "QZ0XifLmnZF1UrkVdB55baX4kyph+lMpYUlLC2ui5dPpZNgzaRyENxdSaJGK3OYqlEUhpK5Ot5oMsjxA", - "n6LXMDz0Ee6VlLk1y9ROB+YZvnUAG0CTstUXEzdx5tEUc1PFFnXJ2u71XENY2pkoiL3gt0D4pHzt9lIZ", - "42ctf9KXHmKhe0nvwM6glOp0URZHH/A/WNv+Y10YA7t+qSO95kfY5/now9YUFmSpudFNpG0Y1jDpdrpG", - "RxNRXuLndXOyF0IGl9sfzHc7U1RaSBu3hb7tWY25LhH2eD23yb/1JWyr66y14Vd31kZG7JzXqu5T0Om2", - "ot2g5Z0v5WT7XEdI+Da44PNaUO1PnDGeERpsY8vWJGTNCK7Zp3jdi/4ULsqbj6j4+gs+Z6+FJifLIocl", - "cA3Z1bLLSJvDeemxVdzupxg40d9NQevK/FDi+8TZShfZKeD3uPcEpQLBT0cl1u4zsvo2VvPvKMmfVt7W", - "kAxv5fKXI5elT/e9FcGfvwj+6otdzTXGMA0UyZdwDjfFcH0T31Mgd5QBZ8NqGQ62+ZXx6t1epXohpG/s", - "eivFv1CnqN3JwYFYQyw0uyyxbspDZFt8VtAPszPkecTS0HdQx1WsF8OiyCJl2ALvJFNjF1RmjRPuFN8q", - 
"Pp+14hPs9a3ec2t6+MJMDz1ajrv15/kQRWNfBWi1FBl4x6qYzVwTgj7tp9l12ZCn0nRZEPvlpDcO+4wt", - "4dS8+bOd4qAitga7pRa1wDPIUpAKnqkBURxu1MvKIXQ09QNw457Nagc8LK484eTSJPs2qHHcoQTSRr7C", - "btm+GYNDRgYrYghwcgCyPfpg/0VzWiFUZDWnnoA7G3PXbYvtLmHHbQBI3qASattU+K/EjDywTSZKjpVk", - "Fsy12cdYVi03RlH1NXUl0JykjQoSFRzdk3Pae3J2XgU6q+tZU/wuIOoTesgIhlb1np9u/AA8pdyRfBdB", - "WhBKOMypZivwLv/JbcXHS0szV29xCwMcE5pl9jTWmwArkBuiyqkyug5v5ijdUc3zsgfDgHUBkhkRTfPa", - "AW+vCUe2nOO2OKJT+8YVhVaLF9kikrIZteglqysxKWbkFUulOM7nooqFVxulYWnDCgMp6D79vacpkDck", - "dGNWBc8Zh2QpOGwiJxWfvsKHsa+xJGbfx2fmYd+3LXnbhL8FVnOeITL5qvj9TE7/lQJdWquVUAhpbrfT", - "jc2/QPrf8yj5Q7PhafckbXgaOLXcw2AgxFfs5yOfjlC3lel780PjT1f21b2pFqXOxEUwC9oAbDjjkIqP", - "qHzvmeRR29ya2ZNMXa/V7Tq9TQEeYmerehrpc18/7G91/zdNwnbOmZBIXE7jCqRqXeRuM7H/UpnYg/d9", - "L25shizVLo5WqsPqLq9FBnbcOh3XHP1YpzEuMiDKA9FSWaqwyHjKkJdf9XutJI6UlvOFJmVBtIili9Qf", - "JjS1TDaxF6H4hEFtf3tdwukWdAWE5hJoZi6vwImYmkXXkhQXSRV2V/A5Jy74M6o0BXAVUqSgFGSJ76y2", - "CzT/ng1V11vwhIAjwNUsRAkyo/LKwJ6vdsJ5DpsEL8OK3P3pV3O1vnF4rdK4HbG2pnsEve206y7Uw6bf", - "RnDtyUOyswndlmoxRU4sixxcklwEhXvhpHf/2hB1dvHqaMEsMnbNFO8nuRoBVaBeM71fFdqySIz87oL4", - "1D49Y0vUxDjlwlsgY4PlVOlkF1s2L4VrUWYFASeMcWIcuOdq+pIq/dblS2dYS9mKE5zH6thmin6AjRS1", - "d4vIyL/ah7GxUyMPuSoVcSP4HCjIYmvgsN4y12tYV3Nh7RQ/dpVkZW2Bu0buw1IwvkNW0F6OUB34/c1w", - "kcWhpZI6U0YXlQ0gakRsA+TUvxVgN3T49wDCVI1oSzjYLieknKkQOVBuc1VFURhuoZOSV9/1oenUvn2s", - "f6nf7RKXrYVh5XYmQIUJcA7yC4tZhabcBVXEwUGW9NzlyM1du/AuzOYwJlhmKdlG+WjcNW+FR2DnIS2L", - "uaQZJBnkNGJ0+cU+JvbxtgFwxz15JiuhIZlijZT4pteULHuNSdXQAsdTMeWR4BOSmiNoLs81gbivd4yc", - "AY4dY06Oju5UQ+Fc0S3y4+Gy7Vb3GLDMGGbHHT0gyI6jDwG4Bw/V0JdHBX6c1OaD9hT/BcpNUOkR+0+y", - "AdW3hHr8vRbQNvyFAqwhKVrsvcWBo2yzl43t4CN9RzZmavwi3QLtKKdrTLJrmlqDC+DkMpfbowvKdDIT", - "0irSCZ1pkDtD5/9JmXec+/Rd4aquEBzByU03DjL5sGmr4yIWBOLEhSERV0nKyDBKHpIl46W2T0Spx7bH", - "hASaLozSHtpg7UjYdt8VaZIwpzLLsSX7rJKbQtqiT7ol4BHoSD5i88Zv1v1CyEGda5qlIynTpOSa5UH3", - "vure/vlZL28tErcWiVuLxK1F4tYicWuRuLVI3Fokbi0StxaJW4vErUXi72uR+FRlkhKvcfiKjVzwpB1M", - 
"eRtL+ZeqKl+JKm8gQevEBWXIloIqBf12iz0MQRpojjhgOfRHd9ug07Pnxy+JEqVMgaQGQsZJkVNzNYC1", - "9v33yZQq+OaxTzW0opMuyXRjeIeRr+aFrx6R0x+PfcXRhauM2Xz37rGNVyNKb3K453qPAs+sJuqbkAI3", - "SHc9SKkXCanLk7QGihnLMTJekef49jNYQS4KkLaYIdGyhK7F5wxo/tThZofB559mchdq+4cZ7Y9xw+jl", - "0LakhVfz/VqpItRmXJJnQQ7mHzOaK/ijLw3TjrekxShSu7gSfNYUhMzke5FtWifE7NoRbmDzbNR1Rxmn", - "chOpEtVNgWiThhaGXTnC6tqyPh68Om6XaLtktovCYtq6LYMfH72PyqNlYasN6wxlE3VnLToZxXJM27VQ", - "RxWAgwoDYpqE3RPy1n73acsAIkTuiNXM/LOJYmy+WTENfNdcIhzr+VJzCTzio6cXz/7YEHZWpkCYVsQX", - "2N0tXsajdWJGmgNPHANKpiLbJA32NWpIoYwpqhQsp7slUcg/8cRVwsc82S6nPo0YeRYsbhtPDolmnTgG", - "3MOdNxoG8+YKWziiY88Bxq+bRfex0RAE4vhTzKjU4n37Mr16ms0t47tlfMFpbGkEjLuC5G0mMrlGxic3", - "suT9PO/5GtLSABee5LtonUeXHKx1w8mawbScz81toeujwzY6OB4T/BOxQrvcoVxwPwqyg7/1MfZXTVJv", - "D9flLkHe+F1fmfEebgflG3RmLAvKN97lC4liyzK3OLRtVA/LaG3N8FiJ6dr212fVfuNNfoHt1ona5u8W", - "LeSCKmL3FzJS8sxlPHVqW6/58DonduizNa/Z9NaaJna9kdW5eYeICL/LzVRzRQqQiV5ze6Aah8l1MLAn", - "95PW0r4VGzcnNmyiOvQw2G41/pohHEh6yICvofgIei7ViXmNTky0mU7YeIYWjf4Ul7A5k33zoIElneGb", - "8SW1ucX5TyEvCCVpztC7KrjSskz1O07RfxMsbNKNPfGG6n7e99S/EnchRjx8bqh3nGKQUeXVifLAGURc", - "GC8APItV5XwOyvDRkIBmAO+4e4txUnJzCxMzsmSpFIlNrTXny+guE/vmkm7IDCuaCPInSEGmRuoHu25t", - "yUqzPHfBLmYaImbvONUkB6o0ecUMBzbD+XIKVcgZ6AshzyssxHv1zIGDYiqJG2Z+sE+xHY5bvjcAojHT", - "Pq7bWNxsHxwPO8t6IT95hjFqWI05Zyrsv9iG/cZ840vGkyiRnS2AuHCxNm2Ru1gDzhHQvabjSC/gHTfS", - "TwuCHJ/qy5FD2wPUOYv2dLSoprERLUeRX+ug699BuAyJMJlbt8tfKIU0oAPv2cSNt/X1W3u/p4ulIXIB", - "W4P2CWT71LVP7HnJXSAaRrJWgRv3xlkD5K3+iy+/rOTh75IejQe7TXYH7LKrZoM8xJvf8DGhueBzW1fR", - "3C4F7hPjRakxAPw6DXiwonkiViAly0ANXCkT/PmK5j9Xn30cj2ANaaIlTSGxFoWhWDsz31g6xUaDnGlG", - "8wRv1UMBghP71an9aIc8DrqNLpeQMaoh35BCQgqZLUTGFKnv8xNboIGkC8rnKLqlKOcL+5od5wIkVI0Z", - "zRW6PUS8EMyaJ7YoXRfGY9eoOazbCzRdRBrHoIAzd3ZPUFmjJ9XAPWiUHO27pI9HvYq2QeqqDp2zyGmy", - "mQFaREMfCPBTT3yIGq23RH9L9F860cdKKiLqZi1rhcVXuC3XbNa67gKiN2gl+yTVhW9L9P/VS/R7DqQI", - "JZI27iDx3nBUEabJBZZFmgIx8qtE67xruOfu65hpFxx1V2lTufZ86YIy7mrqVHkNCIe5Ei+XTGvfnvZa", - 
"DJuWmaFF06AD0lIyvcFbCy3Y7+dg/v/eqP0K5MpfaEqZj56MFloXT46OcpHSfCGUPhp9HIfPVOvh+wr+", - "D/4uUki2Mverjwi2kGzOuJG5F3Q+B1mbEEePJg9GH/9vAAAA//+jyUunb8cBAA==", + "tHp05LWRow+ucsJHA1jUbWjrrAfFtX0gYlFOc5b6GmVMWfuxDbBXYXNZZ1kv1ZhMbf9hH8TLMwxRstUI", + "VNiD+yQziLbfn9TMzrdQRr/y6MlvkXJWPvPDd/YNg86CcLT/c/rzayIkcdeiNzQ9r7JefJpTndoVZjmZ", + "Lyee7v9dgtzUdOk45nikqvbgwMulYT4ufWap5kWzsmutjcWsRR1k+5kNOQUHoip0UjM8NA0GkNTs27Dk", + "B8m37z98/Y+PowGAYNUdBdjo8Q+a539Y8xqsMbK2FXkz7ouJGteFM/CDeifHaMmqngaf1+80C6L/wQWH", + "P/q2wQEW3Qea5+ZFwSG2B++xFSESC57VRw8eeAbl1P8AuiN3qIJZBvUAsN6FahRPEpcYqMvI7KO3VW1M", + "SQt7GI99/PCmqPw79qWJ4VePD7jQZgXPKy+3PVxn0d/TjEiXv4xLefjFLuWE21hQI5Cs4Pw4Hn39Be/N", + "CTc8h+YE3wz6/HYlzS/8nIsL7t80SlO5XFK5QZVIV7yw3ZiGzhU6VZFF2rMdlF/j89H7j71i7ygMejz6", + "0KidlF1JKFovS6Ot02452cM5cSybleZ+uHtcFBjzeVo9Py4K2zYc4wiAofSDNVNa3ZuQH8KvG84RC4n1", + "jTSSAnwXbd+bu+ErD/pxRoV2oyrBrfz+tPL7uGkkYRlwzWYMFfYYMI1TsBWmTrTSVQVoN0koqJG0b0B0", + "VR/bqRaJ6702cAzXhf9wjQUHlEaxM72PXSF3Mupb3PXgrk9NCuCtNKa6q+HNsGZfareSJA2RcY2M+wtX", + "+l7R3NBJsNxWS5uTZ7fK4N9KGaxKcs6tdlYUB1APfebGrleOPrgyk4fQGvF6PEhfDG/ewbdB8P3dFse5", + "NyHH7Xcux1Zcmc6dmqB571YH/Bx0QFvndJf25+j4k+p9Yd7XPmlYDYXF/D7o4y9c0fsbI6tXszOQ7tbp", + "LsE+O/qaY9bXxlb/knqaQ9qthva31tCq4tlX0tHC2NcjV4Yg0NiuZOBrG/CYrjSxZgH1gLNhvRFMyLdH", + "eFzH+RsWYwOYXeiyGvvLI3pq7b3Sbta4c7Xsqlg/QHiH/X5z8myXdvUFmYIG90GOSIH43lw3L416Jt7e", + "jGdiGG96/ODxzUEQ7sJrockLlOLXzCGvlaXFyWpfFraNIx1NxXoXV+IttlRVqDOHtsGjqkKk4+C5edsG", + "gNzFlN9m56x7E/K9e7UuA+JS2ufCMCqfKkbl3H5keJ1BBrnj/3yC49+ZkBeYAKnVGOPYMLMCX2RcP3n4", + "6KvH7hVJL2yYWPu96TePnxx/9517rZCMawwZsPeczutKyycLyHPhPnAyojuuefDkP//rvyeTyZ2dbFWs", + "v9+8tq12PxfeOo6VPKwIoG+3vvBNit3WXQvknai7EQ//92IdlQJifSuFPpkUMtj/S0ifaZOM3EW0MnY2", + "mvEcUBrZY7KPPBo7+YNZHJUwmZDXwvVFK3MqbYEYrKGryLykknINkE08pWIKnrKV7NKcYe0ASRTIFchE", + "sapWdSmhqmJSSFhh+H1d5bUBwW5Gj0G6ny2Tf0XXQd78tBLTWrglo9lzSdcEG31ookCPbQm1NfnuO/Jg", + "XN9e8twMkFSIiTHXJV2PbtDqVxHb0LpAzxx2hNwd+4tjD7Eg1dpPVWCyvmr83Tn3F6u5W3J3G3sgzrm3", + 
"46d27IR2BNd9bKsFwSp2Gsshq7Io8k1dCNdoeV6FirM4M8NQ48Bn7CPYaZqOXkLb6L09xLdGgCuxkjZB", + "7ck2MKFVHX3Ae3nIMzrnFhPy/l7u0sB3JMXSO48EmYFOFy4XuIX6CHuSLh+xnzctGWdLA+WD8bVrNbiL", + "3QLIYfPnjNoM/CH9xYI0TXTggYwQ8c/4H5pjUT02s7XdfccPX84QXVOuPHbVcdVevm0PZhfy71OGC9ro", + "ILsbyqf15F2FDNFyCP/nLYL3Q3CHOT535Q7s8XKL+CskBfirZEJeizoj3d6g/pKux+uU7Ne9oNeCg/Wx", + "G83X0uKtO7VSOwzjsEjxpUjs/aXur3VZFeTIl/DZqof8aF7aoYsMkd5YDuhLFOE/RgsdNaSMWdtkZ52F", + "erQhzNm8aBsihJVQJp/yFvNJ+OlneLX5FBzrZlgMHlLPZ5xawA/MdKyCtZPt+MTyqzMee0avnfWM/1I3", + "tOtgpNXOX4fCHmW29tnBLhuf0wq6jPomxcStFn+rxd9q8ZcSsZZLXK+QxRJ6dqajwtc77JO3L83LASey", + "VQUHS14tqlhviNTuI1PIBZ+rz1Pf30YfcbxE6MRWirTNyzrrn/wNFeSnrrOYdrU9XL1GxXgKRIkloJA0", + "mo9r+2Ah/MfNQajZEjIiSiw6GdSQ+MQq/NcPvrq56U9BrlgK5AyWhZBUsnxDfuFVB7Gr8DtFqNvz0OUa", + "YQ6MY0hHs65nGhYhvAITFPMtISzOOVxXJlb2DiFKDdLWpG01imQdJh1zuiLDeGmmPsDdJRfzL81m4rE+", + "tJXCU5rniK5dkRw48KBUoDy3+wlLpnXdmCmUruQ5TRfV3o7rC1rVPtd38Bi3aj7jyK6Xqq2no8DsswYS", + "rCZwCYCEmcC+iCABeyJNgSzLXLMib35T9ZfGfnuRWF9Lm2GrnpNnfnU2AkrM6qHb9Ov7fbjBJ2Zu9whn", + "5sIujkpA3l05MFotHCcNoG3nSZ/jFPQLdF0PXTlhJlv1nesA1aIAKuuPLeXfLSQkbghJVyAVxcPaWtS9", + "W3vY52EPW7uGAp+JNSwaCHRVXn95UdRIVfqg1yz7uFsvD2ry76mSMx6o5CG7sGft8rr4bqPXWWvGk2dh", + "NqioqlZ6BaEHFIOiPROi/9doYJwBVkITM2fsLLkF1BeSdhqrS9UUs3GVDGFuuGL2hLzj94laUN/nwP35", + "6OtveuxwZh5X/7VriasHMo/tMEMCJm6Ni5XGUeH3yU3v9n6bOB6xbB1p5sIzWAf9w6qjE8rDO4oUdOPT", + "Jjv1jIt4T4PqYhoOuwQjptSCFTdfN19pNo03DvHurlNstXi25if8+8rraYu7G62h+BT10scjLQEyKPRi", + "ZxsFfKveTXANFZhyre9ssfsxYROY2FrwdYvSbA5OMFGSA51VvUaFGJIsH/AZQ2ieKgKshwsZoklH6Qd1", + "XiTKm3dG1knlVtB55LWV4k+qhOlPpYQlLS2siZZPp5Nh06RxEN5cSKFFKnKbq1AWhZC6Ot1qMsjyAH2K", + "XsPw0Ee4V1Lm1ixTOx2YZ/jWAWwATcpWX0zcxJlHU8xNFVvUJYu713MNYWlnoiD2gt8C4ZPytdtLZYyf", + "tfxJX3qIhe4lvQM7g1Kq00VZHH3A/2Bx+491YQxs+6WO9JofYaPnow9bU1iQpeZGN5G2Y1jDpNtpGx1N", + "RHmJn9fdyV4IGVxufzDf7UxRaSFt3Bb6tmk15rpE2OP13Cb/1pewra6z1oZf3VkbGbFzXqu6T0Gr24p2", + "g553vpSTbXQdIeHb4ILPa0G1P3HGeEZosI0tW5OQNSO4Zp/idS/6U7gobz6i4usv+Jy9FpqcLIsclsA1", + 
"ZFfLLiNtDuelx1Zxu59i4ER/NwWtK/NDie8TZytdZKeA3+PeE5QKBD8dlVi7z8jq21jNv6Mkf1p5W0My", + "vJXLX45clj7d91YEf/4i+KsvdjXXGMM0UCRfwjncFMP1TXxPgdxRBpwNq2U42OZXxqt3e5XqhZC+s+ut", + "FP9CnaJ2JwcHYg2x0OyyxLopD5Ft8VlBP8zOkOcRS0PfQR1XsV4MiyKLlGELvJNMjV1QmTVOuFN8q/h8", + "1opPsNe3es+t6eELMz30aDnu1p/nQxSNfRWg1VJk4B2rYjZzTQj6tJ9m22VDnkrTZUHsl5PeOOwztoRT", + "8+bPdoqDitga7JZa1ALPIEtBKnimBkRxuFEvK4fQ0dQPwI17Nqsd8LC48oSTS5Ps26DGcYcSSBv5Cttl", + "+2YMDhkZrIghwMkByPbog/0XzWmFUJHVnHoC7mzMXbcttruEHbcBIHmDSqhtU+G/EjPywDaZKDlWklkw", + "12cfY1m13BhF1dfUlUBzkjYqSFRwdE/Oae/J2XkV6KyuZ03xu4CoT+ghIxha1Xt+uvED8JRyR/JdBGlB", + "KOEwp5qtwLv8J7cVHy8tzVy9xS0McExoltnTWG8CrEBuiCqnyug6vJmjdEc1z8seDAPWBUhmRDTNawe8", + "vSYc2XKO2+KITu0bVxRaLV5ki0jKZtSil6yuxKSYkVcsleI4n4sqFl5tlIalDSsMpKD79PeepkDekNCN", + "WRU8ZxySpeCwiZxUfPoKH8a+xpKYfR+fmYd937bkbRP+FljNeYbI5Kvi9zM5/VcKdGmtVkIhpLndTjc2", + "/wLpf8+j5A/Nhqfdk7ThaeDUcg+DgRBfsZ+PfDpC3Vam780PjT9d2Vf3plqUOhMXwSxoA7DhjEMqPqLy", + "vWeSR21za2ZPMnW9Vrfr9DYFeIidrepppM99/bC/1f3fNAnbOWdCInE5jSuQqnWRu83E/ktlYg/e9724", + "sRmyVLs4WqkOq7u8FhnYcet0XHP0Y53GuMiAKA9ES2WpwiLjKUNeftXvtZI4UlrOF5qUBdEili5Sf5jQ", + "1DLZxF6E4hMGtf3tdQmnW9AVEJpLoJm5vAInYmoWXUtSXCRV2F3B55y44M+o0hTAVUiRglKQJb6z2i7Q", + "/Hs2VF1vwRMCjgBXsxAlyIzKKwN7vtoJ5zlsErwMK3L3p1/N1frG4bVK43bE2pruEfS20667UA+bfhvB", + "tScPyc4mdFuqxRQ5sSxycElyERTuhZPe/WtD1NnFq6MFs8jYNVO8n+RqBFSBes30flVoyyIx8rsL4lP7", + "9IwtURPjlAtvgYwNllOlk11s2bwUrkWZFQScMMaJceCeq+lLqvRbly+dYS1lK05wHqtjmyn6ATZS1N4t", + "IiP/ah/Gxk6NPOSqVMSN4HOgIIutgcN6y1yvYV3NhbVT/NhVkpW1Be4auQ9LwfgOWUF7OUJ14Pc3w0UW", + "h5ZK6kwZXVQ2gKgRsQ2QU/9WgN3Q4d8DCFM1oi3hYLuckHKmQuRAuc1VFUVhuIVOSl5914emU/v2sf6l", + "frdLXLYWhpXbmQAVJsA5yC8sZhWachdUEQcHWdJzlyM3d+3CuzCbw5hgmaVkG+Wjcde8FR6BnYe0LOaS", + "ZpBkkNOI0eUX+5jYx9sGwB335JmshIZkijVS4pteU7LsNSZVQwscT8WUR4JPSGqOoLk81wTivt4xcgY4", + "dow5OTq6Uw2Fc0W3yI+Hy7Zb3WPAMmOYHXf0gCA7jj4E4B48VENfHhX4cVKbD9pT/BcoN0GlR+w/yQZU", + "3xLq8fdaQNvwFwqwhqRosfcWB46yzV42toOP9B3ZmKnxi3QLtKOcrjHJrmlqDS6Ak8tcbo8uKNPJTEir", + 
"SCd0pkHuDJ3/J2Xece7Td4WrukJwBCc33TjI5MOmrY6LWBCIExeGRFwlKSPDKHlIloyX2j4RpR7bHhMS", + "aLowSntog7UjYdt9V6RJwpzKLMeW7LNKbgppiz7ploBHoCP5iM0bv1n3CyEHda5plo6kTJOSa5YH3fuq", + "e/vnZ728tUjcWiRuLRK3Folbi8StReLWInFrkbi1SNxaJG4tErcWib+vReJTlUlKvMbhKzZywZN2MOVt", + "LOVfqqp8Jaq8gQStExeUIVsKqhT02y32MARpoDnigOXQH91tg07Pnh+/JEqUMgWSGggZJ0VOzdUA1tr3", + "3ydTquCbxz7V0IpOuiTTjeEdRr6aF756RE5/PPYVRxeuMmbz3bvHNl6NKL3J4Z7rPQo8s5qob0IK3CDd", + "9SClXiSkLk/SGihmLMfIeEWe49vPYAW5KEDaYoZEyxK6Fp8zoPlTh5sdBp9/msldqO0fZrQ/xg2jl0Pb", + "khZezfdrpYpQm3FJngU5mH/MaK7gj740TDvekhajSO3iSvBZUxAyk+9FtmmdELNrR7iBzbNR1x1lnMpN", + "pEpUNwWiTRpaGHblCKtry/p48Oq4XaLtktkuCotp67YMfnz0PiqPloWtNqwzlE3UnbXoZBTLMW3XQh1V", + "AA4qDIhpEnZPyFv73actA4gQuSNWM/PPJoqx+WbFNPBdc4lwrOdLzSXwiI+eXjz7Y0PYWZkCYVoRX2B3", + "t3gZj9aJGWkOPHEMKJmKbJM02NeoIYUypqhSsJzulkQh/8QTVwkf82S7nPo0YuRZsLhtPDkkmnXiGHAP", + "d95oGMybK2zhiI49Bxi/bhbdx0ZDEIjjTzGjUov37cv06mk2t4zvlvEFp7GlETDuCpK3mcjkGhmf3MiS", + "9/O852tISwNceJLvonUeXXKw1g0nawbTcj43t4Wujw7b6OB4TPBPxArtcodywf0oyA7+1sfYXzVJvT1c", + "l7sEeeN3fWXGe7gdlG/QmbEsKN94ly8kii3L3OLQtlE9LKO1NcNjJaZr21+fVfuNN/kFtlsnapu/W7SQ", + "C6qI3V/ISMkzl/HUqW295sPrnNihz9a8ZtNba5rY9UZW5+YdIiL8LjdTzRUpQCZ6ze2Bahwm18HAntxP", + "Wkv7VmzcnNiwierQw2C71fhrhnAg6SEDvobiI+i5VCfmNTox0WY6YeMZWjT6U1zC5kz2zYMGlnSGb8aX", + "1OYW5z+FvCCUpDlD76rgSssy1e84Rf9NsLBJN/bEG6r7ed9T/0rchRjx8Lmh3nGKQUaVVyfKA2cQcWG8", + "APAsVpXzOSjDR0MCmgG84+4txknJzS1MzMiSpVIkNrXWnC+ju0zsm0u6ITOsaCLInyAFmRqpH+y6tSUr", + "zfLcBbuYaYiYveNUkxyo0uQVMxzYDOfLKVQhZ6AvhDyvsBDv1TMHDoqpJG6Y+cE+xXY4bvneAIjGTPu4", + "bmNxs31wPOws64X85BnGqGE15pypsP9iG/Yb840vGU+iRHa2AOLCxdq0Re5iDThHQPeajiO9gHfcSD8t", + "CHJ8qi9HDm0PUOcs2tPRoprGRrQcRX6tg65/B+EyJMJkbt0uf6EU0oAOvGcTN97W12/t/Z4ulobIBWwN", + "2ieQ7VPXPrHnJXeBaBjJWgVu3BtnDZC3+i++/LKSh79LejQe7DbZHbDLrpoN8hBvfsPHhOaCz21dRXO7", + "FLhPjBelxgDw6zTgwYrmiViBlCwDNXClTPDnK5r/XH32cTyCNaSJljSFxFoUhmLtzHxj6RQbDXKmGc0T", + "vFUPBQhO7Fen9qMd8jjoNrpcQsaohnxDCgkpZLYQGVOkvs9PbIEGki4on6PolqKcL+xrdpwLkFA1ZjRX", + 
"6PYQ8UIwa57YonRdGI9do+awbi/QdBFpHIMCztzZPUFljZ5UA/egUXK075I+HvUq2gapqzp0ziKnyWYG", + "aBENfSDATz3xIWq03hL9LdF/6UQfK6mIqJu1rBUWX+G2XLNZ67oLiN6gleyTVBe+LdH/Vy/R7zmQIpRI", + "2riDxHvDUUWYJhdYFmkKxMivEq3zruGeu69jpl1w1F2lTeXa86ULyrirqVPlNSAc5kq8XDKtfXvaazFs", + "WmaGFk2DDkhLyfQGby20YL+fg/n/e6P2K5Arf6EpZT56MlpoXTw5OspFSvOFUPpo9HEcPlOth+8r+D/4", + "u0gh2crcrz4i2EKyOeNG5l7Q+RxkbUIcPZo8GH38vwEAAP//fOeJ6HDHAQA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go index 3bccd3f4ed..a9efeca1cb 100644 --- a/data/basics/userBalance.go +++ b/data/basics/userBalance.go @@ -19,7 +19,6 @@ package basics import ( "encoding/binary" "fmt" - "reflect" "slices" "github.com/algorand/go-algorand/config" @@ -111,7 +110,10 @@ type VotingData struct { type OnlineAccountData struct { MicroAlgosWithRewards MicroAlgos VotingData + IncentiveEligible bool + LastProposed Round + LastHeartbeat Round } // AccountData contains the data associated with a given address. @@ -561,6 +563,8 @@ func (u AccountData) OnlineAccountData() OnlineAccountData { VoteKeyDilution: u.VoteKeyDilution, }, IncentiveEligible: u.IncentiveEligible, + LastProposed: u.LastProposed, + LastHeartbeat: u.LastHeartbeat, } } @@ -581,15 +585,6 @@ func (u OnlineAccountData) KeyDilution(proto config.ConsensusParams) uint64 { return proto.DefaultKeyDilution } -// IsZero checks if an AccountData value is the same as its zero value. -func (u AccountData) IsZero() bool { - if u.Assets != nil && len(u.Assets) == 0 { - u.Assets = nil - } - - return reflect.DeepEqual(u, AccountData{}) -} - // NormalizedOnlineBalance returns a “normalized” balance for this account. 
// // The normalization compensates for rewards that have not yet been applied, diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go index 7f2632f3f0..af2068a1fe 100644 --- a/data/bookkeeping/block.go +++ b/data/bookkeeping/block.go @@ -72,8 +72,8 @@ type ( // begins as a consensus parameter value, and decays periodically. Bonus basics.MicroAlgos `codec:"bi"` - // ProposerPayout is the amount that should be moved from the FeeSink to - // the Proposer at the start of the next block. It is basically the + // ProposerPayout is the amount that is moved from the FeeSink to + // the Proposer in this block. It is basically the // bonus + the payouts percent of FeesCollected, but may be zero'd by // proposer ineligibility. ProposerPayout basics.MicroAlgos `codec:"pp"` diff --git a/data/bookkeeping/block_test.go b/data/bookkeeping/block_test.go index 3c305b3c3b..bc8aec6a7a 100644 --- a/data/bookkeeping/block_test.go +++ b/data/bookkeeping/block_test.go @@ -1013,11 +1013,11 @@ func TestFirstYearsBonus(t *testing.T) { fmt.Printf("paid %d algos\n", suma) fmt.Printf("bonus start: %d end: %d\n", plan.BaseAmount, bonus) - // pays about 88M algos - a.InDelta(88_500_000, suma, 100_000) + // pays about 103.5M algos + a.InDelta(103_500_000, suma, 100_000) - // decline about 35% - a.InDelta(0.65, float64(bonus)/float64(plan.BaseAmount), 0.01) + // decline about 10% + a.InDelta(0.90, float64(bonus)/float64(plan.BaseAmount), 0.01) // year 2 for i := 0; i < yearRounds; i++ { @@ -1033,11 +1033,11 @@ func TestFirstYearsBonus(t *testing.T) { fmt.Printf("paid %d algos after 2 years\n", sum2) fmt.Printf("bonus end: %d\n", bonus) - // pays about 146M algos (total for 2 years) - a.InDelta(145_700_000, sum2, 100_000) + // pays about 196M algos (total for 2 years) + a.InDelta(196_300_000, sum2, 100_000) - // decline about 58% - a.InDelta(0.42, float64(bonus)/float64(plan.BaseAmount), 0.01) + // decline to about 81% + a.InDelta(0.81, float64(bonus)/float64(plan.BaseAmount), 0.01) // 
year 3 for i := 0; i < yearRounds; i++ { @@ -1053,9 +1053,9 @@ func TestFirstYearsBonus(t *testing.T) { fmt.Printf("paid %d algos after 3 years\n", sum3) fmt.Printf("bonus end: %d\n", bonus) - // pays about 182M algos (total for 3 years) - a.InDelta(182_600_000, sum3, 100_000) + // pays about 279M algos (total for 3 years) + a.InDelta(279_500_000, sum3, 100_000) - // declined to about 27% (but foundation funding probably gone anyway) - a.InDelta(0.27, float64(bonus)/float64(plan.BaseAmount), 0.01) + // declined to about 72% (but foundation funding probably gone anyway) + a.InDelta(0.72, float64(bonus)/float64(plan.BaseAmount), 0.01) } diff --git a/data/committee/common_test.go b/data/committee/common_test.go index 1f7e7bd373..8566a9cd2a 100644 --- a/data/committee/common_test.go +++ b/data/committee/common_test.go @@ -24,7 +24,6 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/protocol" ) @@ -33,40 +32,33 @@ type selectionParameterListFn func(addr []basics.Address) (bool, []BalanceRecord var proto = config.Consensus[protocol.ConsensusCurrentVersion] -func newAccount(t testing.TB, gen io.Reader, latest basics.Round, keyBatchesForward uint) (basics.Address, *crypto.SignatureSecrets, *crypto.VrfPrivkey, *crypto.OneTimeSignatureSecrets) { +func newAccount(t testing.TB, gen io.Reader) (basics.Address, *crypto.SignatureSecrets, *crypto.VrfPrivkey) { var seed crypto.Seed gen.Read(seed[:]) s := crypto.GenerateSignatureSecrets(seed) _, v := crypto.VrfKeygenFromSeed(seed) - o := crypto.GenerateOneTimeSignatureSecrets(basics.OneTimeIDForRound(latest, proto.DefaultKeyDilution).Batch, uint64(keyBatchesForward)) addr := basics.Address(s.SignatureVerifier) - return addr, s, &v, o + return addr, s, &v } -func signTx(s *crypto.SignatureSecrets, t transactions.Transaction) transactions.SignedTxn { - return 
t.Sign(s) +// testingenv creates a random set of participating accounts and the associated +// selection parameters for use testing committee membership and credential +// validation. seedGen is provided as an external source of randomness for the +// selection seed; if the caller persists seedGen between calls to testingenv, +// each iteration that calls testingenv will exercise a new selection seed. +// formerly, testingenv, generated transactions and one-time secrets as well, +// but they were not used by the tests. +func testingenv(t testing.TB, numAccounts, numTxs int, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey) { + return testingenvMoreKeys(t, numAccounts, numTxs, seedGen) } -// testingenv creates a random set of participating accounts and random transactions between them, and -// the associated selection parameters for use testing committee membership and credential validation. -// seedGen is provided as an external source of randomness for the selection seed and transaction notes; -// if the caller persists seedGen between calls to testingenv, each iteration that calls testingenv will -// exercise a new selection seed. 
-func testingenv(t testing.TB, numAccounts, numTxs int, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) { - return testingenvMoreKeys(t, numAccounts, numTxs, uint(5), seedGen) -} - -func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward uint, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) { +func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey) { if seedGen == nil { seedGen = rand.New(rand.NewSource(1)) // same source as setting GODEBUG=randautoseed=0, same as pre-Go 1.20 default seed } P := numAccounts // n accounts - TXs := numTxs // n txns maxMoneyAtStart := 100000 // max money start minMoneyAtStart := 10000 // max money start - transferredMoney := 100 // max money/txn - maxFee := 10 // max maxFee/txn - E := basics.Round(50) // max round // generate accounts genesis := make(map[basics.Address]basics.AccountData) @@ -74,16 +66,14 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward addrs := make([]basics.Address, P) secrets := make([]*crypto.SignatureSecrets, P) vrfSecrets := make([]*crypto.VrfPrivkey, P) - otSecrets := make([]*crypto.OneTimeSignatureSecrets, P) proto := config.Consensus[protocol.ConsensusCurrentVersion] lookback := basics.Round(2*proto.SeedRefreshInterval + proto.SeedLookback + 1) var total basics.MicroAlgos for i := 0; i < P; i++ { - addr, sigSec, vrfSec, otSec := newAccount(t, gen, lookback, keyBatchesForward) + addr, sigSec, vrfSec := newAccount(t, gen) addrs[i] = addr secrets[i] = sigSec vrfSecrets[i] = 
vrfSec - otSecrets[i] = otSec startamt := uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart))) short := addr @@ -91,7 +81,6 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward Status: basics.Online, MicroAlgos: basics.MicroAlgos{Raw: startamt}, SelectionID: vrfSec.Pubkey(), - VoteID: otSec.OneTimeSignatureVerifier, } total.Raw += startamt } @@ -99,32 +88,8 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward var seed Seed seedGen.Read(seed[:]) - tx := make([]transactions.SignedTxn, TXs) - for i := 0; i < TXs; i++ { - send := gen.Int() % P - recv := gen.Int() % P - - saddr := addrs[send] - raddr := addrs[recv] - amt := basics.MicroAlgos{Raw: uint64(gen.Int() % transferredMoney)} - fee := basics.MicroAlgos{Raw: uint64(gen.Int() % maxFee)} - - t := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: saddr, - Fee: fee, - FirstValid: 0, - LastValid: E, - Note: make([]byte, 4), - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: raddr, - Amount: amt, - }, - } - seedGen.Read(t.Note) // to match output from previous versions, which shared global RNG for seed & note - tx[i] = t.Sign(secrets[send]) + for i := 0; i < numTxs; i++ { + seedGen.Read(make([]byte, 4)) // to match output from previous versions, which shared global RNG for seed & note } selParams := func(addr basics.Address) (bool, BalanceRecord, Seed, basics.MicroAlgos) { @@ -149,7 +114,7 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward return } - return selParams, selParamsList, lookback, addrs, secrets, vrfSecrets, otSecrets, tx + return selParams, selParamsList, lookback, addrs, secrets, vrfSecrets } /* TODO deprecate these types after they have been removed successfully */ diff --git a/data/committee/credential_test.go b/data/committee/credential_test.go index da2be625cd..bbabac62e9 100644 --- a/data/committee/credential_test.go +++ 
b/data/committee/credential_test.go @@ -35,7 +35,7 @@ func TestAccountSelected(t *testing.T) { seedGen := rand.New(rand.NewSource(1)) N := 1 for i := 0; i < N; i++ { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, seedGen) period := Period(0) leaders := uint64(0) @@ -98,7 +98,7 @@ func TestAccountSelected(t *testing.T) { func TestRichAccountSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 10, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 10, 2000, nil) period := Period(0) ok, record, selectionSeed, _ := selParams(addresses[0]) @@ -159,7 +159,7 @@ func TestPoorAccountSelectedLeaders(t *testing.T) { failsLeaders := 0 leaders := make([]uint64, N) for i := 0; i < N; i++ { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, seedGen) period := Period(0) for j := range addresses { ok, record, selectionSeed, _ := selParams(addresses[j]) @@ -207,7 +207,7 @@ func TestPoorAccountSelectedCommittee(t *testing.T) { N := 1 committee := uint64(0) for i := 0; i < N; i++ { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, seedGen) period := Period(0) step := Cert @@ -250,10 +250,9 @@ func TestNoMoneyAccountNotSelected(t *testing.T) { seedGen := rand.New(rand.NewSource(1)) N := 1 for i := 0; i < N; i++ { - selParams, _, round, addresses, _, _, _, _ := testingenv(t, 10, 2000, seedGen) - lookback := basics.Round(2*proto.SeedRefreshInterval + proto.SeedLookback + 1) + selParams, _, round, addresses, _, _ := testingenv(t, 10, 2000, seedGen) gen := rand.New(rand.NewSource(2)) - _, _, zeroVRFSecret, _ := newAccount(t, gen, lookback, 5) + 
_, _, zeroVRFSecret := newAccount(t, gen) period := Period(0) ok, record, selectionSeed, _ := selParams(addresses[i]) if !ok { @@ -281,7 +280,7 @@ func TestNoMoneyAccountNotSelected(t *testing.T) { func TestLeadersSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, nil) period := Period(0) step := Propose @@ -313,7 +312,7 @@ func TestLeadersSelected(t *testing.T) { func TestCommitteeSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, nil) period := Period(0) step := Soft @@ -345,7 +344,7 @@ func TestCommitteeSelected(t *testing.T) { func TestAccountNotSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, nil) period := Period(0) leaders := uint64(0) for i := range addresses { @@ -375,7 +374,7 @@ func TestAccountNotSelected(t *testing.T) { // TODO update to remove VRF verification overhead func BenchmarkSortition(b *testing.B) { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(b, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(b, 100, 2000, nil) period := Period(0) step := Soft diff --git a/data/transactions/heartbeat.go b/data/transactions/heartbeat.go new file mode 100644 index 0000000000..2c3120f1a1 --- /dev/null +++ b/data/transactions/heartbeat.go @@ -0,0 +1,49 @@ +// Copyright (C) 2019-2024 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package transactions + +import ( + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" +) + +// HeartbeatTxnFields captures the fields used for an account to prove it is +// online (really, it proves that an entity with the account's part keys is able +// to submit transactions, so it should be able to propose/vote.) +type HeartbeatTxnFields struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // HbAddress is the account this txn is proving onlineness for. + HbAddress basics.Address `codec:"a"` + + // HbProof is a signature using HeartbeatAddress's partkey, thereby showing it is online. + HbProof crypto.HeartbeatProof `codec:"prf"` + + // The final three fields are included to allow early, concurrent check of + // the HbProof. + + // HbSeed must be the block seed for the this transaction's firstValid + // block. It is the message that must be signed with HbAddress's part key. + HbSeed committee.Seed `codec:"sd"` + + // HbVoteID must match the HbAddress account's current VoteID. + HbVoteID crypto.OneTimeSignatureVerifier `codec:"vid"` + + // HbKeyDilution must match HbAddress account's current KeyDilution. 
+ HbKeyDilution uint64 `codec:"kd"` +} diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 9ba52138ec..cc8034fdfc 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -2738,6 +2738,16 @@ func AssembleString(text string) (*OpStream, error) { return AssembleStringWithVersion(text, assemblerNoVersion) } +// MustAssemble assembles a program and panics on error. It is useful for +// defining globals. +func MustAssemble(text string) []byte { + ops, err := AssembleString(text) + if err != nil { + panic(err) + } + return ops.Program +} + // AssembleStringWithVersion takes an entire program in a string and // assembles it to bytecode using the assembler version specified. If // version is assemblerNoVersion it uses #pragma version or fallsback diff --git a/data/transactions/logic/crypto_test.go b/data/transactions/logic/crypto_test.go index c0ffd76242..5c14e23049 100644 --- a/data/transactions/logic/crypto_test.go +++ b/data/transactions/logic/crypto_test.go @@ -295,13 +295,17 @@ pop // output`, "int 1"}, } } +func randSeed() crypto.Seed { + var s crypto.Seed + crypto.RandBytes(s[:]) + return s +} + func TestEd25519verify(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - var s crypto.Seed - crypto.RandBytes(s[:]) - c := crypto.GenerateSignatureSecrets(s) + c := crypto.GenerateSignatureSecrets(randSeed()) msg := "62fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd" data, err := hex.DecodeString(msg) require.NoError(t, err) @@ -340,9 +344,7 @@ func TestEd25519VerifyBare(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - var s crypto.Seed - crypto.RandBytes(s[:]) - c := crypto.GenerateSignatureSecrets(s) + c := crypto.GenerateSignatureSecrets(randSeed()) msg := "62fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd" data, err := hex.DecodeString(msg) require.NoError(t, err) @@ -824,9 +826,7 @@ func BenchmarkEd25519Verifyx1(b *testing.B) { 
crypto.RandBytes(buffer[:]) data = append(data, buffer) - var s crypto.Seed //generate programs and signatures - crypto.RandBytes(s[:]) - secret := crypto.GenerateSignatureSecrets(s) + secret := crypto.GenerateSignatureSecrets(randSeed()) //generate programs and signatures pk := basics.Address(secret.SignatureVerifier) pkStr := pk.String() ops, err := AssembleStringWithVersion(fmt.Sprintf(`arg 0 diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 550a464d56..27a64f1b82 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -433,7 +433,7 @@ func TestBlankStackSufficient(t *testing.T) { spec := opsByOpcode[v][i] argLen := len(spec.Arg.Types) blankStackLen := len(blankStack) - require.GreaterOrEqual(t, blankStackLen, argLen) + require.GreaterOrEqual(t, blankStackLen, argLen, spec.Name) } }) } @@ -3232,7 +3232,21 @@ func TestIllegalOp(t *testing.T) { } } -func TestShortProgram(t *testing.T) { +func TestShortSimple(t *testing.T) { + partitiontest.PartitionTest(t) + + t.Parallel() + for v := uint64(1); v <= AssemblerMaxVersion; v++ { + t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { + ops := testProg(t, `int 8; store 7`, v) + testLogicBytes(t, ops.Program[:len(ops.Program)-1], nil, + "program ends short of immediate values", + "program ends short of immediate values") + }) + } +} + +func TestShortBranch(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index b4d3ca53c9..3ec054919a 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -1548,7 +1548,7 @@ func (fs voterParamsFieldSpec) Note() string { } var voterParamsFieldSpecs = [...]voterParamsFieldSpec{ - {VoterBalance, StackUint64, 6, "Online stake in microalgos"}, + {VoterBalance, StackUint64, incentiveVersion, "Online stake in microalgos"}, {VoterIncentiveEligible, StackBoolean, incentiveVersion, "Had 
this account opted into block payouts"}, } diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go index 3dcead5e51..8b75f40855 100644 --- a/data/transactions/logic/ledger_test.go +++ b/data/transactions/logic/ledger_test.go @@ -46,9 +46,14 @@ import ( ) type balanceRecord struct { - addr basics.Address - auth basics.Address - balance uint64 + addr basics.Address + auth basics.Address + balance uint64 + voting basics.VotingData + + proposed basics.Round // The last round that this account proposed the accepted block + heartbeat basics.Round // The last round that this account sent a heartbeat to show it was online. + locals map[basics.AppIndex]basics.TealKeyValue holdings map[basics.AssetIndex]basics.AssetHolding mods map[basics.AppIndex]map[string]basics.ValueDelta @@ -312,7 +317,11 @@ func (l *Ledger) AccountData(addr basics.Address) (ledgercore.AccountData, error TotalBoxes: uint64(boxesTotal), TotalBoxBytes: uint64(boxBytesTotal), + + LastProposed: br.proposed, + LastHeartbeat: br.heartbeat, }, + VotingData: br.voting, }, nil } @@ -329,6 +338,9 @@ func (l *Ledger) AgreementData(addr basics.Address) (basics.OnlineAccountData, e // paid. Here, we ignore that for simple tests. return basics.OnlineAccountData{ MicroAlgosWithRewards: ad.MicroAlgos, + // VotingData is not exposed to `voter_params_get`, the thinking is that + // we don't want them used as "free" storage. And thus far, we don't + // have compelling reasons to examine them in AVM. VotingData: basics.VotingData{ VoteID: ad.VoteID, SelectionID: ad.SelectionID, @@ -940,7 +952,7 @@ func (l *Ledger) Perform(gi int, ep *EvalParams) error { } // Get returns the AccountData of an address. This test ledger does -// not handle rewards, so the pening rewards flag is ignored. +// not handle rewards, so withPendingRewards is ignored. 
func (l *Ledger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) { br, ok := l.balances[addr] if !ok { @@ -952,6 +964,17 @@ func (l *Ledger) Get(addr basics.Address, withPendingRewards bool) (basics.Accou Assets: map[basics.AssetIndex]basics.AssetHolding{}, AppLocalStates: map[basics.AppIndex]basics.AppLocalState{}, AppParams: map[basics.AppIndex]basics.AppParams{}, + LastProposed: br.proposed, + LastHeartbeat: br.heartbeat, + // The fields below are not exposed to `acct_params_get`, the thinking + // is that we don't want them used as "free" storage. And thus far, we + // don't have compelling reasons to examine them in AVM. + VoteID: br.voting.VoteID, + SelectionID: br.voting.SelectionID, + StateProofID: br.voting.StateProofID, + VoteFirstValid: br.voting.VoteFirstValid, + VoteLastValid: br.voting.VoteLastValid, + VoteKeyDilution: br.voting.VoteKeyDilution, }, nil } diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go index 15cd34ef2d..ceb12375be 100644 --- a/data/transactions/msgp_gen.go +++ b/data/transactions/msgp_gen.go @@ -12,6 +12,7 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/protocol" ) @@ -97,6 +98,16 @@ import ( // |-----> (*) MsgIsZero // |-----> HeaderMaxSize() // +// HeartbeatTxnFields +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// |-----> HeartbeatTxnFieldsMaxSize() +// // KeyregTxnFields // |-----> (*) MarshalMsg // |-----> (*) CanMarshalMsg @@ -2907,6 +2918,218 @@ func HeaderMaxSize() (s int) { return } +// MarshalMsg implements msgp.Marshaler +func (z *HeartbeatTxnFields) 
MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 6 bits */ + if (*z).HbAddress.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).HbKeyDilution == 0 { + zb0001Len-- + zb0001Mask |= 0x4 + } + if (*z).HbProof.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x8 + } + if (*z).HbSeed.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x10 + } + if (*z).HbVoteID.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x20 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "a" + o = append(o, 0xa1, 0x61) + o = (*z).HbAddress.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "kd" + o = append(o, 0xa2, 0x6b, 0x64) + o = msgp.AppendUint64(o, (*z).HbKeyDilution) + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "prf" + o = append(o, 0xa3, 0x70, 0x72, 0x66) + o = (*z).HbProof.MarshalMsg(o) + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "sd" + o = append(o, 0xa2, 0x73, 0x64) + o = (*z).HbSeed.MarshalMsg(o) + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "vid" + o = append(o, 0xa3, 0x76, 0x69, 0x64) + o = (*z).HbVoteID.MarshalMsg(o) + } + } + return +} + +func (_ *HeartbeatTxnFields) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatTxnFields) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *HeartbeatTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} + return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return 
+ } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbAddress.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbAddress") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbProof.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbProof") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbSeed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbSeed") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbVoteID.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbVoteID") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).HbKeyDilution, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbKeyDilution") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = HeartbeatTxnFields{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "a": + bts, err = (*z).HbAddress.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbAddress") + return + } + case "prf": + bts, err = (*z).HbProof.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbProof") + return + } + case "sd": + bts, err = (*z).HbSeed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbSeed") + return + } + case "vid": + bts, err = (*z).HbVoteID.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbVoteID") + return + } + case "kd": + (*z).HbKeyDilution, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + 
err = msgp.WrapError(err, "HbKeyDilution") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *HeartbeatTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *HeartbeatTxnFields) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatTxnFields) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *HeartbeatTxnFields) Msgsize() (s int) { + s = 1 + 2 + (*z).HbAddress.Msgsize() + 4 + (*z).HbProof.Msgsize() + 3 + (*z).HbSeed.Msgsize() + 4 + (*z).HbVoteID.Msgsize() + 3 + msgp.Uint64Size + return +} + +// MsgIsZero returns whether this is a zero value +func (z *HeartbeatTxnFields) MsgIsZero() bool { + return ((*z).HbAddress.MsgIsZero()) && ((*z).HbProof.MsgIsZero()) && ((*z).HbSeed.MsgIsZero()) && ((*z).HbVoteID.MsgIsZero()) && ((*z).HbKeyDilution == 0) +} + +// MaxSize returns a maximum valid message size for this message type +func HeartbeatTxnFieldsMaxSize() (s int) { + s = 1 + 2 + basics.AddressMaxSize() + 4 + crypto.HeartbeatProofMaxSize() + 3 + committee.SeedMaxSize() + 4 + crypto.OneTimeSignatureVerifierMaxSize() + 3 + msgp.Uint64Size + return +} + // MarshalMsg implements msgp.Marshaler func (z *KeyregTxnFields) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) @@ -4982,8 +5205,8 @@ func StateProofTxnFieldsMaxSize() (s int) { func (z *Transaction) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0007Len := uint32(46) - var zb0007Mask uint64 /* 55 bits */ + zb0007Len := uint32(47) + var zb0007Mask uint64 /* 56 bits */ if (*z).AssetTransferTxnFields.AssetAmount == 0 { zb0007Len-- zb0007Mask |= 0x200 @@ -5096,78 +5319,82 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) { zb0007Len-- zb0007Mask |= 
0x1000000000 } - if (*z).Header.LastValid.MsgIsZero() { + if (*z).HeartbeatTxnFields == nil { zb0007Len-- zb0007Mask |= 0x2000000000 } - if (*z).Header.Lease == ([32]byte{}) { + if (*z).Header.LastValid.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x4000000000 } - if (*z).KeyregTxnFields.Nonparticipation == false { + if (*z).Header.Lease == ([32]byte{}) { zb0007Len-- zb0007Mask |= 0x8000000000 } - if len((*z).Header.Note) == 0 { + if (*z).KeyregTxnFields.Nonparticipation == false { zb0007Len-- zb0007Mask |= 0x10000000000 } - if (*z).PaymentTxnFields.Receiver.MsgIsZero() { + if len((*z).Header.Note) == 0 { zb0007Len-- zb0007Mask |= 0x20000000000 } - if (*z).Header.RekeyTo.MsgIsZero() { + if (*z).PaymentTxnFields.Receiver.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x40000000000 } - if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() { + if (*z).Header.RekeyTo.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x80000000000 } - if (*z).Header.Sender.MsgIsZero() { + if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x100000000000 } - if (*z).StateProofTxnFields.StateProof.MsgIsZero() { + if (*z).Header.Sender.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x200000000000 } - if (*z).StateProofTxnFields.Message.MsgIsZero() { + if (*z).StateProofTxnFields.StateProof.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x400000000000 } - if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() { + if (*z).StateProofTxnFields.Message.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x800000000000 } - if (*z).StateProofTxnFields.StateProofType.MsgIsZero() { + if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x1000000000000 } - if (*z).Type.MsgIsZero() { + if (*z).StateProofTxnFields.StateProofType.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x2000000000000 } - if (*z).KeyregTxnFields.VoteFirst.MsgIsZero() { + if (*z).Type.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x4000000000000 } - if (*z).KeyregTxnFields.VoteKeyDilution == 0 { + if (*z).KeyregTxnFields.VoteFirst.MsgIsZero() { 
zb0007Len-- zb0007Mask |= 0x8000000000000 } - if (*z).KeyregTxnFields.VotePK.MsgIsZero() { + if (*z).KeyregTxnFields.VoteKeyDilution == 0 { zb0007Len-- zb0007Mask |= 0x10000000000000 } - if (*z).KeyregTxnFields.VoteLast.MsgIsZero() { + if (*z).KeyregTxnFields.VotePK.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x20000000000000 } - if (*z).AssetTransferTxnFields.XferAsset.MsgIsZero() { + if (*z).KeyregTxnFields.VoteLast.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x40000000000000 } + if (*z).AssetTransferTxnFields.XferAsset.MsgIsZero() { + zb0007Len-- + zb0007Mask |= 0x80000000000000 + } // variable map header, size zb0007Len o = msgp.AppendMapHeader(o, zb0007Len) if zb0007Len != 0 { @@ -5369,91 +5596,100 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) { o = (*z).Header.Group.MarshalMsg(o) } if (zb0007Mask & 0x2000000000) == 0 { // if not empty + // string "hb" + o = append(o, 0xa2, 0x68, 0x62) + if (*z).HeartbeatTxnFields == nil { + o = msgp.AppendNil(o) + } else { + o = (*z).HeartbeatTxnFields.MarshalMsg(o) + } + } + if (zb0007Mask & 0x4000000000) == 0 { // if not empty // string "lv" o = append(o, 0xa2, 0x6c, 0x76) o = (*z).Header.LastValid.MarshalMsg(o) } - if (zb0007Mask & 0x4000000000) == 0 { // if not empty + if (zb0007Mask & 0x8000000000) == 0 { // if not empty // string "lx" o = append(o, 0xa2, 0x6c, 0x78) o = msgp.AppendBytes(o, ((*z).Header.Lease)[:]) } - if (zb0007Mask & 0x8000000000) == 0 { // if not empty + if (zb0007Mask & 0x10000000000) == 0 { // if not empty // string "nonpart" o = append(o, 0xa7, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74) o = msgp.AppendBool(o, (*z).KeyregTxnFields.Nonparticipation) } - if (zb0007Mask & 0x10000000000) == 0 { // if not empty + if (zb0007Mask & 0x20000000000) == 0 { // if not empty // string "note" o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65) o = msgp.AppendBytes(o, (*z).Header.Note) } - if (zb0007Mask & 0x20000000000) == 0 { // if not empty + if (zb0007Mask & 0x40000000000) == 0 { // if not empty // string "rcv" o = 
append(o, 0xa3, 0x72, 0x63, 0x76) o = (*z).PaymentTxnFields.Receiver.MarshalMsg(o) } - if (zb0007Mask & 0x40000000000) == 0 { // if not empty + if (zb0007Mask & 0x80000000000) == 0 { // if not empty // string "rekey" o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79) o = (*z).Header.RekeyTo.MarshalMsg(o) } - if (zb0007Mask & 0x80000000000) == 0 { // if not empty + if (zb0007Mask & 0x100000000000) == 0 { // if not empty // string "selkey" o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79) o = (*z).KeyregTxnFields.SelectionPK.MarshalMsg(o) } - if (zb0007Mask & 0x100000000000) == 0 { // if not empty + if (zb0007Mask & 0x200000000000) == 0 { // if not empty // string "snd" o = append(o, 0xa3, 0x73, 0x6e, 0x64) o = (*z).Header.Sender.MarshalMsg(o) } - if (zb0007Mask & 0x200000000000) == 0 { // if not empty + if (zb0007Mask & 0x400000000000) == 0 { // if not empty // string "sp" o = append(o, 0xa2, 0x73, 0x70) o = (*z).StateProofTxnFields.StateProof.MarshalMsg(o) } - if (zb0007Mask & 0x400000000000) == 0 { // if not empty + if (zb0007Mask & 0x800000000000) == 0 { // if not empty // string "spmsg" o = append(o, 0xa5, 0x73, 0x70, 0x6d, 0x73, 0x67) o = (*z).StateProofTxnFields.Message.MarshalMsg(o) } - if (zb0007Mask & 0x800000000000) == 0 { // if not empty + if (zb0007Mask & 0x1000000000000) == 0 { // if not empty // string "sprfkey" o = append(o, 0xa7, 0x73, 0x70, 0x72, 0x66, 0x6b, 0x65, 0x79) o = (*z).KeyregTxnFields.StateProofPK.MarshalMsg(o) } - if (zb0007Mask & 0x1000000000000) == 0 { // if not empty + if (zb0007Mask & 0x2000000000000) == 0 { // if not empty // string "sptype" o = append(o, 0xa6, 0x73, 0x70, 0x74, 0x79, 0x70, 0x65) o = (*z).StateProofTxnFields.StateProofType.MarshalMsg(o) } - if (zb0007Mask & 0x2000000000000) == 0 { // if not empty + if (zb0007Mask & 0x4000000000000) == 0 { // if not empty // string "type" o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) o = (*z).Type.MarshalMsg(o) } - if (zb0007Mask & 0x4000000000000) == 0 { // if not empty + if 
(zb0007Mask & 0x8000000000000) == 0 { // if not empty // string "votefst" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74) o = (*z).KeyregTxnFields.VoteFirst.MarshalMsg(o) } - if (zb0007Mask & 0x8000000000000) == 0 { // if not empty + if (zb0007Mask & 0x10000000000000) == 0 { // if not empty // string "votekd" o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64) o = msgp.AppendUint64(o, (*z).KeyregTxnFields.VoteKeyDilution) } - if (zb0007Mask & 0x10000000000000) == 0 { // if not empty + if (zb0007Mask & 0x20000000000000) == 0 { // if not empty // string "votekey" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79) o = (*z).KeyregTxnFields.VotePK.MarshalMsg(o) } - if (zb0007Mask & 0x20000000000000) == 0 { // if not empty + if (zb0007Mask & 0x40000000000000) == 0 { // if not empty // string "votelst" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74) o = (*z).KeyregTxnFields.VoteLast.MarshalMsg(o) } - if (zb0007Mask & 0x40000000000000) == 0 { // if not empty + if (zb0007Mask & 0x80000000000000) == 0 { // if not empty // string "xaid" o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64) o = (*z).AssetTransferTxnFields.XferAsset.MarshalMsg(o) @@ -6086,6 +6322,25 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) return } } + if zb0007 > 0 { + zb0007-- + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z).HeartbeatTxnFields = nil + } else { + if (*z).HeartbeatTxnFields == nil { + (*z).HeartbeatTxnFields = new(HeartbeatTxnFields) + } + bts, err = (*z).HeartbeatTxnFields.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HeartbeatTxnFields") + return + } + } + } if zb0007 > 0 { err = msgp.ErrTooManyArrayFields(zb0007) if err != nil { @@ -6618,6 +6873,23 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Message") return } + case "hb": + if msgp.IsNil(bts) { + bts, 
err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z).HeartbeatTxnFields = nil + } else { + if (*z).HeartbeatTxnFields == nil { + (*z).HeartbeatTxnFields = new(HeartbeatTxnFields) + } + bts, err = (*z).HeartbeatTxnFields.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HeartbeatTxnFields") + return + } + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -6661,13 +6933,18 @@ func (z *Transaction) Msgsize() (s int) { for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets { s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].Msgsize() } - s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 7 + (*z).StateProofTxnFields.StateProofType.Msgsize() + 3 + (*z).StateProofTxnFields.StateProof.Msgsize() + 6 + (*z).StateProofTxnFields.Message.Msgsize() + s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 7 + (*z).StateProofTxnFields.StateProofType.Msgsize() + 3 + (*z).StateProofTxnFields.StateProof.Msgsize() + 6 + (*z).StateProofTxnFields.Message.Msgsize() + 3 + if (*z).HeartbeatTxnFields == nil { + s += msgp.NilSize + } else { + s += (*z).HeartbeatTxnFields.Msgsize() + } return } // MsgIsZero returns whether this is a zero value func (z *Transaction) MsgIsZero() bool { - return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && 
((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.Boxes) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && 
((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero()) + return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.Boxes) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && 
((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero()) && ((*z).HeartbeatTxnFields == nil) } // MaxSize returns a maximum valid message size for this message type @@ -6689,7 +6966,8 @@ func TransactionMaxSize() (s int) { s += 5 // Calculating size of slice: z.ApplicationCallTxnFields.ForeignAssets s += msgp.ArrayHeaderSize + ((encodedMaxForeignAssets) * (basics.AssetIndexMaxSize())) - s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize() + s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize() + 3 + s += HeartbeatTxnFieldsMaxSize() return } diff --git a/data/transactions/msgp_gen_test.go b/data/transactions/msgp_gen_test.go index 0ce6b29c38..49ed14f6e3 100644 --- a/data/transactions/msgp_gen_test.go +++ b/data/transactions/msgp_gen_test.go @@ -494,6 +494,66 @@ func BenchmarkUnmarshalHeader(b *testing.B) { } } +func TestMarshalUnmarshalHeartbeatTxnFields(t *testing.T) { + partitiontest.PartitionTest(t) + v := HeartbeatTxnFields{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + 
t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingHeartbeatTxnFields(t *testing.T) { + protocol.RunEncodingTest(t, &HeartbeatTxnFields{}) +} + +func BenchmarkMarshalMsgHeartbeatTxnFields(b *testing.B) { + v := HeartbeatTxnFields{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHeartbeatTxnFields(b *testing.B) { + v := HeartbeatTxnFields{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHeartbeatTxnFields(b *testing.B) { + v := HeartbeatTxnFields{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalKeyregTxnFields(t *testing.T) { partitiontest.PartitionTest(t) v := KeyregTxnFields{} diff --git a/data/transactions/stateproof.go b/data/transactions/stateproof.go index 7d24526851..ed23420a1c 100644 --- a/data/transactions/stateproof.go +++ b/data/transactions/stateproof.go @@ -33,14 +33,6 @@ type StateProofTxnFields struct { Message stateproofmsg.Message `codec:"spmsg"` } -// Empty returns whether the StateProofTxnFields are all zero, -// in the sense of being omitted in a msgpack encoding. -func (sp StateProofTxnFields) Empty() bool { - return sp.StateProofType == protocol.StateProofBasic && - sp.StateProof.MsgIsZero() && - sp.Message.MsgIsZero() -} - // specialAddr is used to form a unique address that will send out state proofs. 
// //msgp:ignore specialAddr diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go index a8226654b5..f71aaa3744 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -27,6 +27,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/protocol" ) @@ -100,6 +101,11 @@ type Transaction struct { AssetFreezeTxnFields ApplicationCallTxnFields StateProofTxnFields + + // By making HeartbeatTxnFields a pointer we save a ton of space of the + // Transaction object. Unlike other txn types, the fields will be + // embedded under a named field in the transaction encoding. + *HeartbeatTxnFields `codec:"hb"` } // ApplyData contains information about the transaction's execution. @@ -324,7 +330,7 @@ func (tx Header) Alive(tc TxnContext) error { // MatchAddress checks if the transaction touches a given address. func (tx Transaction) MatchAddress(addr basics.Address, spec SpecialAddresses) bool { - return slices.Contains(tx.RelevantAddrs(spec), addr) + return slices.Contains(tx.relevantAddrs(spec), addr) } var errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound = errors.New("transaction first voting round need to be less than its last voting round") @@ -565,6 +571,42 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa return errLeaseMustBeZeroInStateproofTxn } + case protocol.HeartbeatTx: + if !proto.Heartbeat { + return fmt.Errorf("heartbeat transaction not supported") + } + + // If this is a free/cheap heartbeat, it must be very simple. 
+ if tx.Fee.Raw < proto.MinTxnFee && tx.Group.IsZero() { + kind := "free" + if tx.Fee.Raw > 0 { + kind = "cheap" + } + + if len(tx.Note) > 0 { + return fmt.Errorf("tx.Note is set in %s heartbeat", kind) + } + if tx.Lease != [32]byte{} { + return fmt.Errorf("tx.Lease is set in %s heartbeat", kind) + } + if !tx.RekeyTo.IsZero() { + return fmt.Errorf("tx.RekeyTo is set in %s heartbeat", kind) + } + } + + if (tx.HbProof == crypto.HeartbeatProof{}) { + return errors.New("tx.HbProof is empty") + } + if (tx.HbSeed == committee.Seed{}) { + return errors.New("tx.HbSeed is empty") + } + if tx.HbVoteID.IsEmpty() { + return errors.New("tx.HbVoteID is empty") + } + if tx.HbKeyDilution == 0 { + return errors.New("tx.HbKeyDilution is zero") + } + default: return fmt.Errorf("unknown tx type %v", tx.Type) } @@ -594,10 +636,14 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa nonZeroFields[protocol.ApplicationCallTx] = true } - if !tx.StateProofTxnFields.Empty() { + if !tx.StateProofTxnFields.MsgIsZero() { nonZeroFields[protocol.StateProofTx] = true } + if tx.HeartbeatTxnFields != nil { + nonZeroFields[protocol.HeartbeatTx] = true + } + for t, nonZero := range nonZeroFields { if nonZero && t != tx.Type { return fmt.Errorf("transaction of type %v has non-zero fields for type %v", tx.Type, t) @@ -704,9 +750,8 @@ func (tx Header) Last() basics.Round { return tx.LastValid } -// RelevantAddrs returns the addresses whose balance records this transaction will need to access. -// The header's default is to return just the sender and the fee sink. -func (tx Transaction) RelevantAddrs(spec SpecialAddresses) []basics.Address { +// relevantAddrs returns the addresses whose balance records this transaction will need to access. 
+func (tx Transaction) relevantAddrs(spec SpecialAddresses) []basics.Address { addrs := []basics.Address{tx.Sender, spec.FeeSink} switch tx.Type { @@ -723,6 +768,8 @@ func (tx Transaction) RelevantAddrs(spec SpecialAddresses) []basics.Address { if !tx.AssetTransferTxnFields.AssetSender.IsZero() { addrs = append(addrs, tx.AssetTransferTxnFields.AssetSender) } + case protocol.HeartbeatTx: + addrs = append(addrs, tx.HeartbeatTxnFields.HbAddress) } return addrs diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go index 08dd145a8c..183ebdc760 100644 --- a/data/transactions/transaction_test.go +++ b/data/transactions/transaction_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" @@ -29,6 +30,7 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" @@ -591,10 +593,156 @@ func TestWellFormedErrors(t *testing.T) { proto: protoV36, expectedError: nil, }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + }, + proto: protoV36, + expectedError: fmt.Errorf("heartbeat transaction not supported"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbProof is empty"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbVoteID: 
crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbSeed is empty"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbVoteID is empty"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbKeyDilution is zero"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: Header{ + Sender: addr1, + Fee: basics.MicroAlgos{Raw: 100}, + LastValid: 105, + FirstValid: 100, + Note: []byte{0x01}, + }, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.Note is set in cheap heartbeat"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: Header{ + Sender: addr1, + Fee: basics.MicroAlgos{Raw: 100}, + LastValid: 105, + FirstValid: 100, + Lease: [32]byte{0x01}, + }, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: 
crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.Lease is set in cheap heartbeat"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: Header{ + Sender: addr1, + LastValid: 105, + FirstValid: 100, + RekeyTo: [32]byte{0x01}, + }, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.RekeyTo is set in free heartbeat"), + }, } for _, usecase := range usecases { err := usecase.tx.WellFormed(SpecialAddresses{}, usecase.proto) - require.Equal(t, usecase.expectedError, err) + assert.Equal(t, usecase.expectedError, err) } } diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index 46d3c4cf7e..f01727831f 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ -221,11 +221,19 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr *bookkeeping.Bl prepErr.err = fmt.Errorf("transaction %+v invalid : %w", stxn, prepErr.err) return nil, prepErr } - if stxn.Txn.Type != protocol.StateProofTx { - minFeeCount++ - } feesPaid = basics.AddSaturate(feesPaid, stxn.Txn.Fee.Raw) lSigPooledSize += stxn.Lsig.Len() + if stxn.Txn.Type == protocol.StateProofTx { + // State proofs are free, bail before incrementing + continue + } + if stxn.Txn.Type == protocol.HeartbeatTx && stxn.Txn.Group.IsZero() { + // In apply.Heartbeat, we further confirm that the heartbeat is for + // a challenged account. 
Such heartbeats are free, bail before + // incrementing + continue + } + minFeeCount++ } if groupCtx.consensusParams.EnableLogicSigSizePooling { lSigMaxPooledSize := len(stxs) * int(groupCtx.consensusParams.LogicSigMaxSize) @@ -305,6 +313,11 @@ func stxnCoreChecks(gi int, groupCtx *GroupContext, batchVerifier crypto.BatchVe return err } + if s.Txn.Type == protocol.HeartbeatTx { + id := basics.OneTimeIDForRound(s.Txn.LastValid, s.Txn.HbKeyDilution) + s.Txn.HbProof.BatchPrep(s.Txn.HbVoteID, id, s.Txn.HbSeed, batchVerifier) + } + switch sigType { case regularSig: batchVerifier.EnqueueSignature(crypto.SignatureVerifier(s.Authorizer()), s.Txn, s.Sig) diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go index 7578d0f9da..1e7f39101f 100644 --- a/data/transactions/verify/txn_test.go +++ b/data/transactions/verify/txn_test.go @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/data/transactions/logic/mocktracer" @@ -94,6 +95,41 @@ func keypair() *crypto.SignatureSecrets { return s } +func createHeartbeatTxn(fv basics.Round, t *testing.T) transactions.SignedTxn { + secrets, addrs, _ := generateAccounts(1) + + kd := uint64(111) + lv := fv + 15 + firstID := basics.OneTimeIDForRound(fv, kd) + lastID := basics.OneTimeIDForRound(lv, kd) + numBatches := lastID.Batch - firstID.Batch + 1 + id := basics.OneTimeIDForRound(lv, kd) + + seed := committee.Seed{0x33} + otss := crypto.GenerateOneTimeSignatureSecrets(firstID.Batch, numBatches) + + txn := transactions.Transaction{ + Type: "hb", + Header: transactions.Header{ + Sender: addrs[0], + FirstValid: fv, + LastValid: lv, + }, + HeartbeatTxnFields: &transactions.HeartbeatTxnFields{ + HbProof: 
otss.Sign(id, seed).ToHeartbeatProof(), + HbSeed: seed, + HbVoteID: otss.OneTimeSignatureVerifier, + HbKeyDilution: kd, + }, + } + + hb := transactions.SignedTxn{ + Sig: secrets[0].Sign(txn), + Txn: txn, + } + return hb +} + func generateMultiSigTxn(numTxs, numAccs int, blockRound basics.Round, t *testing.T) ([]transactions.Transaction, []transactions.SignedTxn, []*crypto.SignatureSecrets, []basics.Address) { secrets, addresses, pks, multiAddress := generateMultiSigAccounts(t, numAccs) @@ -574,7 +610,7 @@ func TestPaysetGroups(t *testing.T) { startPaysetGroupsTime := time.Now() err := PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, MakeVerifiedTransactionCache(50000), nil) require.NoError(t, err) - paysetGroupDuration := time.Now().Sub(startPaysetGroupsTime) + paysetGroupDuration := time.Since(startPaysetGroupsTime) // break the signature and see if it fails. txnGroups[0][0].Sig[0] = txnGroups[0][0].Sig[0] + 1 @@ -608,7 +644,7 @@ func TestPaysetGroups(t *testing.T) { // channel is closed without a return require.Failf(t, "Channel got closed ?!", "") } else { - actualDuration := time.Now().Sub(startPaysetGroupsTime) + actualDuration := time.Since(startPaysetGroupsTime) if err == nil { if actualDuration > 4*time.Second { // it took at least 2.5 seconds more than it should have had! @@ -864,6 +900,38 @@ func TestTxnGroupCacheUpdateMultiSig(t *testing.T) { verifyGroup(t, txnGroups, &blkHdr, breakSignatureFunc, restoreSignatureFunc, crypto.ErrBatchHasFailedSigs.Error()) } +// TestTxnHeartbeat makes sure that a heartbeat transaction is valid (and added +// to the cache) only if the normal outer signature is valid AND the inner +// HbProof is valid. 
+func TestTxnHeartbeat(t *testing.T) { + partitiontest.PartitionTest(t) + + blkHdr := createDummyBlockHeader(protocol.ConsensusFuture) + + txnGroups := make([][]transactions.SignedTxn, 2) // verifyGroup requires at least 2 + for i := 0; i < len(txnGroups); i++ { + txnGroups[i] = make([]transactions.SignedTxn, 1) + txnGroups[i][0] = createHeartbeatTxn(blkHdr.Round-1, t) + } + breakSignatureFunc := func(txn *transactions.SignedTxn) { + txn.Sig[0]++ + } + restoreSignatureFunc := func(txn *transactions.SignedTxn) { + txn.Sig[0]-- + } + // This shows the outer signature must be correct + verifyGroup(t, txnGroups, &blkHdr, breakSignatureFunc, restoreSignatureFunc, crypto.ErrBatchHasFailedSigs.Error()) + + breakHbProofFunc := func(txn *transactions.SignedTxn) { + txn.Txn.HeartbeatTxnFields.HbProof.Sig[0]++ + } + restoreHbProofFunc := func(txn *transactions.SignedTxn) { + txn.Txn.HeartbeatTxnFields.HbProof.Sig[0]-- + } + // This shows the inner signature must be correct + verifyGroup(t, txnGroups, &blkHdr, breakHbProofFunc, restoreHbProofFunc, crypto.ErrBatchHasFailedSigs.Error()) +} + // TestTxnGroupCacheUpdateFailLogic test makes sure that a payment transaction contains a logic (and no signature) // is valid (and added to the cache) only if logic passes func TestTxnGroupCacheUpdateFailLogic(t *testing.T) { @@ -1028,12 +1096,18 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= verifyGroup(t, txnGroups, &blkHdr, breakSignatureFunc, restoreSignatureFunc, "rejected by logic") } -func createDummyBlockHeader() bookkeeping.BlockHeader { +func createDummyBlockHeader(optVer ...protocol.ConsensusVersion) bookkeeping.BlockHeader { + // Most tests in this file were written to use current. Future is probably + // the better test, but I don't want to make that choice now, so optVer. 
+ proto := protocol.ConsensusCurrentVersion + if len(optVer) > 0 { + proto = optVer[0] + } return bookkeeping.BlockHeader{ Round: 50, GenesisHash: crypto.Hash([]byte{1, 2, 3, 4, 5}), UpgradeState: bookkeeping.UpgradeState{ - CurrentProtocol: protocol.ConsensusCurrentVersion, + CurrentProtocol: proto, }, RewardsState: bookkeeping.RewardsState{ FeeSink: feeSink, @@ -1067,32 +1141,32 @@ func verifyGroup(t *testing.T, txnGroups [][]transactions.SignedTxn, blkHdr *boo breakSig(&txnGroups[0][0]) - dummeyLedger := DummyLedgerForSignature{} - _, err := TxnGroup(txnGroups[0], blkHdr, cache, &dummeyLedger) + dummyLedger := DummyLedgerForSignature{} + _, err := TxnGroup(txnGroups[0], blkHdr, cache, &dummyLedger) require.Error(t, err) require.Contains(t, err.Error(), errorString) // The txns should not be in the cache - unverifiedGroups := cache.GetUnverifiedTransactionGroups(txnGroups[:1], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups := cache.GetUnverifiedTransactionGroups(txnGroups[:1], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 1) - unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 2) - _, err = TxnGroup(txnGroups[1], blkHdr, cache, &dummeyLedger) + _, err = TxnGroup(txnGroups[1], blkHdr, cache, &dummyLedger) require.NoError(t, err) // Only the second txn should be in the cache - unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 1) restoreSig(&txnGroups[0][0]) - _, err = TxnGroup(txnGroups[0], blkHdr, cache, &dummeyLedger) + _, err = TxnGroup(txnGroups[0], blkHdr, cache, &dummyLedger) require.NoError(t, err) // Both transactions should be in the cache - 
unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 0) cache = MakeVerifiedTransactionCache(1000) @@ -1105,7 +1179,7 @@ func verifyGroup(t *testing.T, txnGroups [][]transactions.SignedTxn, blkHdr *boo // Add them to the cache by verifying them for _, txng := range txnGroups { - _, err = TxnGroup(txng, blkHdr, cache, &dummeyLedger) + _, err = TxnGroup(txng, blkHdr, cache, &dummyLedger) if err != nil { require.Error(t, err) require.Contains(t, err.Error(), errorString) @@ -1115,7 +1189,7 @@ func verifyGroup(t *testing.T, txnGroups [][]transactions.SignedTxn, blkHdr *boo require.Equal(t, 1, numFailed) // Only one transaction should not be in cache - unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups, spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups, spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 1) require.Equal(t, unverifiedGroups[0], txnGroups[txgIdx]) diff --git a/data/transactions/verify/verifiedTxnCache_test.go b/data/transactions/verify/verifiedTxnCache_test.go index d27510fe6a..03f5cac288 100644 --- a/data/transactions/verify/verifiedTxnCache_test.go +++ b/data/transactions/verify/verifiedTxnCache_test.go @@ -127,7 +127,7 @@ func BenchmarkGetUnverifiedTransactionGroups50(b *testing.B) { for i := 0; i < measuringMultipler; i++ { impl.GetUnverifiedTransactionGroups(queryTxnGroups, spec, protocol.ConsensusCurrentVersion) } - duration := time.Now().Sub(startTime) + duration := time.Since(startTime) // calculate time per 10K verified entries: t := int(duration*10000) / (measuringMultipler * b.N) b.ReportMetric(float64(t)/float64(time.Millisecond), "ms/10K_cache_compares") diff --git a/data/txntest/txn.go b/data/txntest/txn.go index aea4de005b..ed51c7ef40 100644 --- a/data/txntest/txn.go 
+++ b/data/txntest/txn.go @@ -26,6 +26,7 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" @@ -91,6 +92,12 @@ type Txn struct { StateProofType protocol.StateProofType StateProof stateproof.StateProof StateProofMsg stateproofmsg.Message + + HbAddress basics.Address + HbProof crypto.HeartbeatProof + HbSeed committee.Seed + HbVoteID crypto.OneTimeSignatureVerifier + HbKeyDilution uint64 } // internalCopy "finishes" a shallow copy done by a simple Go assignment by @@ -218,6 +225,17 @@ func (tx Txn) Txn() transactions.Transaction { case nil: tx.Fee = basics.MicroAlgos{} } + + hb := &transactions.HeartbeatTxnFields{ + HbAddress: tx.HbAddress, + HbProof: tx.HbProof, + HbSeed: tx.HbSeed, + HbVoteID: tx.HbVoteID, + HbKeyDilution: tx.HbKeyDilution, + } + if hb.MsgIsZero() { + hb = nil + } return transactions.Transaction{ Type: tx.Type, Header: transactions.Header{ @@ -281,6 +299,7 @@ func (tx Txn) Txn() transactions.Transaction { StateProof: tx.StateProof, Message: tx.StateProofMsg, }, + HeartbeatTxnFields: hb, } } diff --git a/heartbeat/README.md b/heartbeat/README.md new file mode 100644 index 0000000000..7293afd43f --- /dev/null +++ b/heartbeat/README.md @@ -0,0 +1,180 @@ +# Block Payouts, Suspensions, and Heartbeats + +Running a validator node on Algorand is a relatively lightweight operation. Therefore, participation +in consensus was not compensated. There was an expectation that financial motivated holders of Algos +would run nodes in order to help secure their holdings. 
+ +Although simple participation is not terribly resource intensive, running _any_ service with high +uptime becomes expensive when one considers that it should be monitored for uptime, be somewhat +over-provisioned to handle unexpected load spikes, and plans need to be in place to restart in the +face of hardware failure (or the accounts should leave consensus properly). + +With those burdens in mind, fewer Algo holders chose to run participation nodes than would be +preferred to provide security against well-financed bad actors. To alleviate this problem, a +mechanism to reward block proposers has been created. With these _block payouts_ in place, large +Algo holders are incentivized to run participation nodes in order to earn more Algos, increasing +security for the entire Algorand network. + +With the financial incentive to run participation nodes comes the risk that some nodes may be +operated without sufficient care. Therefore, a mechanism to _suspend_ nodes that appear to be +performing poorly (or not at all) has been added. Appearances can be deceiving, however. Since Algorand is a +probabilistic consensus protocol, pure chance might lead to a node appearing to be delinquent. A new +transaction type, the _heartbeat_, allows a node to explicitly indicate that it is online even if it +does not propose blocks due to "bad luck". + +# Payouts + +Payouts are made in every block, if the proposer has opted into receiving them, has an Algo balance +in an appropriate range, and has not been suspended for poor behavior since opting-in. The size of +the payout is indicated in the block header, and comes from the `FeeSink`. The block payout consists +of two components. First, a portion of the block fees (currently 50%) are paid to the proposer. +This component incentivizes fuller blocks which lead to larger payouts. Second, a _bonus_ payout is +made according to an exponentially decaying formula. This bonus is (intentionally) unsustainable +from protocol fees.
It is expected that the Algorand Foundation will seed the `FeeSink` with +sufficient funds to allow the bonuses to be paid out according to the formula for several years. If +the `FeeSink` has insufficient funds for the sum of these components, the payout will be as high as +possible while maintaining the `FeeSink`'s minimum balance. These calculations are performed in +`endOfBlock` in `eval/eval.go`. + +To opt-in to receiving block payouts, an account includes an extra fee in the `keyreg` +transaction. The amount is controlled by the consensus parameter `Payouts.GoOnlineFee`. When such a +fee is included, a new account state bit, `IncentiveEligible` is set to true. + +Even when an account is `IncentiveEligible` there is a proposal-time check of the account's online +stake. If the account has too much or too little, no payout is performed (though +`IncentiveEligible` remains true). As explained below, this check occurs in `agreement` code in +`payoutEligible()`. The balance check is performed on the _online_ stake, that is the stake from 320 +rounds earlier, so a clever proposer can not move Algos in the round it proposes in order to receive +the payout. Finally, in an interesting corner case, a proposing account could be closed at proposal +time, since voting is based on the earlier balance. Such an account receives no payout, even if its +balances was in the proper range 320 rounds ago. + +A surprising complication in the implementation of these payouts is that when a block is prepared by +a node, it does not know which account is the proposer. Until now, `algod` could prepare a single +block which would be used by any of the accounts it was participating for. The block would be +handed off to `agreement` which would manipulate the block only to add the appropriate block seed +(which depended upon the proposer). 
That interaction between `eval` and `agreement` was widened +(see `WithProposer()`) to allow `agreement` to modify the block to include the proper `Proposer`, +and to zero the `ProposerPayout` if the account that proposed was not actually eligible to receive a +payout. + +# Suspensions + +Accounts can be _suspended_ for poor behavior. There are two forms of poor behavior that can lead +to suspension. First, an account is considered _absent_ if it fails to propose as often as it +should. Second, an account can be suspended for failing to respond to a _challenge_ issued by the +network at random. + +## Absenteeism + +An account can be expected to propose once every `n = TotalOnlineStake/AccountOnlineStake` rounds. +For example, a node with 2% of online stake ought to propose once every 50 rounds. Of course the +actual proposer is chosen by random sortition. To make false positive suspensions unlikely, a node +is considered absent if it fails to produce a block over the course of `20n` rounds. + +The suspension mechanism is implemented in `generateKnockOfflineAccountsList` in `eval/eval.go`. It +is closely modeled on the mechanism that knocks accounts offline if their voting keys have expired. +An absent account is added to the `AbsentParticipationAccounts` list of the block header. When +evaluating a block, accounts in `AbsentParticipationAccounts` are suspended by changing their +`Status` to `Offline` and setting `IncentiveEligible` to false, but retaining their voting keys. + +### Keyreg and `LastHeartbeat` + +As described so far, 320 rounds after a `keyreg` to go online, an account suddenly is expected to +have proposed more recently than 20 times its new expected interval. That would be impossible, since +it was not online until that round. Therefore, when a `keyreg` is used to go online and become +`IncentiveEligible`, the account's `LastHeartbeat` field is set 320 rounds into the future. 
In +effect, the account is treated as though it proposed in the first round it is online. + +### Large Algo increases and `LastHeartbeat` + +A similar problem can occur when an online account receives Algos. 320 rounds after receiving the +new Algos, the account's expected proposal interval will shrink. If, for example, such an account +increases by a factor of 10, then it is reasonably likely that it will not have proposed recently +enough, and will be suspended immediately. To mitigate this risk, any time an online, +`IncentiveEligible` account balance doubles from a single `Pay`, its `LastHeartbeat` is incremented +to 320 rounds past the current round. + +## Challenges + +The absenteeism checks quickly suspend a high-value account if it becomes inoperative. For example, +and account with 2% of stake can be marked absent after 500 rounds (about 24 minutes). After +suspension, the effect on consensus is mitigated after 320 more rounds (about 15 +minutes). Therefore, the suspension mechanism makes Algorand significantly more robust in the face +of operational errors. + +However, the absenteeism mechanism is very slow to notice small accounts. An account with 30,000 +Algos might represent 1/100,000 or less of total stake. It would only be considered absent after a +million or more rounds without a proposal. At current network speeds, this is about a month. With such +slow detection, a financially motived entity might make the decision to run a node even if they lack +the wherewithal to run the node with excellent uptime. A worst case scenario might be a node that is +turned off daily, overnight. Such a node would generate profit for the runner, would probably never +be marked offline by the absenteeism mechanism, yet would impact consensus negatively. Algorand +can't make progress with 1/3 of nodes offline at any given time for a nightly rest. + +To combat this scenario, the network generates random _challenges_ periodically. 
Every +`Payouts.ChallengeInterval` rounds (currently 1000), a randomly selected portion (currently 1/32) of +all online accounts are challenged. They must _heartbeat_ within `Payouts.ChallengeGracePeriod` +rounds (currently 200), or they will be subject to suspension. With the current consensus +parameters, nodes can be expected to be challenged daily. When suspended, accounts must `keyreg` +with the `GoOnlineFee` in order to receive block payouts again, so it becomes unprofitable for +these low-stake nodes to operate with poor uptimes. + +# Heartbeats + +The absenteeism mechanism is subject to rare false positives. The challenge mechanism explicitly +requires an affirmative response from nodes to indicate they are operating properly on behalf of a +challenged account. Both of these needs are addressed by a new transaction type --- _Heartbeat_. A +Heartbeat transaction contains a signature (`HbProof`) of the blockseed (`HbSeed`) of the +transaction's FirstValid block under the participation key of the account (`HbAddress`) in +question. Note that the account being heartbeat for is _not_ the `Sender` of the transaction, which +can be any address. Signing a recent block seed makes it more difficult to pre-sign heartbeats that +another machine might send on your behalf. Signing the FirstValid's blockseed (rather than +FirstValid-1) simply enforces a best practice: emit a transaction with FirstValid set to a committed +round, not a future round, avoiding a race. The node you send transactions to might not have +committed your latest round yet. + +It is relatively easy for a bad actor to emit Heartbeats for its accounts without actually +participating. However, there is no financial incentive to do so. Pretending to be operational when +offline does not earn block payouts. Furthermore, running a server to monitor the block chain to +notice challenges and gather the recent blockseed is not significantly cheaper than simply running a +functional node.
It is _already_ possible for malicious, well-resourced accounts to cause consensus +difficulties by putting significant stake online without actually participating. Heartbeats do not +mitigate that risk. But these mechanisms have been designed to avoid _motivating_ such behavior, so +that they can accomplish their actual goal of noticing poor behavior stemming from _inadvertent_ +operational problems. + +## Free Heartbeats + +Challenges occur frequently, so it is important that `algod` can easily send Heartbeats as +required. How should these transactions be paid for? Many accounts, especially high-value accounts, +would not want to keep their spending keys available for automatic use by `algod`. Further, creating +(and keeping funded) a low-value side account to pay for Heartbeats would be an annoying operational +overhead. Therefore, when required by challenges, heartbeat transactions do not require a fee. +Therefore, any account, even an unfunded logicsig, can send heartbeats for an account under +challenge. + +The conditions for a free Heartbeat are: + +1. The Heartbeat is not part of a larger group, and has a zero `GroupID`. +1. The `HbAddress` is Online and under challenge with the grace period at least half over. +1. The `HbAddress` is `IncentiveEligible`. +1. There is no `Note`, `Lease`, or `RekeyTo`. + +## Heartbeat Service + +The Heartbeat Service (`heartbeat/service.go`) watches the state of all accounts for which `algod` +has participation keys. If any of those accounts meets the requirements above, a heartbeat +transaction is sent, starting with the round following half a grace period from the challenge. It +uses the (presumably unfunded) logicsig that does nothing except preclude rekey operations. + +The heartbeat service does _not_ heartbeat if an account is unlucky and threatened to be considered +absent. We presume such false positives to be so unlikely that, if they occur, the node must be +brought back online manually.
It would be reasonable to consider in the future: + +1. Making heartbeats free for accounts that are "nearly absent". + +or + +2. Allowing for paid heartbeats by the heartbeat service when configured with access to a funded + account's spending key. diff --git a/heartbeat/abstractions.go b/heartbeat/abstractions.go new file mode 100644 index 0000000000..a60f383669 --- /dev/null +++ b/heartbeat/abstractions.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package heartbeat + +import ( + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger/ledgercore" +) + +// txnBroadcaster is an interface that captures the node's ability to broadcast +// a new transaction. 
+type txnBroadcaster interface { + BroadcastInternalSignedTxGroup([]transactions.SignedTxn) error +} + +// ledger represents the aspects of the "real" Ledger that the heartbeat service +// needs to interact with +type ledger interface { + // LastRound tells the round is ready for checking + LastRound() basics.Round + + // WaitMem allows the Service to wait for the results of a round to be available + WaitMem(r basics.Round) chan struct{} + + // BlockHdr allows the service access to consensus values + BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) + + // LookupAccount allows the Service to observe accounts for suspension + LookupAccount(round basics.Round, addr basics.Address) (data ledgercore.AccountData, validThrough basics.Round, withoutRewards basics.MicroAlgos, err error) + + LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) +} + +// participants captures the aspects of the AccountManager that are used by this +// package. Service must be able to find out which accounts to monitor and have +// access to their part keys to construct heartbeats. +type participants interface { + Keys(rnd basics.Round) []account.ParticipationRecordForRound +} diff --git a/heartbeat/service.go b/heartbeat/service.go new file mode 100644 index 0000000000..3e0a6cfa00 --- /dev/null +++ b/heartbeat/service.go @@ -0,0 +1,196 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package heartbeat + +import ( + "context" + "sync" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/ledger/apply" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" +) + +// Service emits keep-alive heartbeats for accts that are in danger of +// suspension. +type Service struct { + // addresses that should be monitored for suspension + accts participants + // current status and balances + ledger ledger + // where to send the heartbeats + bcast txnBroadcaster + + // infrastructure + ctx context.Context + shutdown context.CancelFunc + wg sync.WaitGroup + log logging.Logger +} + +// NewService creates a heartbeat service. It will need to know which accounts +// to emit heartbeats for, and how to create the heartbeats. +func NewService(accts participants, ledger ledger, bcast txnBroadcaster, log logging.Logger) *Service { + return &Service{ + accts: accts, + ledger: ledger, + bcast: bcast, + log: log.With("Context", "heartbeat"), + } +} + +// Start starts the goroutines for the Service. +func (s *Service) Start() { + s.ctx, s.shutdown = context.WithCancel(context.Background()) + s.wg.Add(1) + s.log.Info("starting heartbeat service") + go s.loop() +} + +// Stop any goroutines associated with this worker. +func (s *Service) Stop() { + s.log.Debug("heartbeat service is stopping") + defer s.log.Debug("heartbeat service has stopped") + s.shutdown() + s.wg.Wait() +} + +// findChallenged() returns a list of accounts that need a heartbeat because +// they have been challenged. 
+func (s *Service) findChallenged(rules config.ProposerPayoutRules, current basics.Round) []account.ParticipationRecordForRound { + ch := apply.FindChallenge(rules, current, s.ledger, apply.ChRisky) + if ch.IsZero() { + return nil + } + + var found []account.ParticipationRecordForRound + for _, pr := range s.accts.Keys(current) { // only look at accounts we have part keys for + acct, err := s.ledger.LookupAgreement(current, pr.Account) + if err != nil { + s.log.Errorf("error looking up %v: %v", pr.Account, err) + continue + } + // There can be more than one `pr` for a single Account in the case of + // overlapping partkey validity windows. Heartbeats are validated with + // the _current_ VoterID (see apply/heartbeat.go), so we only care about + // a ParticipationRecordForRound if it is for the VoterID in `acct`. + if acct.VoteID != pr.Voting.OneTimeSignatureVerifier { + continue + } + // We want to match the logic in generateKnockOfflineAccountsList, but + // don't need to check Online status because we obtained records from + // LookupAgreement, which only returns Online accounts (or empty, which + // will not be IncentiveEligible). If we ever decide to knock off accounts + // that are not IncentiveEligible, this code should remember to check + // acct.MicroAlgosWithRewards > 0 to ensure we need a heartbeat. + if acct.IncentiveEligible { + if ch.Failed(pr.Account, max(acct.LastHeartbeat, acct.LastProposed)) { + s.log.Infof(" %v needs a heartbeat\n", pr.Account) + found = append(found, pr) + } + } + } + return found +} + +// loop monitors for any of Service's participants being suspended. If they are, +// it tries to bring them back online by emitting a heartbeat transaction. It +// could try to predict an upcoming suspension, which would prevent the +// suspension from ever occurring, but that would be considerably more complex +// both to avoid emitting repeated heartbeats, and to ensure the prediction and +// the suspension logic match.
This feels like a cleaner end-to-end test, at +// the cost of lost couple rounds of participation. (Though suspension is +// designed to be extremely unlikely anyway.) +func (s *Service) loop() { + defer s.wg.Done() + suppress := make(map[basics.Address]basics.Round) + latest := s.ledger.LastRound() + for { + // exit if Done, else wait for next round + select { + case <-s.ctx.Done(): + return + case <-s.ledger.WaitMem(latest + 1): + } + + latest = s.ledger.LastRound() + + lastHdr, err := s.ledger.BlockHdr(latest) + if err != nil { + s.log.Errorf("heartbeat service could not fetch block header for round %d: %v", latest, err) + continue // Try again next round, I guess? + } + proto := config.Consensus[lastHdr.CurrentProtocol] + + for _, pr := range s.findChallenged(proto.Payouts, latest) { + if suppress[pr.Account] > latest { + continue + } + stxn := s.prepareHeartbeat(pr, lastHdr) + s.log.Infof("sending heartbeat %v for %v\n", stxn.Txn.HeartbeatTxnFields, pr.Account) + err = s.bcast.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{stxn}) + if err != nil { + s.log.Errorf("error broadcasting heartbeat %v for %v: %v", stxn, pr.Account, err) + } else { + // Don't bother heartbeating again until the last one expires. + // If it is accepted, we won't need to (because we won't be + // under challenge any more). + suppress[pr.Account] = stxn.Txn.LastValid + } + } + } +} + +// acceptingByteCode is the byte code to a logic signature that will accept anything (except rekeying). +var acceptingByteCode = logic.MustAssemble(` +#pragma version 11 +txn RekeyTo; global ZeroAddress; == +`) +var acceptingSender = basics.Address(logic.HashProgram(acceptingByteCode)) + +// hbLifetime is somewhat short. It seems better to try several times during the +// grace period than to try a single time with a longer lifetime. 
+const hbLifetime = 10 + +func (s *Service) prepareHeartbeat(pr account.ParticipationRecordForRound, latest bookkeeping.BlockHeader) transactions.SignedTxn { + var stxn transactions.SignedTxn + stxn.Lsig = transactions.LogicSig{Logic: acceptingByteCode} + stxn.Txn.Type = protocol.HeartbeatTx + stxn.Txn.Header = transactions.Header{ + Sender: acceptingSender, + FirstValid: latest.Round, + LastValid: latest.Round + hbLifetime, + GenesisHash: latest.GenesisHash, + } + + id := basics.OneTimeIDForRound(stxn.Txn.LastValid, pr.KeyDilution) + stxn.Txn.HeartbeatTxnFields = &transactions.HeartbeatTxnFields{ + HbAddress: pr.Account, + HbProof: pr.Voting.Sign(id, latest.Seed).ToHeartbeatProof(), + HbSeed: latest.Seed, + HbVoteID: pr.Voting.OneTimeSignatureVerifier, + HbKeyDilution: pr.KeyDilution, + } + + return stxn +} diff --git a/heartbeat/service_test.go b/heartbeat/service_test.go new file mode 100644 index 0000000000..8fd3cb2865 --- /dev/null +++ b/heartbeat/service_test.go @@ -0,0 +1,300 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package heartbeat + +import ( + "fmt" + "testing" + "time" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/algorand/go-deadlock" + "github.com/stretchr/testify/require" +) + +type table map[basics.Address]ledgercore.AccountData + +type mockedLedger struct { + mu deadlock.Mutex + waiters map[basics.Round]chan struct{} + history []table + hdr bookkeeping.BlockHeader + t *testing.T +} + +func newMockedLedger(t *testing.T) mockedLedger { + return mockedLedger{ + waiters: make(map[basics.Round]chan struct{}), + history: []table{nil}, // some genesis accounts could go here + hdr: bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + CurrentProtocol: protocol.ConsensusFuture, + }, + }, + } +} + +func (l *mockedLedger) LastRound() basics.Round { + l.mu.Lock() + defer l.mu.Unlock() + return l.lastRound() +} +func (l *mockedLedger) lastRound() basics.Round { + return basics.Round(len(l.history) - 1) +} + +func (l *mockedLedger) WaitMem(r basics.Round) chan struct{} { + l.mu.Lock() + defer l.mu.Unlock() + + if l.waiters[r] == nil { + l.waiters[r] = make(chan struct{}) + } + + // Return an already-closed channel if we already have the block. 
+ if r <= l.lastRound() { + close(l.waiters[r]) + retChan := l.waiters[r] + delete(l.waiters, r) + return retChan + } + + return l.waiters[r] +} + +// BlockHdr allows the service access to consensus values +func (l *mockedLedger) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + l.mu.Lock() + defer l.mu.Unlock() + + if r > l.lastRound() { + return bookkeeping.BlockHeader{}, fmt.Errorf("%d is beyond current block (%d)", r, l.LastRound()) + } + // return the template hdr, with round + hdr := l.hdr + hdr.Round = r + return hdr, nil +} + +// setSeed allows the mock to return a specific seed +func (l *mockedLedger) setSeed(seed committee.Seed) { + l.mu.Lock() + defer l.mu.Unlock() + + l.hdr.Seed = seed +} + +func (l *mockedLedger) addBlock(delta table) error { + l.mu.Lock() + defer l.mu.Unlock() + + l.history = append(l.history, delta) + + for r, ch := range l.waiters { + switch { + case r < l.lastRound(): + l.t.Logf("%d < %d", r, l.lastRound()) + panic("why is there a waiter for an old block?") + case r == l.lastRound(): + close(ch) + delete(l.waiters, r) + case r > l.lastRound(): + /* waiter keeps waiting */ + } + } + return nil +} + +func (l *mockedLedger) LookupAccount(round basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, basics.MicroAlgos, error) { + l.mu.Lock() + defer l.mu.Unlock() + + if round > l.lastRound() { + panic("mockedLedger.LookupAccount: future round") + } + + for r := round; r <= round; r-- { + if acct, ok := l.history[r][addr]; ok { + more := basics.MicroAlgos{Raw: acct.MicroAlgos.Raw + 1} + return acct, round, more, nil + } + } + return ledgercore.AccountData{}, round, basics.MicroAlgos{}, nil +} + +func (l *mockedLedger) LookupAgreement(round basics.Round, addr basics.Address) (basics.OnlineAccountData, error) { + l.mu.Lock() + defer l.mu.Unlock() + + if round > l.lastRound() { + panic("mockedLedger.LookupAgreement: future round") + } + + for r := round; r <= round; r-- { + if acct, ok := l.history[r][addr]; ok 
{ + oad := basics.OnlineAccountData{ + MicroAlgosWithRewards: acct.MicroAlgos, + VotingData: acct.VotingData, + IncentiveEligible: acct.IncentiveEligible, + LastProposed: acct.LastProposed, + LastHeartbeat: acct.LastHeartbeat, + } + return oad, nil + } + } + return basics.OnlineAccountData{}, nil +} + +// waitFor confirms that the Service made it through the last block in the +// ledger and is waiting for the next. The Service is written such that it +// operates properly without this sort of wait, but for testing, we often want +// to wait so that we can confirm that the Service *didn't* do something. +func (l *mockedLedger) waitFor(s *Service, a *require.Assertions) { + a.Eventually(func() bool { // delay and confirm that the service advances to wait for next block + _, ok := l.waiters[l.LastRound()+1] + return ok + }, time.Second, 10*time.Millisecond) +} + +type mockedAcctManager []account.ParticipationRecordForRound + +func (am *mockedAcctManager) Keys(rnd basics.Round) []account.ParticipationRecordForRound { + return *am +} + +func (am *mockedAcctManager) addParticipant(addr basics.Address, otss *crypto.OneTimeSignatureSecrets) { + *am = append(*am, account.ParticipationRecordForRound{ + ParticipationRecord: account.ParticipationRecord{ + ParticipationID: [32]byte{}, + Account: addr, + Voting: otss, + FirstValid: 0, + LastValid: 1_000_000, + KeyDilution: 7, + }, + }) +} + +type txnSink struct { + t *testing.T + txns [][]transactions.SignedTxn +} + +func (ts *txnSink) BroadcastInternalSignedTxGroup(group []transactions.SignedTxn) error { + ts.t.Logf("sinking %+v", group[0].Txn.Header) + ts.txns = append(ts.txns, group) + return nil +} + +func TestStartStop(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + a := require.New(t) + sink := txnSink{t: t} + ledger := newMockedLedger(t) + s := NewService(&mockedAcctManager{}, &ledger, &sink, logging.TestingLog(t)) + a.NotNil(s) + a.NoError(ledger.addBlock(nil)) + s.Start() + 
a.NoError(ledger.addBlock(nil)) + s.Stop() +} + +func makeBlock(r basics.Round) bookkeeping.Block { + return bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{Round: r}, + Payset: []transactions.SignedTxnInBlock{}, + } +} + +func TestHeartbeatOnlyWhenChallenged(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + a := require.New(t) + sink := txnSink{t: t} + ledger := newMockedLedger(t) + participants := &mockedAcctManager{} + s := NewService(participants, &ledger, &sink, logging.TestingLog(t)) + s.Start() + + joe := basics.Address{0xcc} // 0xcc will matter when we set the challenge + mary := basics.Address{0xaa} // 0xaa will matter when we set the challenge + + acct := ledgercore.AccountData{} + + a.NoError(ledger.addBlock(table{joe: acct})) + ledger.waitFor(s, a) + a.Empty(sink.txns) + + // make "part keys" and install them + kd := uint64(100) + startBatch := basics.OneTimeIDForRound(ledger.LastRound(), kd).Batch + const batches = 50 // gives 50 * kd rounds = 5000 + otss1 := crypto.GenerateOneTimeSignatureSecrets(startBatch, batches) + otss2 := crypto.GenerateOneTimeSignatureSecrets(startBatch, batches) + participants.addParticipant(joe, otss1) + participants.addParticipant(joe, otss2) // Simulate overlapping part keys, so Keys() returns both + participants.addParticipant(mary, otss1) + + // now they are online, but not challenged, so no heartbeat + acct.Status = basics.Online + acct.VoteKeyDilution = kd + acct.VoteID = otss1.OneTimeSignatureVerifier + a.NoError(ledger.addBlock(table{joe: acct, mary: acct})) // in effect, "keyreg" with otss1 + ledger.waitFor(s, a) + a.Empty(sink.txns) + + // now we have to make it seem like joe has been challenged. We obtain the + // payout rules to find the first challenge round, skip forward to it, then + // go forward half a grace period. 
Only then should the service heartbeat + ledger.setSeed(committee.Seed{0xc8}) // share 5 bits with 0xcc + hdr, err := ledger.BlockHdr(ledger.LastRound()) + a.NoError(err) + rules := config.Consensus[hdr.CurrentProtocol].Payouts + for ledger.LastRound() < basics.Round(rules.ChallengeInterval+rules.ChallengeGracePeriod/2) { + a.NoError(ledger.addBlock(table{})) + ledger.waitFor(s, a) + a.Empty(sink.txns) + } + + a.NoError(ledger.addBlock(table{joe: acct})) + ledger.waitFor(s, a) + a.Empty(sink.txns) // Just kidding, no heartbeat yet, joe isn't eligible + + acct.IncentiveEligible = true + a.NoError(ledger.addBlock(table{joe: acct})) + ledger.waitFor(s, a) + // challenge is already in place, it counts immediately, so service will heartbeat + a.Len(sink.txns, 1) // only one heartbeat (for joe) despite having two part records + a.Len(sink.txns[0], 1) + a.Equal(sink.txns[0][0].Txn.Type, protocol.HeartbeatTx) + a.Equal(sink.txns[0][0].Txn.HbAddress, joe) + + s.Stop() +} diff --git a/ledger/acctonline.go b/ledger/acctonline.go index 380ff45852..76e6ef13ee 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -612,11 +612,6 @@ func (ao *onlineAccounts) onlineTotals(rnd basics.Round) (basics.MicroAlgos, pro return basics.MicroAlgos{Raw: onlineRoundParams.OnlineSupply}, onlineRoundParams.CurrentProtocol, nil } -// LookupOnlineAccountData returns the online account data for a given address at a given round. -func (ao *onlineAccounts) LookupOnlineAccountData(rnd basics.Round, addr basics.Address) (data basics.OnlineAccountData, err error) { - return ao.lookupOnlineAccountData(rnd, addr) -} - // roundOffset calculates the offset of the given round compared to the current dbRound. Requires that the lock would be taken. 
func (ao *onlineAccounts) roundOffset(rnd basics.Round) (offset uint64, err error) { if rnd < ao.cachedDBRoundOnline { diff --git a/ledger/apply/apply.go b/ledger/apply/apply.go index dfa61b2632..5bbe482f38 100644 --- a/ledger/apply/apply.go +++ b/ledger/apply/apply.go @@ -25,9 +25,14 @@ import ( "github.com/algorand/go-algorand/ledger/ledgercore" ) +// hdrProvider allows fetching old block headers +type hdrProvider interface { + BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) +} + // StateProofsApplier allows fetching and updating state-proofs state on the ledger type StateProofsApplier interface { - BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) + hdrProvider GetStateProofNextRound() basics.Round SetStateProofNextRound(rnd basics.Round) GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) diff --git a/ledger/apply/challenge.go b/ledger/apply/challenge.go new file mode 100644 index 0000000000..0de7c1208e --- /dev/null +++ b/ledger/apply/challenge.go @@ -0,0 +1,114 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package apply + +import ( + "bytes" + "math/bits" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" +) + +// ChallengePeriod indicates which part of the challenge period is under discussion. +type ChallengePeriod int + +const ( + // ChRisky indicates that a challenge is in effect, and the initial grace period is running out. + ChRisky ChallengePeriod = iota + // ChActive indicates that a challenege is in effect, and the grace period + // has run out, so accounts can be suspended + ChActive +) + +type challenge struct { + // round is when the challenge occurred. 0 means this is not a challenge. + round basics.Round + // accounts that match the first `bits` of `seed` must propose or heartbeat to stay online + seed committee.Seed + bits int +} + +// FindChallenge returns the Challenge that was last issued if it's in the period requested. +func FindChallenge(rules config.ProposerPayoutRules, current basics.Round, headers hdrProvider, period ChallengePeriod) challenge { + // are challenges active? + interval := basics.Round(rules.ChallengeInterval) + if rules.ChallengeInterval == 0 || current < interval { + return challenge{} + } + lastChallenge := current - (current % interval) + grace := basics.Round(rules.ChallengeGracePeriod) + // FindChallenge is structured this way, instead of returning the challenge + // and letting the caller determine the period it cares about, to avoid + // using BlockHdr unnecessarily. 
+	switch period {
+	case ChRisky:
+		if current <= lastChallenge+grace/2 || current > lastChallenge+grace {
+			return challenge{}
+		}
+	case ChActive:
+		if current <= lastChallenge+grace || current > lastChallenge+2*grace {
+			return challenge{}
+		}
+	}
+	challengeHdr, err := headers.BlockHdr(lastChallenge)
+	if err != nil {
+		return challenge{}
+	}
+	challengeProto := config.Consensus[challengeHdr.CurrentProtocol]
+	// challenge is not considered if rules have changed since that round
+	if challengeProto.Payouts != rules {
+		return challenge{}
+	}
+	return challenge{lastChallenge, challengeHdr.Seed, rules.ChallengeBits}
+}
+
+// IsZero returns true if the challenge is empty (used to indicate no challenge)
+func (ch challenge) IsZero() bool {
+	return ch == challenge{}
+}
+
+// Failed returns true iff ch is in effect, matches address, and lastSeen is
+// before the challenge issue. When an address "Fails" in this way, the
+// _meaning_ depends on how the challenge was obtained. If it was "risky" then
+// it means the address is at risk, not that it should be suspended. If it's an
+// "active" challenge, then the account should be suspended.
+func (ch challenge) Failed(address basics.Address, lastSeen basics.Round) bool {
+	return ch.round != 0 && bitsMatch(ch.seed[:], address[:], ch.bits) && lastSeen < ch.round
+}
+
+// bitsMatch checks if the first n bits of two byte slices match. Written to
+// work on arbitrary slices, but we expect that n is small. The only user today
+// calls with n=5.
+func bitsMatch(a, b []byte, n int) bool { + // Ensure n is a valid number of bits to compare + if n < 0 || n > len(a)*8 || n > len(b)*8 { + return false + } + + // Compare entire bytes when we care about enough bits + if !bytes.Equal(a[:n/8], b[:n/8]) { + return false + } + + remaining := n % 8 + if remaining == 0 { + return true + } + return bits.LeadingZeros8(a[n/8]^b[n/8]) >= remaining +} diff --git a/ledger/apply/challenge_test.go b/ledger/apply/challenge_test.go new file mode 100644 index 0000000000..3114b6f935 --- /dev/null +++ b/ledger/apply/challenge_test.go @@ -0,0 +1,121 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package apply + +import ( + "testing" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBitsMatch(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for b := 0; b <= 6; b++ { + require.True(t, bitsMatch([]byte{0x1}, []byte{0x2}, b), "%d", b) + } + require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 7)) + require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 8)) + require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 9)) + + for b := 0; b <= 12; b++ { + require.True(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, b), "%d", b) + } + require.False(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, 13)) + + // on a byte boundary + require.True(t, bitsMatch([]byte{0x1}, []byte{0x1}, 8)) + require.False(t, bitsMatch([]byte{0x1}, []byte{0x1}, 9)) + require.True(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 0x00}, 8)) + require.False(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 00}, 9)) +} + +func TestFailsChallenge(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + a := assert.New(t) + + // a valid challenge, with 4 matching bits, and an old last seen + a.True(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10)) + + // challenge isn't "on" + a.False(challenge{round: 0, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10)) + // node has appeared more recently + a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 12)) + // bits don't match + a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xcf, 0x34}, 10)) + // no enough bits match + a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, 
bits: 5}.Failed(basics.Address{0xbf, 0x34}, 10)) +} + +type singleSource bookkeeping.BlockHeader + +func (ss singleSource) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + return bookkeeping.BlockHeader(ss), nil +} + +func TestActiveChallenge(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + a := assert.New(t) + + nowHeader := bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + // Here the rules are on, so they certainly differ from rules in oldHeader's params + CurrentProtocol: protocol.ConsensusFuture, + }, + } + rules := config.Consensus[nowHeader.CurrentProtocol].Payouts + + // simplest test. when interval=X and grace=G, X+G+1 is a challenge + inChallenge := basics.Round(rules.ChallengeInterval + rules.ChallengeGracePeriod + 1) + ch := FindChallenge(rules, inChallenge, singleSource(nowHeader), ChActive) + a.NotZero(ch.round) + + // all rounds before that have no challenge + for r := basics.Round(1); r < inChallenge; r++ { + ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive) + a.Zero(ch.round, r) + } + + // ChallengeGracePeriod rounds allow challenges starting with inChallenge + for r := inChallenge; r < inChallenge+basics.Round(rules.ChallengeGracePeriod); r++ { + ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive) + a.EqualValues(ch.round, rules.ChallengeInterval) + } + + // And the next round is again challenge-less + ch = FindChallenge(rules, inChallenge+basics.Round(rules.ChallengeGracePeriod), singleSource(nowHeader), ChActive) + a.Zero(ch.round) + + // ignore challenge if upgrade happened + oldHeader := bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + // We need a version from before payouts got turned on + CurrentProtocol: protocol.ConsensusV39, + }, + } + ch = FindChallenge(rules, inChallenge, singleSource(oldHeader), ChActive) + a.Zero(ch.round) +} diff --git a/ledger/apply/heartbeat.go b/ledger/apply/heartbeat.go new file mode 100644 index 
0000000000..a37c8238a4 --- /dev/null +++ b/ledger/apply/heartbeat.go @@ -0,0 +1,102 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package apply + +import ( + "fmt" + + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" +) + +// Heartbeat applies a Heartbeat transaction using the Balances interface. +func Heartbeat(hb transactions.HeartbeatTxnFields, header transactions.Header, balances Balances, provider hdrProvider, round basics.Round) error { + // Get the account's balance entry + account, err := balances.Get(hb.HbAddress, false) + if err != nil { + return err + } + + // In txnGroupBatchPrep, we do not charge for singleton (Group.IsZero) + // heartbeats. But we only _want_ to allow free heartbeats if the account is + // under challenge. If this is an underpaid singleton heartbeat, reject it + // unless the account is under challenge. 
+ + proto := balances.ConsensusParams() + if header.Fee.Raw < proto.MinTxnFee && header.Group.IsZero() { + kind := "free" + if header.Fee.Raw > 0 { + kind = "cheap" + } + + if account.Status != basics.Online { + return fmt.Errorf("%s heartbeat is not allowed for %s %+v", kind, account.Status, hb.HbAddress) + } + if !account.IncentiveEligible { + return fmt.Errorf("%s heartbeat is not allowed when not IncentiveEligible %+v", kind, hb.HbAddress) + } + ch := FindChallenge(proto.Payouts, round, provider, ChRisky) + if ch.IsZero() { + return fmt.Errorf("%s heartbeat for %s is not allowed with no challenge", kind, hb.HbAddress) + } + if !ch.Failed(hb.HbAddress, account.LastSeen()) { + return fmt.Errorf("%s heartbeat for %s is not challenged by %+v", kind, hb.HbAddress, ch) + } + } + + // Note the contrast with agreement. We require the account's _current_ + // partkey be used to sign the heartbeat. This is required because we can + // only look 320 rounds back for voting information. If a heartbeat was + // delayed a few rounds (even 1), we could not ask "what partkey was in + // effect at firstValid-320?" Using the current keys means that an account + // that changes keys would invalidate any heartbeats it has already sent out + // (that haven't been evaluated yet). Maybe more importantly, after going + // offline, an account can no longer heartbeat, since it has no _current_ + // keys. Yet it is still expected to vote for 320 rounds. Therefore, + // challenges do not apply to accounts that are offline (even if they should + // still be voting). + + // heartbeats sign a message consisting of the BlockSeed of the first-valid + // round, to discourage unsavory behaviour like presigning a bunch of + // heartbeats for later use keeping an unavailable account online. 
+ hdr, err := provider.BlockHdr(header.FirstValid) + if err != nil { + return err + } + if hdr.Seed != hb.HbSeed { + return fmt.Errorf("provided seed %v does not match round %d's seed %v", + hb.HbSeed, header.FirstValid, hdr.Seed) + } + if account.VotingData.VoteID != hb.HbVoteID { + return fmt.Errorf("provided voter ID %v does not match %v's voter ID %v", + hb.HbVoteID, hb.HbAddress, account.VotingData.VoteID) + } + if account.VotingData.VoteKeyDilution != hb.HbKeyDilution { + return fmt.Errorf("provided key dilution %d does not match %v's key dilution %d", + hb.HbKeyDilution, hb.HbAddress, account.VotingData.VoteKeyDilution) + } + + account.LastHeartbeat = round + + // Write the updated entry + err = balances.Put(hb.HbAddress, account) + if err != nil { + return err + } + + return nil +} diff --git a/ledger/apply/heartbeat_test.go b/ledger/apply/heartbeat_test.go new file mode 100644 index 0000000000..06a91bf156 --- /dev/null +++ b/ledger/apply/heartbeat_test.go @@ -0,0 +1,208 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package apply + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" + "github.com/algorand/go-algorand/data/txntest" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func TestHeartbeat(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // Creator + sender := basics.Address{0x01} + voter := basics.Address{0x02} + const keyDilution = 777 + + fv := basics.Round(100) + lv := basics.Round(1000) + + id := basics.OneTimeIDForRound(lv, keyDilution) + otss := crypto.GenerateOneTimeSignatureSecrets(1, 2) // This will cover rounds 1-2*777 + + mockBal := makeMockBalancesWithAccounts(protocol.ConsensusFuture, map[basics.Address]basics.AccountData{ + sender: { + MicroAlgos: basics.MicroAlgos{Raw: 10_000_000}, + }, + voter: { + Status: basics.Online, + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + VoteID: otss.OneTimeSignatureVerifier, + VoteKeyDilution: keyDilution, + IncentiveEligible: true, + }, + }) + + seed := committee.Seed{0x01, 0x02, 0x03} + mockHdr := makeMockHeaders(bookkeeping.BlockHeader{ + Round: fv, + Seed: seed, + }) + + test := txntest.Txn{ + Type: protocol.HeartbeatTx, + Sender: sender, + FirstValid: fv, + LastValid: lv, + HbAddress: voter, + HbProof: otss.Sign(id, seed).ToHeartbeatProof(), + } + + tx := test.Txn() + + rnd := basics.Round(150) + // no fee + err := Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "free heartbeat") + + // just as bad: cheap + tx.Fee = basics.MicroAlgos{Raw: 10} + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "cheap heartbeat") + + // address fee + tx.Fee = basics.MicroAlgos{Raw: 1000} + + // Seed is 
missing + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided seed") + + tx.HbSeed = seed + // VoterID is missing + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided voter ID") + + tx.HbVoteID = otss.OneTimeSignatureVerifier + // still no key dilution + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided key dilution 0") + + tx.HbKeyDilution = keyDilution + 1 + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided key dilution 778") + + tx.HbKeyDilution = keyDilution + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.NoError(t, err) + after, err := mockBal.Get(voter, false) + require.NoError(t, err) + require.Equal(t, rnd, after.LastHeartbeat) + require.Zero(t, after.LastProposed) // unchanged +} + +// TestCheapRules ensures a heartbeat can only have a low fee if the account +// being heartbeat for is online, under risk of suspension by challenge, and +// incentive eligible. +func TestCheapRules(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + type tcase struct { + rnd basics.Round + addrStart byte + status basics.Status + incentiveEligble bool + note []byte + lease [32]byte + rekey [32]byte + err string + } + empty := [32]byte{} + // Grace period is 200. For the second half of the grace period (1101-1200), + // the heartbeat is free for online, incentive eligible, challenged accounts. 
+ const grace = 200 + const half = grace / 2 + cases := []tcase{ + // test of range + {1000 + half, 0x01, basics.Online, true, nil, empty, empty, "no challenge"}, + {1000 + half + 1, 0x01, basics.Online, true, nil, empty, empty, ""}, + {1000 + grace, 0x01, basics.Online, true, nil, empty, empty, ""}, + {1000 + grace + 1, 0x01, basics.Online, true, nil, empty, empty, "no challenge"}, + + // test of the other requirements + {1000 + half + 1, 0xf1, basics.Online, true, nil, empty, empty, "not challenged by"}, + {1000 + half + 1, 0x01, basics.Offline, true, nil, empty, empty, "not allowed for Offline"}, + {1000 + half + 1, 0x01, basics.Online, false, nil, empty, empty, "not allowed when not IncentiveEligible"}, + } + for _, tc := range cases { + const keyDilution = 777 + + lv := basics.Round(tc.rnd + 10) + + id := basics.OneTimeIDForRound(lv, keyDilution) + otss := crypto.GenerateOneTimeSignatureSecrets(1, 10) // This will cover rounds 1-10*777 + + sender := basics.Address{0x01} + voter := basics.Address{tc.addrStart} + mockBal := makeMockBalancesWithAccounts(protocol.ConsensusFuture, map[basics.Address]basics.AccountData{ + sender: { + MicroAlgos: basics.MicroAlgos{Raw: 10_000_000}, + }, + voter: { + Status: tc.status, + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + VoteID: otss.OneTimeSignatureVerifier, + VoteKeyDilution: keyDilution, + IncentiveEligible: tc.incentiveEligble, + }, + }) + + seed := committee.Seed{0x01, 0x02, 0x03} + mockHdr := makeMockHeaders() + mockHdr.setFallback(bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + CurrentProtocol: protocol.ConsensusFuture, + }, + Seed: seed, + }) + txn := txntest.Txn{ + Type: protocol.HeartbeatTx, + Sender: sender, + Fee: basics.MicroAlgos{Raw: 1}, + FirstValid: tc.rnd - 10, + LastValid: tc.rnd + 10, + Lease: tc.lease, + Note: tc.note, + RekeyTo: tc.rekey, + HbAddress: voter, + HbProof: otss.Sign(id, seed).ToHeartbeatProof(), + HbSeed: seed, + HbVoteID: otss.OneTimeSignatureVerifier, + 
HbKeyDilution: keyDilution, + } + + tx := txn.Txn() + err := Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, tc.rnd) + if tc.err == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, tc.err, "%+v", tc) + } + } +} diff --git a/ledger/apply/keyreg.go b/ledger/apply/keyreg.go index f5326f8240..d883618685 100644 --- a/ledger/apply/keyreg.go +++ b/ledger/apply/keyreg.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" + "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" ) @@ -79,7 +80,8 @@ func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, bal } record.Status = basics.Online if params.Payouts.Enabled { - record.LastHeartbeat = header.FirstValid + lookback := agreement.BalanceLookback(balances.ConsensusParams()) + record.LastHeartbeat = round + lookback } record.VoteFirstValid = keyreg.VoteFirst record.VoteLastValid = keyreg.VoteLast diff --git a/ledger/apply/mockBalances_test.go b/ledger/apply/mockBalances_test.go index a02a2108fd..312f37e76d 100644 --- a/ledger/apply/mockBalances_test.go +++ b/ledger/apply/mockBalances_test.go @@ -17,10 +17,12 @@ package apply import ( + "fmt" "maps" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -271,3 +273,33 @@ func (b *mockCreatableBalances) HasAssetParams(addr basics.Address, aidx basics. _, ok = acct.AssetParams[aidx] return } + +type mockHeaders struct { + perRound map[basics.Round]bookkeeping.BlockHeader + fallback *bookkeeping.BlockHeader +} + +// makeMockHeaders takes a bunch of BlockHeaders and returns a HdrProivder for them. 
+func makeMockHeaders(hdrs ...bookkeeping.BlockHeader) mockHeaders { + b := make(map[basics.Round]bookkeeping.BlockHeader) + for _, hdr := range hdrs { + b[hdr.Round] = hdr + } + return mockHeaders{perRound: b} +} + +func (m mockHeaders) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + if hdr, ok := m.perRound[r]; ok { + return hdr, nil + } + if m.fallback != nil { + copy := *m.fallback + copy.Round = r + return copy, nil + } + return bookkeeping.BlockHeader{}, fmt.Errorf("round %v is not present", r) +} + +func (m *mockHeaders) setFallback(hdr bookkeeping.BlockHeader) { + m.fallback = &hdr +} diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go index a7b3b15214..fce41b00a3 100644 --- a/ledger/apptxn_test.go +++ b/ledger/apptxn_test.go @@ -104,9 +104,9 @@ func TestPayAction(t *testing.T) { dl.t.Log("postsink", postsink, "postprop", postprop) if ver >= payoutsVer { - bonus := 10_000_000 // config/consensus.go - assert.EqualValues(t, bonus-500, presink-postsink) // based on 75% in config/consensus.go - require.EqualValues(t, bonus+1500, postprop-preprop) + bonus := 10_000_000 // config/consensus.go + assert.EqualValues(t, bonus-1000, presink-postsink) // based on 50% in config/consensus.go + require.EqualValues(t, bonus+1000, postprop-preprop) } else { require.EqualValues(t, 2000, postsink-presink) // no payouts yet } diff --git a/ledger/eval/eval.go b/ledger/eval/eval.go index 8bf7f8ce7d..4317fa1016 100644 --- a/ledger/eval/eval.go +++ b/ledger/eval/eval.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "math" - "math/bits" "sync" "github.com/algorand/go-algorand/agreement" @@ -29,7 +28,6 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" - "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/data/transactions/verify" 
@@ -38,6 +36,7 @@ import ( "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/execpool" ) @@ -48,6 +47,7 @@ type LedgerForCowBase interface { CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error LookupWithoutRewards(basics.Round, basics.Address) (ledgercore.AccountData, basics.Round, error) LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error) + GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) LookupAsset(basics.Round, basics.Address, basics.AssetIndex) (ledgercore.AssetResource, error) LookupApplication(basics.Round, basics.Address, basics.AppIndex) (ledgercore.AppResource, error) LookupKv(basics.Round, string) ([]byte, error) @@ -207,14 +207,16 @@ func (x *roundCowBase) lookup(addr basics.Address) (ledgercore.AccountData, erro } // balanceRound reproduces the way that the agreement package finds the round to -// consider for online accounts. +// consider for online accounts. It returns the round that would be considered +// while voting on the current round (which is x.rnd+1). func (x *roundCowBase) balanceRound() (basics.Round, error) { - phdr, err := x.BlockHdr(agreement.ParamsRound(x.rnd)) + current := x.rnd + 1 + phdr, err := x.BlockHdr(agreement.ParamsRound(current)) if err != nil { return 0, err } agreementParams := config.Consensus[phdr.CurrentProtocol] - return agreement.BalanceRound(x.rnd, agreementParams), nil + return agreement.BalanceRound(current, agreementParams), nil } // lookupAgreement returns the online accountdata for the provided account address. 
It uses an internal cache @@ -248,12 +250,12 @@ func (x *roundCowBase) onlineStake() (basics.MicroAlgos, error) { if err != nil { return basics.MicroAlgos{}, err } - total, err := x.l.OnlineCirculation(brnd, x.rnd) + total, err := x.l.OnlineCirculation(brnd, x.rnd+1) // x.rnd+1 is round being built if err != nil { return basics.MicroAlgos{}, err } x.totalOnline = total - return x.totalOnline, err + return x.totalOnline, nil } func (x *roundCowBase) updateAssetResourceCache(aa ledgercore.AccountAsset, r ledgercore.AssetResource) { @@ -604,6 +606,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics if overflowed { return fmt.Errorf("overspend (account %v, data %+v, tried to spend %v)", from, fromBal, amt) } + fromBalNew = cs.autoHeartbeat(fromBal, fromBalNew) err = cs.putAccount(from, fromBalNew) if err != nil { return err @@ -632,6 +635,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics if overflowed { return fmt.Errorf("balance overflow (account %v, data %+v, was going to receive %v)", to, toBal, amt) } + toBalNew = cs.autoHeartbeat(toBal, toBalNew) err = cs.putAccount(to, toBalNew) if err != nil { return err @@ -641,6 +645,24 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics return nil } +// autoHeartbeat compares `before` and `after`, returning a new AccountData +// based on `after` but with an updated `LastHeartbeat` if `after` shows enough +// balance increase to risk a false positive suspension for absenteeism. 
+func (cs *roundCowState) autoHeartbeat(before, after ledgercore.AccountData) ledgercore.AccountData { + // No need to adjust unless account is suspendable + if after.Status != basics.Online || !after.IncentiveEligible { + return after + } + + // Adjust only if balance has doubled + twice, o := basics.OMul(before.MicroAlgos.Raw, 2) + if !o && after.MicroAlgos.Raw >= twice { + lookback := agreement.BalanceLookback(cs.ConsensusParams()) + after.LastHeartbeat = cs.Round() + lookback + } + return after +} + func (cs *roundCowState) ConsensusParams() config.ConsensusParams { return cs.proto } @@ -1285,6 +1307,9 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, cow *r // Validation of the StateProof transaction before applying will only occur in validate mode. err = apply.StateProof(tx.StateProofTxnFields, tx.Header.FirstValid, cow, eval.validate) + case protocol.HeartbeatTx: + err = apply.Heartbeat(*tx.HeartbeatTxnFields, tx.Header, cow, cow, cow.Round()) + default: err = fmt.Errorf("unknown transaction type %v", tx.Type) } @@ -1339,7 +1364,13 @@ func (eval *BlockEvaluator) TestingTxnCounter() uint64 { } // Call "endOfBlock" after all the block's rewards and transactions are processed. -func (eval *BlockEvaluator) endOfBlock() error { +// When generating a block, participating addresses are passed to prevent a +// proposer from suspending itself. 
+func (eval *BlockEvaluator) endOfBlock(participating ...basics.Address) error { + if participating != nil && !eval.generate { + panic("logic error: only pass partAddresses to endOfBlock when generating") + } + if eval.generate { var err error eval.block.TxnCommitments, err = eval.block.PaysetCommit() @@ -1364,7 +1395,7 @@ func (eval *BlockEvaluator) endOfBlock() error { } } - eval.generateKnockOfflineAccountsList() + eval.generateKnockOfflineAccountsList(participating) if eval.proto.StateProofInterval > 0 { var basicStateProof bookkeeping.StateProofTrackingData @@ -1579,6 +1610,10 @@ func (eval *BlockEvaluator) recordProposal() error { return nil } +// proposerPayout determines how much the proposer should be paid, assuming it +// gets paid at all. It may not examine the actual proposer because it is +// called before the proposer is known. Agreement might zero out this value +// when the actual proposer is decided, if that proposer is ineligible. func (eval *BlockEvaluator) proposerPayout() (basics.MicroAlgos, error) { incentive, _ := basics.NewPercent(eval.proto.Payouts.Percent).DivvyAlgos(eval.block.FeesCollected) total, o := basics.OAddA(incentive, eval.block.Bonus) @@ -1594,38 +1629,106 @@ func (eval *BlockEvaluator) proposerPayout() (basics.MicroAlgos, error) { return basics.MinA(total, available), nil } -type challenge struct { - // round is when the challenge occurred. 0 means this is not a challenge. - round basics.Round - // accounts that match the first `bits` of `seed` must propose or heartbeat to stay online - seed committee.Seed - bits int -} - // generateKnockOfflineAccountsList creates the lists of expired or absent -// participation accounts by traversing over the modified accounts in the state -// deltas and testing if any of them needs to be reset/suspended. Expiration -// takes precedence - if an account is expired, it should be knocked offline and -// key material deleted. If it is only suspended, the key material will remain. 
-func (eval *BlockEvaluator) generateKnockOfflineAccountsList() { +// participation accounts to be suspended. It examines the accounts that appear +// in the current block and high-stake accounts being tracked for state +// proofs. Expiration takes precedence - if an account is expired, it should be +// knocked offline and key material deleted. If it is only suspended, the key +// material will remain. +// +// Different nodes may propose different list of addresses based on node state, +// the protocol does not enforce which accounts must appear. Block validators +// only check whether ExpiredParticipationAccounts or +// AbsentParticipationAccounts meet the criteria for expiration or suspension, +// not whether the lists are complete. +// +// This function is passed a list of participating addresses so a node will not +// propose a block that suspends or expires itself. +func (eval *BlockEvaluator) generateKnockOfflineAccountsList(participating []basics.Address) { if !eval.generate { return } - current := eval.Round() + current := eval.Round() maxExpirations := eval.proto.MaxProposedExpiredOnlineAccounts maxSuspensions := eval.proto.Payouts.MaxMarkAbsent updates := &eval.block.ParticipationUpdates - ch := activeChallenge(&eval.proto, uint64(eval.Round()), eval.state) + ch := apply.FindChallenge(eval.proto.Payouts, current, eval.state, apply.ChActive) + onlineStake, err := eval.state.onlineStake() + if err != nil { + logging.Base().Errorf("unable to fetch online stake, no knockoffs: %v", err) + return + } + + // Make a set of candidate addresses to check for expired or absentee status. 
+ type candidateData struct { + VoteLastValid basics.Round + VoteID crypto.OneTimeSignatureVerifier + Status basics.Status + LastProposed basics.Round + LastHeartbeat basics.Round + MicroAlgosWithRewards basics.MicroAlgos + IncentiveEligible bool // currently unused below, but may be needed in the future + } + candidates := make(map[basics.Address]candidateData) + partAddrs := util.MakeSet(participating...) + // First, ask the ledger for the top N online accounts, with their latest + // online account data, current up to the previous round. + if maxSuspensions > 0 { + knockOfflineCandidates, err := eval.l.GetKnockOfflineCandidates(eval.prevHeader.Round, eval.proto) + if err != nil { + // Log an error and keep going; generating lists of absent and expired + // accounts is not required by block validation rules. + logging.Base().Warnf("error fetching knockOfflineCandidates: %v", err) + knockOfflineCandidates = nil + } + for accountAddr, acctData := range knockOfflineCandidates { + // acctData is from previous block: doesn't include any updates in mods + candidates[accountAddr] = candidateData{ + VoteLastValid: acctData.VoteLastValid, + VoteID: acctData.VoteID, + Status: basics.Online, // GetKnockOfflineCandidates only returns online accounts + LastProposed: acctData.LastProposed, + LastHeartbeat: acctData.LastHeartbeat, + MicroAlgosWithRewards: acctData.MicroAlgosWithRewards, + IncentiveEligible: acctData.IncentiveEligible, + } + } + } + + // Then add any accounts modified in this block, with their state at the + // end of the round. for _, accountAddr := range eval.state.modifiedAccounts() { acctData, found := eval.state.mods.Accts.GetData(accountAddr) if !found { continue } + // This will overwrite data from the knockOfflineCandidates list, if they were modified in the current block. 
+ candidates[accountAddr] = candidateData{ + VoteLastValid: acctData.VoteLastValid, + VoteID: acctData.VoteID, + Status: acctData.Status, + LastProposed: acctData.LastProposed, + LastHeartbeat: acctData.LastHeartbeat, + MicroAlgosWithRewards: acctData.WithUpdatedRewards(eval.proto, eval.state.rewardsLevel()).MicroAlgos, + IncentiveEligible: acctData.IncentiveEligible, + } + } + + // Now, check these candidate accounts to see if they are expired or absent. + for accountAddr, acctData := range candidates { + if acctData.MicroAlgosWithRewards.IsZero() { + continue // don't check accounts that are being closed + } + + if partAddrs.Contains(accountAddr) { + continue // don't check our own participation accounts + } + // Expired check: are this account's voting keys no longer valid? // Regardless of being online or suspended, if voting data exists, the // account can be expired to remove it. This means an offline account // can be expired (because it was already suspended). @@ -1637,18 +1740,25 @@ func (eval *BlockEvaluator) generateKnockOfflineAccountsList() { updates.ExpiredParticipationAccounts, accountAddr, ) - continue // if marking expired, do not also suspend + continue // if marking expired, do not consider suspension } } + // Absent check: has it been too long since the last heartbeat/proposal, or + // has this online account failed a challenge? 
if len(updates.AbsentParticipationAccounts) >= maxSuspensions { continue // no more room (don't break the loop, since we may have more expiries) } - if acctData.Status == basics.Online { + if acctData.Status == basics.Online && acctData.IncentiveEligible { lastSeen := max(acctData.LastProposed, acctData.LastHeartbeat) - if isAbsent(eval.state.prevTotals.Online.Money, acctData.MicroAlgos, lastSeen, current) || - failsChallenge(ch, accountAddr, lastSeen) { + oad, lErr := eval.state.lookupAgreement(accountAddr) + if lErr != nil { + logging.Base().Errorf("unable to check account for absenteeism: %v", accountAddr) + continue + } + if isAbsent(onlineStake, oad.VotingStake(), lastSeen, current) || + ch.Failed(accountAddr, lastSeen) { updates.AbsentParticipationAccounts = append( updates.AbsentParticipationAccounts, accountAddr, @@ -1658,76 +1768,25 @@ func (eval *BlockEvaluator) generateKnockOfflineAccountsList() { } } -// bitsMatch checks if the first n bits of two byte slices match. Written to -// work on arbitrary slices, but we expect that n is small. Only user today -// calls with n=5. -func bitsMatch(a, b []byte, n int) bool { - // Ensure n is a valid number of bits to compare - if n < 0 || n > len(a)*8 || n > len(b)*8 { - return false - } - - // Compare entire bytes when n is bigger than 8 - for i := 0; i < n/8; i++ { - if a[i] != b[i] { - return false - } - } - remaining := n % 8 - if remaining == 0 { - return true - } - return bits.LeadingZeros8(a[n/8]^b[n/8]) >= remaining -} +const absentFactor = 20 func isAbsent(totalOnlineStake basics.MicroAlgos, acctStake basics.MicroAlgos, lastSeen basics.Round, current basics.Round) bool { // Don't consider accounts that were online when payouts went into effect as // absent. They get noticed the next time they propose or keyreg, which // ought to be soon, if they are high stake or want to earn incentives. 
- if lastSeen == 0 { + if lastSeen == 0 || acctStake.Raw == 0 { return false } - // See if the account has exceeded 10x their expected observation interval. - allowableLag, o := basics.Muldiv(10, totalOnlineStake.Raw, acctStake.Raw) - if o { - // This can't happen with 10B total possible stake, but if we imagine - // another algorand network with huge possible stake, this seems reasonable. - allowableLag = math.MaxInt64 / acctStake.Raw - } - return lastSeen+basics.Round(allowableLag) < current -} - -type headerSource interface { - BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) -} - -func activeChallenge(proto *config.ConsensusParams, current uint64, headers headerSource) challenge { - rules := proto.Payouts - // are challenges active? - if rules.ChallengeInterval == 0 || current < rules.ChallengeInterval { - return challenge{} - } - lastChallenge := current - (current % rules.ChallengeInterval) - // challenge is in effect if we're after one grace period, but before the 2nd ends. - if current <= lastChallenge+rules.ChallengeGracePeriod || - current > lastChallenge+2*rules.ChallengeGracePeriod { - return challenge{} - } - round := basics.Round(lastChallenge) - challengeHdr, err := headers.BlockHdr(round) - if err != nil { - panic(err) - } - challengeProto := config.Consensus[challengeHdr.CurrentProtocol] - // challenge is not considered if rules have changed since that round - if challengeProto.Payouts != rules { - return challenge{} + // See if the account has exceeded their expected observation interval. + allowableLag, o := basics.Muldiv(absentFactor, totalOnlineStake.Raw, acctStake.Raw) + // just return false for overflow or a huge allowableLag. It implies the lag + // is longer that any network could be around, and computing with wraparound + // is annoying. 
+ if o || allowableLag > math.MaxUint32 { + return false } - return challenge{round, challengeHdr.Seed, rules.ChallengeBits} -} -func failsChallenge(ch challenge, address basics.Address, lastSeen basics.Round) bool { - return ch.round != 0 && bitsMatch(ch.seed[:], address[:], ch.bits) && lastSeen < ch.round + return lastSeen+basics.Round(allowableLag) < current } // validateExpiredOnlineAccounts tests the expired online accounts specified in ExpiredParticipationAccounts, and verify @@ -1797,7 +1856,15 @@ func (eval *BlockEvaluator) validateAbsentOnlineAccounts() error { // For consistency with expired account handling, we preclude duplicates addressSet := make(map[basics.Address]bool, suspensionCount) - ch := activeChallenge(&eval.proto, uint64(eval.Round()), eval.state) + ch := apply.FindChallenge(eval.proto.Payouts, eval.Round(), eval.state, apply.ChActive) + totalOnlineStake, err := eval.state.onlineStake() + if err != nil { + logging.Base().Errorf("unable to fetch online stake, can't check knockoffs: %v", err) + // I suppose we can still return successfully if the absent list is empty. 
+ if suspensionCount > 0 { + return err + } + } for _, accountAddr := range eval.block.ParticipationUpdates.AbsentParticipationAccounts { if _, exists := addressSet[accountAddr]; exists { @@ -1813,12 +1880,21 @@ func (eval *BlockEvaluator) validateAbsentOnlineAccounts() error { if acctData.Status != basics.Online { return fmt.Errorf("proposed absent account %v was %v, not Online", accountAddr, acctData.Status) } + if acctData.MicroAlgos.IsZero() { + return fmt.Errorf("proposed absent account %v with zero algos", accountAddr) + } + if !acctData.IncentiveEligible { + return fmt.Errorf("proposed absent account %v not IncentiveEligible", accountAddr) + } - lastSeen := max(acctData.LastProposed, acctData.LastHeartbeat) - if isAbsent(eval.state.prevTotals.Online.Money, acctData.MicroAlgos, lastSeen, eval.Round()) { + oad, lErr := eval.state.lookupAgreement(accountAddr) + if lErr != nil { + return fmt.Errorf("unable to check absent account: %v", accountAddr) + } + if isAbsent(totalOnlineStake, oad.VotingStake(), acctData.LastSeen(), eval.Round()) { continue // ok. it's "normal absent" } - if failsChallenge(ch, accountAddr, lastSeen) { + if ch.Failed(accountAddr, acctData.LastSeen()) { continue // ok. it's "challenge absent" } return fmt.Errorf("proposed absent account %v is not absent in %d, %d", @@ -1882,7 +1958,16 @@ func (eval *BlockEvaluator) suspendAbsentAccounts() error { // After a call to GenerateBlock, the BlockEvaluator can still be used to // accept transactions. However, to guard against reuse, subsequent calls // to GenerateBlock on the same BlockEvaluator will fail. -func (eval *BlockEvaluator) GenerateBlock(addrs []basics.Address) (*ledgercore.UnfinishedBlock, error) { +// +// A list of participating addresses is passed to GenerateBlock. This lets +// the BlockEvaluator know which of this node's participating addresses might +// be proposing this block. 
This information is used when: +// - generating lists of absent accounts (don't suspend yourself) +// - preparing a ledgercore.UnfinishedBlock, which contains the end-of-block +// state of each potential proposer. This allows for a final check in +// UnfinishedBlock.FinishBlock to ensure the proposer hasn't closed its +// account before setting the ProposerPayout header. +func (eval *BlockEvaluator) GenerateBlock(participating []basics.Address) (*ledgercore.UnfinishedBlock, error) { if !eval.generate { logging.Base().Panicf("GenerateBlock() called but generate is false") } @@ -1891,19 +1976,19 @@ func (eval *BlockEvaluator) GenerateBlock(addrs []basics.Address) (*ledgercore.U return nil, fmt.Errorf("GenerateBlock already called on this BlockEvaluator") } - err := eval.endOfBlock() + err := eval.endOfBlock(participating...) if err != nil { return nil, err } - // look up set of participation accounts passed to GenerateBlock (possible proposers) - finalAccounts := make(map[basics.Address]ledgercore.AccountData, len(addrs)) - for i := range addrs { - acct, err := eval.state.lookup(addrs[i]) + // look up end-of-block state of possible proposers passed to GenerateBlock + finalAccounts := make(map[basics.Address]ledgercore.AccountData, len(participating)) + for i := range participating { + acct, err := eval.state.lookup(participating[i]) if err != nil { return nil, err } - finalAccounts[addrs[i]] = acct + finalAccounts[participating[i]] = acct } vb := ledgercore.MakeUnfinishedBlock(eval.block, eval.state.deltas(), finalAccounts) diff --git a/ledger/eval/eval_test.go b/ledger/eval/eval_test.go index 358ba6b430..5874c83325 100644 --- a/ledger/eval/eval_test.go +++ b/ledger/eval/eval_test.go @@ -792,9 +792,27 @@ func (ledger *evalTestLedger) LookupAgreement(rnd basics.Round, addr basics.Addr return convertToOnline(ad), err } -// OnlineCirculation just returns a deterministic value for a given round. 
+func (ledger *evalTestLedger) GetKnockOfflineCandidates(rnd basics.Round, _ config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + // simulate by returning all online accounts known by the test ledger + ret := make(map[basics.Address]basics.OnlineAccountData) + for addr, data := range ledger.roundBalances[rnd] { + if data.Status == basics.Online && !data.MicroAlgos.IsZero() { + ret[addr] = data.OnlineAccountData() + } + } + return ret, nil +} + +// OnlineCirculation add up the balances of all online accounts in rnd. It +// doesn't remove expired accounts. func (ledger *evalTestLedger) OnlineCirculation(rnd, voteRound basics.Round) (basics.MicroAlgos, error) { - return basics.MicroAlgos{Raw: uint64(rnd) * 1_000_000}, nil + circulation := basics.MicroAlgos{} + for _, data := range ledger.roundBalances[rnd] { + if data.Status == basics.Online { + circulation.Raw += data.MicroAlgos.Raw + } + } + return circulation, nil } func (ledger *evalTestLedger) LookupApplication(rnd basics.Round, addr basics.Address, aidx basics.AppIndex) (ledgercore.AppResource, error) { @@ -947,8 +965,8 @@ func (ledger *evalTestLedger) nextBlock(t testing.TB) *BlockEvaluator { } // endBlock completes the block being created, returns the ValidatedBlock for inspection -func (ledger *evalTestLedger) endBlock(t testing.TB, eval *BlockEvaluator) *ledgercore.ValidatedBlock { - unfinishedBlock, err := eval.GenerateBlock(nil) +func (ledger *evalTestLedger) endBlock(t testing.TB, eval *BlockEvaluator, proposers ...basics.Address) *ledgercore.ValidatedBlock { + unfinishedBlock, err := eval.GenerateBlock(proposers) require.NoError(t, err) // fake agreement's setting of header fields so later validates work. 
seed := committee.Seed{} @@ -1024,6 +1042,10 @@ func (l *testCowBaseLedger) LookupAgreement(rnd basics.Round, addr basics.Addres return basics.OnlineAccountData{}, errors.New("not implemented") } +func (l *testCowBaseLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, errors.New("not implemented") +} + func (l *testCowBaseLedger) OnlineCirculation(rnd, voteRnd basics.Round) (basics.MicroAlgos, error) { return basics.MicroAlgos{}, errors.New("not implemented") } @@ -1098,7 +1120,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) + genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) sendAddr := addrs[0] recvAddr := addrs[1] @@ -1144,11 +1166,11 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) { // Advance the evaluator a couple rounds, watching for lack of expiration for i := uint64(0); i < uint64(targetRound); i++ { - vb := l.endBlock(t, blkEval) + vb := l.endBlock(t, blkEval, recvAddr) blkEval = l.nextBlock(t) for _, acct := range vb.Block().ExpiredParticipationAccounts { if acct == recvAddr { - // won't happen, because recvAddr didn't appear in block + // won't happen, because recvAddr was proposer require.Fail(t, "premature expiration") } } @@ -1156,26 +1178,6 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) { require.Greater(t, uint64(blkEval.Round()), uint64(recvAddrLastValidRound)) - genHash := l.GenesisHash() - txn := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: sendAddr, - Fee: minFee, - FirstValid: newBlock.Round(), - LastValid: blkEval.Round(), - GenesisHash: genHash, - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: recvAddr, - Amount: basics.MicroAlgos{Raw: 100}, - }, - } - - st := txn.Sign(keys[0]) 
- err = blkEval.Transaction(st, transactions.ApplyData{}) - require.NoError(t, err) - // Make sure we validate our block as well blkEval.validate = true @@ -1250,7 +1252,7 @@ func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) + genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) sendAddr := addrs[0] recvAddr := addrs[1] @@ -1296,26 +1298,6 @@ func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) { eval = l.nextBlock(t) } - genHash := l.GenesisHash() - txn := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: sendAddr, - Fee: minFee, - FirstValid: newBlock.Round(), - LastValid: eval.Round(), - GenesisHash: genHash, - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: recvAddr, - Amount: basics.MicroAlgos{Raw: 100}, - }, - } - - st := txn.Sign(keys[0]) - err = eval.Transaction(st, transactions.ApplyData{}) - require.NoError(t, err) - eval.validate = true eval.generate = false @@ -1353,19 +1335,46 @@ func TestAbsenteeChecks(t *testing.T) { crypto.RandBytes(tmp.StateProofID[:]) crypto.RandBytes(tmp.SelectionID[:]) crypto.RandBytes(tmp.VoteID[:]) + tmp.IncentiveEligible = true // make suspendable tmp.VoteFirstValid = 1 tmp.VoteLastValid = 1500 // large enough to avoid EXPIRATION, so we can see SUSPENSION - tmp.LastHeartbeat = 1 // non-zero allows suspensions switch i { case 1: - tmp.LastHeartbeat = 1150 // lie here so that addr[1] won't be suspended + tmp.LastHeartbeat = 1 // we want addrs[1] to be suspended earlier than others case 2: - tmp.LastProposed = 1150 // lie here so that addr[2] won't be suspended + tmp.LastProposed = 1 // we want addrs[2] to be suspended earlier than others + case 3: + tmp.LastProposed = 1 // we want addrs[3] to be a proposer, and never suspend itself + case 5: + tmp.LastHeartbeat = 1 
// like addr[1] but !IncentiveEligible, no suspend + tmp.IncentiveEligible = false + case 6: + tmp.LastProposed = 1 // like addr[2] but !IncentiveEligible, no suspend + tmp.IncentiveEligible = false + default: + if i < 10 { // make 0,3,4,7,8,9 unsuspendable + switch i % 3 { + case 0: + tmp.LastProposed = 1200 + case 1: + tmp.LastHeartbeat = 1200 + case 2: + tmp.IncentiveEligible = false + } + } else { + // ensure non-zero balance for the new accounts, but a small + // balance so they will not be absent, just challenged. + tmp.MicroAlgos = basics.MicroAlgos{Raw: 1_000_000} + tmp.LastHeartbeat = 1 // non-zero allows suspensions + } } genesisInitState.Accounts[addr] = tmp } + // pretend this node is participating on behalf of addrs[3] and addrs[4] + proposers := []basics.Address{addrs[3], addrs[4]} + l := newTestLedger(t, bookkeeping.GenesisBalances{ Balances: genesisInitState.Accounts, FeeSink: testSinkAddr, @@ -1377,15 +1386,21 @@ func TestAbsenteeChecks(t *testing.T) { blkEval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil) require.NoError(t, err) - // Advance the evaluator, watching for lack of suspensions since we don't - // suspend until a txn with a suspendable account appears + // Advance the evaluator, watching for suspensions as they appear challenge := byte(0) - for i := uint64(0); i < uint64(1210); i++ { // A bit past one grace period (200) past challenge at 1000. - vb := l.endBlock(t, blkEval) + for i := uint64(0); i < uint64(1200); i++ { // Just before first suspension at 1171 + vb := l.endBlock(t, blkEval, proposers...) 
blkEval = l.nextBlock(t) - require.Zero(t, vb.Block().AbsentParticipationAccounts) - if vb.Block().Round() == 1000 { + + switch vb.Block().Round() { + case 202: // 2 out of 10 genesis accounts are now absent + require.Len(t, vb.Block().AbsentParticipationAccounts, 2, addrs) + require.Contains(t, vb.Block().AbsentParticipationAccounts, addrs[1]) + require.Contains(t, vb.Block().AbsentParticipationAccounts, addrs[2]) + case 1000: challenge = vb.Block().BlockHeader.Seed[0] + default: + require.Zero(t, vb.Block().AbsentParticipationAccounts, "round %v", vb.Block().Round()) } } challenged := basics.Address{(challenge >> 3) << 3, 0xaa} @@ -1421,26 +1436,32 @@ func TestAbsenteeChecks(t *testing.T) { // Make sure we validate our block as well blkEval.validate = true - unfinishedBlock, err := blkEval.GenerateBlock(nil) + unfinishedBlock, err := blkEval.GenerateBlock(proposers) require.NoError(t, err) // fake agreement's setting of header fields so later validates work validatedBlock := ledgercore.MakeValidatedBlock(unfinishedBlock.UnfinishedBlock().WithProposer(committee.Seed{}, testPoolAddr, true), unfinishedBlock.UnfinishedDeltas()) - require.Zero(t, validatedBlock.Block().ExpiredParticipationAccounts) - require.Contains(t, validatedBlock.Block().AbsentParticipationAccounts, addrs[0], addrs[0].String()) - require.NotContains(t, validatedBlock.Block().AbsentParticipationAccounts, addrs[1], addrs[1].String()) - require.NotContains(t, validatedBlock.Block().AbsentParticipationAccounts, addrs[2], addrs[2].String()) + require.Equal(t, basics.Round(1201), validatedBlock.Block().Round()) + require.Empty(t, validatedBlock.Block().ExpiredParticipationAccounts) // Of the 32 extra accounts, make sure only the one matching the challenge is suspended + require.Len(t, validatedBlock.Block().AbsentParticipationAccounts, 1) require.Contains(t, validatedBlock.Block().AbsentParticipationAccounts, challenged, challenged.String()) + foundChallenged := false for i := byte(0); i < 32; i++ { if 
i == challenge>>3 { + rnd := validatedBlock.Block().Round() + ad := basics.Address{i << 3, 0xaa} + t.Logf("extra account %d %s is challenged, balance rnd %d %d", i, ad, + rnd, l.roundBalances[rnd][ad].MicroAlgos.Raw) require.Equal(t, basics.Address{i << 3, 0xaa}, challenged) + foundChallenged = true continue } require.NotContains(t, validatedBlock.Block().AbsentParticipationAccounts, basics.Address{i << 3, 0xaa}) } + require.True(t, foundChallenged) _, err = Eval(context.Background(), l, validatedBlock.Block(), false, nil, nil, l.tracer) require.NoError(t, err) @@ -1458,7 +1479,7 @@ func TestAbsenteeChecks(t *testing.T) { // Introduce an address that shouldn't be suspended badBlock := goodBlock - badBlock.AbsentParticipationAccounts = append(badBlock.AbsentParticipationAccounts, addrs[1]) + badBlock.AbsentParticipationAccounts = append(badBlock.AbsentParticipationAccounts, addrs[9]) _, err = Eval(context.Background(), l, badBlock, true, verify.GetMockedCache(true), nil, l.tracer) require.ErrorContains(t, err, "not absent") @@ -1496,16 +1517,21 @@ func TestExpiredAccountGeneration(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) + genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) sendAddr := addrs[0] recvAddr := addrs[1] + propAddr := addrs[2] + otherPropAddr := addrs[3] // not expiring, but part of proposer addresses passed to GenerateBlock - // the last round that the recvAddr is valid for - recvAddrLastValidRound := basics.Round(2) + // pretend this node is participating on behalf of addrs[2] and addrs[3] + proposers := []basics.Address{propAddr, otherPropAddr} + + // the last round that the recvAddr and propAddr are valid for + testAddrLastValidRound := basics.Round(2) // the target round we want to advance the evaluator to - targetRound := basics.Round(4) + targetRound := basics.Round(2) // Set all to online except 
the sending address for _, addr := range addrs { @@ -1526,11 +1552,11 @@ func TestExpiredAccountGeneration(t *testing.T) { genesisInitState.Accounts[addr] = tmp } - // Choose recvAddr to have a last valid round less than genesis block round - { - tmp := genesisInitState.Accounts[recvAddr] - tmp.VoteLastValid = recvAddrLastValidRound - genesisInitState.Accounts[recvAddr] = tmp + // Choose recvAddr and propAddr to have a last valid round less than genesis block round + for _, addr := range []basics.Address{recvAddr, propAddr} { + tmp := genesisInitState.Accounts[addr] + tmp.VoteLastValid = testAddrLastValidRound + genesisInitState.Accounts[addr] = tmp } l := newTestLedger(t, bookkeeping.GenesisBalances{ @@ -1547,36 +1573,18 @@ func TestExpiredAccountGeneration(t *testing.T) { // Advance the evaluator a couple rounds... for i := uint64(0); i < uint64(targetRound); i++ { - l.endBlock(t, eval) + vb := l.endBlock(t, eval) eval = l.nextBlock(t) + require.Empty(t, vb.Block().ExpiredParticipationAccounts) } - require.Greater(t, uint64(eval.Round()), uint64(recvAddrLastValidRound)) - - genHash := l.GenesisHash() - txn := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: sendAddr, - Fee: minFee, - FirstValid: newBlock.Round(), - LastValid: eval.Round(), - GenesisHash: genHash, - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: recvAddr, - Amount: basics.MicroAlgos{Raw: 100}, - }, - } - - st := txn.Sign(keys[0]) - err = eval.Transaction(st, transactions.ApplyData{}) - require.NoError(t, err) + require.Greater(t, uint64(eval.Round()), uint64(testAddrLastValidRound)) // Make sure we validate our block as well eval.validate = true - unfinishedBlock, err := eval.GenerateBlock(nil) + // GenerateBlock will not mark its own proposer addresses as expired + unfinishedBlock, err := eval.GenerateBlock(proposers) require.NoError(t, err) listOfExpiredAccounts := 
unfinishedBlock.UnfinishedBlock().ParticipationUpdates.ExpiredParticipationAccounts @@ -1593,29 +1601,17 @@ func TestExpiredAccountGeneration(t *testing.T) { require.Zero(t, recvAcct.VoteID) require.Zero(t, recvAcct.SelectionID) require.Zero(t, recvAcct.StateProofID) -} - -func TestBitsMatch(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - for b := 0; b <= 6; b++ { - require.True(t, bitsMatch([]byte{0x1}, []byte{0x2}, b), "%d", b) - } - require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 7)) - require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 8)) - require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 9)) - - for b := 0; b <= 12; b++ { - require.True(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, b), "%d", b) - } - require.False(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, 13)) - // on a byte boundary - require.True(t, bitsMatch([]byte{0x1}, []byte{0x1}, 8)) - require.False(t, bitsMatch([]byte{0x1}, []byte{0x1}, 9)) - require.True(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 0x00}, 8)) - require.False(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 00}, 9)) + // propAddr not marked expired + propAcct, err := eval.state.lookup(propAddr) + require.NoError(t, err) + require.Equal(t, basics.Online, propAcct.Status) + require.NotZero(t, propAcct.VoteFirstValid) + require.NotZero(t, propAcct.VoteLastValid) + require.NotZero(t, propAcct.VoteKeyDilution) + require.NotZero(t, propAcct.VoteID) + require.NotZero(t, propAcct.SelectionID) + require.NotZero(t, propAcct.StateProofID) } func TestIsAbsent(t *testing.T) { @@ -1626,82 +1622,13 @@ func TestIsAbsent(t *testing.T) { var absent = func(total uint64, acct uint64, last uint64, current uint64) bool { return isAbsent(basics.Algos(total), basics.Algos(acct), basics.Round(last), basics.Round(current)) } - // 1% of stake, absent for 1000 rounds - a.False(absent(1000, 10, 5000, 6000)) - a.True(absent(1000, 10, 5000, 6001)) // longer - a.True(absent(1000, 11, 5000, 6001)) // more acct stake - 
a.False(absent(1000, 9, 5000, 6001)) // less acct stake - a.False(absent(1001, 10, 5000, 6001)) // more online stake + a.False(absent(1000, 10, 5000, 6000)) // 1% of stake, absent for 1000 rounds + a.False(absent(1000, 10, 5000, 7000)) // 1% of stake, absent for 2000 rounds + a.True(absent(1000, 10, 5000, 7001)) // 2001 + a.True(absent(1000, 11, 5000, 7000)) // more acct stake drives percent down, makes it absent + a.False(absent(1000, 9, 5000, 7001)) // less acct stake + a.False(absent(1001, 10, 5000, 7001)) // more online stake // not absent if never seen - a.False(absent(1000, 10, 0, 6000)) - a.False(absent(1000, 10, 0, 6001)) -} - -func TestFailsChallenge(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - a := assert.New(t) - - // a valid challenge, with 4 matching bits, and an old last seen - a.True(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xbf, 0x34}, 10)) - - // challenge isn't "on" - a.False(failsChallenge(challenge{round: 0, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xbf, 0x34}, 10)) - // node has appeared more recently - a.False(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xbf, 0x34}, 12)) - // bits don't match - a.False(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xcf, 0x34}, 10)) - // no enough bits match - a.False(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 5}, basics.Address{0xbf, 0x34}, 10)) -} - -type singleSource bookkeeping.BlockHeader - -func (ss singleSource) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { - return bookkeeping.BlockHeader(ss), nil -} - -func TestActiveChallenge(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - a := assert.New(t) - - nowHeader := bookkeeping.BlockHeader{ - UpgradeState: bookkeeping.UpgradeState{ - // Here the rules are on, so they certainly differ from rules in oldHeader's params - CurrentProtocol: 
protocol.ConsensusFuture, - }, - } - now := config.Consensus[nowHeader.CurrentProtocol] - - // simplest test. when interval=X and grace=G, X+G+1 is a challenge - inChallenge := now.Payouts.ChallengeInterval + now.Payouts.ChallengeGracePeriod + 1 - ch := activeChallenge(&now, inChallenge, singleSource(nowHeader)) - a.NotZero(ch.round) - - // all rounds before that have no challenge - for r := uint64(1); r < inChallenge; r++ { - ch := activeChallenge(&now, r, singleSource(nowHeader)) - a.Zero(ch.round, r) - } - - // ChallengeGracePeriod rounds allow challenges starting with inChallenge - for r := inChallenge; r < inChallenge+now.Payouts.ChallengeGracePeriod; r++ { - ch := activeChallenge(&now, r, singleSource(nowHeader)) - a.EqualValues(ch.round, now.Payouts.ChallengeInterval) - } - - // And the next round is again challenge-less - ch = activeChallenge(&now, inChallenge+now.Payouts.ChallengeGracePeriod, singleSource(nowHeader)) - a.Zero(ch.round) - - // ignore challenge if upgrade happened - oldHeader := bookkeeping.BlockHeader{ - UpgradeState: bookkeeping.UpgradeState{ - // We need a version from before payouts got turned on - CurrentProtocol: protocol.ConsensusV39, - }, - } - ch = activeChallenge(&now, inChallenge, singleSource(oldHeader)) - a.Zero(ch.round) + a.False(absent(1000, 10, 0, 2001)) + a.True(absent(1000, 10, 1, 2002)) } diff --git a/ledger/eval/prefetcher/prefetcher.go b/ledger/eval/prefetcher/prefetcher.go index 487d370524..765b6ea9c2 100644 --- a/ledger/eval/prefetcher/prefetcher.go +++ b/ledger/eval/prefetcher/prefetcher.go @@ -343,7 +343,9 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) { // since they might be non-used arbitrary values case protocol.StateProofTx: - case protocol.KeyRegistrationTx: + case protocol.KeyRegistrationTx: // No extra accounts besides the sender + case protocol.HeartbeatTx: + loadAccountsAddAccountTask(&stxn.Txn.HbAddress, task, accountTasks, queue) } // If you add new addresses here, also add them in 
getTxnAddresses(). diff --git a/ledger/eval/prefetcher/prefetcher_alignment_test.go b/ledger/eval/prefetcher/prefetcher_alignment_test.go index 734d84a661..0c232aebf4 100644 --- a/ledger/eval/prefetcher/prefetcher_alignment_test.go +++ b/ledger/eval/prefetcher/prefetcher_alignment_test.go @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/eval" @@ -119,6 +120,7 @@ func (l *prefetcherAlignmentTestLedger) LookupWithoutRewards(_ basics.Round, add } return ledgercore.AccountData{}, 0, nil } + func (l *prefetcherAlignmentTestLedger) LookupAgreement(_ basics.Round, addr basics.Address) (basics.OnlineAccountData, error) { // prefetch alignment tests do not check for prefetching of online account data // because it's quite different and can only occur in AVM opcodes, which @@ -126,9 +128,15 @@ func (l *prefetcherAlignmentTestLedger) LookupAgreement(_ basics.Round, addr bas // will be accessed in AVM.) 
return basics.OnlineAccountData{}, errors.New("not implemented") } + func (l *prefetcherAlignmentTestLedger) OnlineCirculation(rnd, voteRnd basics.Round) (basics.MicroAlgos, error) { return basics.MicroAlgos{}, errors.New("not implemented") } + +func (l *prefetcherAlignmentTestLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, errors.New("not implemented") +} + func (l *prefetcherAlignmentTestLedger) LookupApplication(rnd basics.Round, addr basics.Address, aidx basics.AppIndex) (ledgercore.AppResource, error) { l.mu.Lock() if l.requestedApps == nil { @@ -144,6 +152,7 @@ func (l *prefetcherAlignmentTestLedger) LookupApplication(rnd basics.Round, addr return l.apps[addr][aidx], nil } + func (l *prefetcherAlignmentTestLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx basics.AssetIndex) (ledgercore.AssetResource, error) { l.mu.Lock() if l.requestedAssets == nil { @@ -159,9 +168,11 @@ func (l *prefetcherAlignmentTestLedger) LookupAsset(rnd basics.Round, addr basic return l.assets[addr][aidx], nil } + func (l *prefetcherAlignmentTestLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) { panic("not implemented") } + func (l *prefetcherAlignmentTestLedger) GetCreatorForRound(_ basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) { l.mu.Lock() if l.requestedCreators == nil { @@ -175,6 +186,7 @@ func (l *prefetcherAlignmentTestLedger) GetCreatorForRound(_ basics.Round, cidx } return basics.Address{}, false, nil } + func (l *prefetcherAlignmentTestLedger) GenesisHash() crypto.Digest { return crypto.Digest{} } @@ -1411,3 +1423,57 @@ func TestEvaluatorPrefetcherAlignmentStateProof(t *testing.T) { prefetched.pretend(rewardsPool()) require.Equal(t, requested, prefetched) } + +func TestEvaluatorPrefetcherAlignmentHeartbeat(t *testing.T) { + partitiontest.PartitionTest(t) + + // We need valid part keys to evaluate the 
Heartbeat. + const kd = 10 + firstID := basics.OneTimeIDForRound(0, kd) + otss := crypto.GenerateOneTimeSignatureSecrets(firstID.Batch, 5) + + l := &prefetcherAlignmentTestLedger{ + balances: map[basics.Address]ledgercore.AccountData{ + rewardsPool(): { + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 1234567890}, + }, + }, + makeAddress(1): { + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 1000001}, + }, + }, + makeAddress(2): { + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 100_000}, + }, + VotingData: basics.VotingData{ + VoteID: otss.OneTimeSignatureVerifier, + VoteKeyDilution: 123, + }, + }, + }, + } + + txn := transactions.Transaction{ + Type: protocol.HeartbeatTx, + Header: transactions.Header{ + Sender: makeAddress(1), + GenesisHash: genesisHash(), + Fee: basics.Algos(1), // Heartbeat txn is unusual in that it checks fees a bit. + }, + HeartbeatTxnFields: &transactions.HeartbeatTxnFields{ + HbAddress: makeAddress(2), + HbProof: otss.Sign(firstID, committee.Seed(genesisHash())).ToHeartbeatProof(), + HbSeed: committee.Seed(genesisHash()), + HbVoteID: otss.OneTimeSignatureVerifier, + HbKeyDilution: 123, + }, + } + + requested, prefetched := run(t, l, txn) + + prefetched.pretend(rewardsPool()) + require.Equal(t, requested, prefetched) +} diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go index 972821c26c..6ec44b99f3 100644 --- a/ledger/eval_simple_test.go +++ b/ledger/eval_simple_test.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "fmt" "reflect" + "slices" "strings" "testing" @@ -280,15 +281,15 @@ func TestPayoutFees(t *testing.T) { // new fields are in the header require.EqualValues(t, 2000, vb.Block().FeesCollected.Raw) require.EqualValues(t, bonus1, vb.Block().Bonus.Raw) - require.EqualValues(t, bonus1+1_500, vb.Block().ProposerPayout().Raw) + require.EqualValues(t, bonus1+1_000, vb.Block().ProposerPayout().Raw) // This last one is 
really only testing the "fake" agreement that // happens in dl.endBlock(). require.EqualValues(t, proposer, vb.Block().Proposer()) // At the end of the block, part of the fees + bonus have been moved to // the proposer. - require.EqualValues(t, bonus1+1500, postprop-preprop) // based on 75% in config/consensus.go - require.EqualValues(t, bonus1-500, presink-postsink) + require.EqualValues(t, bonus1+1_000, postprop-preprop) // based on 75% in config/consensus.go + require.EqualValues(t, bonus1-1_000, presink-postsink) require.Equal(t, prp.LastProposed, dl.generator.Latest()) } else { require.False(t, dl.generator.GenesisProto().Payouts.Enabled) @@ -412,8 +413,34 @@ func TestAbsentTracking(t *testing.T) { int 0; voter_params_get VoterIncentiveEligible; itob; log; itob; log; int 1` + addrIndexes := make(map[basics.Address]int) + for i, addr := range addrs { + addrIndexes[addr] = i + } + prettyAddrs := func(inAddrs []basics.Address) []string { + ret := make([]string, len(inAddrs)) + for i, addr := range inAddrs { + if idx, ok := addrIndexes[addr]; ok { + ret[i] = fmt.Sprintf("addrs[%d]", idx) + } else { + ret[i] = addr.String() + } + } + return ret + } + + printAbsent := func(vb *ledgercore.ValidatedBlock) { + t.Helper() + absent := vb.Block().AbsentParticipationAccounts + expired := vb.Block().ExpiredParticipationAccounts + if len(expired) > 0 || len(absent) > 0 { + t.Logf("rnd %d: expired %d, absent %d (exp %v abs %v)", vb.Block().Round(), + len(expired), len(absent), prettyAddrs(expired), prettyAddrs(absent)) + } + } + checkingBegins := 40 - ledgertesting.TestConsensusRange(t, checkingBegins, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + runTest := func(t *testing.T, cv protocol.ConsensusVersion, cfg config.Local) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() @@ -456,13 +483,17 @@ func TestAbsentTracking(t *testing.T) { // have addrs[1] go online explicitly, which makes it eligible for suspension. 
// use a large fee, so we can see IncentiveEligible change - dl.txn(&txntest.Txn{ // #2 + vb := dl.fullBlock(&txntest.Txn{ // #2 Type: "keyreg", Fee: 10_000_000, Sender: addrs[1], VotePK: [32]byte{1}, SelectionPK: [32]byte{1}, }) + addr1Keyreg := vb.Block().Round() + require.EqualValues(t, 2, addr1Keyreg) // sanity check + const lookback = 320 // keyreg puts LastHeartbeat 320 rounds into the future + require.EqualValues(t, addr1Keyreg+lookback, lookup(t, dl.generator, addrs[1]).LastHeartbeat) // as configured above, only the first two accounts should be online require.True(t, lookup(t, dl.generator, addrs[0]).Status == basics.Online) @@ -480,7 +511,8 @@ func TestAbsentTracking(t *testing.T) { require.True(t, lookup(t, dl.generator, addrs[1]).IncentiveEligible) require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) - vb := dl.fullBlock() // #6 + vb = dl.fullBlock() // #6 + printAbsent(vb) totals, err := dl.generator.Totals(vb.Block().Round()) require.NoError(t, err) require.NotZero(t, totals.Online.Money.Raw) @@ -494,7 +526,7 @@ func TestAbsentTracking(t *testing.T) { Receiver: addrs[2], Amount: 100_000, }) - dl.endBlock(proposer) // #7 + printAbsent(dl.endBlock(proposer)) // #7 prp := lookup(t, dl.validator, proposer) require.Equal(t, prp.LastProposed, dl.validator.Latest()) @@ -508,7 +540,7 @@ func TestAbsentTracking(t *testing.T) { require.Equal(t, totals.Online.Money.Raw-100_000-1000, newtotals.Online.Money.Raw) totals = newtotals - dl.fullBlock() + printAbsent(dl.fullBlock()) // addrs[2] was already offline dl.txns(&txntest.Txn{Type: "keyreg", Sender: addrs[2]}) // OFFLINE keyreg #9 @@ -524,12 +556,13 @@ func TestAbsentTracking(t *testing.T) { require.Zero(t, regger.LastHeartbeat) // ONLINE keyreg without extra fee - dl.txns(&txntest.Txn{ + vb = dl.fullBlock(&txntest.Txn{ Type: "keyreg", Sender: addrs[2], VotePK: [32]byte{1}, SelectionPK: [32]byte{1}, }) // #10 + printAbsent(vb) // online totals have grown, addr[2] was added newtotals, err = 
dl.generator.Totals(dl.generator.Latest()) require.NoError(t, err) @@ -539,7 +572,7 @@ func TestAbsentTracking(t *testing.T) { require.Zero(t, regger.LastProposed) require.True(t, regger.Status == basics.Online) - // But nothing has changed, since we're not past 320 + // But nothing has changed for voter_params_get, since we're not past 320 checkState(addrs[0], true, false, 833_333_333_333_333) // #11 checkState(addrs[1], true, false, 833_333_333_333_333) // #12 checkState(addrs[2], false, false, 0) // #13 @@ -555,14 +588,16 @@ func TestAbsentTracking(t *testing.T) { VotePK: [32]byte{1}, SelectionPK: [32]byte{1}, }) // #14 - twoEligible := vb.Block().Round() - require.EqualValues(t, 14, twoEligible) // sanity check + printAbsent(vb) + addr2Eligible := vb.Block().Round() + require.EqualValues(t, 14, addr2Eligible) // sanity check regger = lookup(t, dl.validator, addrs[2]) require.True(t, regger.IncentiveEligible) + require.EqualValues(t, 14+320, regger.LastHeartbeat) for i := 0; i < 5; i++ { - dl.fullBlock() // #15-19 + printAbsent(dl.fullBlock()) // #15-19 require.True(t, lookup(t, dl.generator, addrs[0]).Status == basics.Online) require.True(t, lookup(t, dl.generator, addrs[1]).Status == basics.Online) require.True(t, lookup(t, dl.generator, addrs[2]).Status == basics.Online) @@ -573,107 +608,96 @@ func TestAbsentTracking(t *testing.T) { require.True(t, lookup(t, dl.generator, addrs[1]).Status == basics.Online) require.True(t, lookup(t, dl.generator, addrs[2]).Status == basics.Online) - for i := 0; i < 30; i++ { - dl.fullBlock() // #20-49 - } + var addr1off basics.Round + var addr2off basics.Round + // We're at 20, skip ahead by lookback + 60 to see the knockoffs + const absentFactor = 20 + skip := basics.Round(3) * absentFactor + for { + vb := dl.fullBlock() + printAbsent(vb) + + rnd := vb.Block().Round() + switch { + case slices.Contains(vb.Block().AbsentParticipationAccounts, addrs[1]): + addr1off = rnd + case 
slices.Contains(vb.Block().AbsentParticipationAccounts, addrs[2]): + addr2off = rnd + default: + require.Empty(t, vb.Block().AbsentParticipationAccounts) + } - // addrs 0-2 all have about 1/3 of stake, so seemingly (see next block - // of checks) become eligible for suspension after 30 rounds. We're at - // about 35. But, since blocks are empty, nobody's suspendible account - // is noticed. - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[1]).Status) - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[2]).Status) - require.True(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) + if rnd < 100 { + // `vote_params_get` sees no changes in the early going, because it looks back 320 + checkState(addrs[1], true, false, 833_333_333_333_333) // this also advances a round! + // to avoid complications from advancing an extra round, we only do this check for 100 rounds + } - // when 2 pays 0, they both get noticed but addr[0] is not considered - // absent because it is a genesis account - vb = dl.fullBlock(&txntest.Txn{ - Type: "pay", - Sender: addrs[2], - Receiver: addrs[0], - Amount: 0, - }) // #50 - require.Equal(t, vb.Block().AbsentParticipationAccounts, []basics.Address{addrs[2]}) - - twoPaysZero := vb.Block().Round() - require.EqualValues(t, 50, twoPaysZero) - // addr[0] has never proposed or heartbeat so it is not considered absent - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) - // addr[1] still hasn't been "noticed" - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[1]).Status) - require.Equal(t, basics.Offline, lookup(t, dl.generator, addrs[2]).Status) - require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) + // addr[1] spent 10A on a fee in rnd 1, so online stake and eligibility adjusted in 323 + if rnd == addr1Keyreg-2+lookback { + checkState(addrs[1], true, false, 833_333_333_333_333) // check occurs 
during reg+lookback-1 + checkState(addrs[1], true, true, 833_333_323_333_333) // check occurs during reg+lookback + } - // separate the payments by a few blocks so it will be easier to test - // when the changes go into effect - for i := 0; i < 4; i++ { - dl.fullBlock() // #51-54 + // watch the change across the round that addr2 becomes eligible (by spending 2A in keyreg) + if rnd == addr2Eligible-2+lookback { + checkState(addrs[2], true, false, 833_333_333_429_333) + checkState(addrs[2], true, true, 833_333_331_429_333) // after keyreg w/ 2A is effective + } + + if rnd > 20+lookback+skip { + break + } } - // now, when 2 pays 1, 1 gets suspended (unlike 0, we had 1 keyreg early on, so LastHeartbeat>0) - vb = dl.fullBlock(&txntest.Txn{ - Type: "pay", - Sender: addrs[2], - Receiver: addrs[1], - Amount: 0, - }) // #55 - twoPaysOne := vb.Block().Round() - require.EqualValues(t, 55, twoPaysOne) - require.Equal(t, vb.Block().AbsentParticipationAccounts, []basics.Address{addrs[1]}) - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) + require.Equal(t, addr2Eligible+lookback+skip, addr2off) + require.Equal(t, addr1Keyreg+lookback+skip+1, addr1off) // addr1 paid out a little bit, extending its lag by 1 + + require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) // genesis account require.Equal(t, basics.Offline, lookup(t, dl.generator, addrs[1]).Status) - require.False(t, lookup(t, dl.generator, addrs[1]).IncentiveEligible) require.Equal(t, basics.Offline, lookup(t, dl.generator, addrs[2]).Status) require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) // now, addrs[2] proposes, so it gets back online, but stays ineligible dl.proposer = addrs[2] - dl.fullBlock() + printAbsent(dl.fullBlock()) require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[2]).Status) require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) - // "synchronize" so the loop below ends on 320 - for dl.fullBlock().Block().Round()%4 
!= 3 { - } - // keep in mind that each call to checkState also advances the round, so - // each loop advances by 4. - for rnd := dl.fullBlock().Block().Round(); rnd < 320; rnd = dl.fullBlock().Block().Round() { - // STILL nothing has changed, as we're under 320 - checkState(addrs[0], true, false, 833_333_333_333_333) - checkState(addrs[1], true, false, 833_333_333_333_333) - checkState(addrs[2], false, false, 0) - } - // rnd was 320 in the last fullBlock + // The knockoffs have happened, now skip through another lookback rounds + // to observe the changes with checkstate + addr1check, addr2check := false, false + for { + vb := dl.fullBlock() + printAbsent(vb) + rnd := vb.Block().Round() + + // observe addr1 stake going to zero 320 rounds after knockoff + if rnd == addr1off+lookback-2 { + checkState(addrs[1], true, true, 833_333_323_188_333) + checkState(addrs[1], false, false, 0) + addr1check = true + } - // We will soon see effects visible to `vote_params_get` - // In first block, addr[3] created an app. 
No effect on 0-2 - checkState(addrs[1], true, false, 833_333_333_333_333) // 321 - // in second block, the checkstate app was created - checkState(addrs[1], true, false, 833_333_333_333_333) // 322 - // addr[1] spent 10A on a fee in rnd 3, so online stake and eligibility adjusted in 323 - checkState(addrs[1], true, true, 833_333_323_333_333) // 323 + // observe addr2 stake going to zero 320 rounds after knockoff + if rnd == addr2off+lookback-2 { + checkState(addrs[2], true, true, 833_333_331_427_333) // still "online" + checkState(addrs[2], false, false, 0) + addr2check = true + } - for rnd := dl.fullBlock().Block().Round(); rnd < 320+twoEligible-1; rnd = dl.fullBlock().Block().Round() { + if rnd > 20+2*lookback+skip { + break + } } - checkState(addrs[2], true, false, 833_333_333_429_333) - checkState(addrs[2], true, true, 833_333_331_429_333) // after keyreg w/ 2A is effective + // sanity check that we didn't skip one because of checkstate advancing a round + require.True(t, addr1check) + require.True(t, addr2check) - for rnd := dl.fullBlock().Block().Round(); rnd < 320+twoPaysZero-1; rnd = dl.fullBlock().Block().Round() { - } - // we're at the round before two's suspension kicks in - checkState(addrs[2], true, true, 833_333_331_429_333) // still "online" - checkState(addrs[0], true, false, 833_333_333_331_333) // paid fee in #5 and #11, we're at ~371 - // 2 was noticed & suspended after paying 0, eligible and amount go to 0 - checkState(addrs[2], false, false, 0) checkState(addrs[0], true, false, 833_333_333_331_333) // addr 0 didn't get suspended (genesis) + } - // roughly the same check, except for addr 1, which was genesis, but - // after doing a keyreg, became susceptible to suspension - for rnd := dl.fullBlock().Block().Round(); rnd < 320+twoPaysOne-1; rnd = dl.fullBlock().Block().Round() { - } - checkState(addrs[1], true, true, 833_333_323_230_333) // still online, balance irrelevant - // 1 was noticed & suspended after being paid by 2, so eligible and 
amount go to 0 - checkState(addrs[1], false, false, 0) + ledgertesting.TestConsensusRange(t, checkingBegins, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + runTest(t, cv, cfg) }) } @@ -736,71 +760,138 @@ func TestAbsenteeChallenges(t *testing.T) { dl.beginBlock() dl.endBlock(seedAndProp) // This becomes the seed, which is used for the challenge - for vb := dl.fullBlock(); vb.Block().Round() < 1200; vb = dl.fullBlock() { - // advance through first grace period + for vb := dl.fullBlock(); vb.Block().Round() < 1199; vb = dl.fullBlock() { + // advance through first grace period: no one marked absent + require.Empty(t, vb.Block().AbsentParticipationAccounts) } + + // regguy keyregs before he's caught, which is a heartbeat, he stays on as well + vb := dl.fullBlock(&txntest.Txn{ + Type: "keyreg", // Does not pay extra fee, since he's still eligible + Sender: regguy, + VotePK: [32]byte{1}, + SelectionPK: [32]byte{1}, + }) + require.Equal(t, basics.Round(1200), vb.Block().Round()) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + acct := lookup(t, dl.generator, regguy) + require.Equal(t, basics.Online, acct.Status) + require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + dl.beginBlock() - dl.endBlock(propguy) // propose, which is a fine (though less likely) way to respond + vb = dl.endBlock(propguy) // propose, which is a fine (though less likely) way to respond - // All still online, unchanged eligibility + // propguy could be suspended in 1201 here, but won't, because they are proposer + require.Equal(t, basics.Round(1201), vb.Block().Round()) + + require.NotContains(t, vb.Block().AbsentParticipationAccounts, []basics.Address{propguy}) + require.NotContains(t, vb.Block().AbsentParticipationAccounts, regguy) + if ver >= checkingBegins { + // badguy and regguy will both be suspended in 1201 + require.Contains(t, vb.Block().AbsentParticipationAccounts, badguy) + } + + // propguy & regguy still online, badguy suspended 
(depending on consensus version) for _, guy := range []basics.Address{propguy, regguy, badguy} { acct := lookup(t, dl.generator, guy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible, guy) + switch guy { + case propguy, regguy: + require.Equal(t, basics.Online, acct.Status) + require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + require.False(t, acct.VoteID.IsEmpty()) + case badguy: + // if checking, badguy fails + require.Equal(t, ver >= checkingBegins, basics.Offline == acct.Status) + require.False(t, acct.IncentiveEligible) + } + // whether suspended or online, all still have VoteID + require.False(t, acct.VoteID.IsEmpty()) } - for vb := dl.fullBlock(); vb.Block().Round() < 1220; vb = dl.fullBlock() { - // advance into knockoff period. but no transactions means - // unresponsive accounts go unnoticed. + if ver < checkingBegins { + for vb := dl.fullBlock(); vb.Block().Round() < 1220; vb = dl.fullBlock() { + // advance into knockoff period. 
+ } + // All still online, same eligibility + for _, guy := range []basics.Address{propguy, regguy, badguy} { + acct := lookup(t, dl.generator, guy) + require.Equal(t, basics.Online, acct.Status) + require.False(t, acct.IncentiveEligible) + } } - // All still online, same eligibility - for _, guy := range []basics.Address{propguy, regguy, badguy} { - acct := lookup(t, dl.generator, guy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible, guy) + }) +} + +func TestDoubleLedgerGetKnockoffCandidates(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + const onlineCount = 5 + genBalances, addrs, _ := ledgertesting.NewTestGenesis(func(cfg *ledgertesting.GenesisCfg) { + cfg.OnlineCount = onlineCount + ledgertesting.TurnOffRewards(cfg) + }) + payoutsBegin := 40 + + // txn to send in round 1, to change the balances to be different from genesis + payTxn := &txntest.Txn{Type: "pay", Sender: addrs[1], Receiver: addrs[2], Amount: 1_000_000} + + checkAccts := func(l *Ledger, rnd basics.Round, cv protocol.ConsensusVersion) { + accts, err := l.GetKnockOfflineCandidates(rnd, config.Consensus[cv]) + require.NoError(t, err) + require.NotEmpty(t, accts) + + // get online genesis accounts + onlineCnt := 0 + genesisOnlineAccts := make(map[basics.Address]basics.OnlineAccountData) + afterPayTxnOnlineAccts := make(map[basics.Address]basics.OnlineAccountData) + for addr, ad := range genBalances.Balances { + if ad.Status == basics.Online { + onlineCnt++ + genesisOnlineAccts[addr] = ad.OnlineAccountData() + afterPayTxnOnlineAccts[addr] = ad.OnlineAccountData() + } } - // badguy never responded, he gets knocked off when paid - vb := dl.fullBlock(&txntest.Txn{ - Type: "pay", - Sender: addrs[0], - Receiver: badguy, - }) - if ver >= checkingBegins { - require.Equal(t, vb.Block().AbsentParticipationAccounts, []basics.Address{badguy}) + // calculate expected balances after applying payTxn + payTxnReceiver := 
afterPayTxnOnlineAccts[payTxn.Receiver] + payTxnReceiver.MicroAlgosWithRewards.Raw += payTxn.Amount + payTxnSender := afterPayTxnOnlineAccts[payTxn.Sender] + payTxnSender.MicroAlgosWithRewards.Raw -= (payTxn.Amount + config.Consensus[cv].MinTxnFee) + afterPayTxnOnlineAccts[payTxn.Receiver] = payTxnReceiver + afterPayTxnOnlineAccts[payTxn.Sender] = payTxnSender + + require.Equal(t, onlineCount, onlineCnt) + require.Len(t, accts, onlineCnt) + if rnd == 0 { + // balances should be same as genesis + require.Equal(t, genesisOnlineAccts, accts) + } else { + // balances > rnd 1 should reflect payTxn change + require.Equal(t, afterPayTxnOnlineAccts, accts, "rnd %d", rnd) } - acct := lookup(t, dl.generator, badguy) - require.Equal(t, ver >= checkingBegins, basics.Offline == acct.Status) // if checking, badguy fails - require.False(t, acct.IncentiveEligible) - // propguy proposed during the grace period, he stays on even when paid - dl.txns(&txntest.Txn{ - Type: "pay", - Sender: addrs[0], - Receiver: propguy, - }) - acct = lookup(t, dl.generator, propguy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + } - // regguy keyregs before he's caught, which is a heartbeat, he stays on as well - dl.txns(&txntest.Txn{ - Type: "keyreg", // Does not pay extra fee, since he's still eligible - Sender: regguy, - VotePK: [32]byte{1}, - SelectionPK: [32]byte{1}, - }) - acct = lookup(t, dl.generator, regguy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) - dl.txns(&txntest.Txn{ - Type: "pay", - Sender: addrs[0], - Receiver: regguy, - }) - acct = lookup(t, dl.generator, regguy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + ledgertesting.TestConsensusRange(t, payoutsBegin-1, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, 
cv, cfg) + defer dl.Close() + + checkAccts(dl.generator, basics.Round(0), cv) + checkAccts(dl.validator, basics.Round(0), cv) + + // change two accounts' balances to be different from genesis + payTxn.GenesisHash = crypto.Digest{} // clear if set from previous run + dl.fullBlock(payTxn) + + // run up to round 240 + proto := config.Consensus[cv] + upToRound := basics.Round(proto.StateProofInterval - proto.StateProofVotersLookback) + require.Equal(t, basics.Round(240), upToRound) + for rnd := dl.fullBlock().Block().Round(); rnd < upToRound; rnd = dl.fullBlock().Block().Round() { + checkAccts(dl.generator, rnd, cv) + checkAccts(dl.validator, rnd, cv) + } }) } diff --git a/ledger/ledger.go b/ledger/ledger.go index 2f10724fee..bb0dad21de 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -638,10 +638,55 @@ func (l *Ledger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics. defer l.trackerMu.RUnlock() // Intentionally apply (pending) rewards up to rnd. - data, err := l.acctsOnline.LookupOnlineAccountData(rnd, addr) + data, err := l.acctsOnline.lookupOnlineAccountData(rnd, addr) return data, err } +// GetKnockOfflineCandidates retrieves a list of online accounts who will be +// checked to a recent proposal or heartbeat. Large accounts are the ones worth checking. 
+func (l *Ledger) GetKnockOfflineCandidates(rnd basics.Round, proto config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + l.trackerMu.RLock() + defer l.trackerMu.RUnlock() + + // get state proof worker's most recent list for top N addresses + if proto.StateProofInterval == 0 { + return nil, nil + } + + var addrs []basics.Address + + // special handling for rounds 0-240: return participating genesis accounts + if rnd < basics.Round(proto.StateProofInterval).SubSaturate(basics.Round(proto.StateProofVotersLookback)) { + for addr, data := range l.genesisAccounts { + if data.Status == basics.Online { + addrs = append(addrs, addr) + } + } + } else { + // get latest state proof voters information, up to rnd, without calling cond.Wait() + _, voters := l.acctsOnline.voters.LatestCompletedVotersUpTo(rnd) + if voters == nil { // no cached voters found < rnd + return nil, nil + } + addrs = make([]basics.Address, 0, len(voters.AddrToPos)) + for addr := range voters.AddrToPos { + addrs = append(addrs, addr) + } + } + + // fetch fresh data up to this round from online account cache. These accounts should all + // be in cache, as long as proto.StateProofTopVoters < onlineAccountsCacheMaxSize. + ret := make(map[basics.Address]basics.OnlineAccountData) + for _, addr := range addrs { + data, err := l.acctsOnline.lookupOnlineAccountData(rnd, addr) + if err != nil || data.MicroAlgosWithRewards.IsZero() { + continue // skip missing / not online accounts + } + ret[addr] = data + } + return ret, nil +} + // LookupWithoutRewards is like Lookup but does not apply pending rewards up // to the requested round rnd. 
func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) { @@ -717,7 +762,7 @@ func (l *Ledger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) { func (l *Ledger) BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) { // Expected availability range in txTail.blockHeader is [Latest - MaxTxnLife, Latest] - // allowing (MaxTxnLife + 1) = 1001 rounds back loopback. + // allowing (MaxTxnLife + 1) = 1001 rounds lookback. // The depth besides the MaxTxnLife is controlled by DeeperBlockHeaderHistory parameter // and currently set to 1. // Explanation: diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go index b34877aed5..b2ea97b1ce 100644 --- a/ledger/ledger_perf_test.go +++ b/ledger/ledger_perf_test.go @@ -33,6 +33,7 @@ import ( "github.com/algorand/go-algorand/data/basics" basics_testing "github.com/algorand/go-algorand/data/basics/testing" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/data/transactions/verify" @@ -296,21 +297,23 @@ func benchmarkFullBlocks(params testParams, b *testing.B) { lvb, err := eval.GenerateBlock(nil) require.NoError(b, err) + fb := lvb.FinishBlock(committee.Seed{0x01}, basics.Address{0x01}, false) + // If this is the app creation block, add to both ledgers if i == 1 { - err = l0.AddBlock(lvb.UnfinishedBlock(), cert) + err = l0.AddBlock(fb, cert) require.NoError(b, err) - err = l1.AddBlock(lvb.UnfinishedBlock(), cert) + err = l1.AddBlock(fb, cert) require.NoError(b, err) continue } // For all other blocks, add just to the first ledger, and stash // away to be replayed in the second ledger while running timer - err = l0.AddBlock(lvb.UnfinishedBlock(), cert) + err = l0.AddBlock(fb, cert) require.NoError(b, err) - blocks = append(blocks, 
lvb.UnfinishedBlock()) + blocks = append(blocks, fb) } b.Logf("built %d blocks, each with %d txns", numBlocks, txPerBlock) diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 9018d5d73b..8941452d4c 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -1979,6 +1979,35 @@ func TestLookupAgreement(t *testing.T) { require.Equal(t, oad, ad.OnlineAccountData()) } +func TestGetKnockOfflineCandidates(t *testing.T) { + partitiontest.PartitionTest(t) + + ver := protocol.ConsensusFuture + genesisInitState, _ := ledgertesting.GenerateInitState(t, ver, 1_000_000) + const inMem = true + log := logging.TestingLog(t) + cfg := config.GetDefaultLocal() + cfg.Archival = true + ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) + require.NoError(t, err, "could not open ledger") + defer ledger.Close() + + accts, err := ledger.GetKnockOfflineCandidates(0, config.Consensus[ver]) + require.NoError(t, err) + require.NotEmpty(t, accts) + // get online genesis accounts + onlineCnt := 0 + onlineAddrs := make(map[basics.Address]basics.OnlineAccountData) + for addr, ad := range genesisInitState.Accounts { + if ad.Status == basics.Online { + onlineCnt++ + onlineAddrs[addr] = ad.OnlineAccountData() + } + } + require.Len(t, accts, onlineCnt) + require.Equal(t, onlineAddrs, accts) +} + func BenchmarkLedgerStartup(b *testing.B) { log := logging.TestingLog(b) tmpDir := b.TempDir() diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go index 081fbffde6..ea7b150a6e 100644 --- a/ledger/ledgercore/accountdata.go +++ b/ledger/ledgercore/accountdata.go @@ -135,10 +135,15 @@ func (u *AccountData) Suspend() { } // Suspended returns true if the account is suspended (offline with keys) -func (u *AccountData) Suspended() bool { +func (u AccountData) Suspended() bool { return u.Status == basics.Offline && !u.VoteID.IsEmpty() } +// LastSeen returns the last round that the account was seen online +func (u AccountData) LastSeen() basics.Round { 
+ return max(u.LastProposed, u.LastHeartbeat) +} + // MinBalance computes the minimum balance requirements for an account based on // some consensus parameters. MinBalance should correspond roughly to how much // storage the account is allowed to store on disk. @@ -187,6 +192,8 @@ func (u AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLeve MicroAlgosWithRewards: microAlgos, VotingData: u.VotingData, IncentiveEligible: u.IncentiveEligible, + LastProposed: u.LastProposed, + LastHeartbeat: u.LastHeartbeat, } } diff --git a/ledger/ledgercore/onlineacct.go b/ledger/ledgercore/onlineacct.go index 8a6b771aad..f5b29c789e 100644 --- a/ledger/ledgercore/onlineacct.go +++ b/ledger/ledgercore/onlineacct.go @@ -22,7 +22,7 @@ import ( ) // An OnlineAccount corresponds to an account whose AccountData.Status -// is Online. This is used for a Merkle tree commitment of online +// is Online. This is used for a Merkle tree commitment of online // accounts, which is subsequently used to validate participants for // a state proof. type OnlineAccount struct { diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go index 7ab103dcd1..957ec08a52 100644 --- a/ledger/ledgercore/votersForRound.go +++ b/ledger/ledgercore/votersForRound.go @@ -183,3 +183,11 @@ func (tr *VotersForRound) Wait() error { } return nil } + +// Completed returns true if the tree has finished being constructed. +// If there was an error constructing the tree, the error is also returned. 
+func (tr *VotersForRound) Completed() (bool, error) { + tr.mu.Lock() + defer tr.mu.Unlock() + return tr.Tree != nil || tr.loadTreeError != nil, tr.loadTreeError +} diff --git a/ledger/onlineaccountscache_test.go b/ledger/onlineaccountscache_test.go index b64d18aabf..fa66d67a9f 100644 --- a/ledger/onlineaccountscache_test.go +++ b/ledger/onlineaccountscache_test.go @@ -189,6 +189,15 @@ func TestOnlineAccountsCacheMaxEntries(t *testing.T) { require.Equal(t, 2, oac.accounts[addr].Len()) } +// TestOnlineAccountsCacheSizeBiggerThanStateProofTopVoters asserts that the online accounts cache +// is bigger than the number of top online accounts tracked by the state proof system. +func TestOnlineAccountsCacheSizeBiggerThanStateProofTopVoters(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + require.Greater(t, uint64(onlineAccountsCacheMaxSize), config.Consensus[protocol.ConsensusFuture].StateProofTopVoters) +} + var benchmarkOnlineAccountsCacheReadResult cachedOnlineAccount func benchmarkOnlineAccountsCacheRead(b *testing.B, historyLength int) { diff --git a/ledger/simple_test.go b/ledger/simple_test.go index 8b4632d1de..3bc9f335c8 100644 --- a/ledger/simple_test.go +++ b/ledger/simple_test.go @@ -140,7 +140,7 @@ func txn(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txn *txntest.T } return } - require.True(t, len(problem) == 0 || problem[0] == "") + require.True(t, len(problem) == 0 || problem[0] == "", "Transaction did not fail. Expected: %v", problem) } func txgroup(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txns ...*txntest.Txn) error { @@ -157,10 +157,11 @@ func txgroup(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txns ...*t // inspection. Proposer is optional - if unset, blocks will be finished with // ZeroAddress proposer. 
func endBlock(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, proposer ...basics.Address) *ledgercore.ValidatedBlock { - ub, err := eval.GenerateBlock(nil) + // pass proposers to GenerateBlock, if provided + ub, err := eval.GenerateBlock(proposer) require.NoError(t, err) - // We fake some thigns that agreement would do, like setting proposer + // We fake some things that agreement would do, like setting proposer validatedBlock := ledgercore.MakeValidatedBlock(ub.UnfinishedBlock(), ub.UnfinishedDeltas()) gvb := &validatedBlock diff --git a/ledger/store/trackerdb/data.go b/ledger/store/trackerdb/data.go index 8e69f2fc69..1649d1f82d 100644 --- a/ledger/store/trackerdb/data.go +++ b/ledger/store/trackerdb/data.go @@ -152,6 +152,8 @@ type BaseOnlineAccountData struct { BaseVotingData + LastProposed basics.Round `codec:"V"` + LastHeartbeat basics.Round `codec:"W"` IncentiveEligible bool `codec:"X"` MicroAlgos basics.MicroAlgos `codec:"Y"` RewardsBase uint64 `codec:"Z"` @@ -456,7 +458,10 @@ func (bo *BaseOnlineAccountData) IsVotingEmpty() bool { func (bo *BaseOnlineAccountData) IsEmpty() bool { return bo.IsVotingEmpty() && bo.MicroAlgos.Raw == 0 && - bo.RewardsBase == 0 && !bo.IncentiveEligible + bo.RewardsBase == 0 && + bo.LastHeartbeat == 0 && + bo.LastProposed == 0 && + !bo.IncentiveEligible } // GetOnlineAccount returns ledgercore.OnlineAccount for top online accounts / voters @@ -491,6 +496,8 @@ func (bo *BaseOnlineAccountData) GetOnlineAccountData(proto config.ConsensusPara VoteKeyDilution: bo.VoteKeyDilution, }, IncentiveEligible: bo.IncentiveEligible, + LastProposed: bo.LastProposed, + LastHeartbeat: bo.LastHeartbeat, } } @@ -507,6 +514,8 @@ func (bo *BaseOnlineAccountData) SetCoreAccountData(ad *ledgercore.AccountData) bo.MicroAlgos = ad.MicroAlgos bo.RewardsBase = ad.RewardsBase bo.IncentiveEligible = ad.IncentiveEligible + bo.LastProposed = ad.LastProposed + bo.LastHeartbeat = ad.LastHeartbeat } // MakeResourcesData returns a new empty instance of 
resourcesData. diff --git a/ledger/store/trackerdb/data_test.go b/ledger/store/trackerdb/data_test.go index edc0d0dc9e..b256fa4e76 100644 --- a/ledger/store/trackerdb/data_test.go +++ b/ledger/store/trackerdb/data_test.go @@ -1152,7 +1152,7 @@ func TestBaseOnlineAccountDataIsEmpty(t *testing.T) { structureTesting := func(t *testing.T) { encoding, err := json.Marshal(&empty) zeros32 := "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0" - expectedEncoding := `{"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"IncentiveEligible":false,"MicroAlgos":{"Raw":0},"RewardsBase":0}` + expectedEncoding := `{"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"LastProposed":0,"LastHeartbeat":0,"IncentiveEligible":false,"MicroAlgos":{"Raw":0},"RewardsBase":0}` require.NoError(t, err) require.Equal(t, expectedEncoding, string(encoding)) } @@ -1249,7 +1249,7 @@ func TestBaseOnlineAccountDataReflect(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - require.Equal(t, 5, reflect.TypeOf(BaseOnlineAccountData{}).NumField(), "update all getters and setters for baseOnlineAccountData and change the field count") + require.Equal(t, 7, reflect.TypeOf(BaseOnlineAccountData{}).NumField(), "update all getters and setters for baseOnlineAccountData and change the field count") } func TestBaseVotingDataReflect(t *testing.T) { diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go index 465248e93d..98f35bf519 100644 --- a/ledger/store/trackerdb/msgp_gen.go +++ b/ledger/store/trackerdb/msgp_gen.go @@ -749,8 +749,8 @@ func BaseAccountDataMaxSize() (s int) { func (z *BaseOnlineAccountData) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(9) - 
var zb0001Mask uint16 /* 11 bits */ + zb0001Len := uint32(11) + var zb0001Mask uint16 /* 13 bits */ if (*z).BaseVotingData.VoteID.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x1 @@ -775,18 +775,26 @@ func (z *BaseOnlineAccountData) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x20 } - if (*z).IncentiveEligible == false { + if (*z).LastProposed.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x40 } - if (*z).MicroAlgos.MsgIsZero() { + if (*z).LastHeartbeat.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80 } - if (*z).RewardsBase == 0 { + if (*z).IncentiveEligible == false { zb0001Len-- zb0001Mask |= 0x100 } + if (*z).MicroAlgos.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x200 + } + if (*z).RewardsBase == 0 { + zb0001Len-- + zb0001Mask |= 0x400 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) if zb0001Len != 0 { @@ -821,16 +829,26 @@ func (z *BaseOnlineAccountData) MarshalMsg(b []byte) (o []byte) { o = (*z).BaseVotingData.StateProofID.MarshalMsg(o) } if (zb0001Mask & 0x40) == 0 { // if not empty + // string "V" + o = append(o, 0xa1, 0x56) + o = (*z).LastProposed.MarshalMsg(o) + } + if (zb0001Mask & 0x80) == 0 { // if not empty + // string "W" + o = append(o, 0xa1, 0x57) + o = (*z).LastHeartbeat.MarshalMsg(o) + } + if (zb0001Mask & 0x100) == 0 { // if not empty // string "X" o = append(o, 0xa1, 0x58) o = msgp.AppendBool(o, (*z).IncentiveEligible) } - if (zb0001Mask & 0x80) == 0 { // if not empty + if (zb0001Mask & 0x200) == 0 { // if not empty // string "Y" o = append(o, 0xa1, 0x59) o = (*z).MicroAlgos.MarshalMsg(o) } - if (zb0001Mask & 0x100) == 0 { // if not empty + if (zb0001Mask & 0x400) == 0 { // if not empty // string "Z" o = append(o, 0xa1, 0x5a) o = msgp.AppendUint64(o, (*z).RewardsBase) @@ -910,6 +928,22 @@ func (z *BaseOnlineAccountData) UnmarshalMsgWithState(bts []byte, st msgp.Unmars return } } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).LastProposed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = 
msgp.WrapError(err, "struct-from-array", "LastProposed") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).LastHeartbeat.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "LastHeartbeat") + return + } + } if zb0001 > 0 { zb0001-- (*z).IncentiveEligible, bts, err = msgp.ReadBoolBytes(bts) @@ -993,6 +1027,18 @@ func (z *BaseOnlineAccountData) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err, "StateProofID") return } + case "V": + bts, err = (*z).LastProposed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "LastProposed") + return + } + case "W": + bts, err = (*z).LastHeartbeat.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "LastHeartbeat") + return + } case "X": (*z).IncentiveEligible, bts, err = msgp.ReadBoolBytes(bts) if err != nil { @@ -1034,18 +1080,18 @@ func (_ *BaseOnlineAccountData) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BaseOnlineAccountData) Msgsize() (s int) { - s = 1 + 2 + (*z).BaseVotingData.VoteID.Msgsize() + 2 + (*z).BaseVotingData.SelectionID.Msgsize() + 2 + (*z).BaseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).BaseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).BaseVotingData.StateProofID.Msgsize() + 2 + msgp.BoolSize + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + s = 1 + 2 + (*z).BaseVotingData.VoteID.Msgsize() + 2 + (*z).BaseVotingData.SelectionID.Msgsize() + 2 + (*z).BaseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).BaseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).BaseVotingData.StateProofID.Msgsize() + 2 + (*z).LastProposed.Msgsize() + 2 + (*z).LastHeartbeat.Msgsize() + 2 + msgp.BoolSize + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size return } // MsgIsZero returns whether this is a zero value func (z *BaseOnlineAccountData) MsgIsZero() 
bool { - return ((*z).BaseVotingData.VoteID.MsgIsZero()) && ((*z).BaseVotingData.SelectionID.MsgIsZero()) && ((*z).BaseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).BaseVotingData.VoteLastValid.MsgIsZero()) && ((*z).BaseVotingData.VoteKeyDilution == 0) && ((*z).BaseVotingData.StateProofID.MsgIsZero()) && ((*z).IncentiveEligible == false) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) + return ((*z).BaseVotingData.VoteID.MsgIsZero()) && ((*z).BaseVotingData.SelectionID.MsgIsZero()) && ((*z).BaseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).BaseVotingData.VoteLastValid.MsgIsZero()) && ((*z).BaseVotingData.VoteKeyDilution == 0) && ((*z).BaseVotingData.StateProofID.MsgIsZero()) && ((*z).LastProposed.MsgIsZero()) && ((*z).LastHeartbeat.MsgIsZero()) && ((*z).IncentiveEligible == false) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) } // MaxSize returns a maximum valid message size for this message type func BaseOnlineAccountDataMaxSize() (s int) { - s = 1 + 2 + crypto.OneTimeSignatureVerifierMaxSize() + 2 + crypto.VRFVerifierMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.Uint64Size + 2 + merklesignature.CommitmentMaxSize() + 2 + msgp.BoolSize + 2 + basics.MicroAlgosMaxSize() + 2 + msgp.Uint64Size + s = 1 + 2 + crypto.OneTimeSignatureVerifierMaxSize() + 2 + crypto.VRFVerifierMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.Uint64Size + 2 + merklesignature.CommitmentMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.BoolSize + 2 + basics.MicroAlgosMaxSize() + 2 + msgp.Uint64Size return } diff --git a/ledger/tracker.go b/ledger/tracker.go index 1f7950a1c2..7f6b025d18 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -948,7 +948,12 @@ func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, } func (aul *accountUpdatesLedgerEvaluator) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) { - return 
aul.ao.LookupOnlineAccountData(rnd, addr) + return aul.ao.lookupOnlineAccountData(rnd, addr) +} + +func (aul *accountUpdatesLedgerEvaluator) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + // This method is only used when generating blocks, so we don't need to implement it here. + return nil, fmt.Errorf("accountUpdatesLedgerEvaluator: GetKnockOfflineCandidates is not implemented and should not be called during replay") } func (aul *accountUpdatesLedgerEvaluator) OnlineCirculation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) { diff --git a/ledger/voters.go b/ledger/voters.go index 63e0722a6f..49d7adf457 100644 --- a/ledger/voters.go +++ b/ledger/voters.go @@ -291,7 +291,30 @@ func (vt *votersTracker) lowestRound(base basics.Round) basics.Round { return minRound } -// VotersForStateProof returns the top online participants from round r. +// LatestCompletedVotersUpTo returns the highest round <= r for which information about the top online +// participants has already been collected, and the completed VotersForRound for that round. +// If none is found, it returns 0, nil. Unlike VotersForStateProof, this function does not wait. +func (vt *votersTracker) LatestCompletedVotersUpTo(r basics.Round) (basics.Round, *ledgercore.VotersForRound) { + vt.votersMu.RLock() + defer vt.votersMu.RUnlock() + + var latestRound basics.Round + var latestVoters *ledgercore.VotersForRound + + for round, voters := range vt.votersForRoundCache { + if round <= r && round > latestRound { + if completed, err := voters.Completed(); completed && err == nil { + latestRound = round + latestVoters = voters + } + } + } + + return latestRound, latestVoters +} + +// VotersForStateProof returns the top online participants from round r. If this data is still being +// constructed in another goroutine, this function will wait until it is ready. 
func (vt *votersTracker) VotersForStateProof(r basics.Round) (*ledgercore.VotersForRound, error) { tr, exists := vt.getVoters(r) if !exists { diff --git a/ledger/voters_test.go b/ledger/voters_test.go index a4913c4999..083492c610 100644 --- a/ledger/voters_test.go +++ b/ledger/voters_test.go @@ -17,6 +17,7 @@ package ledger import ( + "fmt" "testing" "github.com/algorand/go-algorand/config" @@ -273,3 +274,84 @@ func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) { a.Equal(merklesignature.NoKeysCommitment, top.Participants[j].PK.Commitment) } } + +// implements ledgercore.OnlineAccountsFetcher +type testOnlineAccountsFetcher struct { + topAccts []*ledgercore.OnlineAccount + totalStake basics.MicroAlgos + err error +} + +func (o testOnlineAccountsFetcher) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Round, n uint64, params *config.ConsensusParams, rewardsLevel uint64) (topOnlineAccounts []*ledgercore.OnlineAccount, totalOnlineStake basics.MicroAlgos, err error) { + return o.topAccts, o.totalStake, o.err +} + +func TestLatestCompletedVotersUpToWithError(t *testing.T) { + partitiontest.PartitionTest(t) + a := require.New(t) + + // Set up mock ledger with initial data + accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)} + ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, accts) + defer ml.Close() + + conf := config.GetDefaultLocal() + _, ao := newAcctUpdates(t, ml, conf) + + // Add several blocks + for i := uint64(1); i < 10; i++ { + addRandomBlock(t, ml) + } + commitAll(t, ml) + + // Populate votersForRoundCache with test data + for r := basics.Round(1); r <= 9; r += 2 { // simulate every odd round + vr := ledgercore.MakeVotersForRound() + if r%4 == 1 { // Simulate an error for rounds 1, 5, and 9 + vr.BroadcastError(fmt.Errorf("error loading data for round %d", r)) + } else { + // Simulate a successful load of voter data + hdr := bookkeeping.BlockHeader{Round: r} + oaf := testOnlineAccountsFetcher{nil, 
basics.MicroAlgos{Raw: 1_000_000}, nil} + require.NoError(t, vr.LoadTree(oaf, hdr)) + } + + ao.voters.setVoters(r, vr) + } + + // LastCompletedVotersUpTo retrieves the highest round less than or equal to + // the requested round where data is complete, ignoring rounds with errors. + for _, tc := range []struct { + reqRound, retRound uint64 + completed bool + }{ + {0, 0, false}, + {1, 0, false}, + {2, 0, false}, // requested 2, no completed rounds <= 2 + {3, 3, true}, + {4, 3, true}, + {5, 3, true}, // requested 5, got 3 (round 5 had error) + {6, 3, true}, + {7, 7, true}, // requested 7, got 7 (last completed <= 8) + {8, 7, true}, // requested 8, got 7 (last completed <= 8) + {9, 7, true}, // requested 9, got 7 (err at 9) + {10, 7, true}, + {11, 7, true}, + } { + completedRound, voters := ao.voters.LatestCompletedVotersUpTo(basics.Round(tc.reqRound)) + a.Equal(completedRound, basics.Round(tc.retRound)) // No completed rounds before 2 + a.Equal(voters != nil, tc.completed) + } + + // Test with errors in all rounds + ao.voters.votersForRoundCache = make(map[basics.Round]*ledgercore.VotersForRound) // reset map + for r := basics.Round(1); r <= 9; r += 2 { + vr := ledgercore.MakeVotersForRound() + vr.BroadcastError(fmt.Errorf("error loading data for round %d", r)) + ao.voters.setVoters(r, vr) + } + + completedRound, voters := ao.voters.LatestCompletedVotersUpTo(basics.Round(9)) + a.Equal(basics.Round(0), completedRound) // No completed rounds due to errors + a.Nil(voters) +} diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go index f3f1c67192..e7739e085c 100644 --- a/libgoal/libgoal.go +++ b/libgoal/libgoal.go @@ -28,7 +28,6 @@ import ( v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" kmdclient "github.com/algorand/go-algorand/daemon/kmd/client" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/rpcs" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" @@ -831,53 +830,43 @@ func (c 
*Client) Block(round uint64) (resp v2.BlockResponseJSON, err error) { // RawBlock takes a round and returns its block func (c *Client) RawBlock(round uint64) (resp []byte, err error) { algod, err := c.ensureAlgodClient() - if err == nil { - resp, err = algod.RawBlock(round) - } - return -} - -// EncodedBlockCert takes a round and returns its parsed block and certificate -func (c *Client) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) { - algod, err := c.ensureAlgodClient() - if err == nil { - var resp []byte - resp, err = algod.RawBlock(round) - if err == nil { - err = protocol.Decode(resp, &blockCert) - if err != nil { - return - } - } + if err != nil { + return } - return + return algod.RawBlock(round) } // BookkeepingBlock takes a round and returns its block func (c *Client) BookkeepingBlock(round uint64) (block bookkeeping.Block, err error) { - blockCert, err := c.EncodedBlockCert(round) - if err == nil { - return blockCert.Block, nil + algod, err := c.ensureAlgodClient() + if err != nil { + return } - return + blockCert, err := algod.EncodedBlockCert(round) + if err != nil { + return + } + return blockCert.Block, nil } // HealthCheck returns an error if something is wrong func (c *Client) HealthCheck() error { algod, err := c.ensureAlgodClient() - if err == nil { - err = algod.HealthCheck() + if err != nil { + return err } - return err + return algod.HealthCheck() } -// WaitForRound takes a round, waits until it appears and returns its status. This function blocks. +// WaitForRound takes a round, waits up to one minute, for it to appear and +// returns the node status. This function blocks and fails if the block does not +// appear in one minute. 
func (c *Client) WaitForRound(round uint64) (resp model.NodeStatusResponse, err error) { algod, err := c.ensureAlgodClient() - if err == nil { - resp, err = algod.StatusAfterBlock(round) + if err != nil { + return } - return + return algod.WaitForRound(round, time.Minute) } // GetBalance takes an address and returns its total balance; if the address doesn't exist, it returns 0. diff --git a/network/connPerfMon_test.go b/network/connPerfMon_test.go index 560be72a96..4c2bc5f034 100644 --- a/network/connPerfMon_test.go +++ b/network/connPerfMon_test.go @@ -103,14 +103,14 @@ func TestConnMonitorStageTiming(t *testing.T) { startTestTime := time.Now().UnixNano() perfMonitor := makeConnectionPerformanceMonitor([]Tag{protocol.AgreementVoteTag}) // measure measuring overhead. - measuringOverhead := time.Now().Sub(time.Now()) + measuringOverhead := time.Since(time.Now()) perfMonitor.Reset(peers) for msgIdx, msg := range msgPool { msg.Received += startTestTime beforeNotify := time.Now() beforeNotifyStage := perfMonitor.stage perfMonitor.Notify(&msg) - notifyTime := time.Now().Sub(beforeNotify) + notifyTime := time.Since(beforeNotify) stageTimings[beforeNotifyStage] += notifyTime stageNotifyCalls[beforeNotifyStage]++ if perfMonitor.GetPeersStatistics() != nil { diff --git a/node/node.go b/node/node.go index 44c8449d95..2d452afa90 100644 --- a/node/node.go +++ b/node/node.go @@ -43,6 +43,7 @@ import ( "github.com/algorand/go-algorand/data/pools" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/verify" + "github.com/algorand/go-algorand/heartbeat" "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/ledger/simulation" @@ -155,6 +156,8 @@ type AlgorandFullNode struct { stateProofWorker *stateproof.Worker partHandles []db.Accessor + + heartbeatService *heartbeat.Service } // TxnWithStatus represents information about a single transaction, @@ -338,6 +341,8 
@@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.stateProofWorker = stateproof.NewWorker(node.genesisDirs.StateproofGenesisDir, node.log, node.accountManager, node.ledger.Ledger, node.net, node) + node.heartbeatService = heartbeat.NewService(node.accountManager, node.ledger, node, node.log) + return node, err } @@ -380,6 +385,7 @@ func (node *AlgorandFullNode) Start() error { node.ledgerService.Start() node.txHandler.Start() node.stateProofWorker.Start() + node.heartbeatService.Start() err := startNetwork() if err != nil { return err @@ -459,6 +465,7 @@ func (node *AlgorandFullNode) Stop() { if node.catchpointCatchupService != nil { node.catchpointCatchupService.Stop() } else { + node.heartbeatService.Stop() node.stateProofWorker.Stop() node.txHandler.Stop() node.agreementService.Shutdown() @@ -1220,6 +1227,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo }() node.net.ClearHandlers() node.net.ClearValidatorHandlers() + node.heartbeatService.Stop() node.stateProofWorker.Stop() node.txHandler.Stop() node.agreementService.Shutdown() @@ -1248,6 +1256,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo node.ledgerService.Start() node.txHandler.Start() node.stateProofWorker.Start() + node.heartbeatService.Start() // Set up a context we can use to cancel goroutines on Stop() node.ctx, node.cancelCtx = context.WithCancel(context.Background()) diff --git a/protocol/txntype.go b/protocol/txntype.go index 76cb2dc406..ee2d085dcb 100644 --- a/protocol/txntype.go +++ b/protocol/txntype.go @@ -47,6 +47,9 @@ const ( // StateProofTx records a state proof StateProofTx TxType = "stpf" + // HeartbeatTx demonstrates the account is alive + HeartbeatTx TxType = "hb" + // UnknownTx signals an error UnknownTx TxType = "unknown" ) diff --git a/stateproof/builder.go b/stateproof/builder.go index 96ca279a4b..3f2e61d695 100644 --- a/stateproof/builder.go +++ b/stateproof/builder.go @@ 
-668,7 +668,7 @@ func (spw *Worker) tryBroadcast() { latestHeader, err := spw.ledger.BlockHdr(firstValid) if err != nil { - spw.log.Warnf("spw.tryBroadcast: could not fetch block header for round %d failed: %v", firstValid, err) + spw.log.Warnf("spw.tryBroadcast: could not fetch block header for round %d: %v", firstValid, err) break } diff --git a/stateproof/worker.go b/stateproof/worker.go index f74e118f58..163ec214e0 100644 --- a/stateproof/worker.go +++ b/stateproof/worker.go @@ -95,9 +95,7 @@ func NewWorker(genesisDir string, log logging.Logger, accts Accounts, ledger Led // Start starts the goroutines for the worker. func (spw *Worker) Start() { - ctx, cancel := context.WithCancel(context.Background()) - spw.ctx = ctx - spw.shutdown = cancel + spw.ctx, spw.shutdown = context.WithCancel(context.Background()) spw.signedCh = make(chan struct{}, 1) err := spw.initDb(spw.inMemory) diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go index 946d1b24b6..94feb3e9eb 100644 --- a/test/e2e-go/features/accountPerf/sixMillion_test.go +++ b/test/e2e-go/features/accountPerf/sixMillion_test.go @@ -1024,13 +1024,10 @@ func checkPoint(counter, firstValid, tLife uint64, force bool, fixture *fixtures if verbose { fmt.Printf("Waiting for round %d...", int(lastRound)) } - nodeStat, err := fixture.AlgodClient.WaitForBlock(basics.Round(lastRound - 1)) + nodeStat, err := fixture.AlgodClient.WaitForRound(lastRound, time.Minute) if err != nil { return 0, 0, fmt.Errorf("failed to wait for block %d : %w", lastRound, err) } - if nodeStat.LastRound < lastRound { - return 0, 0, fmt.Errorf("failed to wait for block %d : node is at round %d", lastRound, nodeStat.LastRound) - } return 0, nodeStat.LastRound + 1, nil } return counter, firstValid, nil diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index 2e3ac87943..adc8c43f18 100644 --- 
a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -56,9 +56,8 @@ func TestBasicCatchup(t *testing.T) { a.NoError(err) // Let the network make some progress - a.NoError(err) waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(3) a.NoError(err) // Now spin up third node @@ -71,7 +70,7 @@ func TestBasicCatchup(t *testing.T) { defer shutdownClonedNode(cloneDataDir, &fixture, t) // Now, catch up - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + _, err = cloneClient.WaitForRound(waitForRound) a.NoError(err) } @@ -155,7 +154,7 @@ func runCatchupOverGossip(t fixtures.TestingTB, // Let the secondary make progress up to round 3, while the primary was never startred ( hence, it's on round = 0) waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // stop the secondary, which is on round 3 or more. @@ -167,7 +166,7 @@ func runCatchupOverGossip(t fixtures.TestingTB, a.NoError(err) // Now, catch up - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(lg, waitForRound) + _, err = lg.WaitForRound(waitForRound) a.NoError(err) waitStart := time.Now() @@ -184,7 +183,7 @@ func runCatchupOverGossip(t fixtures.TestingTB, break } - if time.Now().Sub(waitStart) > time.Minute { + if time.Since(waitStart) > time.Minute { // it's taking too long. 
a.FailNow("Waiting too long for catchup to complete") } @@ -258,7 +257,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) { // Let the network make some progress a.NoError(err) waitForRound := uint64(3) // UpgradeVoteRounds + DefaultUpgradeWaitRounds - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Now spin up third node @@ -274,7 +273,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) { defer shutdownClonedNode(cloneDataDir, &fixture, t) // Now, catch up - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + _, err = cloneClient.WaitForRound(waitForRound) a.NoError(err) timeout := time.NewTimer(20 * time.Second) @@ -374,7 +373,7 @@ func TestBasicCatchupCompletes(t *testing.T) { a.NoError(err) // Wait for the network to make some progess. - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Start the third node to catchup. 
@@ -384,7 +383,7 @@ func TestBasicCatchupCompletes(t *testing.T) { defer shutdownClonedNode(cloneDataDir, &fixture, t) // Wait for it to catchup - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + _, err = cloneClient.WaitForRound(waitForRound) a.NoError(err) // Calculate the catchup time diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 3a1eefedc4..0a1d522cac 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -46,7 +46,7 @@ import ( const basicTestCatchpointInterval = 4 func waitForCatchpointGeneration(t *testing.T, fixture *fixtures.RestClientFixture, client client.RestClient, catchpointRound basics.Round) string { - err := fixture.ClientWaitForRoundWithTimeout(client, uint64(catchpointRound+1)) + err := client.WaitForRoundWithTimeout(uint64(catchpointRound + 1)) if err != nil { return "" } @@ -212,7 +212,7 @@ func startCatchpointGeneratingNode(a *require.Assertions, fixture *fixtures.Rest restClient := fixture.GetAlgodClientForController(nodeController) // We don't want to start using the node without it being properly initialized. - err = fixture.ClientWaitForRoundWithTimeout(restClient, 1) + err = restClient.WaitForRoundWithTimeout(1) a.NoError(err) return nodeController, restClient, &errorsCollector @@ -239,7 +239,7 @@ func startCatchpointUsingNode(a *require.Assertions, fixture *fixtures.RestClien restClient := fixture.GetAlgodClientForController(nodeController) // We don't want to start using the node without it being properly initialized. 
- err = fixture.ClientWaitForRoundWithTimeout(restClient, 1) + err = restClient.WaitForRoundWithTimeout(1) a.NoError(err) return nodeController, restClient, wp, &errorsCollector @@ -263,7 +263,7 @@ func startCatchpointNormalNode(a *require.Assertions, fixture *fixtures.RestClie restClient := fixture.GetAlgodClientForController(nodeController) // We don't want to start using the node without it being properly initialized. - err = fixture.ClientWaitForRoundWithTimeout(restClient, 1) + err = restClient.WaitForRoundWithTimeout(1) a.NoError(err) return nodeController, restClient, &errorsCollector @@ -365,7 +365,7 @@ func TestBasicCatchpointCatchup(t *testing.T) { _, err = usingNodeRestClient.Catchup(catchpointLabel, 0) a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1)) a.NoError(err) // ensure the raw block can be downloaded (including cert) @@ -438,7 +438,7 @@ func TestCatchpointLabelGeneration(t *testing.T) { primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode) log.Infof("Building ledger history..") for { - err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second) + _, err = primaryNodeRestClient.WaitForRound(currentRound+1, 45*time.Second) a.NoError(err) if targetRound <= currentRound { break @@ -553,8 +553,7 @@ func TestNodeTxHandlerRestart(t *testing.T) { // Wait for the network to start making progress again primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode) - err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound, - 10*catchpointCatchupProtocol.AgreementFilterTimeout) + _, err = primaryNodeRestClient.WaitForRound(targetRound, 10*catchpointCatchupProtocol.AgreementFilterTimeout) a.NoError(err) // let the 2nd client send a transaction @@ -674,8 +673,7 @@ func TestReadyEndpoint(t *testing.T) { // Wait for the network to start making 
progress again primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode) - err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound, - 10*catchpointCatchupProtocol.AgreementFilterTimeout) + _, err = primaryNodeRestClient.WaitForRound(targetRound, 10*catchpointCatchupProtocol.AgreementFilterTimeout) a.NoError(err) // The primary node has reached the target round, diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go index 5dcbc11452..f9639abeb1 100644 --- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go +++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go @@ -115,7 +115,7 @@ func TestStateProofInReplayCatchpoint(t *testing.T) { } // wait for fastcatchup to complete and the node is synced - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1)) a.NoError(err) primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode) @@ -174,7 +174,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) { roundAfterSPGeneration := targetCatchpointRound.RoundUpToMultipleOf(basics.Round(consensusParams.StateProofInterval)) + basics.Round(consensusParams.StateProofInterval/2) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(roundAfterSPGeneration)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(roundAfterSPGeneration)) a.NoError(err) primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode) @@ -234,14 +234,14 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { primaryNodeAddr, err := primaryNode.GetListeningAddress() a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(primaryNodeRestClient, 3) + err = primaryNodeRestClient.WaitForRoundWithTimeout(3) a.NoError(err) normalNode, normalNodeRestClient, normalNodeEC := startCatchpointNormalNode(a, &fixture, "Node1", 
primaryNodeAddr) defer normalNodeEC.Print() defer normalNode.StopAlgod() - err = fixture.ClientWaitForRoundWithTimeout(normalNodeRestClient, 3) + err = normalNodeRestClient.WaitForRoundWithTimeout(3) a.NoError(err) // at this point PrimaryNode and Node1 would pass round 3. Before running Node2 we remove block 2 from Primary database. @@ -267,7 +267,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { _, err = usingNodeRestClient.Catchup(catchpointLabel, 0) a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound)+1) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound) + 1) a.NoError(err) lastNormalRound, err := fixture.GetLibGoalClientFromNodeController(normalNode).CurrentRound() @@ -280,7 +280,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { lastNormalNodeSignedRound := basics.Round(lastNormalRound).RoundDownToMultipleOf(basics.Round(consensusParams.StateProofInterval)) lastNormalNextStateProofRound := lastNormalNodeSignedRound + basics.Round(consensusParams.StateProofInterval) targetRound := lastNormalNextStateProofRound + basics.Round(consensusParams.StateProofInterval*2) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetRound)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetRound)) a.NoError(err) primaryClient := fixture.GetLibGoalClientFromNodeController(primaryNode) diff --git a/test/e2e-go/features/followernode/syncDeltas_test.go b/test/e2e-go/features/followernode/syncDeltas_test.go index af27c7dda7..d1458b7451 100644 --- a/test/e2e-go/features/followernode/syncDeltas_test.go +++ b/test/e2e-go/features/followernode/syncDeltas_test.go @@ -74,7 +74,7 @@ func TestBasicSyncMode(t *testing.T) { // Let the network make some progress waitForRound := uint64(5) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = 
fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Get the follower client, and exercise the sync/ledger functionality @@ -88,7 +88,7 @@ func TestBasicSyncMode(t *testing.T) { a.NoError(err) a.Equal(round, rResp.Round) // make some progress to round - err = fixture.ClientWaitForRoundWithTimeout(followClient, round) + err = followClient.WaitForRoundWithTimeout(round) a.NoError(err) // retrieve state delta gResp, err := followClient.GetLedgerStateDelta(round) @@ -113,6 +113,6 @@ func TestBasicSyncMode(t *testing.T) { err = followClient.SetSyncRound(round + 1) a.NoError(err) } - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(fixture.LibGoalClient, waitForRound) + err = fixture.WaitForRoundWithTimeout(waitForRound) a.NoError(err) } diff --git a/test/e2e-go/features/followernode/syncRestart_test.go b/test/e2e-go/features/followernode/syncRestart_test.go index 589bb7b53c..1aa5b2560d 100644 --- a/test/e2e-go/features/followernode/syncRestart_test.go +++ b/test/e2e-go/features/followernode/syncRestart_test.go @@ -62,7 +62,7 @@ func TestSyncRestart(t *testing.T) { waitTill := func(node string, round uint64) { controller, err := fixture.GetNodeController(node) a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(controller), round) + err = fixture.GetAlgodClientForController(controller).WaitForRoundWithTimeout(round) a.NoError(err) } diff --git a/test/e2e-go/features/incentives/challenge_test.go b/test/e2e-go/features/incentives/challenge_test.go new file mode 100644 index 0000000000..661bc7b40c --- /dev/null +++ b/test/e2e-go/features/incentives/challenge_test.go @@ -0,0 +1,222 @@ +// Copyright (C) 2019-2024 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see https://www.gnu.org/licenses/. + +package suspension + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/libgoal" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/framework/fixtures" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/algorand/go-algorand/util" +) + +// eligible is just a dumb 50/50 choice of whether to mark an address +// incentiveEligible or not, so we get a diversity of testing. Ineligible +// accounts should not be challenged or try to heartbeat. 
+func eligible(address string) bool { + return address[0]&0x01 == 0 +} + +// TestChallenges ensures that accounts are knocked off if they don't respond to +// a challenge, and that algod responds for accounts it knows (keeping them online) +func TestChallenges(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + // Overview of this test: + // Use a consensus protocol with challenge interval=50, grace period=10, bits=2. + // Start a three-node network. One relay, two nodes with 4 accounts each + // At round 50, ~2 nodes will be challenged. + + const lookback = 32 + const interval = 50 + const grace = 10 + const mask = 0x80 + + var fixture fixtures.RestClientFixture + // Speed up rounds, keep lookback > 2 * grace period + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) + fixture.AlterConsensus(protocol.ConsensusFuture, + func(cp config.ConsensusParams) config.ConsensusParams { + cp.Payouts.ChallengeInterval = 50 + cp.Payouts.ChallengeGracePeriod = 10 + cp.Payouts.ChallengeBits = 1 // half of nodes should get challenged + return cp + }) + fixture.Setup(t, filepath.Join("nettemplates", "Challenges.json")) + defer fixture.Shutdown() + + clientAndAccounts := func(name string) (libgoal.Client, []model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 8) + fmt.Printf("Client %s has %v\n", name, accounts) + return c, accounts + } + + c1, accounts1 := clientAndAccounts("Node1") + c2, accounts2 := clientAndAccounts("Node2") + + err := fixture.WaitForRoundWithTimeout(interval - lookback) // Make all LastHeartbeats > interval, < 2*interval + a.NoError(err) + + // eligible accounts1 will get challenged with node offline, and suspended + for _, account := range accounts1 { + rekeyreg(&fixture, a, c1, account.Address,
eligible(account.Address)) + } + // eligible accounts2 will get challenged, but node2 will heartbeat for them + for _, account := range accounts2 { + rekeyreg(&fixture, a, c2, account.Address, eligible(account.Address)) + } + + // turn off node 1, so it can't heartbeat + a.NoError(c1.FullStop()) + + current, err := c2.CurrentRound() + a.NoError(err) + // Get them all done so that their inflated LastHeartbeat comes before the + // next challenge. + a.Less(current+lookback, 2*uint64(interval)) + + // We need to wait for the first challenge that happens after the keyreg + // LastHeartbeat has passed. Example: current is 40, so the lastPossible + // LastHeartbeat is 72. Interval is 50, so challengeRound is 100. + + // 100 = 40 + 32 + (50-22) = 72 + 28 + lastPossible := current + lookback + challengeRound := lastPossible + (interval - lastPossible%interval) + + // Advance to challenge round, check the blockseed + err = fixture.WaitForRoundWithTimeout(challengeRound) + a.NoError(err) + blk, err := c2.BookkeepingBlock(challengeRound) + a.NoError(err) + challenge := blk.BlockHeader.Seed[0] & mask // high bit + + // match1 are the accounts from node1 that match the challenge, but only + // eligible ones are truly challenged and could be suspended. + match1 := util.MakeSet[basics.Address]() + eligible1 := util.MakeSet[basics.Address]() // matched AND eligible + for _, account := range accounts1 { + address, err := basics.UnmarshalChecksumAddress(account.Address) + a.NoError(err) + if address[0]&mask == challenge { + fmt.Printf("%v of node 1 was challenged %v by %v\n", address, address[0], challenge) + match1.Add(address) + if eligible(address.String()) { + eligible1.Add(address) + } + } + } + require.NotEmpty(t, match1, "rerun the test") // TODO: remove. 
+ + match2 := util.MakeSet[basics.Address]() + eligible2 := util.MakeSet[basics.Address]() // matched AND eligible + for _, account := range accounts2 { + address, err := basics.UnmarshalChecksumAddress(account.Address) + a.NoError(err) + if address[0]&mask == challenge { + fmt.Printf("%v of node 2 was challenged %v by %v\n", address, address[0], challenge) + match2.Add(address) + if eligible(address.String()) { + eligible2.Add(address) + } + } + } + require.NotEmpty(t, match2, "rerun the test") // TODO: remove. + + allMatches := util.Union(match1, match2) + + // All nodes are online to start + for address := range allMatches { + data, err := c2.AccountData(address.String()) + a.NoError(err) + a.Equal(basics.Online, data.Status, "%v %d", address.String(), data.LastHeartbeat) + a.NotZero(data.VoteID) + a.Equal(eligible(address.String()), data.IncentiveEligible) + } + + // Watch the first half grace period for proposals from challenged nodes, since they won't have to heartbeat. + lucky := util.MakeSet[basics.Address]() + fixture.WithEveryBlock(challengeRound, challengeRound+grace/2, func(block bookkeeping.Block) { + if eligible2.Contains(block.Proposer()) { + lucky.Add(block.Proposer()) + } + a.Empty(block.AbsentParticipationAccounts) // nobody suspended during grace + }) + + // In the second half of the grace period, Node 2 should heartbeat for its eligible accounts + beated := util.MakeSet[basics.Address]() + fixture.WithEveryBlock(challengeRound+grace/2, challengeRound+grace, func(block bookkeeping.Block) { + if eligible2.Contains(block.Proposer()) { + lucky.Add(block.Proposer()) + } + for i, txn := range block.Payset { + hb := txn.Txn.HeartbeatTxnFields + fmt.Printf("Heartbeat txn %v in position %d round %d\n", hb, i, block.Round()) + a.True(match2.Contains(hb.HbAddress)) // only Node 2 is alive + a.True(eligible2.Contains(hb.HbAddress)) // only eligible accounts get heartbeat + a.False(beated.Contains(hb.HbAddress)) // beat only once + beated.Add(hb.HbAddress) + 
a.False(lucky.Contains(hb.HbAddress)) // we should not see a heartbeat from an account that proposed + } + a.Empty(block.AbsentParticipationAccounts) // nobody suspended during grace + }) + a.Equal(eligible2, util.Union(beated, lucky)) + + blk, err = fixture.WaitForBlockWithTimeout(challengeRound + grace + 1) + a.NoError(err) + a.Equal(eligible1, util.MakeSet(blk.AbsentParticipationAccounts...)) + + // node 1 challenged (eligible) accounts are suspended because node 1 is off + for address := range match1 { + data, err := c2.AccountData(address.String()) + a.NoError(err) + if eligible1.Contains(address) { + a.Equal(basics.Offline, data.Status, address) + } else { + a.Equal(basics.Online, data.Status, address) // not eligible, so not suspended + } + a.NotZero(data.VoteID, address) + a.False(data.IncentiveEligible, address) // suspension turns off flag + } + + // node 2 challenged accounts are not suspended (saved by heartbeat or weren't eligible) + for address := range match2 { + data, err := c2.AccountData(address.String()) + a.NoError(err) + a.Equal(basics.Online, data.Status, address) + a.NotZero(data.VoteID, address) + a.Equal(data.IncentiveEligible, eligible(address.String())) + } + +} diff --git a/test/e2e-go/features/incentives/payouts_test.go b/test/e2e-go/features/incentives/payouts_test.go index 1b9f4d0ec3..bf8b2e20e2 100644 --- a/test/e2e-go/features/incentives/payouts_test.go +++ b/test/e2e-go/features/incentives/payouts_test.go @@ -48,7 +48,7 @@ func TestBasicPayouts(t *testing.T) { var fixture fixtures.RestClientFixture // Make the seed lookback shorter, otherwise we need to wait 320 rounds to become IncentiveEligible. 
const lookback = 32 - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, 32) + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) fmt.Printf("lookback is %d\n", lookback) fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) defer fixture.Shutdown() @@ -71,8 +71,8 @@ func TestBasicPayouts(t *testing.T) { c01, account01 := clientAndAccount("Node01") relay, _ := clientAndAccount("Relay") - data01 := rekeyreg(&fixture, a, c01, account01.Address) - data15 := rekeyreg(&fixture, a, c15, account15.Address) + data01 := rekeyreg(&fixture, a, c01, account01.Address, true) + data15 := rekeyreg(&fixture, a, c15, account15.Address, true) // have account01 burn some money to get below the eligibility cap // Starts with 100M, so burn 60M and get under 70M cap. @@ -317,14 +317,19 @@ func getblock(client libgoal.Client, round uint64) (bookkeeping.Block, error) { return client.BookkeepingBlock(round) } -func rekeyreg(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string) basics.AccountData { +func rekeyreg(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string, becomeEligible bool) basics.AccountData { // we start by making an _offline_ tx here, because we want to populate the // key material ourself with a copy of the account's existing material. That // makes it an _online_ keyreg. That allows the running node to chug along // without new part keys. We overpay the fee, which makes us // IncentiveEligible, and to get some funds into FeeSink because we will // watch it drain toward bottom of test. 
- reReg, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, 12_000_000, [32]byte{}) + + fee := uint64(1000) + if becomeEligible { + fee = 12_000_000 + } + reReg, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, fee, [32]byte{}) a.NoError(err) data, err := client.AccountData(address) @@ -354,7 +359,7 @@ func rekeyreg(f *fixtures.RestClientFixture, a *require.Assertions, client libgo a.NoError(err) a.Equal(basics.Online, data.Status) a.True(data.LastHeartbeat > 0) - a.True(data.IncentiveEligible) + a.Equal(becomeEligible, data.IncentiveEligible) fmt.Printf(" %v has %v in round %d\n", address, data.MicroAlgos.Raw, *txn.ConfirmedRound) return data } diff --git a/test/e2e-go/features/incentives/suspension_test.go b/test/e2e-go/features/incentives/suspension_test.go index 6768f7926e..4a3709d96e 100644 --- a/test/e2e-go/features/incentives/suspension_test.go +++ b/test/e2e-go/features/incentives/suspension_test.go @@ -33,7 +33,6 @@ import ( ) // TestBasicSuspension confirms that accounts that don't propose get suspended -// (when a tx naming them occurs) func TestBasicSuspension(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) @@ -45,15 +44,17 @@ func TestBasicSuspension(t *testing.T) { // Start a three-node network (70,20,10), all online // Wait for 10 and 20% nodes to propose (we never suspend accounts with lastProposed=lastHeartbeat=0) // Stop them both - // Run for 55 rounds, which is enough for 20% node to be suspended, but not 10% + // Run for 105 rounds, which is enough for 20% node to be suspended, but not 10% // check neither suspended, send a tx from 20% to 10%, only 20% gets suspended - // TODO once we have heartbeats: bring them back up, make sure 20% gets back online - const suspend20 = 55 + // bring n20 back up, make sure it gets back online by proposing during the lookback + const suspend20 = 105 // 1.00/0.20 * absentFactor var fixture fixtures.RestClientFixture - // Speed up rounds, but keep long lookback, so 
20% node has a chance to get - // back online after being suspended. - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, 320) + // Speed up rounds. Long enough lookback, so 20% node has a chance to + // get back online after being suspended. (0.8^32 is very small) + + const lookback = 32 + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) fixture.Setup(t, filepath.Join("nettemplates", "Suspension.json")) defer fixture.Shutdown() @@ -69,74 +70,43 @@ func TestBasicSuspension(t *testing.T) { c10, account10 := clientAndAccount("Node10") c20, account20 := clientAndAccount("Node20") - rekeyreg(&fixture, a, c10, account10.Address) - rekeyreg(&fixture, a, c20, account20.Address) - - // Wait until each have proposed, so they are suspendable - proposed10 := false - proposed20 := false - for !proposed10 || !proposed20 { - status, err := c10.Status() - a.NoError(err) - block, err := c10.BookkeepingBlock(status.LastRound) - a.NoError(err) - - fmt.Printf(" block %d proposed by %v\n", status.LastRound, block.Proposer()) - - fixture.WaitForRoundWithTimeout(status.LastRound + 1) - - switch block.Proposer().String() { - case account10.Address: - proposed10 = true - case account20.Address: - proposed20 = true - } - } + rekeyreg(&fixture, a, c10, account10.Address, true) + rekeyreg(&fixture, a, c20, account20.Address, true) + // Accounts are now suspendable whether they have proposed yet or not + // because keyreg sets LastHeartbeat. Stop c20 which means account20 will be + // absent about 50 rounds after keyreg goes into effect (lookback) a.NoError(c20.FullStop()) afterStop, err := c10.Status() a.NoError(err) - // Advance 55 rounds - err = fixture.WaitForRoundWithTimeout(afterStop.LastRound + suspend20) - a.NoError(err) - - // n20 is still online after 55 rounds of absence (the node is off, but the - // account is marked online) because it has not been "noticed". 
- account, err := fixture.LibGoalClient.AccountData(account20.Address) + // Advance lookback+55 rounds + err = fixture.WaitForRoundWithTimeout(afterStop.LastRound + lookback + suspend20) a.NoError(err) - a.Equal(basics.Online, account.Status) - voteID := account.VoteID - a.NotZero(voteID) - - // pay n10 & n20, so both could be noticed - richAccount, err := fixture.GetRichestAccount() - a.NoError(err) - fixture.SendMoneyAndWait(afterStop.LastRound+suspend20, 5, 1000, richAccount.Address, account10.Address, "") - fixture.SendMoneyAndWait(afterStop.LastRound+suspend20, 5, 1000, richAccount.Address, account20.Address, "") // make sure c10 node is in-sync with the network status, err := fixture.LibGoalClient.Status() a.NoError(err) + fmt.Printf("status.LastRound %d\n", status.LastRound) _, err = c10.WaitForRound(status.LastRound) a.NoError(err) - // n20's account is now offline, but has voting key material (suspended) - account, err = c10.AccountData(account20.Address) + // n20's account has been suspended (offline, but has voting key material) + account, err := c10.AccountData(account20.Address) a.NoError(err) + fmt.Printf("account20 %d %d\n", account.LastProposed, account.LastHeartbeat) a.Equal(basics.Offline, account.Status) a.NotZero(account.VoteID) a.False(account.IncentiveEligible) // suspension turns off flag - // n10's account is still online, because it's got less stake, has not been absent 10 x interval. account, err = c10.AccountData(account10.Address) a.NoError(err) a.Equal(basics.Online, account.Status) a.NotZero(account.VoteID) a.True(account.IncentiveEligible) - // Use the fixture to start the node again. Since we're only a bit past the + // Use the fixture to start node20 again. Since we're only a bit past the // suspension round, it will still be voting. It should get a chance to // propose soon (20/100 of blocks) which will put it back online. 
lg, err := fixture.StartNode(c20.DataDir()) @@ -172,8 +142,6 @@ func TestBasicSuspension(t *testing.T) { a.NoError(err) r.Equal(basics.Online, account.Status, i) r.Greater(account.LastProposed, restartRound, i) - - r.Equal(voteID, account.VoteID, i) r.False(account.IncentiveEligible, i) } } diff --git a/test/e2e-go/features/incentives/whalejoin_test.go b/test/e2e-go/features/incentives/whalejoin_test.go new file mode 100644 index 0000000000..90a67450e2 --- /dev/null +++ b/test/e2e-go/features/incentives/whalejoin_test.go @@ -0,0 +1,324 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package suspension + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + + v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" + "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/libgoal" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/framework/fixtures" + "github.com/algorand/go-algorand/test/partitiontest" +) + +// TestWhaleJoin shows a "whale" with more stake than is currently online can go +// online without immediate suspension. 
This tests for a bug we had where we +// calculated expected proposal interval using the _old_ totals, rather than +// the totals following the keyreg. So big joiner was being expected to propose +// in the same block it joined. +func TestWhaleJoin(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + var fixture fixtures.RestClientFixture + // Make rounds shorter and seed lookback smaller, otherwise we need to wait + // 320 slow rounds for participation effects to matter. + const lookback = 32 + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) + fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) + defer fixture.Shutdown() + + // Overview of this test: + // 1. Take wallet15 offline (but retain keys so can back online later) + // 2. Have wallet01 spend almost all their algos + // 3. Wait for balances to flow through "lookback" + // 4. Rejoin wallet15 which will have way more stake than what is online. + + clientAndAccount := func(name string) (libgoal.Client, model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 1) + fmt.Printf("Client %s is %v\n", name, accounts[0].Address) + return c, accounts[0] + } + + c15, account15 := clientAndAccount("Node15") + c01, account01 := clientAndAccount("Node01") + + // 1. take wallet15 offline + keys := offline(&fixture, a, c15, account15.Address) + + // 2. c01 starts with 100M, so burn 99.9M to get total online stake down + burn, err := c01.SendPaymentFromUnencryptedWallet(account01.Address, basics.Address{}.String(), + 1000, 99_900_000_000_000, nil) + a.NoError(err) + receipt, err := fixture.WaitForConfirmedTxn(uint64(burn.LastValid), burn.ID().String()) + a.NoError(err) + + // 3. 
Wait lookback rounds + _, err = c01.WaitForRound(*receipt.ConfirmedRound + lookback) + a.NoError(err) + + // 4. rejoin, with 1.5B against the paltry 100k that's currently online + online(&fixture, a, c15, account15.Address, keys) + + // 5. wait for agreement balances to kick in (another lookback's worth, plus some slack) + _, err = c01.WaitForRound(*receipt.ConfirmedRound + 2*lookback + 5) + a.NoError(err) + + data, err := c15.AccountData(account15.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // even after being in the block to "get noticed" + txn, err := c15.SendPaymentFromUnencryptedWallet(account15.Address, basics.Address{}.String(), + 1000, 1, nil) + a.NoError(err) + _, err = fixture.WaitForConfirmedTxn(uint64(txn.LastValid), txn.ID().String()) + a.NoError(err) + data, err = c15.AccountData(account15.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) +} + +// TestBigJoin shows that even though an account can't vote during the first 320 +// rounds after joining, it is not marked absent because of that gap. This would +// be a problem for "biggish" accounts, that might already be absent after 320 +// rounds of not voting. +func TestBigJoin(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + var fixture fixtures.RestClientFixture + // We need lookback to be fairly long, so that we can have a node join with + // 1/16 stake, and have lookback be long enough to risk absenteeism. + const lookback = 164 // > 160, which is 10x the 1/16th's interval + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second/2, lookback) + fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) + defer fixture.Shutdown() + + // Overview of this test: + // 1. Take wallet01 offline (but retain keys so can back online later) + // 2. Wait `lookback` rounds so it can't propose. + // 3. 
Rejoin wallet01 which will now have 1/16 of the stake + // 4. Wait 160 rounds and ensure node01 does not get knocked offline for being absent + // 5. Wait the rest of lookback to ensure it _still_ does not get knocked off. + + clientAndAccount := func(name string) (libgoal.Client, model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 1) + fmt.Printf("Client %s is %v\n", name, accounts[0].Address) + return c, accounts[0] + } + + c01, account01 := clientAndAccount("Node01") + + // 1. take wallet01 offline + keys := offline(&fixture, a, c01, account01.Address) + + // 2. Wait lookback rounds + wait(&fixture, a, lookback) + + // 4. rejoin, with 1/16 of total stake + onRound := online(&fixture, a, c01, account01.Address, keys) + + // 5. wait for enough rounds to pass, during which c01 can't vote, that is + // could get knocked off. + wait(&fixture, a, 161) + data, err := c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // 5a. just to be sure, do a zero pay to get it "noticed" + zeroPay(&fixture, a, c01, account01.Address) + data, err = c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // 6. Now wait until lookback after onRound (which should just be a couple + // more rounds). Check again, to ensure that once c01 is _really_ + // online/voting, it is still safe for long enough to propose. + a.NoError(fixture.WaitForRoundWithTimeout(onRound + lookback)) + data, err = c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + zeroPay(&fixture, a, c01, account01.Address) + data, err = c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // The node _could_ have gotten lucky and propose in first couple rounds it + // is allowed to propose, so this test is expected to be "flaky" in a + // sense. 
It would pass about 1/8 of the time, even if we had the problem it + // is looking for. +} + +// TestBigIncrease shows when an incentive eligible account receives a lot of +// algos, they are not immediately suspended. We also check the details of the +// mechanism - that LastHeartbeat is incremented when such an account doubles +// its balance in a single pay. +func TestBigIncrease(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + var fixture fixtures.RestClientFixture + const lookback = 32 + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second/2, lookback) + fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) + defer fixture.Shutdown() + + // Overview of this test: + // 0. spend wallet01 down so it has a very small percent of stake + // 1. rereg wallet01 so it is suspendable + // 2. move almost all of wallet15's money to wallet01 + // 3. check that c1.LastHeart is set to 32 rounds later + // 4. wait 40 rounds ensure c1 stays online + + clientAndAccount := func(name string) (libgoal.Client, model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 1) + fmt.Printf("Client %s is %v\n", name, accounts[0].Address) + return c, accounts[0] + } + + c1, account01 := clientAndAccount("Node01") + c15, account15 := clientAndAccount("Node15") + + // We need to spend 01 down so that it has nearly no stake. That way, it + // certainly will not have proposed by pure luck just before the critical + // round. If we don't do that, 1/16 of stake is enough that it will probably + // have a fairly recent proposal, and not get knocked off. + pay(&fixture, a, c1, account01.Address, account15.Address, 99*account01.Amount/100) + + rekeyreg(&fixture, a, c1, account01.Address, true) + + // 2. 
Wait lookback rounds + wait(&fixture, a, lookback) + + tx := pay(&fixture, a, c15, account15.Address, account01.Address, 50*account15.Amount/100) + data, err := c15.AccountData(account01.Address) + a.NoError(err) + a.EqualValues(*tx.ConfirmedRound+lookback, data.LastHeartbeat) + + wait(&fixture, a, lookback+5) + data, err = c15.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + a.True(data.IncentiveEligible) +} + +func wait(f *fixtures.RestClientFixture, a *require.Assertions, count uint64) { + res, err := f.AlgodClient.Status() + a.NoError(err) + round := res.LastRound + count + a.NoError(f.WaitForRoundWithTimeout(round)) +} + +func pay(f *fixtures.RestClientFixture, a *require.Assertions, + c libgoal.Client, from string, to string, amount uint64) v2.PreEncodedTxInfo { + pay, err := c.SendPaymentFromUnencryptedWallet(from, to, 1000, amount, nil) + a.NoError(err) + tx, err := f.WaitForConfirmedTxn(uint64(pay.LastValid), pay.ID().String()) + a.NoError(err) + return tx +} + +func zeroPay(f *fixtures.RestClientFixture, a *require.Assertions, + c libgoal.Client, address string) { + pay(f, a, c, address, address, 0) +} + +// Go offline, but return the key material so it's easy to go back online +func offline(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string) transactions.KeyregTxnFields { + offTx, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, 100_000, [32]byte{}) + a.NoError(err) + + data, err := client.AccountData(address) + a.NoError(err) + keys := transactions.KeyregTxnFields{ + VotePK: data.VoteID, + SelectionPK: data.SelectionID, + StateProofPK: data.StateProofID, + VoteFirst: data.VoteFirstValid, + VoteLast: data.VoteLastValid, + VoteKeyDilution: data.VoteKeyDilution, + } + + wh, err := client.GetUnencryptedWalletHandle() + a.NoError(err) + onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, offTx) + a.NoError(err) + txn, err := 
f.WaitForConfirmedTxn(uint64(offTx.LastValid), onlineTxID) + a.NoError(err) + // sync up with the network + _, err = client.WaitForRound(*txn.ConfirmedRound) + a.NoError(err) + data, err = client.AccountData(address) + a.NoError(err) + a.Equal(basics.Offline, data.Status) + return keys +} + +// Go online with the supplied key material +func online(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string, keys transactions.KeyregTxnFields) uint64 { + // sanity check that we start offline + data, err := client.AccountData(address) + a.NoError(err) + a.Equal(basics.Offline, data.Status) + + // make an empty keyreg, we'll copy in the keys + onTx, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, 100_000, [32]byte{}) + a.NoError(err) + + onTx.KeyregTxnFields = keys + wh, err := client.GetUnencryptedWalletHandle() + a.NoError(err) + onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, onTx) + a.NoError(err) + receipt, err := f.WaitForConfirmedTxn(uint64(onTx.LastValid), onlineTxID) + a.NoError(err) + data, err = client.AccountData(address) + a.NoError(err) + // Before bug fix, the account would be suspended in the same round of the + // keyreg, so it would not be online. 
+ a.Equal(basics.Online, data.Status) + return *receipt.ConfirmedRound +} diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go index 0b38fe76ff..21a701139a 100644 --- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go +++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go @@ -216,7 +216,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { // Need to wait for funding to take effect on selection, then we can see if we're participating // Stop before the account should become eligible for selection so we can ensure it wasn't - err = fixture.ClientWaitForRound(fixture.AlgodClient, uint64(accountProposesStarting-1), + err = fixture.WaitForRound(uint64(accountProposesStarting-1), time.Duration(uint64(globals.MaxTimePerRound)*uint64(accountProposesStarting-1))) a.NoError(err) @@ -226,7 +226,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { a.False(blockWasProposed, "account should not be selected until BalLookback (round %d) passes", int(accountProposesStarting-1)) // Now wait until the round where the funded account will be used. 
- err = fixture.ClientWaitForRound(fixture.AlgodClient, uint64(accountProposesStarting), 10*globals.MaxTimePerRound) + err = fixture.WaitForRound(uint64(accountProposesStarting), 10*globals.MaxTimePerRound) a.NoError(err) blockWasProposedByNewAccountRecently := fixture.VerifyBlockProposedRange(newAccount, int(accountProposesStarting), 1) diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go index 21ce3bdf0d..e3429490c4 100644 --- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go +++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go @@ -57,7 +57,7 @@ func TestBasicPartitionRecovery(t *testing.T) { // Let the network make some progress waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Now stop 2nd node @@ -133,7 +133,7 @@ func runTestWithStaggeredStopStart(t *testing.T, fixture *fixtures.RestClientFix // Let the network make some progress waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound) + err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Stop Node1 @@ -196,7 +196,7 @@ func TestBasicPartitionRecoveryPartOffline(t *testing.T) { // Let the network make some progress waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound) + err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Stop Node1 @@ -264,8 +264,7 @@ func TestPartitionHalfOffline(t *testing.T) { // Let the network make some progress client := fixture.LibGoalClient - waitForRound := uint64(3) - err = 
fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound) + err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(3) a.NoError(err) // Stop nodes with 50% of stake diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go index 85d7d5e127..4735ca840f 100644 --- a/test/e2e-go/features/stateproofs/stateproofs_test.go +++ b/test/e2e-go/features/stateproofs/stateproofs_test.go @@ -810,6 +810,7 @@ func TestTotalWeightChanges(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensusParams := getDefaultStateProofConsensusParams() + consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100 consensusParams.StateProofStrengthTarget = 4 consensusParams.StateProofTopVoters = 4 diff --git a/test/e2e-go/restAPI/other/misc_test.go b/test/e2e-go/restAPI/other/misc_test.go index eeaff9fcd1..23e805dc25 100644 --- a/test/e2e-go/restAPI/other/misc_test.go +++ b/test/e2e-go/restAPI/other/misc_test.go @@ -62,7 +62,7 @@ func TestDisabledAPIConfig(t *testing.T) { a.NoError(err) testClient := client.MakeRestClient(url, "") // empty token - _, err = testClient.WaitForBlock(1) + err = testClient.WaitForRoundWithTimeout(1) assert.NoError(t, err) _, err = testClient.Block(1) assert.NoError(t, err) diff --git a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go index 66601c1737..b058b510e4 100644 --- a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go +++ b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go @@ -53,7 +53,7 @@ func TestSimulateTxnTracerDevMode(t *testing.T) { testClient := localFixture.LibGoalClient - _, err := testClient.WaitForRound(1) + _, err := testClient.Status() a.NoError(err) wh, err := testClient.GetUnencryptedWalletHandle() @@ -288,11 +288,11 @@ int 1` // Let the primary node make some progress primaryClient := fixture.GetAlgodClientForController(nc) - err = 
fixture.ClientWaitForRoundWithTimeout(primaryClient, followerSyncRound+uint64(cfg.MaxAcctLookback)) + err = primaryClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback)) a.NoError(err) // Let follower node progress as far as it can - err = fixture.ClientWaitForRoundWithTimeout(followClient, followerSyncRound+uint64(cfg.MaxAcctLookback)-1) + err = followClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback) - 1) a.NoError(err) simulateRequest := v2.PreEncodedSimulateRequest{ diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go index 549a82c5ab..c41ad84166 100644 --- a/test/e2e-go/upgrades/application_support_test.go +++ b/test/e2e-go/upgrades/application_support_test.go @@ -180,7 +180,7 @@ int 1 curStatus, err = client.Status() a.NoError(err) - a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) } @@ -438,7 +438,7 @@ int 1 curStatus, err = client.Status() a.NoError(err) - a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go index 0dcec41545..cc3eca018c 100644 --- a/test/e2e-go/upgrades/rekey_support_test.go +++ b/test/e2e-go/upgrades/rekey_support_test.go @@ -150,7 +150,7 @@ func TestRekeyUpgrade(t *testing.T) { curStatus, err = client.Status() a.NoError(err) - a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } diff --git a/test/framework/fixtures/libgoalFixture.go 
b/test/framework/fixtures/libgoalFixture.go index bd4f615ae7..c05a59ff1f 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -42,7 +42,6 @@ import ( "github.com/algorand/go-algorand/netdeploy" "github.com/algorand/go-algorand/nodecontrol" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/test/e2e-go/globals" "github.com/algorand/go-algorand/util/db" ) @@ -67,26 +66,32 @@ func (f *RestClientFixture) SetConsensus(consensus config.ConsensusProtocols) { f.consensus = consensus } +// AlterConsensus allows the caller to modify the consensus settings for a given version. +func (f *RestClientFixture) AlterConsensus(ver protocol.ConsensusVersion, alter func(config.ConsensusParams) config.ConsensusParams) { + if f.consensus == nil { + f.consensus = make(config.ConsensusProtocols) + } + f.consensus[ver] = alter(f.ConsensusParamsFromVer(ver)) +} + // FasterConsensus speeds up the given consensus version in two ways. The seed // refresh lookback is set to 8 (instead of 80), so the 320 round balance // lookback becomes 32. And, if the architecture implies it can be handled, // round times are shortened by lowering vote timeouts. 
func (f *RestClientFixture) FasterConsensus(ver protocol.ConsensusVersion, timeout time.Duration, lookback basics.Round) { - if f.consensus == nil { - f.consensus = make(config.ConsensusProtocols) - } - fast := config.Consensus[ver] - // balanceRound is 4 * SeedRefreshInterval - if lookback%4 != 0 { - panic(fmt.Sprintf("lookback must be a multiple of 4, got %d", lookback)) - } - fast.SeedRefreshInterval = uint64(lookback) / 4 - // and speed up the rounds while we're at it - if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" { - fast.AgreementFilterTimeoutPeriod0 = timeout - fast.AgreementFilterTimeout = timeout - } - f.consensus[ver] = fast + f.AlterConsensus(ver, func(fast config.ConsensusParams) config.ConsensusParams { + // balanceRound is 4 * SeedRefreshInterval + if lookback%4 != 0 { + panic(fmt.Sprintf("lookback must be a multiple of 4, got %d", lookback)) + } + fast.SeedRefreshInterval = uint64(lookback) / 4 + // and speed up the rounds while we're at it + if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" { + fast.AgreementFilterTimeoutPeriod0 = timeout + fast.AgreementFilterTimeout = timeout + } + return fast + }) } // Setup is called to initialize the test fixture for the test(s) @@ -452,75 +457,6 @@ func (f *LibGoalFixture) GetParticipationOnlyAccounts(lg libgoal.Client) []accou return f.clientPartKeys[lg.DataDir()] } -// WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the -// globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach. -func (f *LibGoalFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error { - return f.ClientWaitForRoundWithTimeout(f.LibGoalClient, roundToWaitFor) -} - -// ClientWaitForRoundWithTimeout waits for a given round to be reached by the specific client/node. 
The implementation -// also ensures to limit the wait time for each round to the globals.MaxTimePerRound so we can alert when we're -// getting "hung" before waiting for all the expected rounds to reach. -func (f *LibGoalFixture) ClientWaitForRoundWithTimeout(client libgoal.Client, roundToWaitFor uint64) error { - status, err := client.Status() - require.NoError(f.t, err) - lastRound := status.LastRound - - // If node is already at or past target round, we're done - if lastRound >= roundToWaitFor { - return nil - } - - roundTime := globals.MaxTimePerRound * 10 // For first block, we wait much longer - roundComplete := make(chan error, 2) - - for nextRound := lastRound + 1; lastRound < roundToWaitFor; { - roundStarted := time.Now() - - go func(done chan error) { - err := f.ClientWaitForRound(client, nextRound, roundTime) - done <- err - }(roundComplete) - - select { - case lastError := <-roundComplete: - if lastError != nil { - close(roundComplete) - return lastError - } - case <-time.After(roundTime): - // we've timed out. 
- time := time.Now().Sub(roundStarted) - return fmt.Errorf("fixture.WaitForRound took %3.2f seconds between round %d and %d", time.Seconds(), lastRound, nextRound) - } - - roundTime = singleRoundMaxTime - lastRound++ - nextRound++ - } - return nil -} - -// ClientWaitForRound waits up to the specified amount of time for -// the network to reach or pass the specified round, on the specific client/node -func (f *LibGoalFixture) ClientWaitForRound(client libgoal.Client, round uint64, waitTime time.Duration) error { - timeout := time.NewTimer(waitTime) - for { - status, err := client.Status() - if err != nil { - return err - } - if status.LastRound >= round { - return nil - } - select { - case <-timeout.C: - return fmt.Errorf("timeout waiting for round %v", round) - case <-time.After(200 * time.Millisecond): - } - } -} - // CurrentConsensusParams returns the consensus parameters for the currently active protocol func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusParams, err error) { status, err := f.LibGoalClient.Status() @@ -532,20 +468,20 @@ func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusPar } // ConsensusParams returns the consensus parameters for the protocol from the specified round -func (f *LibGoalFixture) ConsensusParams(round uint64) (consensus config.ConsensusParams, err error) { +func (f *LibGoalFixture) ConsensusParams(round uint64) (config.ConsensusParams, error) { block, err := f.LibGoalClient.BookkeepingBlock(round) if err != nil { - return + return config.ConsensusParams{}, err } - version := protocol.ConsensusVersion(block.CurrentProtocol) - if f.consensus != nil { - consensus, has := f.consensus[version] - if has { - return consensus, nil - } + return f.ConsensusParamsFromVer(block.CurrentProtocol), nil +} + +// ConsensusParamsFromVer looks up a consensus version, allowing for override +func (f *LibGoalFixture) ConsensusParamsFromVer(cv protocol.ConsensusVersion) config.ConsensusParams { + if 
consensus, has := f.consensus[cv]; has { + return consensus } - consensus = config.Consensus[version] - return + return config.Consensus[cv] } // CurrentMinFeeAndBalance returns the MinTxnFee and MinBalance for the currently active protocol diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go index 473df25d38..fb1a26d31b 100644 --- a/test/framework/fixtures/restClientFixture.go +++ b/test/framework/fixtures/restClientFixture.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/netdeploy" "github.com/algorand/go-algorand/protocol" @@ -34,7 +35,6 @@ import ( "github.com/algorand/go-algorand/libgoal" "github.com/algorand/go-algorand/nodecontrol" - "github.com/algorand/go-algorand/test/e2e-go/globals" "github.com/algorand/go-algorand/util/tokens" ) @@ -80,79 +80,37 @@ func (f *RestClientFixture) GetAlgodClientForController(nc nodecontrol.NodeContr // WaitForRound waits up to the specified amount of time for // the network to reach or pass the specified round func (f *RestClientFixture) WaitForRound(round uint64, waitTime time.Duration) error { - return f.ClientWaitForRound(f.AlgodClient, round, waitTime) + _, err := f.AlgodClient.WaitForRound(round, waitTime) + return err } -// ClientWaitForRound waits up to the specified amount of time for -// the network to reach or pass the specified round, on the specific client/node -func (f *RestClientFixture) ClientWaitForRound(client client.RestClient, round uint64, waitTime time.Duration) error { - timeout := time.NewTimer(waitTime) - for { - status, err := client.Status() - if err != nil { - return err - } - - if status.LastRound >= round { - return nil - } - select { - case <-timeout.C: - return fmt.Errorf("timeout waiting for round %v with last round = %v", round, status.LastRound) - case <-time.After(200 * time.Millisecond): - 
} +// WithEveryBlock calls the provided function for every block from first to last. +func (f *RestClientFixture) WithEveryBlock(first, last uint64, visit func(bookkeeping.Block)) { + for round := first; round <= last; round++ { + err := f.WaitForRoundWithTimeout(round) + require.NoError(f.t, err) + block, err := f.AlgodClient.Block(round) + require.NoError(f.t, err) + visit(block.Block) } } // WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the // globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach. func (f *RestClientFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error { - return f.ClientWaitForRoundWithTimeout(f.AlgodClient, roundToWaitFor) + return f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor) } -const singleRoundMaxTime = globals.MaxTimePerRound * 40 - -// ClientWaitForRoundWithTimeout waits for a given round to be reached by the specific client/node. The implementation -// also ensures to limit the wait time for each round to the globals.MaxTimePerRound so we can alert when we're -// getting "hung" before waiting for all the expected rounds to reach. -func (f *RestClientFixture) ClientWaitForRoundWithTimeout(client client.RestClient, roundToWaitFor uint64) error { - status, err := client.Status() - require.NoError(f.t, err) - lastRound := status.LastRound - - // If node is already at or past target round, we're done - if lastRound >= roundToWaitFor { - return nil +// WaitForBlockWithTimeout waits for a given round and returns its block. 
+func (f *RestClientFixture) WaitForBlockWithTimeout(roundToWaitFor uint64) (bookkeeping.Block, error) { + if err := f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor); err != nil { + return bookkeeping.Block{}, err } - - roundTime := globals.MaxTimePerRound * 10 // For first block, we wait much longer - roundComplete := make(chan error, 2) - - for nextRound := lastRound + 1; lastRound < roundToWaitFor; { - roundStarted := time.Now() - - go func(done chan error) { - err := f.ClientWaitForRound(client, nextRound, roundTime) - done <- err - }(roundComplete) - - select { - case lastError := <-roundComplete: - if lastError != nil { - close(roundComplete) - return lastError - } - case <-time.After(roundTime): - // we've timed out. - time := time.Now().Sub(roundStarted) - return fmt.Errorf("fixture.WaitForRound took %3.2f seconds between round %d and %d", time.Seconds(), lastRound, nextRound) - } - - roundTime = singleRoundMaxTime - lastRound++ - nextRound++ + both, err := f.AlgodClient.EncodedBlockCert(roundToWaitFor) + if err != nil { + return bookkeeping.Block{}, err } - return nil + return both.Block, nil } // GetFirstAccount returns the first account from listing local accounts @@ -367,17 +325,15 @@ func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassw // VerifyBlockProposedRange checks the rounds starting at fromRounds and moving backwards checking countDownNumRounds rounds if any // blocks were proposed by address -func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) (blockWasProposed bool) { - c := f.LibGoalClient +func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) bool { for i := 0; i < countDownNumRounds; i++ { - cert, err := c.EncodedBlockCert(uint64(fromRound - i)) + cert, err := f.AlgodClient.EncodedBlockCert(uint64(fromRound - i)) require.NoError(f.t, err, "client failed to get block %d", fromRound-i) if 
cert.Certificate.Proposal.OriginalProposer.GetUserAddress() == account { - blockWasProposed = true - break + return true } } - return + return false } // VerifyBlockProposed checks the last searchRange blocks to see if any blocks were proposed by address diff --git a/test/testdata/nettemplates/Challenges.json b/test/testdata/nettemplates/Challenges.json new file mode 100644 index 0000000000..1d9944937c --- /dev/null +++ b/test/testdata/nettemplates/Challenges.json @@ -0,0 +1,60 @@ +{ + "Genesis": { + "NetworkName": "tbd", + "ConsensusProtocol": "future", + "LastPartKeyRound": 500, + "Wallets": [ + { "Name": "Relay", "Stake": 84, "Online": true }, + { "Name": "Wallet0", "Stake": 1, "Online": true }, + { "Name": "Wallet1", "Stake": 1, "Online": true }, + { "Name": "Wallet2", "Stake": 1, "Online": true }, + { "Name": "Wallet3", "Stake": 1, "Online": true }, + { "Name": "Wallet4", "Stake": 1, "Online": true }, + { "Name": "Wallet5", "Stake": 1, "Online": true }, + { "Name": "Wallet6", "Stake": 1, "Online": true }, + { "Name": "Wallet7", "Stake": 1, "Online": true }, + { "Name": "Wallet8", "Stake": 1, "Online": true }, + { "Name": "Wallet9", "Stake": 1, "Online": true }, + { "Name": "WalletA", "Stake": 1, "Online": true }, + { "Name": "WalletB", "Stake": 1, "Online": true }, + { "Name": "WalletC", "Stake": 1, "Online": true }, + { "Name": "WalletD", "Stake": 1, "Online": true }, + { "Name": "WalletE", "Stake": 1, "Online": true }, + { "Name": "WalletF", "Stake": 1, "Online": true } + ], + "RewardsPoolBalance": 0 + }, + "Nodes": [ + { + "Name": "Relay", + "Wallets": [{ "Name": "Relay", "ParticipationOnly": false }], + "IsRelay": true + }, + { + "Name": "Node1", + "Wallets": [ + { "Name": "Wallet0", "ParticipationOnly": false }, + { "Name": "Wallet1", "ParticipationOnly": false }, + { "Name": "Wallet2", "ParticipationOnly": false }, + { "Name": "Wallet3", "ParticipationOnly": false }, + { "Name": "Wallet4", "ParticipationOnly": false }, + { "Name": "Wallet5", 
"ParticipationOnly": false }, + { "Name": "Wallet6", "ParticipationOnly": false }, + { "Name": "Wallet7", "ParticipationOnly": false } + ] + }, + { + "Name": "Node2", + "Wallets": [ + { "Name": "Wallet8", "ParticipationOnly": false }, + { "Name": "Wallet9", "ParticipationOnly": false }, + { "Name": "WalletA", "ParticipationOnly": false }, + { "Name": "WalletB", "ParticipationOnly": false }, + { "Name": "WalletC", "ParticipationOnly": false }, + { "Name": "WalletD", "ParticipationOnly": false }, + { "Name": "WalletE", "ParticipationOnly": false }, + { "Name": "WalletF", "ParticipationOnly": false } + ] + } + ] +} diff --git a/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go index cde3894823..edc9d612dd 100644 --- a/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go +++ b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go @@ -237,12 +237,17 @@ func (t *TypeNode) buildStructChildren(path TypePath) TypePath { if typeField.Anonymous { // embedded struct case - actualKind := typeField.Type.Kind() + fieldType := typeField.Type + if fieldType.Kind() == reflect.Ptr { + // get underlying type for embedded pointer to struct + fieldType = fieldType.Elem() + } + actualKind := fieldType.Kind() if actualKind != reflect.Struct { panic(fmt.Sprintf("expected [%s] but got unexpected embedded type: %s", reflect.Struct, typeField.Type)) } - embedded := TypeNode{t.Depth, typeField.Type, reflect.Struct, nil, nil} + embedded := TypeNode{t.Depth, fieldType, reflect.Struct, nil, nil} embeddedCyclePath := embedded.build(path) if len(embeddedCyclePath) > 0 { cyclePath = embeddedCyclePath diff --git a/util/db/dbutil.go b/util/db/dbutil.go index 8b045ad70c..e1cd16e2b5 100644 --- a/util/db/dbutil.go +++ b/util/db/dbutil.go @@ -327,7 +327,7 @@ func (db *Accessor) AtomicContextWithRetryClearFn(ctx context.Context, fn idemFn } if time.Now().After(atomicDeadline) { - db.getDecoratedLogger(fn, extras).Warnf("dbatomic: tx surpassed expected deadline by %v", 
time.Now().Sub(atomicDeadline)) + db.getDecoratedLogger(fn, extras).Warnf("dbatomic: tx surpassed expected deadline by %v", time.Since(atomicDeadline)) } return } diff --git a/util/execpool/stream.go b/util/execpool/stream.go index 29ec4613f1..f6017a0af1 100644 --- a/util/execpool/stream.go +++ b/util/execpool/stream.go @@ -87,7 +87,7 @@ func (sv *StreamToBatch) Start(ctx context.Context) { go sv.batchingLoop() } -// WaitForStop waits until the batching loop terminates afer the ctx is canceled +// WaitForStop waits until the batching loop terminates after the ctx is canceled func (sv *StreamToBatch) WaitForStop() { sv.activeLoopWg.Wait() } diff --git a/util/set.go b/util/set.go index 6851299c46..3727a99f33 100644 --- a/util/set.go +++ b/util/set.go @@ -40,3 +40,39 @@ func (s Set[T]) Contains(elem T) (exists bool) { _, exists = s[elem] return } + +// Union constructs a new set, containing all elements from the given sets. nil +// is never returned +func Union[T comparable](sets ...Set[T]) Set[T] { + union := make(Set[T]) + for _, set := range sets { + for elem := range set { + union.Add(elem) + } + } + return union +} + +// Intersection constructs a new set, containing all elements that appear in all +// given sets. nil is never returned. Intersection of no sets is an empty set +// because that seems more useful, regardless of your very reasonable arguments +// otherwise. +func Intersection[T comparable](sets ...Set[T]) Set[T] { + var intersection = make(Set[T]) + if len(sets) == 0 { + return intersection + } + for elem := range sets[0] { + inAll := true + for _, set := range sets[1:] { + if _, exists := set[elem]; !exists { + inAll = false + break + } + } + if inAll { + intersection.Add(elem) + } + } + return intersection +} diff --git a/util/set_test.go b/util/set_test.go new file mode 100644 index 0000000000..86df9c5464 --- /dev/null +++ b/util/set_test.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2024 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package util + +import ( + "testing" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +func TestMakeSet(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + s := MakeSet(1, 2, 3) + require.True(t, s.Contains(1)) + require.True(t, s.Contains(2)) + require.True(t, s.Contains(3)) + require.False(t, s.Contains(4)) + + s = MakeSet[int]() + require.NotNil(t, s) + require.False(t, s.Contains(1)) + require.False(t, s.Contains(4)) +} + +func TestSetAdd(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + s := MakeSet[int]() + s.Add(6) + require.False(t, s.Contains(1)) + require.True(t, s.Contains(6)) + s.Add(6) + require.False(t, s.Contains(1)) + require.True(t, s.Contains(6)) +} + +func TestSetOps(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + empty := MakeSet[string]() + abc := MakeSet("a", "b", "c") + cde := MakeSet("c", "d", "e") + + require.Equal(t, abc, Union(abc)) + require.Equal(t, abc, Union(empty, abc)) + require.Equal(t, abc, Union(abc, empty, abc)) + require.NotNil(t, Union(empty, empty, empty)) + require.Equal(t, empty, Union(empty, empty, empty)) + + require.Equal(t, abc, Intersection(abc, abc)) + require.NotNil(t, Intersection(abc, empty)) + require.Equal(t, empty, Intersection(abc, 
empty)) + require.Equal(t, empty, Intersection(empty, abc)) + require.Equal(t, MakeSet("c"), Intersection(abc, cde)) + require.Equal(t, MakeSet("c"), Intersection(cde, abc, cde)) +}