diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go
index 2fc19f108f..f83ed9aab6 100644
--- a/accounts/abi/bind/backend.go
+++ b/accounts/abi/bind/backend.go
@@ -39,7 +39,7 @@ import (
var (
// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
- // have any code associated with it (i.e. suicided).
+ // have any code associated with it (i.e. self-destructed).
ErrNoCode = errors.New("no contract code at given address")
// ErrNoAcceptedState is raised when attempting to perform a accepted state action
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 7efabaa1b6..d401e086fe 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -62,8 +62,6 @@ import (
var (
_ bind.AcceptedContractCaller = (*SimulatedBackend)(nil)
_ bind.ContractBackend = (*SimulatedBackend)(nil)
- _ bind.ContractFilterer = (*SimulatedBackend)(nil)
- _ bind.ContractTransactor = (*SimulatedBackend)(nil)
_ bind.DeployBackend = (*SimulatedBackend)(nil)
_ interfaces.ChainReader = (*SimulatedBackend)(nil)
@@ -147,7 +145,7 @@ func (b *SimulatedBackend) Close() error {
return nil
}
-// Commit imports all the pending transactions as a single block and starts a
+// Commit imports all the accepted transactions as a single block and starts a
// fresh new state.
func (b *SimulatedBackend) Commit(accept bool) common.Hash {
b.mu.Lock()
@@ -171,7 +169,7 @@ func (b *SimulatedBackend) Commit(accept bool) common.Hash {
return blockHash
}
-// Rollback aborts all pending transactions, reverting to the last committed state.
+// Rollback aborts all accepted transactions, reverting to the last committed state.
func (b *SimulatedBackend) Rollback() {
b.mu.Lock()
defer b.mu.Unlock()
@@ -206,7 +204,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error {
defer b.mu.Unlock()
if len(b.acceptedBlock.Transactions()) != 0 {
- return errors.New("pending block dirty")
+ return errors.New("accepted block dirty")
}
block, err := b.blockByHash(ctx, parent)
if err != nil {
@@ -293,10 +291,10 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
return receipt, nil
}
-// TransactionByHash checks the pool of pending transactions in addition to the
-// blockchain. The isPending return value indicates whether the transaction has been
+// TransactionByHash checks the pool of accepted transactions in addition to the
+// blockchain. The isAccepted return value indicates whether the transaction has been
// mined yet. Note that the transaction may not be part of the canonical chain even if
-// it's not pending.
+// it's not accepted.
func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -537,7 +535,7 @@ func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, erro
return big.NewInt(1), nil
}
-// EstimateGas executes the requested code against the currently pending block/state and
+// EstimateGas executes the requested code against the currently accepted block/state and
// returns the used amount of gas.
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.CallMsg) (uint64, error) {
b.mu.Lock()
@@ -641,7 +639,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call
return hi, nil
}
-// callContract implements common code between normal and pending contract calls.
+// callContract implements common code between normal and accepted contract calls.
// state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) {
// Gas prices post 1559 need to be initialized
@@ -711,7 +709,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.Cal
return core.ApplyMessage(vmEnv, msg, gasPool)
}
-// SendTransaction updates the pending block to include the given transaction.
+// SendTransaction updates the accepted block to include the given transaction.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
b.mu.Lock()
defer b.mu.Unlock()
@@ -854,7 +852,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
}
block := b.blockchain.GetBlockByHash(b.acceptedBlock.ParentHash())
if block == nil {
- return fmt.Errorf("could not find parent")
+ return errors.New("could not find parent")
}
blocks, _, _ := core.GenerateChain(b.config, block, dummy.NewFaker(), b.database, 1, 10, func(number int, block *core.BlockGen) {
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 714caf95f2..f51e3e7101 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -163,6 +163,7 @@ func TestAdjustTime(t *testing.T) {
func TestNewAdjustTimeFail(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr)
+ defer sim.blockchain.Stop()
// Create tx and send
head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index e05f509bcc..b38f118c40 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -151,7 +151,7 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s
normalized := original
normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
// Ensure there is no duplicated identifier
- identifiers := callIdentifiers
+ var identifiers = callIdentifiers
if !original.IsConstant() {
identifiers = transactIdentifiers
}
diff --git a/accounts/abi/error.go b/accounts/abi/error.go
index d94c262124..34bb373c60 100644
--- a/accounts/abi/error.go
+++ b/accounts/abi/error.go
@@ -42,7 +42,7 @@ type Error struct {
str string
// Sig contains the string signature according to the ABI spec.
- // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)"
+ // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256"
Sig string
diff --git a/accounts/abi/event.go b/accounts/abi/event.go
index 63ecda229d..f4f0f5d92d 100644
--- a/accounts/abi/event.go
+++ b/accounts/abi/event.go
@@ -59,6 +59,7 @@ type Event struct {
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256"
Sig string
+
// ID returns the canonical representation of the event's signature used by the
// abi definition to identify event names and types.
ID common.Hash
diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go
index 9f7a07a0c3..4adbf5b1c6 100644
--- a/accounts/abi/reflect.go
+++ b/accounts/abi/reflect.go
@@ -238,7 +238,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
structFieldName := ToCamelCase(argName)
if structFieldName == "" {
- return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
+ return nil, errors.New("abi: purely underscored output cannot unpack to struct")
}
// this abi has already been paired, skip it... unless there exists another, yet unassigned
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index f7dc5e6a82..75a6c15fd7 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -80,7 +80,7 @@ var (
func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
- return Type{}, fmt.Errorf("invalid arg type in abi")
+ return Type{}, errors.New("invalid arg type in abi")
}
typ.stringKind = t
@@ -119,7 +119,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
}
typ.stringKind = embeddedType.stringKind + sliced
} else {
- return Type{}, fmt.Errorf("invalid formatting of array type")
+ return Type{}, errors.New("invalid formatting of array type")
}
return typ, err
}
@@ -356,7 +356,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
}
}
-// requireLengthPrefix returns whether the type requires any sort of length
+// requiresLengthPrefix returns whether the type requires any sort of length
// prefixing.
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go
index bc57d71db6..2899e5a5b3 100644
--- a/accounts/abi/unpack.go
+++ b/accounts/abi/unpack.go
@@ -28,6 +28,7 @@ package abi
import (
"encoding/binary"
+ "errors"
"fmt"
"math"
"math/big"
@@ -135,7 +136,7 @@ func readBool(word []byte) (bool, error) {
// readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
if t.T != FunctionTy {
- return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
+ return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array")
}
if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
@@ -148,7 +149,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
// ReadFixedBytes uses reflection to create a fixed array to be read from.
func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
if t.T != FixedBytesTy {
- return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
+ return nil, errors.New("abi: invalid type in call to make fixed byte array")
}
// convert
array := reflect.New(t.GetType()).Elem()
@@ -176,7 +177,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// declare our array
refSlice = reflect.New(t.GetType()).Elem()
} else {
- return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
+ return nil, errors.New("abi: invalid type in array/slice unpacking stage")
}
// Arrays have packed elements, resulting in longer unpack steps.
diff --git a/accounts/external/backend.go b/accounts/external/backend.go
index 1869aef403..d80932c323 100644
--- a/accounts/external/backend.go
+++ b/accounts/external/backend.go
@@ -27,6 +27,7 @@
package external
import (
+ "errors"
"fmt"
"math/big"
"sync"
@@ -108,11 +109,11 @@ func (api *ExternalSigner) Status() (string, error) {
}
func (api *ExternalSigner) Open(passphrase string) error {
- return fmt.Errorf("operation not supported on external signers")
+ return errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) Close() error {
- return fmt.Errorf("operation not supported on external signers")
+ return errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) Accounts() []accounts.Account {
@@ -155,7 +156,7 @@ func (api *ExternalSigner) Contains(account accounts.Account) bool {
}
func (api *ExternalSigner) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
- return accounts.Account{}, fmt.Errorf("operation not supported on external signers")
+ return accounts.Account{}, errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain interfaces.ChainStateReader) {
@@ -252,14 +253,14 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
}
func (api *ExternalSigner) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {
- return []byte{}, fmt.Errorf("password-operations not supported on external signers")
+ return []byte{}, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
- return nil, fmt.Errorf("password-operations not supported on external signers")
+ return nil, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) {
- return nil, fmt.Errorf("password-operations not supported on external signers")
+ return nil, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) listAccounts() ([]common.Address, error) {
diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go
index 913def043c..dbe834b198 100644
--- a/accounts/keystore/account_cache.go
+++ b/accounts/keystore/account_cache.go
@@ -41,6 +41,7 @@ import (
mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
+ "golang.org/x/exp/slices"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does
@@ -48,11 +49,10 @@ import (
// exist yet, the code will attempt to create a watcher at most this often.
const minReloadInterval = 2 * time.Second
-type accountsByURL []accounts.Account
-
-func (s accountsByURL) Len() int { return len(s) }
-func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 }
-func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+// byURL defines the sorting order for accounts.
+func byURL(a, b accounts.Account) int {
+ return a.URL.Cmp(b.URL)
+}
// AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
@@ -77,7 +77,7 @@ type accountCache struct {
keydir string
watcher *watcher
mu sync.Mutex
- all accountsByURL
+ all []accounts.Account
byAddr map[common.Address][]accounts.Account
throttle *time.Timer
notify chan struct{}
@@ -204,7 +204,7 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
default:
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
copy(err.Matches, matches)
- sort.Sort(accountsByURL(err.Matches))
+ slices.SortFunc(err.Matches, byURL)
return accounts.Account{}, err
}
}
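
Note for reviewers: the cache now sorts with `slices.SortFunc` from `golang.org/x/exp/slices`, which in the version this diff targets takes a three-way comparator returning a negative, zero, or positive int instead of a `sort.Interface`. A minimal, self-contained sketch of the same pattern; `item` and its `url` field are illustrative stand-ins for `accounts.Account` and its keystore URL:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

// item stands in for accounts.Account; url stands in for its keystore URL.
type item struct{ url string }

// byURL mirrors the comparator above: negative if a sorts before b,
// zero if equal, positive otherwise.
func byURL(a, b item) int { return strings.Compare(a.url, b.url) }

func main() {
	s := []item{{"keystore:///c"}, {"keystore:///a"}, {"keystore:///b"}}
	slices.SortFunc(s, byURL) // replaces sort.Sort(accountsByURL(s))
	fmt.Println(s)
}
```

The same `byURL` comparator is reused by the test files below, which is why their `sort` imports disappear.
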
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go
index be030f0c0f..16a9453575 100644
--- a/accounts/keystore/account_cache_test.go
+++ b/accounts/keystore/account_cache_test.go
@@ -27,12 +27,12 @@
package keystore
import (
+ "errors"
"fmt"
"math/rand"
"os"
"path/filepath"
"reflect"
- "sort"
"testing"
"time"
@@ -40,6 +40,7 @@ import (
"github.com/cespare/cp"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
+ "golang.org/x/exp/slices"
)
var (
@@ -84,7 +85,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
select {
case <-ks.changes:
default:
- return fmt.Errorf("wasn't notified of new accounts")
+ return errors.New("wasn't notified of new accounts")
}
return nil
}
@@ -212,7 +213,7 @@ func TestCacheAddDeleteOrder(t *testing.T) {
// Check that the account list is sorted by filename.
wantAccounts := make([]accounts.Account, len(accs))
copy(wantAccounts, accs)
- sort.Sort(accountsByURL(wantAccounts))
+ slices.SortFunc(wantAccounts, byURL)
list := cache.accounts()
if !reflect.DeepEqual(list, wantAccounts) {
t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accs), spew.Sdump(wantAccounts))
diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go
index dc574e82e1..16db8259c9 100644
--- a/accounts/keystore/keystore_test.go
+++ b/accounts/keystore/keystore_test.go
@@ -30,7 +30,6 @@ import (
"math/rand"
"os"
"runtime"
- "sort"
"strings"
"sync"
"sync/atomic"
@@ -41,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
+ "golang.org/x/exp/slices"
)
var testSigData = make([]byte, 32)
@@ -410,19 +410,19 @@ func TestImportRace(t *testing.T) {
t.Fatalf("failed to export account: %v", acc)
}
_, ks2 := tmpKeyStore(t, true)
- var atom uint32
+ var atom atomic.Uint32
var wg sync.WaitGroup
wg.Add(2)
for i := 0; i < 2; i++ {
go func() {
defer wg.Done()
if _, err := ks2.Import(json, "new", "new"); err != nil {
- atomic.AddUint32(&atom, 1)
+ atom.Add(1)
}
}()
}
wg.Wait()
- if atom != 1 {
+ if atom.Load() != 1 {
t.Errorf("Import is racy")
}
}
@@ -437,7 +437,7 @@ func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, walle
for _, account := range live {
liveList = append(liveList, account)
}
- sort.Sort(accountsByURL(liveList))
+ slices.SortFunc(liveList, byURL)
for j, wallet := range wallets {
if accs := wallet.Accounts(); len(accs) != 1 {
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
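
The `TestImportRace` hunk above also migrates the race counter from a bare `uint32` plus `atomic.AddUint32` to the `sync/atomic.Uint32` type (Go 1.19+), so the final comparison now goes through an explicit `Load()` rather than a plain read. A minimal sketch of the same migration outside the keystore tests:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var failures atomic.Uint32 // was: var failures uint32
	var wg sync.WaitGroup

	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			failures.Add(1) // was: atomic.AddUint32(&failures, 1)
		}()
	}
	wg.Wait()

	// Load() makes the read explicitly atomic; the old code compared the
	// plain variable directly.
	fmt.Println(failures.Load())
}
```
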
diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go
index 7eeb0a9dbd..98cf02ba37 100644
--- a/accounts/keystore/passphrase.go
+++ b/accounts/keystore/passphrase.go
@@ -235,10 +235,13 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
if err != nil {
return nil, err
}
- key := crypto.ToECDSAUnsafe(keyBytes)
+ key, err := crypto.ToECDSA(keyBytes)
+ if err != nil {
+ return nil, fmt.Errorf("invalid key: %w", err)
+ }
id, err := uuid.FromBytes(keyId)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("invalid UUID: %w", err)
}
return &Key{
Id: id,
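
`DecryptKey` now decodes the key bytes with `crypto.ToECDSA`, which validates the scalar (length and range) and returns an error, instead of `crypto.ToECDSAUnsafe`, which skips those checks. A hedged sketch of the behavioural difference using go-ethereum's `crypto` package; the 31-byte blob is just an arbitrary malformed input chosen for illustration:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A well-formed key round-trips through the strict decoder.
	k, _ := crypto.GenerateKey()
	if _, err := crypto.ToECDSA(crypto.FromECDSA(k)); err != nil {
		panic(err)
	}

	// Malformed material (wrong length here) is rejected with an error,
	// matching the "invalid key: %w" wrapping added to DecryptKey, whereas
	// ToECDSAUnsafe would not enforce the length.
	if _, err := crypto.ToECDSA(make([]byte, 31)); err != nil {
		fmt.Println("invalid key:", err)
	}
}
```
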
diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go
index 5fc816e12a..062bfcb198 100644
--- a/accounts/scwallet/securechannel.go
+++ b/accounts/scwallet/securechannel.go
@@ -34,6 +34,7 @@ import (
"crypto/rand"
"crypto/sha256"
"crypto/sha512"
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/crypto"
@@ -135,7 +136,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
// Unpair disestablishes an existing pairing.
func (s *SecureChannelSession) Unpair() error {
if s.PairingKey == nil {
- return fmt.Errorf("cannot unpair: not paired")
+ return errors.New("cannot unpair: not paired")
}
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
@@ -151,7 +152,7 @@ func (s *SecureChannelSession) Unpair() error {
// Open initializes the secure channel.
func (s *SecureChannelSession) Open() error {
if s.iv != nil {
- return fmt.Errorf("session already opened")
+ return errors.New("session already opened")
}
response, err := s.open()
@@ -225,7 +226,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
if s.iv == nil {
- return nil, fmt.Errorf("channel not open")
+ return nil, errors.New("channel not open")
}
data, err := s.encryptAPDU(data)
@@ -264,7 +265,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
return nil, err
}
if !bytes.Equal(s.iv, rmac) {
- return nil, fmt.Errorf("invalid MAC in response")
+ return nil, errors.New("invalid MAC in response")
}
rapdu := &responseAPDU{}
@@ -329,7 +330,7 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
}
}
- return nil, fmt.Errorf("expected end of padding, got 0")
+ return nil, errors.New("expected end of padding, got 0")
}
// updateIV is an internal method that updates the initialization vector after
diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go
index 71a9ac9fbf..b83fce913e 100644
--- a/accounts/scwallet/wallet.go
+++ b/accounts/scwallet/wallet.go
@@ -262,7 +262,7 @@ func (w *Wallet) release() error {
// with the wallet.
func (w *Wallet) pair(puk []byte) error {
if w.session.paired() {
- return fmt.Errorf("wallet already paired")
+ return errors.New("wallet already paired")
}
pairing, err := w.session.pair(puk)
if err != nil {
@@ -823,7 +823,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
// unpair deletes an existing pairing.
func (s *Session) unpair() error {
if !s.verified {
- return fmt.Errorf("unpair requires that the PIN be verified")
+ return errors.New("unpair requires that the PIN be verified")
}
return s.Channel.Unpair()
}
@@ -917,7 +917,7 @@ func (s *Session) initialize(seed []byte) error {
return err
}
if status == "Online" {
- return fmt.Errorf("card is already initialized, cowardly refusing to proceed")
+ return errors.New("card is already initialized, cowardly refusing to proceed")
}
s.Wallet.lock.Lock()
diff --git a/cmd/evm/README.md b/cmd/evm/README.md
index 4df5f3a2a7..2459b853b9 100644
--- a/cmd/evm/README.md
+++ b/cmd/evm/README.md
@@ -342,7 +342,7 @@ To make `t8n` apply these, the following inputs are required:
- For ethash, it is `5000000000000000000` `wei`,
- If this is not defined, mining rewards are not applied,
- A value of `0` is valid, and causes accounts to be 'touched'.
-- For each ommer, the tool needs to be given an `addres\` and a `delta`. This
+- For each ommer, the tool needs to be given an `address\` and a `delta`. This
is done via the `ommers` field in `env`.
Note: the tool does not verify that e.g. the normal uncle rules apply,
diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go
index a5ec44f170..1b11e25e53 100644
--- a/cmd/evm/internal/t8ntool/block.go
+++ b/cmd/evm/internal/t8ntool/block.go
@@ -271,7 +271,7 @@ func readInput(ctx *cli.Context) (*bbInput, error) {
return inputData, nil
}
-// dispatchOutput writes the output data to either stderr or stdout, or to the specified
+// dispatchBlock writes the output data to either stderr or stdout, or to the specified
// files
func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error {
raw, _ := rlp.EncodeToBytes(block)
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 0f82378ba4..7b33af0d02 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -29,8 +29,8 @@ package t8ntool
import (
"fmt"
"math/big"
- "os"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
"github.com/ava-labs/subnet-evm/core"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/state"
@@ -55,16 +55,18 @@ type Prestate struct {
// ExecutionResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested.
type ExecutionResult struct {
- StateRoot common.Hash `json:"stateRoot"`
- TxRoot common.Hash `json:"txRoot"`
- ReceiptRoot common.Hash `json:"receiptsRoot"`
- LogsHash common.Hash `json:"logsHash"`
- Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
- Receipts types.Receipts `json:"receipts"`
- Rejected []*rejectedTx `json:"rejected,omitempty"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
- GasUsed math.HexOrDecimal64 `json:"gasUsed"`
- BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ StateRoot common.Hash `json:"stateRoot"`
+ TxRoot common.Hash `json:"txRoot"`
+ ReceiptRoot common.Hash `json:"receiptsRoot"`
+ LogsHash common.Hash `json:"logsHash"`
+ Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
+ Receipts types.Receipts `json:"receipts"`
+ Rejected []*rejectedTx `json:"rejected,omitempty"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
+ BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
+ CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"`
}
type ommer struct {
@@ -74,38 +76,44 @@ type ommer struct {
//go:generate go run github.com/fjl/gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
type stEnv struct {
- Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
- Difficulty *big.Int `json:"currentDifficulty"`
- Random *big.Int `json:"currentRandom"`
- ParentDifficulty *big.Int `json:"parentDifficulty"`
- ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"`
- ParentGasUsed uint64 `json:"parentGasUsed,omitempty"`
- ParentGasLimit uint64 `json:"parentGasLimit,omitempty"`
- MinBaseFee *big.Int `json:"minBaseFee,omitempty"`
- GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
- Number uint64 `json:"currentNumber" gencodec:"required"`
- Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
- ParentTimestamp uint64 `json:"parentTimestamp,omitempty"`
- BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
- Ommers []ommer `json:"ommers,omitempty"`
- BaseFee *big.Int `json:"currentBaseFee,omitempty"`
- ParentUncleHash common.Hash `json:"parentUncleHash"`
+ Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *big.Int `json:"currentDifficulty"`
+ Random *big.Int `json:"currentRandom"`
+ ParentDifficulty *big.Int `json:"parentDifficulty"`
+ ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"`
+ ParentGasUsed uint64 `json:"parentGasUsed,omitempty"`
+ ParentGasLimit uint64 `json:"parentGasLimit,omitempty"`
+ MinBaseFee *big.Int `json:"minBaseFee,omitempty"`
+ GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
+ Number uint64 `json:"currentNumber" gencodec:"required"`
+ Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
+ ParentTimestamp uint64 `json:"parentTimestamp,omitempty"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ BaseFee *big.Int `json:"currentBaseFee,omitempty"`
+ ParentUncleHash common.Hash `json:"parentUncleHash"`
+ ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"`
+ ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
+ ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
}
type stEnvMarshaling struct {
- Coinbase common.UnprefixedAddress
- Difficulty *math.HexOrDecimal256
- Random *math.HexOrDecimal256
- ParentDifficulty *math.HexOrDecimal256
- ParentBaseFee *math.HexOrDecimal256
- ParentGasUsed math.HexOrDecimal64
- ParentGasLimit math.HexOrDecimal64
- MinBaseFee *math.HexOrDecimal256
- GasLimit math.HexOrDecimal64
- Number math.HexOrDecimal64
- Timestamp math.HexOrDecimal64
- ParentTimestamp math.HexOrDecimal64
- BaseFee *math.HexOrDecimal256
+ Coinbase common.UnprefixedAddress
+ Difficulty *math.HexOrDecimal256
+ Random *math.HexOrDecimal256
+ ParentDifficulty *math.HexOrDecimal256
+ ParentBaseFee *math.HexOrDecimal256
+ ParentGasUsed math.HexOrDecimal64
+ ParentGasLimit math.HexOrDecimal64
+ MinBaseFee *math.HexOrDecimal256
+ GasLimit math.HexOrDecimal64
+ Number math.HexOrDecimal64
+ Timestamp math.HexOrDecimal64
+ ParentTimestamp math.HexOrDecimal64
+ BaseFee *math.HexOrDecimal256
+ ExcessBlobGas *math.HexOrDecimal64
+ ParentExcessBlobGas *math.HexOrDecimal64
+ ParentBlobGasUsed *math.HexOrDecimal64
}
type rejectedTx struct {
@@ -163,6 +171,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
// rnd := common.BigToHash(pre.Env.Random)
// vmContext.Random = &rnd
// }
+ // If excessBlobGas is defined, add it to the vmContext.
+ if pre.Env.ExcessBlobGas != nil {
+ vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas
+ } else {
+ // If it is not explicitly defined, but we have the parent values, we try
+ // to calculate it ourselves.
+ parentExcessBlobGas := pre.Env.ParentExcessBlobGas
+ parentBlobGasUsed := pre.Env.ParentBlobGasUsed
+ if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
+ excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
+ vmContext.ExcessBlobGas = &excessBlobGas
+ }
+ }
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
// done in StateProcessor.Process(block, ...), right before transactions are applied.
// if chainConfig.DAOForkSupport &&
@@ -170,8 +191,14 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
// chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 {
// misc.ApplyDAOHardFork(statedb)
// }
-
+ var blobGasUsed uint64
for i, tx := range txs {
+ if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil {
+ errMsg := "blob tx used but field env.ExcessBlobGas missing"
+ log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg)
+ rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg})
+ continue
+ }
msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee)
if err != nil {
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err)
@@ -201,6 +228,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
gaspool.SetGas(prevGas)
continue
}
+ if tx.Type() == types.BlobTxType {
+ blobGasUsed += params.BlobTxBlobGasPerBlob
+ }
includedTxs = append(includedTxs, tx)
if hashError != nil {
return nil, nil, NewError(ErrorMissingBlockhash, hashError)
@@ -249,7 +279,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
if miningReward >= 0 {
// Add mining reward. The mining reward may be `0`, which only makes a difference in the cases
// where
- // - the coinbase suicided, or
+ // - the coinbase self-destructed, or
// - there are only 'bad' transactions, which aren't executed. In those cases,
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
var (
@@ -270,9 +300,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
statedb.AddBalance(pre.Env.Coinbase, minerReward)
}
// Commit block
- root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber), false)
+ root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), false)
if err != nil {
- fmt.Fprintf(os.Stderr, "Could not commit state: %v", err)
return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
}
execRs := &ExecutionResult{
@@ -287,6 +316,16 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
GasUsed: (math.HexOrDecimal64)(gasUsed),
BaseFee: (*math.HexOrDecimal256)(vmContext.BaseFee),
}
+ if vmContext.ExcessBlobGas != nil {
+ execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas)
+ execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
+ }
+ // Re-create statedb instance with new root upon the updated database
+ // for accessing latest states.
+ statedb, err = state.New(root, statedb.Database(), nil)
+ if err != nil {
+ return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
+ }
return statedb, execRs, nil
}
@@ -302,7 +341,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(false, false)
+ root, _ := statedb.Commit(0, false, false)
statedb, _ = state.New(root, sdb, nil)
return statedb
}
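
When `env.json` omits `excessBlobGas` but supplies `parentExcessBlobGas` and `parentBlobGasUsed` (as testdata/28 below does), `Apply` derives the current value via `eip4844.CalcExcessBlobGas` and then charges `params.BlobTxBlobGasPerBlob` for each included blob transaction. A small sketch of that arithmetic, assuming the module paths used elsewhere in this diff:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
	"github.com/ava-labs/subnet-evm/params"
)

func main() {
	// testdata/28: parentExcessBlobGas = 0x00, parentBlobGasUsed = 0x00,
	// so the derived excess stays at zero (below the per-block target).
	excess := eip4844.CalcExcessBlobGas(0, 0)
	fmt.Printf("currentExcessBlobGas: %#x\n", excess) // 0x0 in exp.json

	// The single blob transaction in txs.rlp is charged one blob's worth
	// of gas, which is what exp.json reports as currentBlobGasUsed.
	var blobGasUsed uint64
	blobGasUsed += params.BlobTxBlobGasPerBlob
	fmt.Printf("currentBlobGasUsed: %#x\n", blobGasUsed) // 0x20000
}
```
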
diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go
index e0a4061564..ed8652d741 100644
--- a/cmd/evm/internal/t8ntool/gen_stenv.go
+++ b/cmd/evm/internal/t8ntool/gen_stenv.go
@@ -16,22 +16,25 @@ var _ = (*stEnvMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct {
- Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
- Random *math.HexOrDecimal256 `json:"currentRandom"`
- ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
- ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"`
- ParentGasUsed math.HexOrDecimal64 `json:"parentGasUsed,omitempty"`
- ParentGasLimit math.HexOrDecimal64 `json:"parentGasLimit,omitempty"`
- MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"`
- GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
- Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
- Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
- ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
- BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
- Ommers []ommer `json:"ommers,omitempty"`
- BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
- ParentUncleHash common.Hash `json:"parentUncleHash"`
+ Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ Random *math.HexOrDecimal256 `json:"currentRandom"`
+ ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
+ ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"`
+ ParentGasUsed math.HexOrDecimal64 `json:"parentGasUsed,omitempty"`
+ ParentGasLimit math.HexOrDecimal64 `json:"parentGasLimit,omitempty"`
+ MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"`
+ GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
+ Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
+ Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
+ ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ ParentUncleHash common.Hash `json:"parentUncleHash"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
+ ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
+ ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
}
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
@@ -50,28 +53,34 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
enc.Ommers = s.Ommers
enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee)
enc.ParentUncleHash = s.ParentUncleHash
+ enc.ExcessBlobGas = (*math.HexOrDecimal64)(s.ExcessBlobGas)
+ enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas)
+ enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed)
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct {
- Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
- Random *math.HexOrDecimal256 `json:"currentRandom"`
- ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
- ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"`
- ParentGasUsed *math.HexOrDecimal64 `json:"parentGasUsed,omitempty"`
- ParentGasLimit *math.HexOrDecimal64 `json:"parentGasLimit,omitempty"`
- MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"`
- GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
- Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
- Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
- ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
- BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
- Ommers []ommer `json:"ommers,omitempty"`
- BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
- ParentUncleHash *common.Hash `json:"parentUncleHash"`
+ Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ Random *math.HexOrDecimal256 `json:"currentRandom"`
+ ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
+ ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"`
+ ParentGasUsed *math.HexOrDecimal64 `json:"parentGasUsed,omitempty"`
+ ParentGasLimit *math.HexOrDecimal64 `json:"parentGasLimit,omitempty"`
+ MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"`
+ GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
+ Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
+ Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
+ ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ ParentUncleHash *common.Hash `json:"parentUncleHash"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
+ ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
+ ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
}
var dec stEnv
if err := json.Unmarshal(input, &dec); err != nil {
@@ -129,5 +138,14 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.ParentUncleHash != nil {
s.ParentUncleHash = *dec.ParentUncleHash
}
+ if dec.ExcessBlobGas != nil {
+ s.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
+ }
+ if dec.ParentExcessBlobGas != nil {
+ s.ParentExcessBlobGas = (*uint64)(dec.ParentExcessBlobGas)
+ }
+ if dec.ParentBlobGasUsed != nil {
+ s.ParentBlobGasUsed = (*uint64)(dec.ParentBlobGasUsed)
+ }
return nil
}
diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go
index 98ebc3e275..2b05394cb5 100644
--- a/cmd/evm/internal/t8ntool/transaction.go
+++ b/cmd/evm/internal/t8ntool/transaction.go
@@ -149,7 +149,7 @@ func Transaction(ctx *cli.Context) error {
r.Address = sender
}
// Check intrinsic gas
- rules := chainConfig.AvalancheRules(new(big.Int), 0)
+ rules := chainConfig.Rules(new(big.Int), 0)
if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, rules); err != nil {
r.Error = err
results = append(results, r)
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 4882c1b6bd..019e1ab989 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -138,6 +138,7 @@ func runCmd(ctx *cli.Context) error {
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
preimages = ctx.Bool(DumpFlag.Name)
+ blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@@ -227,6 +228,7 @@ func runCmd(ctx *cli.Context) error {
Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
+ BlobHashes: blobHashes,
EVMConfig: vm.Config{
Tracer: tracer,
},
@@ -288,8 +290,7 @@ func runCmd(ctx *cli.Context) error {
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
- statedb.Commit(true, false)
- statedb.IntermediateRoot(true)
+ statedb.Commit(genesisConfig.Number, true, false)
fmt.Println(string(statedb.Dump(nil)))
}
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 900f5bba1f..81b7442394 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -127,6 +127,7 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
// Test failed, mark as so and dump any state to aid debugging
result.Pass, result.Error = false, err.Error()
if dump && s != nil {
+ s, _ = state.New(*result.Root, s.Database(), nil)
dump := s.RawDump(nil)
result.State = &dump
}
diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go
index 80284dee67..0cfa0151e6 100644
--- a/cmd/evm/t8n_test.go
+++ b/cmd/evm/t8n_test.go
@@ -276,6 +276,14 @@ func TestT8n(t *testing.T) {
// output: t8nOutput{alloc: true, result: true},
// expOut: "exp.json",
// },
+ { // Cancun tests
+ base: "./testdata/28",
+ input: t8nInput{
+ "alloc.json", "txs.rlp", "env.json", "Cancun", "",
+ },
+ output: t8nOutput{alloc: true, result: true},
+ expOut: "exp.json",
+ },
} {
args := []string{"t8n"}
args = append(args, tc.output.get()...)
diff --git a/cmd/evm/testdata/13/readme.md b/cmd/evm/testdata/13/readme.md
index 64f52fc9a9..889975d47e 100644
--- a/cmd/evm/testdata/13/readme.md
+++ b/cmd/evm/testdata/13/readme.md
@@ -1,4 +1,4 @@
## Input transactions in RLP form
-This testdata folder is used to examplify how transaction input can be provided in rlp form.
+This testdata folder is used to exemplify how transaction input can be provided in rlp form.
Please see the README in `evm` folder for how this is performed.
\ No newline at end of file
diff --git a/cmd/evm/testdata/23/readme.md b/cmd/evm/testdata/23/readme.md
index 85fe8db66c..f31b64de2f 100644
--- a/cmd/evm/testdata/23/readme.md
+++ b/cmd/evm/testdata/23/readme.md
@@ -1 +1 @@
-These files examplify how to sign a transaction using the pre-EIP155 scheme.
+These files exemplify how to sign a transaction using the pre-EIP155 scheme.
diff --git a/cmd/evm/testdata/28/alloc.json b/cmd/evm/testdata/28/alloc.json
new file mode 100644
index 0000000000..680a89f4ed
--- /dev/null
+++ b/cmd/evm/testdata/28/alloc.json
@@ -0,0 +1,16 @@
+{
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x016345785d8a0000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x016345785d8a0000",
+ "code" : "0x60004960015500",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmd/evm/testdata/28/env.json b/cmd/evm/testdata/28/env.json
new file mode 100644
index 0000000000..14a1f3ed1e
--- /dev/null
+++ b/cmd/evm/testdata/28/env.json
@@ -0,0 +1,23 @@
+{
+ "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
+ "currentNumber" : "0x01",
+ "currentTimestamp" : "0x079e",
+ "currentGasLimit" : "0x7fffffffffffffff",
+ "previousHash" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6",
+ "currentBlobGasUsed" : "0x00",
+ "parentTimestamp" : "0x03b6",
+ "parentDifficulty" : "0x00",
+ "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "withdrawals" : [
+ ],
+ "parentBaseFee" : "0x0a",
+ "parentGasUsed" : "0x00",
+ "parentGasLimit" : "0x7fffffffffffffff",
+ "parentExcessBlobGas" : "0x00",
+ "parentBlobGasUsed" : "0x00",
+ "blockHashes" : {
+ "0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6"
+ },
+ "minBaseFee" : "0x9"
+}
\ No newline at end of file
diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json
new file mode 100644
index 0000000000..9a3358f908
--- /dev/null
+++ b/cmd/evm/testdata/28/exp.json
@@ -0,0 +1,46 @@
+{
+ "alloc": {
+ "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
+ "balance": "0x73c57"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x16345785d80c3a9",
+ "nonce": "0x1"
+ },
+ "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "code": "0x60004960015500",
+ "storage": {
+ "0x0000000000000000000000000000000000000000000000000000000000000001": "0x01a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
+ },
+ "balance": "0x16345785d8a0000"
+ }
+ },
+ "result": {
+ "stateRoot": "0xabcbb1d3be8aee044a219dd181fe6f2c2482749b9da95d15358ba7af9b43c372",
+ "txRoot": "0x4409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9ce",
+ "receiptsRoot": "0xbff643da765981266133094092d98c81d2ac8e9a83a7bbda46c3d736f1f874ac",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [
+ {
+ "type": "0x3",
+ "root": "0x",
+ "status": "0x1",
+ "cumulativeGasUsed": "0xa865",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "logs": null,
+ "transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262",
+ "contractAddress": "0x0000000000000000000000000000000000000000",
+ "gasUsed": "0xa865",
+ "effectiveGasPrice": null,
+ "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionIndex": "0x0"
+ }
+ ],
+ "currentDifficulty": null,
+ "gasUsed": "0xa865",
+ "currentBaseFee": "0x9",
+ "currentExcessBlobGas": "0x0",
+ "currentBlobGasUsed": "0x20000"
+ }
+}
\ No newline at end of file
diff --git a/cmd/evm/testdata/28/txs.rlp b/cmd/evm/testdata/28/txs.rlp
new file mode 100644
index 0000000000..8df20e3aa2
--- /dev/null
+++ b/cmd/evm/testdata/28/txs.rlp
@@ -0,0 +1 @@
+"0xf88bb88903f8860180026483061a8094b94f5374fce5edbc8e2a8697c15331677e6ebf0b8080c00ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d801a025e16bb498552165016751911c3608d79000ab89dc3100776e729e6ea13091c7a03acacff7fc0cff6eda8a927dec93ca17765e1ee6cbc06c5954ce102e097c01d2"
\ No newline at end of file
diff --git a/cmd/evm/testdata/3/readme.md b/cmd/evm/testdata/3/readme.md
index 499f03d7aa..246c58ef3b 100644
--- a/cmd/evm/testdata/3/readme.md
+++ b/cmd/evm/testdata/3/readme.md
@@ -1,2 +1,2 @@
-These files examplify a transition where a transaction (excuted on block 5) requests
+These files exemplify a transition where a transaction (executed on block 5) requests
the blockhash for block `1`.
diff --git a/cmd/evm/testdata/4/readme.md b/cmd/evm/testdata/4/readme.md
index 08840d37bd..eede41a9fd 100644
--- a/cmd/evm/testdata/4/readme.md
+++ b/cmd/evm/testdata/4/readme.md
@@ -1,3 +1,3 @@
-These files examplify a transition where a transaction (excuted on block 5) requests
+These files exemplify a transition where a transaction (executed on block 5) requests
the blockhash for block `4`, but where the hash for that block is missing.
It's expected that executing these should cause `exit` with errorcode `4`.
diff --git a/cmd/evm/testdata/5/readme.md b/cmd/evm/testdata/5/readme.md
index e2b608face..1a84afaab6 100644
--- a/cmd/evm/testdata/5/readme.md
+++ b/cmd/evm/testdata/5/readme.md
@@ -1 +1 @@
-These files examplify a transition where there are no transcations, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).
\ No newline at end of file
+These files exemplify a transition where there are no transactions, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).
\ No newline at end of file
diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh
index a2ea534189..8cc6aa41de 100644
--- a/cmd/evm/transition-test.sh
+++ b/cmd/evm/transition-test.sh
@@ -280,7 +280,7 @@ To make `t8n` apply these, the following inputs are required:
- For ethash, it is `5000000000000000000` `wei`,
- If this is not defined, mining rewards are not applied,
- A value of `0` is valid, and causes accounts to be 'touched'.
-- For each ommer, the tool needs to be given an `addres\` and a `delta`. This
+- For each ommer, the tool needs to be given an `address\` and a `delta`. This
is done via the `ommers` field in `env`.
Note: the tool does not verify that e.g. the normal uncle rules apply,
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
new file mode 100644
index 0000000000..2bd5559436
--- /dev/null
+++ b/cmd/utils/cmd.go
@@ -0,0 +1,45 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+// Package utils contains internal helper functions for go-ethereum commands.
+package utils
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+)
+
+// Fatalf formats a message to standard error and exits the program.
+// The message is also printed to standard output if standard error
+// is redirected to a different file.
+func Fatalf(format string, args ...interface{}) {
+ w := io.MultiWriter(os.Stdout, os.Stderr)
+ if runtime.GOOS == "windows" {
+ // The SameFile check below doesn't work on Windows.
+ // stdout is unlikely to get redirected though, so just print there.
+ w = os.Stdout
+ } else {
+ outf, _ := os.Stdout.Stat()
+ errf, _ := os.Stderr.Stat()
+ if outf != nil && errf != nil && os.SameFile(outf, errf) {
+ w = os.Stderr
+ }
+ }
+ fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
+ os.Exit(1)
+}
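
This new file receives `Fatalf`, which the flags.go hunk below removes; the function body itself is unchanged. A trivial, hedged usage sketch, assuming the repository's `github.com/ava-labs/subnet-evm` module path:

```go
package main

import "github.com/ava-labs/subnet-evm/cmd/utils"

func main() {
	// Prints "Fatal: ..." to stderr (and to stdout as well when the two
	// streams point at different files), then exits with status 1.
	utils.Fatalf("failed to open keystore: %v", "example error")
}
```
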
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index dfa6990ddf..07a6e60015 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -19,34 +19,11 @@ package utils
import (
"fmt"
- "io"
- "os"
- "runtime"
"strings"
"github.com/urfave/cli/v2"
)
-// Fatalf formats a message to standard error and exits the program.
-// The message is also printed to standard output if standard error
-// is redirected to a different file.
-func Fatalf(format string, args ...interface{}) {
- w := io.MultiWriter(os.Stdout, os.Stderr)
- if runtime.GOOS == "windows" {
- // The SameFile check below doesn't work on Windows.
- // stdout is unlikely to get redirected though, so just print there.
- w = os.Stdout
- } else {
- outf, _ := os.Stdout.Stat()
- errf, _ := os.Stderr.Stat()
- if outf != nil && errf != nil && os.SameFile(outf, errf) {
- w = os.Stderr
- }
- }
- fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
- os.Exit(1)
-}
-
// CheckExclusive verifies that only a single instance of the provided flags was
// set by the user. Each flag might optionally be followed by a string type to
// specialize it further.
diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index 173f1e8d53..1f003661fe 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -12,6 +12,7 @@ import (
"github.com/ava-labs/avalanchego/utils/timer/mockable"
"github.com/ava-labs/subnet-evm/consensus"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
"github.com/ava-labs/subnet-evm/core/state"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/params"
@@ -233,13 +234,18 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header
if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
return consensus.ErrInvalidNumber
}
- // Verify the existence / non-existence of excessDataGas
- cancun := chain.Config().IsCancun(header.Time)
- if cancun && header.ExcessDataGas == nil {
- return errors.New("missing excessDataGas")
+ // Verify the existence / non-existence of excessBlobGas
+ cancun := chain.Config().IsCancun(header.Number, header.Time)
+ if !cancun && header.ExcessBlobGas != nil {
+ return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
}
- if !cancun && header.ExcessDataGas != nil {
- return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas)
+ if !cancun && header.BlobGasUsed != nil {
+ return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
+ }
+ if cancun {
+ if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go
deleted file mode 100644
index 70d84d8529..0000000000
--- a/consensus/misc/eip4844.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package misc
-
-import (
- "math/big"
-
- "github.com/ava-labs/subnet-evm/params"
-)
-
-var (
- minDataGasPrice = big.NewInt(params.BlobTxMinDataGasprice)
- dataGaspriceUpdateFraction = big.NewInt(params.BlobTxDataGaspriceUpdateFraction)
-)
-
-// CalcBlobFee calculates the blobfee from the header's excess data gas field.
-func CalcBlobFee(excessDataGas *big.Int) *big.Int {
- // If this block does not yet have EIP-4844 enabled, return the starting fee
- if excessDataGas == nil {
- return big.NewInt(params.BlobTxMinDataGasprice)
- }
- return fakeExponential(minDataGasPrice, excessDataGas, dataGaspriceUpdateFraction)
-}
-
-// fakeExponential approximates factor * e ** (numerator / denominator) using
-// Taylor expansion.
-func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
- var (
- output = new(big.Int)
- accum = new(big.Int).Mul(factor, denominator)
- )
- for i := 1; accum.Sign() > 0; i++ {
- output.Add(output, accum)
-
- accum.Mul(accum, numerator)
- accum.Div(accum, denominator)
- accum.Div(accum, big.NewInt(int64(i)))
- }
- return output.Div(output, denominator)
-}
diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go
new file mode 100644
index 0000000000..8be8b32969
--- /dev/null
+++ b/consensus/misc/eip4844/eip4844.go
@@ -0,0 +1,108 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eip4844
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/params"
+)
+
+var (
+ minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice)
+ blobGaspriceUpdateFraction = big.NewInt(params.BlobTxBlobGaspriceUpdateFraction)
+)
+
+// VerifyEIP4844Header verifies the presence of the excessBlobGas field and that
+// if the current block contains no transactions, the excessBlobGas is updated
+// accordingly.
+func VerifyEIP4844Header(parent, header *types.Header) error {
+ // Verify the header is not malformed
+ if header.ExcessBlobGas == nil {
+ return errors.New("header is missing excessBlobGas")
+ }
+ if header.BlobGasUsed == nil {
+ return errors.New("header is missing blobGasUsed")
+ }
+ // Verify that the blob gas used remains within reasonable limits.
+ if *header.BlobGasUsed > params.BlobTxMaxBlobGasPerBlock {
+ return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.BlobTxMaxBlobGasPerBlock)
+ }
+ if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
+ return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
+ }
+ // Verify the excessBlobGas is correct based on the parent header
+ var (
+ parentExcessBlobGas uint64
+ parentBlobGasUsed uint64
+ )
+ if parent.ExcessBlobGas != nil {
+ parentExcessBlobGas = *parent.ExcessBlobGas
+ parentBlobGasUsed = *parent.BlobGasUsed
+ }
+ expectedExcessBlobGas := CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
+ if *header.ExcessBlobGas != expectedExcessBlobGas {
+ return fmt.Errorf("invalid excessBlobGas: have %d, want %d, parent excessBlobGas %d, parent blobDataUsed %d",
+ *header.ExcessBlobGas, expectedExcessBlobGas, parentExcessBlobGas, parentBlobGasUsed)
+ }
+ return nil
+}
+
+// CalcExcessBlobGas calculates the excess blob gas after applying the set of
+// blobs on top of the parent's excess blob gas.
+func CalcExcessBlobGas(parentExcessBlobGas uint64, parentBlobGasUsed uint64) uint64 {
+ excessBlobGas := parentExcessBlobGas + parentBlobGasUsed
+ if excessBlobGas < params.BlobTxTargetBlobGasPerBlock {
+ return 0
+ }
+ return excessBlobGas - params.BlobTxTargetBlobGasPerBlock
+}
+
+// CalcBlobFee calculates the blobfee from the header's excess blob gas field.
+func CalcBlobFee(excessBlobGas uint64) *big.Int {
+ return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(excessBlobGas), blobGaspriceUpdateFraction)
+}
+
+// fakeExponential approximates factor * e ** (numerator / denominator) using
+// Taylor expansion.
+func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
+ var (
+ output = new(big.Int)
+ accum = new(big.Int).Mul(factor, denominator)
+ )
+ for i := 1; accum.Sign() > 0; i++ {
+ output.Add(output, accum)
+
+ accum.Mul(accum, numerator)
+ accum.Div(accum, denominator)
+ accum.Div(accum, big.NewInt(int64(i)))
+ }
+ return output.Div(output, denominator)
+}
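
The blob fee above is fakeExponential(minBlobGasPrice, excessBlobGas, blobGaspriceUpdateFraction): an integer-only Taylor expansion of factor * e ** (numerator / denominator), where each iteration adds the next series term factor * (numerator/denominator)^i / i!, kept scaled by the denominator so all intermediate values stay integral, and the loop stops once a term truncates to zero. A minimal standalone sketch of that helper with illustrative inputs (the package wrapper and sample numbers are not part of the patch):

package main

import (
	"fmt"
	"math/big"
)

// fakeExponential mirrors the helper added in consensus/misc/eip4844/eip4844.go:
// it evaluates factor * sum_{i>=0} (numerator/denominator)^i / i! with big
// integers, truncating the series once a term underflows to zero.
func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
	var (
		output = new(big.Int)
		accum  = new(big.Int).Mul(factor, denominator)
	)
	for i := 1; accum.Sign() > 0; i++ {
		output.Add(output, accum)
		accum.Mul(accum, numerator)
		accum.Div(accum, denominator)
		accum.Div(accum, big.NewInt(int64(i)))
	}
	return output.Div(output, denominator)
}

func main() {
	// e^0 = 1: with zero excess blob gas the fee stays at the minimum (factor).
	fmt.Println(fakeExponential(big.NewInt(1), big.NewInt(0), big.NewInt(10))) // 1
	// e^1 ~ 2.718: integer truncation of the series yields 2.
	fmt.Println(fakeExponential(big.NewInt(1), big.NewInt(10), big.NewInt(10))) // 2
	// 2 * e^3 ~ 40.2: the truncated series yields 39.
	fmt.Println(fakeExponential(big.NewInt(2), big.NewInt(30), big.NewInt(10))) // 39
}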
diff --git a/consensus/misc/eip4844_test.go b/consensus/misc/eip4844/eip4844_test.go
similarity index 58%
rename from consensus/misc/eip4844_test.go
rename to consensus/misc/eip4844/eip4844_test.go
index ea636ede9b..3780acc15a 100644
--- a/consensus/misc/eip4844_test.go
+++ b/consensus/misc/eip4844/eip4844_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package misc
+package eip4844
import (
"fmt"
@@ -24,9 +24,42 @@ import (
"github.com/ava-labs/subnet-evm/params"
)
+func TestCalcExcessBlobGas(t *testing.T) {
+ var tests = []struct {
+ excess uint64
+ blobs uint64
+ want uint64
+ }{
+ // The excess blob gas should not increase from zero if the used blob
+ // slots are below - or equal - to the target.
+ {0, 0, 0},
+ {0, 1, 0},
+ {0, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, 0},
+
+ // If the target blob gas is exceeded, the excessBlobGas should increase
+ // by however much it was overshot
+ {0, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob},
+ {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob + 1},
+ {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 2, 2*params.BlobTxBlobGasPerBlob + 1},
+
+ // The excess blob gas should decrease by however much the target was
+ // under-shot, capped at zero.
+ {params.BlobTxTargetBlobGasPerBlock, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, params.BlobTxTargetBlobGasPerBlock},
+ {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxBlobGasPerBlob},
+ {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, 0},
+ {params.BlobTxBlobGasPerBlob - 1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, 0},
+ }
+ for _, tt := range tests {
+ result := CalcExcessBlobGas(tt.excess, tt.blobs*params.BlobTxBlobGasPerBlob)
+ if result != tt.want {
+ t.Errorf("excess blob gas mismatch: have %v, want %v", result, tt.want)
+ }
+ }
+}
+
func TestCalcBlobFee(t *testing.T) {
tests := []struct {
- excessDataGas int64
+ excessBlobGas uint64
blobfee int64
}{
{0, 1},
@@ -34,12 +67,8 @@ func TestCalcBlobFee(t *testing.T) {
{1542707, 2},
{10 * 1024 * 1024, 111},
}
- have := CalcBlobFee(nil)
- if have.Int64() != params.BlobTxMinDataGasprice {
- t.Errorf("nil test: blobfee mismatch: have %v, want %v", have, params.BlobTxMinDataGasprice)
- }
for i, tt := range tests {
- have := CalcBlobFee(big.NewInt(tt.excessDataGas))
+ have := CalcBlobFee(tt.excessBlobGas)
if have.Int64() != tt.blobfee {
t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
}
diff --git a/core/block_validator.go b/core/block_validator.go
index dd0133136e..86160826c4 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -27,6 +27,7 @@
package core
import (
+ "errors"
"fmt"
"github.com/ava-labs/subnet-evm/consensus"
@@ -77,6 +78,23 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch (header value %x, calculated %x)", header.TxHash, hash)
}
+ // Blob transactions may be present after the Cancun fork.
+ var blobs int
+ for _, tx := range block.Transactions() {
+ // Count the number of blobs to validate against the header's blobGasUsed
+ blobs += len(tx.BlobHashes())
+ // The individual checks for blob validity (version-check + not empty)
+ // happen in the state_transition check.
+ }
+ if header.BlobGasUsed != nil {
+ if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
+ return fmt.Errorf("blob gas used mismatch (header %v, calculated %v)", *header.BlobGasUsed, blobs*params.BlobTxBlobGasPerBlob)
+ }
+ } else {
+ if blobs > 0 {
+ return errors.New("data blobs present in block body")
+ }
+ }
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
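
The new body check is a pure function of the blob count and the header's blobGasUsed: without the field no blobs may appear in the body, and with it the count must equal blobGasUsed / BlobTxBlobGasPerBlob. A compact standalone sketch of that invariant (the package wrapper and the per-blob constant literal are illustrative, not part of the patch):

package main

import (
	"errors"
	"fmt"
)

// blobGasPerBlob mirrors params.BlobTxBlobGasPerBlob (2^17 per EIP-4844); the
// literal here is illustrative only.
const blobGasPerBlob = 1 << 17

// validateBlobGasUsed reproduces the invariant enforced in ValidateBody above.
func validateBlobGasUsed(blobs uint64, headerBlobGasUsed *uint64) error {
	if headerBlobGasUsed == nil {
		if blobs > 0 {
			return errors.New("data blobs present in block body")
		}
		return nil
	}
	if want := *headerBlobGasUsed / blobGasPerBlob; blobs != want {
		return fmt.Errorf("blob gas used mismatch (header %d, calculated %d)", *headerBlobGasUsed, blobs*blobGasPerBlob)
	}
	return nil
}

func main() {
	used := uint64(2 * blobGasPerBlob)
	fmt.Println(validateBlobGasUsed(2, &used)) // <nil>
	fmt.Println(validateBlobGasUsed(3, &used)) // blob gas used mismatch (header 262144, calculated 393216)
	fmt.Println(validateBlobGasUsed(1, nil))   // data blobs present in block body
}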
diff --git a/core/blockchain.go b/core/blockchain.go
index 4f85ed5887..f13b478a94 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -41,6 +41,7 @@ import (
"github.com/ava-labs/subnet-evm/commontype"
"github.com/ava-labs/subnet-evm/consensus"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/state"
"github.com/ava-labs/subnet-evm/core/state/snapshot"
@@ -101,6 +102,8 @@ var (
errFutureBlockUnsupported = errors.New("future block insertion not supported")
errCacheConfigNotSpecified = errors.New("must specify cache config")
+ errInvalidOldChain = errors.New("invalid old chain")
+ errInvalidNewChain = errors.New("invalid new chain")
)
const (
@@ -143,7 +146,7 @@ const (
// trieCleanCacheStatsNamespace is the namespace to surface stats from the trie
// clean cache's underlying fastcache.
- trieCleanCacheStatsNamespace = "trie/memcache/clean/fastcache"
+ trieCleanCacheStatsNamespace = "hashdb/memcache/clean/fastcache"
)
// cacheableFeeConfig encapsulates fee configuration itself and the block number that it has changed at,
@@ -163,25 +166,23 @@ type cacheableCoinbaseConfig struct {
// CacheConfig contains the configuration values for the trie database
// that's resident in a blockchain.
type CacheConfig struct {
- TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
- TrieCleanJournal string // Disk journal for saving clean cache entries.
- TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically
- TrieDirtyLimit int // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk
- TrieDirtyCommitTarget int // Memory limit (MB) to target for the dirties cache before invoking commit
- TriePrefetcherParallelism int // Max concurrent disk reads trie prefetcher should perform at once
- CommitInterval uint64 // Commit the trie every [CommitInterval] blocks.
- Pruning bool // Whether to disable trie write caching and GC altogether (archive node)
- AcceptorQueueLimit int // Blocks to queue before blocking during acceptance
- PopulateMissingTries *uint64 // If non-nil, sets the starting height for re-generating historical tries.
- PopulateMissingTriesParallelism int // Number of readers to use when trying to populate missing tries.
- AllowMissingTries bool // Whether to allow an archive node to run with pruning enabled
- SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call
- SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
- SnapshotVerify bool // Verify generated snapshots
- Preimages bool // Whether to store preimage of trie key to the disk
- AcceptedCacheSize int // Depth of accepted headers cache and accepted logs cache at the accepted tip
- TxLookupLimit uint64 // Number of recent blocks for which to maintain transaction lookup indices
- SkipTxIndexing bool // Whether to skip transaction indexing
+ TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
+ TrieDirtyLimit int // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk
+ TrieDirtyCommitTarget int // Memory limit (MB) to target for the dirties cache before invoking commit
+ TriePrefetcherParallelism int // Max concurrent disk reads trie prefetcher should perform at once
+ CommitInterval uint64 // Commit the trie every [CommitInterval] blocks.
+ Pruning bool // Whether to disable trie write caching and GC altogether (archive node)
+ AcceptorQueueLimit int // Blocks to queue before blocking during acceptance
+ PopulateMissingTries *uint64 // If non-nil, sets the starting height for re-generating historical tries.
+ PopulateMissingTriesParallelism int // Number of readers to use when trying to populate missing tries.
+ AllowMissingTries bool // Whether to allow an archive node to run with pruning enabled
+ SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call
+ SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
+ SnapshotVerify bool // Verify generated snapshots
+ Preimages bool // Whether to store preimage of trie key to the disk
+ AcceptedCacheSize int // Depth of accepted headers cache and accepted logs cache at the accepted tip
+ TxLookupLimit uint64 // Number of recent blocks for which to maintain transaction lookup indices
+ SkipTxIndexing bool // Whether to skip transaction indexing
SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
@@ -315,7 +316,6 @@ func NewBlockChain(
// Open trie database with provided config
triedb := trie.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
- Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
StatsPrefix: trieCleanCacheStatsNamespace,
})
@@ -416,17 +416,6 @@ func NewBlockChain(
// Start processing accepted blocks effects in the background
go bc.startAcceptor()
- // If periodic cache journal is required, spin it up.
- if bc.cacheConfig.TrieCleanRejournal > 0 && len(bc.cacheConfig.TrieCleanJournal) > 0 {
- log.Info("Starting to save trie clean cache periodically", "journalDir", bc.cacheConfig.TrieCleanJournal, "freq", bc.cacheConfig.TrieCleanRejournal)
-
- bc.wg.Add(1)
- go func() {
- defer bc.wg.Done()
- bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
- }()
- }
-
// Start tx indexer/unindexer if required.
if bc.cacheConfig.TxLookupLimit != 0 {
bc.wg.Add(1)
@@ -801,7 +790,7 @@ func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, fi
return fmt.Errorf("export failed on #%d: not found", nr)
}
if nr > first && block.ParentHash() != parentHash {
- return fmt.Errorf("export failed: chain reorg during export")
+ return errors.New("export failed: chain reorg during export")
}
parentHash = block.Hash()
if err := callback(block); err != nil {
@@ -989,7 +978,6 @@ func (bc *BlockChain) Stop() {
if err := bc.stateCache.TrieDB().Close(); err != nil {
log.Error("Failed to close trie db", "err", err)
}
-
log.Info("Blockchain stopped")
}
@@ -1229,9 +1217,9 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// diff layer for the block.
var err error
if bc.snaps == nil {
- _, err = state.Commit(bc.chainConfig.IsEIP158(block.Number()), true)
+ _, err = state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), true)
} else {
- _, err = state.CommitWithSnap(bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
+ _, err = state.CommitWithSnap(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
}
if err != nil {
return err
@@ -1458,8 +1446,15 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
// collectUnflattenedLogs collects the logs that were generated or removed during
// the processing of a block.
func (bc *BlockChain) collectUnflattenedLogs(b *types.Block, removed bool) [][]*types.Log {
+ var blobGasPrice *big.Int
+ excessBlobGas := b.ExcessBlobGas()
+ if excessBlobGas != nil {
+ blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
+ }
receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
- receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), b.Transactions())
+ if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil {
+ log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err)
+ }
// Note: gross but this needs to be initialized here because returning nil will be treated specially as an incorrect
// error case downstream.
@@ -1467,11 +1462,10 @@ func (bc *BlockChain) collectUnflattenedLogs(b *types.Block, removed bool) [][]*
for i, receipt := range receipts {
receiptLogs := make([]*types.Log, len(receipt.Logs))
for i, log := range receipt.Logs {
- l := *log
if removed {
- l.Removed = true
+ log.Removed = true
}
- receiptLogs[i] = &l
+ receiptLogs[i] = log
}
logs[i] = receiptLogs
}
@@ -1513,10 +1507,10 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
}
}
if oldBlock == nil {
- return errors.New("invalid old chain")
+ return errInvalidOldChain
}
if newBlock == nil {
- return errors.New("invalid new chain")
+ return errInvalidNewChain
}
// Both sides of the reorg are at the same number, reduce both until the common
// ancestor is found
@@ -1533,11 +1527,11 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// Step back with both chains
oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
if oldBlock == nil {
- return fmt.Errorf("invalid old chain")
+ return errInvalidOldChain
}
newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
if newBlock == nil {
- return fmt.Errorf("invalid new chain")
+ return errInvalidNewChain
}
}
@@ -1772,9 +1766,9 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block)
// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
// diff layer for the block.
if bc.snaps == nil {
- return statedb.Commit(bc.chainConfig.IsEIP158(current.Number()), false)
+ return statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), false)
}
- return statedb.CommitWithSnap(bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
+ return statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
}
// initSnapshot instantiates a Snapshot instance and adds it to [bc]
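
reorg now fails with the package-level sentinels errInvalidOldChain and errInvalidNewChain instead of freshly formatted strings, so callers and tests can match the failure mode with errors.Is. A minimal illustration (the findCommonAncestor stand-in and its wrapping are hypothetical; only the sentinel values come from the patch):

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidOldChain = errors.New("invalid old chain")
	errInvalidNewChain = errors.New("invalid new chain")
)

// findCommonAncestor is a stand-in for the walk performed by reorg: it fails
// with a sentinel when either side of the fork cannot be resolved.
func findCommonAncestor(haveOld, haveNew bool) error {
	if !haveOld {
		return fmt.Errorf("reorg failed: %w", errInvalidOldChain)
	}
	if !haveNew {
		return fmt.Errorf("reorg failed: %w", errInvalidNewChain)
	}
	return nil
}

func main() {
	err := findCommonAncestor(false, true)
	fmt.Println(errors.Is(err, errInvalidOldChain)) // true
	fmt.Println(errors.Is(err, errInvalidNewChain)) // false
}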
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 0694fa33ac..bf099e3cdc 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -21,8 +21,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/fsnotify/fsnotify"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -83,100 +81,6 @@ func TestArchiveBlockChain(t *testing.T) {
}
}
-// awaitWatcherEventsSubside waits for at least one event on [watcher] and then waits
-// for at least [subsideTimeout] before returning
-func awaitWatcherEventsSubside(watcher *fsnotify.Watcher, subsideTimeout time.Duration) {
- done := make(chan struct{})
-
- go func() {
- defer func() {
- close(done)
- }()
-
- select {
- case <-watcher.Events:
- case <-watcher.Errors:
- return
- }
-
- for {
- select {
- case <-watcher.Events:
- case <-watcher.Errors:
- return
- case <-time.After(subsideTimeout):
- return
- }
- }
- }()
- <-done
-}
-
-func TestTrieCleanJournal(t *testing.T) {
- if os.Getenv("RUN_FLAKY_TESTS") != "true" {
- t.Skip("FLAKY")
- }
- require := require.New(t)
- assert := assert.New(t)
-
- trieCleanJournal := t.TempDir()
- trieCleanJournalWatcher, err := fsnotify.NewWatcher()
- require.NoError(err)
- defer func() {
- assert.NoError(trieCleanJournalWatcher.Close())
- }()
- require.NoError(trieCleanJournalWatcher.Add(trieCleanJournal))
-
- create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) {
- config := *archiveConfig
- config.TrieCleanJournal = trieCleanJournal
- config.TrieCleanRejournal = 100 * time.Millisecond
- return createBlockChain(db, &config, gspec, lastAcceptedHash)
- }
-
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = crypto.PubkeyToAddress(key2.PublicKey)
- chainDB = rawdb.NewMemoryDatabase()
- )
-
- // Ensure that key1 has some funds in the genesis block.
- genesisBalance := big.NewInt(1000000)
- gspec := &Genesis{
- Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)},
- Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}},
- }
-
- blockchain, err := create(chainDB, gspec, common.Hash{})
- require.NoError(err)
- defer blockchain.Stop()
-
- // This call generates a chain of 3 blocks.
- signer := types.HomesteadSigner{}
- _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) {
- tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1)
- gen.AddTx(tx)
- })
- require.NoError(err)
-
- // Insert and accept the generated chain
- _, err = blockchain.InsertChain(chain)
- require.NoError(err)
-
- for _, block := range chain {
- require.NoError(blockchain.Accept(block))
- }
- blockchain.DrainAcceptorQueue()
-
- awaitWatcherEventsSubside(trieCleanJournalWatcher, time.Second)
- // Assert that a new file is created in the trie clean journal
- dirEntries, err := os.ReadDir(trieCleanJournal)
- require.NoError(err)
- require.NotEmpty(dirEntries)
-}
-
func TestArchiveBlockChainSnapsDisabled(t *testing.T) {
create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) {
return createBlockChain(
@@ -375,7 +279,6 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) {
prunerConfig := pruner.Config{
Datadir: tempDir,
BloomSize: 256,
- Cachedir: pruningConfig.TrieCleanJournal,
}
pruner, err := pruner.NewPruner(db, prunerConfig)
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index 2034619f75..a7a2f945f2 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -29,6 +29,7 @@ package core
import (
"context"
"encoding/binary"
+ "errors"
"fmt"
"sync"
"sync/atomic"
@@ -413,7 +414,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
if header == nil {
return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4])
} else if header.ParentHash != lastHead {
- return common.Hash{}, fmt.Errorf("chain reorged during section processing")
+ return common.Hash{}, errors.New("chain reorged during section processing")
}
if err := c.backend.Process(c.ctx, header); err != nil {
return common.Hash{}, err
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 2231e413ae..4e9f794fc8 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -271,7 +271,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Write state changes to db
- root, err := statedb.Commit(config.IsEIP158(b.header.Number), false)
+ root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), false)
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
@@ -335,7 +335,6 @@ func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: time,
}
-
if chain.Config().IsSubnetEVM(time) {
feeConfig, _, err := chain.GetFeeConfigAt(parent.Header())
if err != nil {
diff --git a/core/error.go b/core/error.go
index bc519410c0..eac66628d6 100644
--- a/core/error.go
+++ b/core/error.go
@@ -105,4 +105,8 @@ var (
// ErrSenderNoEOA is returned if the sender of a transaction is a contract.
ErrSenderNoEOA = errors.New("sender not an eoa")
+
+ // ErrBlobFeeCapTooLow is returned if the transaction fee cap is less than the
+ // blob gas fee of the block.
+ ErrBlobFeeCapTooLow = errors.New("max fee per blob gas less than block blob gas fee")
)
diff --git a/core/evm.go b/core/evm.go
index 71da2dd4d4..9a93e70840 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -102,14 +102,16 @@ func newEVMBlockContext(header *types.Header, chain ChainContext, author *common
Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee,
GasLimit: header.GasLimit,
+ ExcessBlobGas: header.ExcessBlobGas,
}
}
// NewEVMTxContext creates a new transaction context for a single transaction.
func NewEVMTxContext(msg *Message) vm.TxContext {
return vm.TxContext{
- Origin: msg.From,
- GasPrice: new(big.Int).Set(msg.GasPrice),
+ Origin: msg.From,
+ GasPrice: new(big.Int).Set(msg.GasPrice),
+ BlobHashes: msg.BlobHashes,
}
}
diff --git a/core/gen_genesis.go b/core/gen_genesis.go
index 44002eb07a..d2938b70d0 100644
--- a/core/gen_genesis.go
+++ b/core/gen_genesis.go
@@ -29,10 +29,13 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
AirdropHash common.Hash `json:"airdropHash"`
AirdropAmount *math.HexOrDecimal256 `json:"airdropAmount"`
+ AirdropData []byte `json:"-"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var enc Genesis
enc.Config = g.Config
@@ -51,10 +54,13 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
}
enc.AirdropHash = g.AirdropHash
enc.AirdropAmount = (*math.HexOrDecimal256)(g.AirdropAmount)
+ enc.AirdropData = g.AirdropData
enc.Number = math.HexOrDecimal64(g.Number)
enc.GasUsed = math.HexOrDecimal64(g.GasUsed)
enc.ParentHash = g.ParentHash
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
+ enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
+ enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
return json.Marshal(&enc)
}
@@ -72,10 +78,13 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
AirdropHash *common.Hash `json:"airdropHash"`
AirdropAmount *math.HexOrDecimal256 `json:"airdropAmount"`
+ AirdropData []byte `json:"-"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@@ -120,6 +129,9 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.AirdropAmount != nil {
g.AirdropAmount = (*big.Int)(dec.AirdropAmount)
}
+ if dec.AirdropData != nil {
+ g.AirdropData = dec.AirdropData
+ }
if dec.Number != nil {
g.Number = uint64(*dec.Number)
}
@@ -132,5 +144,11 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BaseFee != nil {
g.BaseFee = (*big.Int)(dec.BaseFee)
}
+ if dec.ExcessBlobGas != nil {
+ g.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
+ }
+ if dec.BlobGasUsed != nil {
+ g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
+ }
return nil
}
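
The generated codec maps the new genesis fields to math.HexOrDecimal64, so excessBlobGas and blobGasUsed can be given either as 0x-prefixed hex or as decimal strings and are re-emitted as hex. A small standalone sketch of just that encoding (the blobFields wrapper type is illustrative, not the real Genesis struct):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

// blobFields isolates the two new fields to show how HexOrDecimal64 behaves.
type blobFields struct {
	ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
	BlobGasUsed   *math.HexOrDecimal64 `json:"blobGasUsed"`
}

func main() {
	var dec blobFields
	// Quoted hex and quoted decimal both decode to the same uint64 values.
	if err := json.Unmarshal([]byte(`{"excessBlobGas":"0x20000","blobGasUsed":"131072"}`), &dec); err != nil {
		panic(err)
	}
	fmt.Println(uint64(*dec.ExcessBlobGas), uint64(*dec.BlobGasUsed)) // 131072 131072

	// Marshalling always goes back out as 0x-prefixed hex.
	out, _ := json.Marshal(&dec)
	fmt.Println(string(out)) // {"excessBlobGas":"0x20000","blobGasUsed":"0x20000"}
}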
diff --git a/core/genesis.go b/core/genesis.go
index 78037416d7..fc39b6f965 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -77,10 +77,12 @@ type Genesis struct {
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
- Number uint64 `json:"number"`
- GasUsed uint64 `json:"gasUsed"`
- ParentHash common.Hash `json:"parentHash"`
- BaseFee *big.Int `json:"baseFeePerGas"`
+ Number uint64 `json:"number"`
+ GasUsed uint64 `json:"gasUsed"`
+ ParentHash common.Hash `json:"parentHash"`
+ BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
+ ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
+ BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
}
// GenesisAlloc specifies the initial state that is part of the genesis block.
@@ -119,6 +121,8 @@ type genesisSpecMarshaling struct {
BaseFee *math.HexOrDecimal256
Alloc map[common.UnprefixedAddress]GenesisAccount
AirdropAmount *math.HexOrDecimal256
+ ExcessBlobGas *math.HexOrDecimal64
+ BlobGasUsed *math.HexOrDecimal64
}
type genesisAccountMarshaling struct {
@@ -232,7 +236,7 @@ func SetupGenesisBlock(
// when we start syncing from scratch, the last accepted block
// will be genesis block
if lastBlock == nil {
- return newcfg, common.Hash{}, fmt.Errorf("missing last accepted block")
+ return newcfg, common.Hash{}, errors.New("missing last accepted block")
}
height := lastBlock.NumberU64()
timestamp := lastBlock.Time()
@@ -325,14 +329,28 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *trie.Database) *types.Block
if g.Difficulty == nil {
head.Difficulty = params.GenesisDifficulty
}
- if g.Config != nil && g.Config.IsSubnetEVM(0) {
- if g.BaseFee != nil {
- head.BaseFee = g.BaseFee
- } else {
- head.BaseFee = new(big.Int).Set(g.Config.FeeConfig.MinBaseFee)
+ if conf := g.Config; conf != nil {
+ num := new(big.Int).SetUint64(g.Number)
+ if conf.IsSubnetEVM(g.Timestamp) {
+ if g.BaseFee != nil {
+ head.BaseFee = g.BaseFee
+ } else {
+ head.BaseFee = new(big.Int).Set(g.Config.FeeConfig.MinBaseFee)
+ }
+ }
+ if conf.IsCancun(num, g.Timestamp) {
+ head.ExcessBlobGas = g.ExcessBlobGas
+ head.BlobGasUsed = g.BlobGasUsed
+ if head.ExcessBlobGas == nil {
+ head.ExcessBlobGas = new(uint64)
+ }
+ if head.BlobGasUsed == nil {
+ head.BlobGasUsed = new(uint64)
+ }
}
}
- statedb.Commit(false, false)
+
+ statedb.Commit(0, false, false)
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
if err := triedb.Commit(root, true); err != nil {
diff --git a/core/genesis_test.go b/core/genesis_test.go
index ea751f7f90..a59beda33f 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -289,9 +289,9 @@ func TestPrecompileActivationAfterHeaderBlock(t *testing.T) {
func TestGenesisWriteUpgradesRegression(t *testing.T) {
require := require.New(t)
- testConfig := *params.TestChainConfig
+ config := *params.TestChainConfig
genesis := &Genesis{
- Config: &testConfig,
+ Config: &config,
Alloc: GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
},
diff --git a/core/mkalloc.go b/core/mkalloc.go
index affc4c3aee..4ab78b31b7 100644
--- a/core/mkalloc.go
+++ b/core/mkalloc.go
@@ -40,32 +40,28 @@ import (
"fmt"
"math/big"
"os"
- "sort"
"strconv"
"github.com/ava-labs/subnet-evm/core"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/exp/slices"
)
type allocItem struct{ Addr, Balance *big.Int }
-type allocList []allocItem
-
-func (a allocList) Len() int { return len(a) }
-func (a allocList) Less(i, j int) bool { return a[i].Addr.Cmp(a[j].Addr) < 0 }
-func (a allocList) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func makelist(g *core.Genesis) allocList {
- a := make(allocList, 0, len(g.Alloc))
+func makelist(g *core.Genesis) []allocItem {
+ items := make([]allocItem, 0, len(g.Alloc))
for addr, account := range g.Alloc {
if len(account.Storage) > 0 || len(account.Code) > 0 || account.Nonce != 0 {
panic(fmt.Sprintf("can't encode account %x", addr))
}
bigAddr := new(big.Int).SetBytes(addr.Bytes())
- a = append(a, allocItem{bigAddr, account.Balance})
+ items = append(items, allocItem{bigAddr, account.Balance})
}
- sort.Sort(a)
- return a
+ slices.SortFunc(items, func(a, b allocItem) bool {
+ return a.Addr.Cmp(b.Addr) < 0
+ })
+ return items
}
func makealloc(g *core.Genesis) string {
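
mkalloc drops the sort.Interface boilerplate (Len/Less/Swap) in favour of golang.org/x/exp/slices.SortFunc. A standalone sketch, assuming the pre-cmp release of x/exp/slices that the patch targets, where SortFunc still takes a less-style func returning bool (newer releases take a comparison func returning int):

package main

import (
	"fmt"
	"math/big"

	"golang.org/x/exp/slices"
)

type allocItem struct{ Addr, Balance *big.Int }

func main() {
	items := []allocItem{
		{big.NewInt(0x30), big.NewInt(3)},
		{big.NewInt(0x10), big.NewInt(1)},
		{big.NewInt(0x20), big.NewInt(2)},
	}
	// Sort by address, ascending, without declaring Len/Less/Swap methods.
	slices.SortFunc(items, func(a, b allocItem) bool {
		return a.Addr.Cmp(b.Addr) < 0
	})
	for _, it := range items {
		// Prints 16 1, then 32 2, then 48 3 (addresses in ascending order).
		fmt.Println(it.Addr, it.Balance)
	}
}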
diff --git a/core/predicate_check_test.go b/core/predicate_check_test.go
index e661ba7f2c..85e685154e 100644
--- a/core/predicate_check_test.go
+++ b/core/predicate_check_test.go
@@ -297,7 +297,7 @@ func TestCheckPredicate(t *testing.T) {
t.Run(name, func(t *testing.T) {
require := require.New(t)
// Create the rules from TestChainConfig and update the predicates based on the test params
- rules := params.TestChainConfig.AvalancheRules(common.Big0, 0)
+ rules := params.TestChainConfig.Rules(common.Big0, 0)
if test.createPredicates != nil {
for address, predicater := range test.createPredicates(t) {
rules.Predicaters[address] = predicater
@@ -423,7 +423,7 @@ func TestCheckPredicatesOutput(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
require := require.New(t)
// Create the rules from TestChainConfig and update the predicates based on the test params
- rules := params.TestChainConfig.AvalancheRules(common.Big0, 0)
+ rules := params.TestChainConfig.Rules(common.Big0, 0)
predicater := precompileconfig.NewMockPredicater(gomock.NewController(t))
predicater.EXPECT().PredicateGas(gomock.Any()).Return(uint64(0), nil).Times(len(test.testTuple))
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 7005019485..47dd95abf9 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -32,6 +32,7 @@ import (
"errors"
"math/big"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/params"
"github.com/ethereum/go-ethereum/common"
@@ -85,7 +86,7 @@ type NumberHash struct {
Hash common.Hash
}
-// ReadAllHashesInRange retrieves all the hashes assigned to blocks at a certain
+// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
// heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
@@ -204,12 +205,11 @@ func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- // Then try to look up the data in leveldb.
data, _ := db.Get(headerKey(number, hash))
if len(data) > 0 {
return data
}
- return nil // Can't find the data anywhere.
+ return nil
}
// HasHeader verifies the existence of a block header corresponding to the hash.
@@ -273,12 +273,11 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- // Then try to look up the data in leveldb.
data, _ := db.Get(blockBodyKey(number, hash))
if len(data) > 0 {
return data
}
- return nil // Can't find the data anywhere.
+ return nil
}
// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
@@ -348,12 +347,11 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- // Then try to look up the data in leveldb.
data, _ := db.Get(blockReceiptsKey(number, hash))
if len(data) > 0 {
return data
}
- return nil // Can't find the data anywhere.
+ return nil
}
// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
@@ -397,13 +395,19 @@ func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64,
return nil
}
header := ReadHeader(db, hash, number)
+
var baseFee *big.Int
if header == nil {
baseFee = big.NewInt(0)
} else {
baseFee = header.BaseFee
}
- if err := receipts.DeriveFields(config, hash, number, time, baseFee, body.Transactions); err != nil {
+ // Compute effective blob gas price.
+ var blobGasPrice *big.Int
+ if header != nil && header.ExcessBlobGas != nil {
+ blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas)
+ }
+ if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
return nil
}
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 17c407fe17..b9e1eddb4d 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -83,7 +83,7 @@ func TestBodyStorage(t *testing.T) {
WriteBody(db, hash, 0, body)
if entry := ReadBody(db, hash, 0); entry == nil {
t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
+ } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
}
if entry := ReadBodyRLP(db, hash, 0); entry == nil {
@@ -137,7 +137,7 @@ func TestBlockStorage(t *testing.T) {
}
if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
+ } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
}
// Delete the block and verify the execution
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 6ee68178f2..f887b7ea3d 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -18,41 +18,17 @@ package rawdb
import (
"bytes"
- "hash"
"math/big"
"testing"
"github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/internal/blocktest"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
- "golang.org/x/crypto/sha3"
)
-// testHasher is the helper tool for transaction/receipt list hashing.
-// The original hasher is trie, in order to get rid of import cycle,
-// use the testing hasher instead.
-type testHasher struct {
- hasher hash.Hash
-}
-
-func newHasher() *testHasher {
- return &testHasher{hasher: sha3.NewLegacyKeccak256()}
-}
-
-func (h *testHasher) Reset() {
- h.hasher.Reset()
-}
-
-func (h *testHasher) Update(key, val []byte) error {
- h.hasher.Write(key)
- h.hasher.Write(val)
- return nil
-}
-
-func (h *testHasher) Hash() common.Hash {
- return common.BytesToHash(h.hasher.Sum(nil))
-}
+var newTestHasher = blocktest.NewHasher
// Tests that positional lookup metadata can be stored and retrieved.
func TestLookupStorage(t *testing.T) {
@@ -99,7 +75,7 @@ func TestLookupStorage(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3}
- block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher())
+ block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher())
// Check that no transactions entries are in a pristine database
for i, tx := range txs {
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 1cb712d3e4..31f89b0d13 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -27,6 +27,8 @@
package rawdb
import (
+ "encoding/binary"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
@@ -77,3 +79,68 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
log.Crit("Failed to delete contract code", "err", err)
}
}
+
+// ReadStateID retrieves the state id with the provided state root.
+func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 {
+ data, err := db.Get(stateIDKey(root))
+ if err != nil || len(data) == 0 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteStateID writes the provided state lookup to database.
+func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) {
+ var buff [8]byte
+ binary.BigEndian.PutUint64(buff[:], id)
+ if err := db.Put(stateIDKey(root), buff[:]); err != nil {
+ log.Crit("Failed to store state ID", "err", err)
+ }
+}
+
+// DeleteStateID deletes the specified state lookup from the database.
+func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) {
+ if err := db.Delete(stateIDKey(root)); err != nil {
+ log.Crit("Failed to delete state ID", "err", err)
+ }
+}
+
+// ReadPersistentStateID retrieves the id of the persistent state from the database.
+func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 {
+ data, _ := db.Get(persistentStateIDKey)
+ if len(data) != 8 {
+ return 0
+ }
+ return binary.BigEndian.Uint64(data)
+}
+
+// WritePersistentStateID stores the id of the persistent state into database.
+func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store the persistent state ID", "err", err)
+ }
+}
+
+// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at
+// the last shutdown.
+func ReadTrieJournal(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(trieJournalKey)
+ return data
+}
+
+// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at
+// shutdown.
+func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) {
+ if err := db.Put(trieJournalKey, journal); err != nil {
+ log.Crit("Failed to store tries journal", "err", err)
+ }
+}
+
+// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at
+// the last shutdown.
+func DeleteTrieJournal(db ethdb.KeyValueWriter) {
+ if err := db.Delete(trieJournalKey); err != nil {
+ log.Crit("Failed to remove tries journal", "err", err)
+ }
+}
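
The new state-ID accessors store a uint64 keyed by state root as an 8-byte big-endian value, with a separate singleton key for the persistent (disk-layer) state id. A short usage sketch against an in-memory database, assuming the package paths as they appear in the patch:

package main

import (
	"fmt"

	"github.com/ava-labs/subnet-evm/core/rawdb"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	root := common.HexToHash("0x01")

	// Per-root state id: write, read back, delete.
	rawdb.WriteStateID(db, root, 42)
	if id := rawdb.ReadStateID(db, root); id != nil {
		fmt.Println(*id) // 42
	}
	rawdb.DeleteStateID(db, root)
	fmt.Println(rawdb.ReadStateID(db, root) == nil) // true

	// The persistent-state id tracks the disk layer; unset it reads as zero.
	fmt.Println(rawdb.ReadPersistentStateID(db)) // 0
	rawdb.WritePersistentStateID(db, 7)
	fmt.Println(rawdb.ReadPersistentStateID(db)) // 7
}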
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index a5e3a517e2..4e020e11ca 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -56,21 +56,23 @@ const HashScheme = "hashScheme"
// on extra state diffs to survive deep reorg.
const PathScheme = "pathScheme"
-// nodeHasher used to derive the hash of trie node.
-type nodeHasher struct{ sha crypto.KeccakState }
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
var hasherPool = sync.Pool{
- New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
}
-func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) }
-func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) }
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
-func (h *nodeHasher) hashData(data []byte) (n common.Hash) {
- h.sha.Reset()
- h.sha.Write(data)
- h.sha.Read(n[:])
- return n
+func (h *hasher) release() {
+ hasherPool.Put(h)
}
// ReadAccountTrieNode retrieves the account trie node and the associated node
@@ -80,9 +82,9 @@ func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.H
if err != nil {
return nil, common.Hash{}
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return data, hasher.hashData(data)
+ h := newHasher()
+ defer h.release()
+ return data, h.hash(data)
}
// HasAccountTrieNode checks the account trie node presence with the specified
@@ -92,9 +94,9 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
if err != nil {
return false
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return hasher.hashData(data) == hash
+ h := newHasher()
+ defer h.release()
+ return h.hash(data) == hash
}
// WriteAccountTrieNode writes the provided account trie node into database.
@@ -118,9 +120,9 @@ func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path
if err != nil {
return nil, common.Hash{}
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return data, hasher.hashData(data)
+ h := newHasher()
+ defer h.release()
+ return data, h.hash(data)
}
// HasStorageTrieNode checks the storage trie node presence with the provided
@@ -130,9 +132,9 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [
if err != nil {
return false
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return hasher.hashData(data) == hash
+ h := newHasher()
+ defer h.release()
+ return h.hash(data) == hash
}
// WriteStorageTrieNode writes the provided storage trie node into database.
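
The renamed hasher keeps the same pattern: keccak states are expensive to allocate, so they are pooled in a sync.Pool and fed through crypto.HashData for every node hash. A standalone sketch of that pattern with a hypothetical hashData wrapper (only the pooling idea comes from the patch):

package main

import (
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"golang.org/x/crypto/sha3"
)

var hasherPool = sync.Pool{
	New: func() interface{} { return sha3.NewLegacyKeccak256().(crypto.KeccakState) },
}

// hashData hashes a blob with a pooled keccak state and returns it to the pool.
func hashData(data []byte) common.Hash {
	sha := hasherPool.Get().(crypto.KeccakState)
	defer hasherPool.Put(sha)
	return crypto.HashData(sha, data)
}

func main() {
	fmt.Println(hashData([]byte("trie node")) == crypto.Keccak256Hash([]byte("trie node"))) // true
}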
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
index fb2ee988f1..6242741a76 100644
--- a/core/rawdb/chain_iterator_test.go
+++ b/core/rawdb/chain_iterator_test.go
@@ -44,7 +44,7 @@ func TestChainIterator(t *testing.T) {
var block *types.Block
var txs []*types.Transaction
to := common.BytesToAddress([]byte{0x11})
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
for i := uint64(1); i <= 10; i++ {
@@ -70,7 +70,7 @@ func TestChainIterator(t *testing.T) {
})
}
txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
@@ -121,7 +121,7 @@ func TestIndexTransactions(t *testing.T) {
to := common.BytesToAddress([]byte{0x11})
// Write empty genesis block
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher())
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
@@ -148,7 +148,7 @@ func TestIndexTransactions(t *testing.T) {
})
}
txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 45f6a584f3..e0e85fb3b7 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -83,13 +83,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
}
// TruncateHead returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateHead(items uint64) error {
- return errNotSupported
+func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) {
+ return 0, errNotSupported
}
// TruncateTail returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateTail(items uint64) error {
- return errNotSupported
+func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) {
+ return 0, errNotSupported
}
// Sync returns an error as we don't have a backing chain freezer.
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index b972bf2681..261169ba13 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -48,6 +48,9 @@ var (
// headBlockKey tracks the latest known full block's hash.
headBlockKey = []byte("LastBlock")
+ // persistentStateIDKey tracks the id of the latest stored state (for path-based only).
+ persistentStateIDKey = []byte("LastStateID")
+
// snapshotRootKey tracks the hash of the last snapshot.
snapshotRootKey = []byte("SnapshotRoot")
@@ -57,6 +60,9 @@ var (
// snapshotGeneratorKey tracks the snapshot generation marker across restarts.
snapshotGeneratorKey = []byte("SnapshotGenerator")
+ // trieJournalKey tracks the in-memory trie node layers across restarts.
+ trieJournalKey = []byte("TrieJournal")
+
// txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail")
@@ -93,6 +99,7 @@ var (
// Path-based storage scheme of merkle patricia trie.
trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
+ stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
@@ -224,6 +231,11 @@ func upgradeConfigKey(hash common.Hash) []byte {
return append(upgradeConfigPrefix, hash.Bytes()...)
}
+// stateIDKey = stateIDPrefix + root (32 bytes)
+func stateIDKey(root common.Hash) []byte {
+ return append(stateIDPrefix, root.Bytes()...)
+}
+
// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
func accountTrieNodeKey(path []byte) []byte {
return append(trieNodeAccountPrefix, path...)
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 9563f8cc5f..0fa34a8c06 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -107,13 +107,13 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err e
// TruncateHead is a noop passthrough that just forwards the request to the underlying
// database.
-func (t *table) TruncateHead(items uint64) error {
+func (t *table) TruncateHead(items uint64) (uint64, error) {
return t.db.TruncateHead(items)
}
// TruncateTail is a noop passthrough that just forwards the request to the underlying
// database.
-func (t *table) TruncateTail(items uint64) error {
+func (t *table) TruncateTail(items uint64) (uint64, error) {
return t.db.TruncateTail(items)
}
diff --git a/core/state/database.go b/core/state/database.go
index f550a53a75..15d1367b56 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -36,6 +36,7 @@ import (
"github.com/ava-labs/subnet-evm/trie/trienode"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
)
@@ -53,16 +54,16 @@ type Database interface {
OpenTrie(root common.Hash) (Trie, error)
// OpenStorageTrie opens the storage trie of an account.
- OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
+ OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error)
// CopyTrie returns an independent copy of the given trie.
CopyTrie(Trie) Trie
// ContractCode retrieves a particular contract's code.
- ContractCode(addrHash, codeHash common.Hash) ([]byte, error)
+ ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error)
// ContractCodeSize retrieves a particular contracts code's size.
- ContractCodeSize(addrHash, codeHash common.Hash) (int, error)
+ ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error)
// DiskDB returns the underlying key-value disk database.
DiskDB() ethdb.KeyValueStore
@@ -103,6 +104,10 @@ type Trie interface {
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount) error
+ // UpdateContractCode abstracts code write to the trie. It is expected
+ // to be moved to the stateWriter interface when the latter is ready.
+ UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error
+
// DeleteStorage removes any existing value for key from the trie. If a node
// was not found in the database, a trie.MissingNodeError is returned.
DeleteStorage(addr common.Address, key []byte) error
@@ -120,11 +125,12 @@ type Trie interface {
// The returned nodeset can be nil if the trie is clean(nothing to commit).
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
- Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
- // starts at the key after the given start key.
- NodeIterator(startKey []byte) trie.NodeIterator
+ // starts at the key after the given start key. An error will be returned
+ // if it fails to create the node iterator.
+ NodeIterator(startKey []byte) (trie.NodeIterator, error)
// Prove constructs a Merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
@@ -133,7 +139,7 @@ type Trie interface {
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root), ending
// with the node that proves the absence of the key.
- Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
+ Prove(key []byte, proofDb ethdb.KeyValueWriter) error
}
// NewDatabase creates a backing store for state. The returned database is safe for
@@ -182,8 +188,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
}
// OpenStorageTrie opens the storage trie of an account.
-func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) {
- tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.triedb)
+func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) {
+ tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb)
if err != nil {
return nil, err
}
@@ -201,7 +207,7 @@ func (db *cachingDB) CopyTrie(t Trie) Trie {
}
// ContractCode retrieves a particular contract's code.
-func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
+func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) {
code, _ := db.codeCache.Get(codeHash)
if len(code) > 0 {
return code, nil
@@ -216,11 +222,11 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error
}
// ContractCodeSize retrieves a particular contracts code's size.
-func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
+func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) {
if cached, ok := db.codeSizeCache.Get(codeHash); ok {
return cached, nil
}
- code, err := db.ContractCode(addrHash, codeHash)
+ code, err := db.ContractCode(addr, codeHash)
return len(code), err
}
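
The Database and Trie interfaces now take the plain account address and derive the secure-trie key internally, as OpenStorageTrie does above via crypto.Keccak256Hash(address.Bytes()). A tiny sketch of that derivation (the sample address and printing are illustrative only):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	addr := common.HexToAddress("0x000000000000000000000000000000000000dEaD")
	// This is what the caching database now does on the caller's behalf.
	addrHash := crypto.Keccak256Hash(addr.Bytes())
	fmt.Println(addr)     // the 20-byte account address
	fmt.Println(addrHash) // keccak256 of that address, used as the storage trie key
}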
diff --git a/core/state/dump.go b/core/state/dump.go
index ecbe833b00..65b1dca9f9 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -150,7 +150,11 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
log.Info("Trie dumping started", "root", s.trie.Hash())
c.OnRoot(s.trie.Hash())
- it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
+ trieIt, err := s.trie.NodeIterator(conf.Start)
+ if err != nil {
+ return nil
+ }
+ it := trie.NewIterator(trieIt)
for it.Next() {
var data types.StateAccount
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
@@ -177,18 +181,23 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
} else {
address = &addr
}
- obj := newObject(s, addr, data)
+ obj := newObject(s, addr, &data)
if !conf.SkipCode {
- account.Code = obj.Code(s.db)
+ account.Code = obj.Code()
}
if !conf.SkipStorage {
account.Storage = make(map[common.Hash]string)
- tr, err := obj.getTrie(s.db)
+ tr, err := obj.getTrie()
if err != nil {
log.Error("Failed to load storage trie", "err", err)
continue
}
- storageIt := trie.NewIterator(tr.NodeIterator(nil))
+ trieIt, err := tr.NodeIterator(nil)
+ if err != nil {
+ log.Error("Failed to create trie iterator", "err", err)
+ continue
+ }
+ storageIt := trie.NewIterator(trieIt)
for storageIt.Next() {
_, content, _, err := rlp.Split(storageIt.Value)
if err != nil {
diff --git a/core/state/iterator.go b/core/state/iterator.go
index 47b46b53e3..9129ce16ca 100644
--- a/core/state/iterator.go
+++ b/core/state/iterator.go
@@ -28,6 +28,7 @@ package state
import (
"bytes"
+ "errors"
"fmt"
"github.com/ava-labs/subnet-evm/core/types"
@@ -37,7 +38,8 @@ import (
)
// nodeIterator is an iterator to traverse the entire state trie post-order,
-// including all of the contract code and contract state tries.
+// including all of the contract code and contract state tries. Preimage is
+// required in order to resolve the contract address.
type nodeIterator struct {
state *StateDB // State being iterated
@@ -84,8 +86,12 @@ func (it *nodeIterator) step() error {
return nil
}
// Initialize the iterator if we've just started
+ var err error
if it.stateIt == nil {
- it.stateIt = it.state.trie.NodeIterator(nil)
+ it.stateIt, err = it.state.trie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
}
// If we had data nodes previously, we surely have at least state nodes
if it.dataIt != nil {
@@ -119,18 +125,28 @@ func (it *nodeIterator) step() error {
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
return err
}
- dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root)
+ // Lookup the preimage of account hash
+ preimage := it.state.trie.GetKey(it.stateIt.LeafKey())
+ if preimage == nil {
+ return errors.New("account address is not available")
+ }
+ address := common.BytesToAddress(preimage)
+
+ // Traverse the storage slots belong to the account
+ dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root)
+ if err != nil {
+ return err
+ }
+ it.dataIt, err = dataTrie.NodeIterator(nil)
if err != nil {
return err
}
- it.dataIt = dataTrie.NodeIterator(nil)
if !it.dataIt.Next(true) {
it.dataIt = nil
}
if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
it.codeHash = common.BytesToHash(account.CodeHash)
- addrHash := common.BytesToHash(it.stateIt.LeafKey())
- it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash))
+ it.code, err = it.state.db.ContractCode(address, common.BytesToHash(account.CodeHash))
if err != nil {
return fmt.Errorf("code %x: %v", account.CodeHash, err)
}
diff --git a/core/state/journal.go b/core/state/journal.go
index 1f62869fc3..4ba90fba5f 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -100,12 +100,19 @@ type (
account *common.Address
}
resetObjectChange struct {
+ account *common.Address
prev *stateObject
prevdestruct bool
+ prevAccount []byte
+ prevStorage map[common.Hash][]byte
+
+ prevAccountOriginExist bool
+ prevAccountOrigin []byte
+ prevStorageOrigin map[common.Hash][]byte
}
- suicideChange struct {
+ selfDestructChange struct {
account *common.Address
- prev bool // whether account had already suicided
+ prev bool // whether account had already self-destructed
prevbalance *big.Int
}
@@ -169,21 +176,33 @@ func (ch resetObjectChange) revert(s *StateDB) {
if !ch.prevdestruct {
delete(s.stateObjectsDestruct, ch.prev.address)
}
+ if ch.prevAccount != nil {
+ s.accounts[ch.prev.addrHash] = ch.prevAccount
+ }
+ if ch.prevStorage != nil {
+ s.storages[ch.prev.addrHash] = ch.prevStorage
+ }
+ if ch.prevAccountOriginExist {
+ s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin
+ }
+ if ch.prevStorageOrigin != nil {
+ s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin
+ }
}
func (ch resetObjectChange) dirtied() *common.Address {
- return nil
+ return ch.account
}
-func (ch suicideChange) revert(s *StateDB) {
+func (ch selfDestructChange) revert(s *StateDB) {
obj := s.getStateObject(*ch.account)
if obj != nil {
- obj.suicided = ch.prev
+ obj.selfDestructed = ch.prev
obj.setBalance(ch.prevbalance)
}
}
-func (ch suicideChange) dirtied() *common.Address {
+func (ch selfDestructChange) dirtied() *common.Address {
return ch.account
}
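The journal entries above follow the usual revert/dirtied contract: each mutation records enough pre-state to undo itself and names the account it touched. A simplified sketch of that pattern for the renamed selfDestructChange, using stand-in types rather than the real StateDB:

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// stateDB is a stand-in for *state.StateDB, reduced to what the sketch needs.
type stateDB struct {
	balances       map[common.Address]*big.Int
	selfDestructed map[common.Address]bool
}

// journalEntry mirrors the revert/dirtied contract used by the state journal.
type journalEntry interface {
	revert(s *stateDB)        // undo the change on the given state
	dirtied() *common.Address // account touched by the change, if any
}

// selfDestructChange records the pre-state of a SELFDESTRUCT so it can be
// rolled back if the enclosing transaction reverts.
type selfDestructChange struct {
	account     *common.Address
	prev        bool     // whether the account had already self-destructed
	prevBalance *big.Int // balance before the destruct zeroed it
}

func (ch selfDestructChange) revert(s *stateDB) {
	s.selfDestructed[*ch.account] = ch.prev
	s.balances[*ch.account] = ch.prevBalance
}

func (ch selfDestructChange) dirtied() *common.Address { return ch.account }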
diff --git a/core/state/metrics.go b/core/state/metrics.go
index ff131416f0..5af6243c98 100644
--- a/core/state/metrics.go
+++ b/core/state/metrics.go
@@ -37,4 +37,11 @@ var (
storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
+
+ slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil)
+ slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil)
+ slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
+ slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
+ slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
+ slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil)
)
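The new gauges, meters and timer instrument the storage wiping that happens when self-destructed accounts are deleted. A hedged sketch of how such metrics would typically be fed from a deletion path (recordSlotDeletion and the example metric names are illustrative; the real call sites live in the StateDB commit code):

package example

import (
	"time"

	"github.com/ava-labs/subnet-evm/metrics"
)

var (
	exampleDeletionTimer = metrics.NewRegisteredResettingTimer("example/delete/storage/timer", nil)
	exampleDeletionCount = metrics.NewRegisteredMeter("example/delete/storage/slot", nil)
	exampleDeletionSize  = metrics.NewRegisteredMeter("example/delete/storage/size", nil)
)

// recordSlotDeletion reports how long a storage wipe took, how many slots were
// removed and how many bytes they occupied.
func recordSlotDeletion(start time.Time, slots int, size int64) {
	exampleDeletionTimer.Update(time.Since(start))
	exampleDeletionCount.Mark(int64(slots))
	exampleDeletionSize.Mark(size)
}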
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 84981a6d4a..1a5a342384 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -67,7 +67,6 @@ const (
// Config includes all the configurations for pruning.
type Config struct {
Datadir string // The directory of the state database
- Cachedir string // The directory of state clean cache
BloomSize uint64 // The Megabytes of memory allocated to bloom-filter
}
@@ -261,7 +260,7 @@ func (p *Pruner) Prune(root common.Hash) error {
return err
}
if stateBloomRoot != (common.Hash{}) {
- return RecoverPruning(p.config.Datadir, p.db, p.config.Cachedir)
+ return RecoverPruning(p.config.Datadir, p.db)
}
// If the target state root is not specified, return a fatal error.
@@ -276,11 +275,6 @@ func (p *Pruner) Prune(root common.Hash) error {
} else {
log.Info("Selecting last accepted block root as the pruning target", "root", root)
}
- // Before start the pruning, delete the clean trie cache first.
- // It's necessary otherwise in the next restart we will hit the
- // deleted state root in the "clean cache" so that the incomplete
- // state is picked for usage.
- deleteCleanTrieCache(p.config.Cachedir)
// Traverse the target state, re-construct the whole state trie and
// commit to the given bloom filter.
@@ -310,7 +304,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// pruning can be resumed. What's more if the bloom filter is constructed, the
// pruning **has to be resumed**. Otherwise a lot of dangling nodes may be left
// in the disk.
-func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error {
+func RecoverPruning(datadir string, db ethdb.Database) error {
stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir)
if err != nil {
return err
@@ -328,12 +322,6 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
}
log.Info("Loaded state bloom filter", "path", stateBloomPath)
- // Before start the pruning, delete the clean trie cache first.
- // It's necessary otherwise in the next restart we will hit the
- // deleted state root in the "clean cache" so that the incomplete
- // state is picked for usage.
- deleteCleanTrieCache(trieCachePath)
-
// All the state roots of the middle layers should be forcibly pruned,
// otherwise the dangling state will be left.
if stateBloomRoot != headBlock.Root() {
@@ -358,7 +346,10 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if err != nil {
return err
}
- accIter := t.NodeIterator(nil)
+ accIter, err := t.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
for accIter.Next(true) {
hash := accIter.Hash()
@@ -379,7 +370,10 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if err != nil {
return err
}
- storageIter := storageTrie.NodeIterator(nil)
+ storageIter, err := storageTrie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
for storageIter.Next(true) {
hash := storageIter.Hash()
if hash != (common.Hash{}) {
@@ -429,23 +423,3 @@ func findBloomFilter(datadir string) (string, common.Hash, error) {
}
return stateBloomPath, stateBloomRoot, nil
}
-
-const warningLog = `
-
-WARNING!
-
-The clean trie cache is not found. Please delete it by yourself after the
-pruning. Remember don't start the Subnet-EVM without deleting the clean trie cache
-otherwise the entire database may be damaged!
-
-Check the configuration option "offline-pruning-enabled" for more details.
-`
-
-func deleteCleanTrieCache(path string) {
- if !common.FileExist(path) {
- log.Warn(warningLog)
- return
- }
- os.RemoveAll(path)
- log.Info("Deleted trie clean cache", "path", path)
-}
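With the clean-cache handling gone, resuming an interrupted prune only needs the data directory and the database handle. A small sketch of the updated call, assuming the go-ethereum ethdb interface used elsewhere in this tree:

package example

import (
	"github.com/ava-labs/subnet-evm/core/state/pruner"
	"github.com/ethereum/go-ethereum/ethdb"
)

// resumePruning wraps the new two-argument RecoverPruning; the trie clean
// cache path parameter was removed together with deleteCleanTrieCache.
func resumePruning(datadir string, db ethdb.Database) error {
	return pruner.RecoverPruning(datadir, db)
}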
diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go
deleted file mode 100644
index 5a3444eb44..0000000000
--- a/core/state/snapshot/account.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snapshot
-
-import (
- "bytes"
- "math/big"
-
- "github.com/ava-labs/subnet-evm/core/types"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/rlp"
-)
-
-// Account is a modified version of a state.Account, where the root is replaced
-// with a byte slice. This format can be used to represent full-consensus format
-// or slim-snapshot format which replaces the empty root and code hash as nil
-// byte slice.
-type Account struct {
- Nonce uint64
- Balance *big.Int
- Root []byte
- CodeHash []byte
-}
-
-// SlimAccount converts a state.Account content into a slim snapshot account
-func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account {
- slim := Account{
- Nonce: nonce,
- Balance: balance,
- }
- if root != types.EmptyRootHash {
- slim.Root = root[:]
- }
- if !bytes.Equal(codehash, types.EmptyCodeHash[:]) {
- slim.CodeHash = codehash
- }
- return slim
-}
-
-// SlimAccountRLP converts a state.Account content into a slim snapshot
-// version RLP encoded.
-func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
- data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash))
- if err != nil {
- panic(err)
- }
- return data
-}
-
-// FullAccount decodes the data on the 'slim RLP' format and return
-// the consensus format account.
-func FullAccount(data []byte) (Account, error) {
- var account Account
- if err := rlp.DecodeBytes(data, &account); err != nil {
- return Account{}, err
- }
- if len(account.Root) == 0 {
- account.Root = types.EmptyRootHash[:]
- }
- if len(account.CodeHash) == 0 {
- account.CodeHash = types.EmptyCodeHash[:]
- }
- return account, nil
-}
-
-// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
-func FullAccountRLP(data []byte) ([]byte, error) {
- account, err := FullAccount(data)
- if err != nil {
- return nil, err
- }
- return rlp.EncodeToBytes(account)
-}
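The snapshot-local Account type and its helpers are replaced by the slim-account API in core/types, which the rest of this diff calls as types.SlimAccountRLP and types.FullAccount. A round-trip sketch under that assumption:

package example

import (
	"math/big"

	"github.com/ava-labs/subnet-evm/core/types"
)

// slimRoundTrip encodes a consensus account into the slim snapshot format and
// decodes it back; empty root and code hash are elided in the slim encoding.
func slimRoundTrip() error {
	acct := types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(42),
		Root:     types.EmptyRootHash,
		CodeHash: types.EmptyCodeHash.Bytes(),
	}
	slim := types.SlimAccountRLP(acct)
	full, err := types.FullAccount(slim)
	if err != nil {
		return err
	}
	_ = full // full.Root and full.CodeHash are restored to the consensus values
	return nil
}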
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index c731af79b0..e93a4fcb88 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -27,7 +27,6 @@
package snapshot
import (
- "bytes"
"encoding/binary"
"errors"
"fmt"
@@ -311,7 +310,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
fullData []byte
)
if leafCallback == nil {
- fullData, err = FullAccountRLP(it.(AccountIterator).Account())
+ fullData, err = types.FullAccountRLP(it.(AccountIterator).Account())
if err != nil {
return stop(err)
}
@@ -323,7 +322,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
return stop(err)
}
// Fetch the next account and process it concurrently
- account, err := FullAccount(it.(AccountIterator).Account())
+ account, err := types.FullAccount(it.(AccountIterator).Account())
if err != nil {
return stop(err)
}
@@ -333,7 +332,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
results <- err
return
}
- if !bytes.Equal(account.Root, subroot.Bytes()) {
+ if account.Root != subroot {
results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot)
return
}
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index 74dcfc92d5..664cb91721 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -31,14 +31,15 @@ import (
"fmt"
"math"
"math/rand"
- "sort"
"sync"
"sync/atomic"
"time"
+ "github.com/ava-labs/subnet-evm/core/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
bloomfilter "github.com/holiman/bloomfilter/v2"
+ "golang.org/x/exp/slices"
)
var (
@@ -289,7 +290,7 @@ func (dl *diffLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
-func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
+func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
@@ -297,7 +298,7 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
if len(data) == 0 { // can be both nil and []byte{}
return nil, nil
}
- account := new(Account)
+ account := new(types.SlimAccount)
if err := rlp.DecodeBytes(data, account); err != nil {
panic(err)
}
@@ -309,8 +310,8 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
//
// Note the returned account is not a copy, please don't modify it.
func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
- dl.lock.RLock()
// Check staleness before reaching further.
+ dl.lock.RLock()
if dl.Stale() {
dl.lock.RUnlock()
return nil, ErrSnapshotStale
@@ -541,7 +542,7 @@ func (dl *diffLayer) AccountList() []common.Hash {
dl.accountList = append(dl.accountList, hash)
}
}
- sort.Sort(hashes(dl.accountList))
+ slices.SortFunc(dl.accountList, common.Hash.Cmp)
dl.memory += uint64(len(dl.accountList) * common.HashLength)
return dl.accountList
}
@@ -579,7 +580,7 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool)
for k := range storageMap {
storageList = append(storageList, k)
}
- sort.Sort(hashes(storageList))
+ slices.SortFunc(storageList, common.Hash.Cmp)
dl.storageList[accountHash] = storageList
dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
return storageList, destructed
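Sorting the hash lists now uses slices.SortFunc with the three-way comparator common.Hash.Cmp instead of a dedicated sort.Interface type. A minimal illustration of the pattern:

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"golang.org/x/exp/slices"
)

// sortHashes sorts a hash list in ascending order; the common.Hash.Cmp method
// value can be passed straight to slices.SortFunc.
func sortHashes(list []common.Hash) []common.Hash {
	slices.SortFunc(list, common.Hash.Cmp)
	return list
}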
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
index 063d4435e5..08bbf4104d 100644
--- a/core/state/snapshot/difflayer_test.go
+++ b/core/state/snapshot/difflayer_test.go
@@ -246,7 +246,7 @@ func TestInsertAndMerge(t *testing.T) {
func emptyLayer() *diskLayer {
return &diskLayer{
diskdb: memorydb.New(),
- cache: utils.NewMeteredCache(500*1024, "", "", 0),
+ cache: utils.NewMeteredCache(500*1024, "", 0),
}
}
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 7467ba8c9c..5e308fde9d 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -32,6 +32,7 @@ import (
"time"
"github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/trie"
"github.com/ava-labs/subnet-evm/utils"
"github.com/ethereum/go-ethereum/common"
@@ -88,7 +89,7 @@ func (dl *diskLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
-func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
+func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
@@ -96,7 +97,7 @@ func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
if len(data) == 0 { // can be both nil and []byte{}
return nil, nil
}
- account := new(Account)
+ account := new(types.SlimAccount)
if err := rlp.DecodeBytes(data, account); err != nil {
panic(err)
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index bf288d924e..2c9bd76ac1 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -30,7 +30,6 @@ import (
"bytes"
"encoding/binary"
"fmt"
- "math/big"
"time"
"github.com/ava-labs/subnet-evm/core/rawdb"
@@ -287,7 +286,15 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
accMarker = dl.genMarker[:common.HashLength]
}
- accIt := trie.NewIterator(accTrie.NodeIterator(accMarker))
+ nodeIt, err := accTrie.NodeIterator(accMarker)
+ if err != nil {
+ log.Error("Generator failed to create account iterator", "root", dl)
+ abort := <-dl.genAbort
+ dl.genStats = stats
+ close(abort)
+ return
+ }
+ accIt := trie.NewIterator(nodeIt)
batch := dl.diskdb.NewBatch()
// Iterate from the previous marker and continue generating the state snapshot
@@ -296,16 +303,11 @@ func (dl *diskLayer) generate(stats *generatorStats) {
// Retrieve the current account and flatten it into the internal format
accountHash := common.BytesToHash(accIt.Key)
- var acc struct {
- Nonce uint64
- Balance *big.Int
- Root common.Hash
- CodeHash []byte
- }
+ var acc types.StateAccount
if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
- data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+ data := types.SlimAccountRLP(acc)
// If the account is not yet in-progress, write it out
if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
@@ -339,7 +341,15 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
storeMarker = dl.genMarker[common.HashLength:]
}
- storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker))
+ nodeIt, err := storeTrie.NodeIterator(storeMarker)
+ if err != nil {
+ log.Error("Generator failed to create storage iterator", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err)
+ abort := <-dl.genAbort
+ dl.genStats = stats
+ close(abort)
+ return
+ }
+ storeIt := trie.NewIterator(nodeIt)
for storeIt.Next() {
rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value)
stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value))
@@ -399,5 +409,5 @@ func (dl *diskLayer) generate(stats *generatorStats) {
}
func newMeteredSnapshotCache(size int) *utils.MeteredCache {
- return utils.NewMeteredCache(size, "", snapshotCacheNamespace, snapshotCacheStatsUpdateFrequency)
+ return utils.NewMeteredCache(size, snapshotCacheNamespace, snapshotCacheStatsUpdateFrequency)
}
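Since trie NodeIterator construction can now fail, every caller checks the error before wrapping the node iterator into a key/value iterator, as the generator does above. A compact sketch of that pattern (iterateLeaves is a hypothetical helper):

package example

import (
	"github.com/ava-labs/subnet-evm/trie"
)

// iterateLeaves walks all leaves of a state trie starting at the given key,
// propagating the constructor error that NodeIterator can now return.
func iterateLeaves(tr *trie.StateTrie, start []byte, visit func(key, value []byte)) error {
	nodeIt, err := tr.NodeIterator(start)
	if err != nil {
		return err
	}
	it := trie.NewIterator(nodeIt)
	for it.Next() {
		visit(it.Key, it.Value)
	}
	return it.Err
}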
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 7b2c6d7874..9eb19764dc 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -62,9 +62,9 @@ func TestGeneration(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -97,16 +97,16 @@ func TestGenerateExistentState(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
root, snap := helper.CommitAndGenerate()
@@ -170,18 +170,17 @@ func newHelper() *testHelper {
}
}
-func (t *testHelper) addTrieAccount(acckey string, acc *Account) {
+func (t *testHelper) addTrieAccount(acckey string, acc *types.StateAccount) {
val, _ := rlp.EncodeToBytes(acc)
t.accTrie.MustUpdate([]byte(acckey), val)
}
-func (t *testHelper) addSnapAccount(acckey string, acc *Account) {
- val, _ := rlp.EncodeToBytes(acc)
+func (t *testHelper) addSnapAccount(acckey string, acc *types.StateAccount) {
key := hashData([]byte(acckey))
- rawdb.WriteAccountSnapshot(t.diskdb, key, val)
+ rawdb.WriteAccountSnapshot(t.diskdb, key, types.SlimAccountRLP(*acc))
}
-func (t *testHelper) addAccount(acckey string, acc *Account) {
+func (t *testHelper) addAccount(acckey string, acc *types.StateAccount) {
t.addTrieAccount(acckey, acc)
t.addSnapAccount(acckey, acc)
}
@@ -193,28 +192,28 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
}
}
-func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) []byte {
+func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) common.Hash {
id := trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash)
stTrie, _ := trie.NewStateTrie(id, t.triedb)
for i, k := range keys {
stTrie.MustUpdate([]byte(k), []byte(vals[i]))
}
if !commit {
- return stTrie.Hash().Bytes()
+ return stTrie.Hash()
}
- root, nodes := stTrie.Commit(false)
+ root, nodes, _ := stTrie.Commit(false)
if nodes != nil {
t.nodes.Merge(nodes)
}
- return root.Bytes()
+ return root
}
func (t *testHelper) Commit() common.Hash {
- root, nodes := t.accTrie.Commit(true)
+ root, nodes, _ := t.accTrie.Commit(true)
if nodes != nil {
t.nodes.Merge(nodes)
}
- t.triedb.Update(root, types.EmptyRootHash, t.nodes)
+ t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, nil)
t.triedb.Commit(root, false)
return root
}
@@ -247,28 +246,28 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
helper := newHelper()
// Account one, empty root but non-empty database
- helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
// Account two, non empty root but empty database
stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
// Miss slots
{
// Account three, non empty root but misses slots in the beginning
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})
// Account four, non empty root but misses slots in the middle
helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})
// Account five, non empty root but misses slots in the end
helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
}
@@ -276,22 +275,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
{
// Account six, non empty root but wrong slots in the beginning
helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})
// Account seven, non empty root but wrong slots in the middle
helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})
// Account eight, non empty root but wrong slots in the end
helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-8", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})
// Account 9, non empty root but rotated slots
helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-9", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
}
@@ -299,17 +298,17 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
{
// Account 10, non empty root but extra slots in the beginning
helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-10", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})
// Account 11, non empty root but extra slots in the middle
helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-11", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})
// Account 12, non empty root but extra slots in the end
helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-12", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
}
@@ -349,25 +348,25 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
// Missing accounts, only in the trie
{
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
- helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
- helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
+ helper.addTrieAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
+ helper.addTrieAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
}
// Wrong accounts
{
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
}
// Extra accounts, only in the snap
{
- helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning
- helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle
- helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
+ helper.addSnapAccount("acc-0", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning
+ helper.addSnapAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: common.Hex2Bytes("0x1234")}) // Middle
+ helper.addSnapAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
}
root, snap := helper.CommitAndGenerate()
@@ -396,9 +395,9 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
// without any storage slots to keep the test smaller.
helper := newHelper()
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
@@ -430,15 +429,16 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
// two of which also has the same 3-slot storage trie attached.
helper := newHelper()
- stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+
root := helper.Commit()
// Delete a storage trie root and ensure the generator chokes
- helper.diskdb.Delete(stRoot) // We can only corrupt the disk database, so flush the tries out
+ helper.diskdb.Delete(stRoot.Bytes())
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil)
select {
@@ -463,11 +463,12 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
// two of which also has the same 3-slot storage trie attached.
helper := newHelper()
- stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
- stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+ stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+
root := helper.Commit()
// Delete a storage trie leaf and ensure the generator chokes
@@ -498,7 +499,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -518,7 +519,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte("acc-2"))
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
@@ -569,7 +570,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -583,7 +584,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
{
// 100 accounts exist only in snapshot
for i := 0; i < 1000; i++ {
- acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
@@ -619,7 +620,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
}
helper := newHelper()
{
- acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val)
@@ -655,7 +656,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
}
helper := newHelper()
{
- acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
@@ -692,7 +693,7 @@ func TestGenerateFromEmptySnap(t *testing.T) {
for i := 0; i < 400; i++ {
stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
- &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
}
root, snap := helper.CommitAndGenerate()
t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4
@@ -728,7 +729,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) {
for i := 0; i < 8; i++ {
accKey := fmt.Sprintf("acc-%d", i)
stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true)
- helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount(accKey, &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
var moddedKeys []string
var moddedVals []string
for ii := 0; ii < 8; ii++ {
@@ -819,12 +820,12 @@ func populateDangling(disk ethdb.KeyValueStore) {
func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
var helper = newHelper()
- stRoot := helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
@@ -855,11 +856,11 @@ func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
populateDangling(helper.diskdb)
diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go
index 04a61d4a12..4e324ee28b 100644
--- a/core/state/snapshot/iterator_fast.go
+++ b/core/state/snapshot/iterator_fast.go
@@ -32,6 +32,7 @@ import (
"sort"
"github.com/ethereum/go-ethereum/common"
+ "golang.org/x/exp/slices"
)
// weightedIterator is an iterator with an assigned weight. It is used to prioritise
@@ -42,32 +43,25 @@ type weightedIterator struct {
priority int
}
-// weightedIterators is a set of iterators implementing the sort.Interface.
-type weightedIterators []*weightedIterator
-
-// Len implements sort.Interface, returning the number of active iterators.
-func (its weightedIterators) Len() int { return len(its) }
-
-// Less implements sort.Interface, returning which of two iterators in the stack
-// is before the other.
-func (its weightedIterators) Less(i, j int) bool {
+func (it *weightedIterator) Cmp(other *weightedIterator) int {
// Order the iterators primarily by the account hashes
- hashI := its[i].it.Hash()
- hashJ := its[j].it.Hash()
+ hashI := it.it.Hash()
+ hashJ := other.it.Hash()
switch bytes.Compare(hashI[:], hashJ[:]) {
case -1:
- return true
+ return -1
case 1:
- return false
+ return 1
}
// Same account/storage-slot in multiple layers, split by priority
- return its[i].priority < its[j].priority
-}
-
-// Swap implements sort.Interface, swapping two entries in the iterator stack.
-func (its weightedIterators) Swap(i, j int) {
- its[i], its[j] = its[j], its[i]
+ if it.priority < other.priority {
+ return -1
+ }
+ if it.priority > other.priority {
+ return 1
+ }
+ return 0
}
// fastIterator is a more optimized multi-layer iterator which maintains a
@@ -79,7 +73,7 @@ type fastIterator struct {
curAccount []byte
curSlot []byte
- iterators weightedIterators
+ iterators []*weightedIterator
initiated bool
account bool
fail error
@@ -176,7 +170,7 @@ func (fi *fastIterator) init() {
}
}
// Re-sort the entire list
- sort.Sort(fi.iterators)
+ slices.SortFunc(fi.iterators, func(a, b *weightedIterator) int { return a.Cmp(b) })
fi.initiated = false
}
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 30d9882bbe..f31570f791 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -61,7 +61,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
// is present in the database (or crashed mid-update).
baseBlockHash := rawdb.ReadSnapshotBlockHash(diskdb)
if baseBlockHash == (common.Hash{}) {
- return nil, false, fmt.Errorf("missing or corrupted snapshot, no snapshot block hash")
+ return nil, false, errors.New("missing or corrupted snapshot, no snapshot block hash")
}
if baseBlockHash != blockHash {
return nil, false, fmt.Errorf("block hash stored on disk (%#x) does not match last accepted (%#x)", baseBlockHash, blockHash)
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 0d9707d029..2b29225d89 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -35,6 +35,7 @@ import (
"time"
"github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/metrics"
"github.com/ava-labs/subnet-evm/trie"
"github.com/ava-labs/subnet-evm/utils"
@@ -124,7 +125,7 @@ type Snapshot interface {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
- Account(hash common.Hash) (*Account, error)
+ Account(hash common.Hash) (*types.SlimAccount, error)
// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
@@ -944,7 +945,7 @@ func NewDiskLayer(diskdb ethdb.KeyValueStore) Snapshot {
// state sync uses iterators to access data, so this cache is not used.
// initializing it out of caution.
- cache: utils.NewMeteredCache(32*1024, "", "", 0),
+ cache: utils.NewMeteredCache(32*1024, "", 0),
}
}
@@ -954,7 +955,7 @@ func NewTestTree(diskdb ethdb.KeyValueStore, blockHash, root common.Hash) *Tree
diskdb: diskdb,
root: root,
blockHash: blockHash,
- cache: utils.NewMeteredCache(128*256, "", "", 0),
+ cache: utils.NewMeteredCache(128*256, "", 0),
created: time.Now(),
}
return &Tree{
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 72287e1f5b..12e6fcf603 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -51,11 +51,10 @@ func randomHash() common.Hash {
// randomAccount generates a random account and returns it RLP encoded.
func randomAccount() []byte {
- root := randomHash()
- a := Account{
+ a := &types.StateAccount{
Balance: big.NewInt(rand.Int63()),
Nonce: rand.Uint64(),
- Root: root[:],
+ Root: randomHash(),
CodeHash: types.EmptyCodeHash[:],
}
data, _ := rlp.EncodeToBytes(a)
@@ -696,7 +695,7 @@ func TestReadStateDuringFlattening(t *testing.T) {
snap := snaps.Snapshot(diffRootC)
// Register the testing hook to access the state after flattening
- var result = make(chan *Account)
+ var result = make(chan *types.SlimAccount)
snaps.onFlatten = func() {
// Spin up a thread to read the account from the pre-created
// snapshot handler. It's expected to be blocked.
diff --git a/core/state/state_object.go b/core/state/state_object.go
index d233fc2718..8846b81e49 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -31,7 +31,6 @@ import (
"fmt"
"io"
"math/big"
- "sync"
"time"
"github.com/ava-labs/subnet-evm/core/types"
@@ -68,33 +67,38 @@ func (s Storage) Copy() Storage {
// stateObject represents an Ethereum account which is being modified.
//
// The usage pattern is as follows:
-// First you need to obtain a state object.
-// Account values can be accessed and modified through the object.
-// Finally, call commitTrie to write the modified storage trie into a database.
+// - First you need to obtain a state object.
+// - Account values as well as storages can be accessed and modified through the object.
+// - Finally, call commit to return the changes of storage trie and update account data.
type stateObject struct {
- address common.Address
- addrHash common.Hash // hash of ethereum address of the account
- // dataLock protects the [data] field to prevent a race condition
- // in the transaction pool tests. TODO remove after re-implementing
- // tx pool to be synchronous.
- dataLock sync.RWMutex
- data types.StateAccount
db *StateDB
+ address common.Address // address of ethereum account
+ addrHash common.Hash // hash of ethereum address of the account
+ origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
+ data types.StateAccount // Account data with all mutations applied in the scope of block
// Write caches.
trie Trie // storage trie, which becomes non-nil on first access
code Code // contract bytecode, which gets set when code is loaded
- originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction
+ originStorage Storage // Storage cache of original entries to dedup rewrites
pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
- dirtyStorage Storage // Storage entries that have been modified in the current transaction execution
+ dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction
// Cache flags.
- // When an object is marked suicided it will be deleted from the trie
- // during the "update" phase of the state transition.
dirtyCode bool // true if the code was updated
- suicided bool
- deleted bool
+
+ // Flag whether the account was marked as self-destructed. The self-destructed account
+ // is still accessible in the scope of same transaction.
+ selfDestructed bool
+
+ // Flag whether the account was marked as deleted. A self-destructed account
+ // or an account that is considered empty will be marked as deleted at the
+ // end of the transaction and is no longer accessible afterwards.
+ deleted bool
+
+ // Flag whether the object was created in the current transaction
+ created bool
}
// empty returns whether the account is considered empty.
@@ -103,21 +107,17 @@ func (s *stateObject) empty() bool {
}
// newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject {
- if data.Balance == nil {
- data.Balance = new(big.Int)
- }
- if data.CodeHash == nil {
- data.CodeHash = types.EmptyCodeHash.Bytes()
- }
- if data.Root == (common.Hash{}) {
- data.Root = types.EmptyRootHash
+func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject {
+ origin := acct
+ if acct == nil {
+ acct = types.NewEmptyStateAccount()
}
return &stateObject{
db: db,
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
- data: data,
+ origin: origin,
+ data: *acct,
originStorage: make(Storage),
pendingStorage: make(Storage),
dirtyStorage: make(Storage),
@@ -129,8 +129,8 @@ func (s *stateObject) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, &s.data)
}
-func (s *stateObject) markSuicided() {
- s.suicided = true
+func (s *stateObject) markSelfdestructed() {
+ s.selfDestructed = true
}
func (s *stateObject) touch() {
@@ -147,17 +147,15 @@ func (s *stateObject) touch() {
// getTrie returns the associated storage trie. The trie will be opened
// if it's not loaded previously. An error will be returned if trie can't
// be loaded.
-func (s *stateObject) getTrie(db Database) (Trie, error) {
+func (s *stateObject) getTrie() (Trie, error) {
if s.trie == nil {
// Try fetching from prefetcher first
- // We don't prefetch empty tries
if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil {
- // When the miner is creating the pending state, there is no
- // prefetcher
+ // When the miner is creating the pending state, there is no prefetcher
s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root)
}
if s.trie == nil {
- tr, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
+ tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root)
if err != nil {
return nil, err
}
@@ -168,18 +166,18 @@ func (s *stateObject) getTrie(db Database) (Trie, error) {
}
// GetState retrieves a value from the account storage trie.
-func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
+func (s *stateObject) GetState(key common.Hash) common.Hash {
// If we have a dirty value for this state entry, return it
value, dirty := s.dirtyStorage[key]
if dirty {
return value
}
// Otherwise return the entry's original value
- return s.GetCommittedState(db, key)
+ return s.GetCommittedState(key)
}
// GetCommittedState retrieves a value from the committed account storage trie.
-func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
+func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
// If we have a pending write or clean cached, return that
if value, pending := s.pendingStorage[key]; pending {
return value
@@ -198,8 +196,9 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If no live objects are available, attempt to use snapshots
var (
- enc []byte
- err error
+ enc []byte
+ err error
+ value common.Hash
)
if s.db.snap != nil {
start := time.Now()
@@ -207,16 +206,23 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if metrics.EnabledExpensive {
s.db.SnapshotStorageReads += time.Since(start)
}
+ if len(enc) > 0 {
+ _, content, _, err := rlp.Split(enc)
+ if err != nil {
+ s.db.setError(err)
+ }
+ value.SetBytes(content)
+ }
}
// If the snapshot is unavailable or reading from it fails, load from the database.
if s.db.snap == nil || err != nil {
start := time.Now()
- tr, err := s.getTrie(db)
+ tr, err := s.getTrie()
if err != nil {
s.db.setError(err)
return common.Hash{}
}
- enc, err = tr.GetStorage(s.address, key.Bytes())
+ val, err := tr.GetStorage(s.address, key.Bytes())
if metrics.EnabledExpensive {
s.db.StorageReads += time.Since(start)
}
@@ -224,23 +230,16 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
s.db.setError(err)
return common.Hash{}
}
- }
- var value common.Hash
- if len(enc) > 0 {
- _, content, _, err := rlp.Split(enc)
- if err != nil {
- s.db.setError(err)
- }
- value.SetBytes(content)
+ value.SetBytes(val)
}
s.originStorage[key] = value
return value
}
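
The reworked read path above resolves a slot in a fixed order: dirty (per-transaction) storage first, then pending (per-block) storage, then the snapshot, and finally the storage trie. A minimal, self-contained sketch of that layering, with plain maps standing in for the snapshot/trie layers (the names here are illustrative, not part of this change):

package main

import "fmt"

// slotStore is a toy stand-in for stateObject's layered storage caches.
type slotStore struct {
	dirty     map[string]string // per-transaction writes
	pending   map[string]string // per-block writes
	committed map[string]string // stand-in for snapshot/trie
}

// get mirrors the lookup order of GetState/GetCommittedState: transaction-local
// values win over block-local ones, which win over committed values.
func (s *slotStore) get(key string) string {
	if v, ok := s.dirty[key]; ok {
		return v
	}
	if v, ok := s.pending[key]; ok {
		return v
	}
	return s.committed[key]
}

func main() {
	s := &slotStore{
		dirty:     map[string]string{"k1": "tx-local"},
		pending:   map[string]string{"k1": "block-local", "k2": "block-local"},
		committed: map[string]string{"k1": "disk", "k2": "disk", "k3": "disk"},
	}
	fmt.Println(s.get("k1"), s.get("k2"), s.get("k3")) // tx-local block-local disk
}
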
// SetState updates a value in account storage.
-func (s *stateObject) SetState(db Database, key, value common.Hash) {
+func (s *stateObject) SetState(key, value common.Hash) {
// If the new value is the same as old, don't set
- prev := s.GetState(db, key)
+ prev := s.GetState(key)
if prev == value {
return
}
@@ -278,7 +277,7 @@ func (s *stateObject) finalise(prefetch bool) {
// updateTrie writes cached storage modifications into the object's storage trie.
// It will return nil if the trie has not been loaded and no changes have been
// made. An error will be returned if the trie can't be loaded/updated correctly.
-func (s *stateObject) updateTrie(db Database) (Trie, error) {
+func (s *stateObject) updateTrie() (Trie, error) {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false) // Don't prefetch anymore, pull directly if need be
if len(s.pendingStorage) == 0 {
@@ -291,9 +290,10 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
// The snapshot storage map for the object
var (
storage map[common.Hash][]byte
+ origin map[common.Hash][]byte
hasher = s.db.hasher
)
- tr, err := s.getTrie(db)
+ tr, err := s.getTrie()
if err != nil {
s.db.setError(err)
return nil, err
@@ -305,9 +305,11 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
if value == s.originStorage[key] {
continue
}
+ prev := s.originStorage[key]
s.originStorage[key] = value
- var v []byte
+ // rlp-encoded value to be used by the snapshot
+ var snapshotVal []byte
if (value == common.Hash{}) {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
@@ -315,25 +317,43 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
}
s.db.StorageDeleted += 1
} else {
+ trimmedVal := common.TrimLeftZeroes(value[:])
// Encoding []byte cannot fail, ok to ignore the error.
- v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
- if err := tr.UpdateStorage(s.address, key[:], v); err != nil {
+ snapshotVal, _ = rlp.EncodeToBytes(trimmedVal)
+ if err := tr.UpdateStorage(s.address, key[:], trimmedVal); err != nil {
s.db.setError(err)
return nil, err
}
s.db.StorageUpdated += 1
}
- // If state snapshotting is active, cache the data til commit
- if s.db.snap != nil {
- if storage == nil {
- // Retrieve the old storage map, if available, create a new one otherwise
- if storage = s.db.snapStorage[s.addrHash]; storage == nil {
- storage = make(map[common.Hash][]byte)
- s.db.snapStorage[s.addrHash] = storage
- }
+ // Cache the mutated storage slots until commit
+ if storage == nil {
+ if storage = s.db.storages[s.addrHash]; storage == nil {
+ storage = make(map[common.Hash][]byte)
+ s.db.storages[s.addrHash] = storage
+ }
+ }
+ khash := crypto.HashData(hasher, key[:])
+ storage[khash] = snapshotVal // snapshotVal will be nil if it's deleted
+
+ // Cache the original value of mutated storage slots
+ if origin == nil {
+ if origin = s.db.storagesOrigin[s.address]; origin == nil {
+ origin = make(map[common.Hash][]byte)
+ s.db.storagesOrigin[s.address] = origin
+ }
+ }
+ // Track the original value of the slot only the first time it is mutated
+ if _, ok := origin[khash]; !ok {
+ if prev == (common.Hash{}) {
+ origin[khash] = nil // nil if it was not present previously
+ } else {
+ // Encoding []byte cannot fail, ok to ignore the error.
+ b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:]))
+ origin[khash] = b
}
- storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted
}
+ // Cache the items for preloading
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
}
if s.db.prefetcher != nil {
@@ -347,8 +367,8 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
// updateRoot sets the trie root to the current root hash of the storage trie.
// An error will be returned if the trie root hash is not computed correctly.
-func (s *stateObject) updateRoot(db Database) {
- tr, err := s.updateTrie(db)
+func (s *stateObject) updateRoot() {
+ tr, err := s.updateTrie()
if err != nil {
return
}
@@ -363,23 +383,29 @@ func (s *stateObject) updateRoot(db Database) {
s.data.Root = tr.Hash()
}
-// commitTrie submits the storage changes into the storage trie and re-computes
-// the root. Besides, all trie changes will be collected in a nodeset and returned.
-func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) {
- tr, err := s.updateTrie(db)
+// commit returns the changes made in the storage trie and updates the account data.
+func (s *stateObject) commit() (*trienode.NodeSet, error) {
+ tr, err := s.updateTrie()
if err != nil {
return nil, err
}
// If nothing changed, don't bother with committing anything
if tr == nil {
+ s.origin = s.data.Copy()
return nil, nil
}
// Track the amount of time wasted on committing the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
}
- root, nodes := tr.Commit(false)
+ root, nodes, err := tr.Commit(false)
+ if err != nil {
+ return nil, err
+ }
s.data.Root = root
+
+ // Update original account data after commit
+ s.origin = s.data.Copy()
return nodes, nil
}
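
With this hunk each state object carries both origin (the account value at the start of the block, nil if the account did not exist) and data (the live value), and commit folds data back into origin once the storage trie is written. A small sketch of that bookkeeping with a simplified account type (illustrative only; not the diff's actual types):

package main

import "fmt"

type account struct{ Nonce uint64 }

// object is a toy stand-in for stateObject's origin/data pair.
type object struct {
	origin *account // value at the start of the block; nil if the account was absent
	data   account  // live, mutable value
}

// commit mimics stateObject.commit: once the trie is written, the live value
// becomes the original value for the next block.
func (o *object) commit() {
	cpy := o.data
	o.origin = &cpy
}

func main() {
	obj := &object{data: account{}} // newly created account: origin == nil
	obj.data.Nonce = 3
	fmt.Println(obj.origin == nil, obj.data.Nonce) // true 3
	obj.commit()
	fmt.Println(obj.origin.Nonce) // 3
}
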
@@ -419,18 +445,24 @@ func (s *stateObject) setBalance(amount *big.Int) {
}
func (s *stateObject) deepCopy(db *StateDB) *stateObject {
- stateObject := newObject(db, s.address, s.data)
+ obj := &stateObject{
+ db: db,
+ address: s.address,
+ addrHash: s.addrHash,
+ origin: s.origin,
+ data: s.data,
+ }
if s.trie != nil {
- stateObject.trie = db.db.CopyTrie(s.trie)
+ obj.trie = db.db.CopyTrie(s.trie)
}
- stateObject.code = s.code
- stateObject.dirtyStorage = s.dirtyStorage.Copy()
- stateObject.originStorage = s.originStorage.Copy()
- stateObject.pendingStorage = s.pendingStorage.Copy()
- stateObject.suicided = s.suicided
- stateObject.dirtyCode = s.dirtyCode
- stateObject.deleted = s.deleted
- return stateObject
+ obj.code = s.code
+ obj.dirtyStorage = s.dirtyStorage.Copy()
+ obj.originStorage = s.originStorage.Copy()
+ obj.pendingStorage = s.pendingStorage.Copy()
+ obj.selfDestructed = s.selfDestructed
+ obj.dirtyCode = s.dirtyCode
+ obj.deleted = s.deleted
+ return obj
}
//
@@ -443,14 +475,14 @@ func (s *stateObject) Address() common.Address {
}
// Code returns the contract code associated with this object, if any.
-func (s *stateObject) Code(db Database) []byte {
+func (s *stateObject) Code() []byte {
if s.code != nil {
return s.code
}
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
return nil
}
- code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash()))
+ code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash()))
if err != nil {
s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
}
@@ -461,14 +493,14 @@ func (s *stateObject) Code(db Database) []byte {
// CodeSize returns the size of the contract code associated with this object,
// or zero if none. This method is an almost mirror of Code, but uses a cache
// inside the database to avoid loading codes seen recently.
-func (s *stateObject) CodeSize(db Database) int {
+func (s *stateObject) CodeSize() int {
if s.code != nil {
return len(s.code)
}
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
return 0
}
- size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash()))
+ size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash()))
if err != nil {
s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
}
@@ -476,7 +508,7 @@ func (s *stateObject) CodeSize(db Database) int {
}
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
- prevcode := s.Code(s.db.db)
+ prevcode := s.Code()
s.db.journal.append(codeChange{
account: &s.address,
prevhash: s.CodeHash(),
@@ -500,8 +532,6 @@ func (s *stateObject) SetNonce(nonce uint64) {
}
func (s *stateObject) setNonce(nonce uint64) {
- s.dataLock.Lock()
- defer s.dataLock.Unlock()
s.data.Nonce = nonce
}
@@ -514,7 +544,5 @@ func (s *stateObject) Balance() *big.Int {
}
func (s *stateObject) Nonce() uint64 {
- s.dataLock.RLock()
- defer s.dataLock.RUnlock()
return s.data.Nonce
}
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 96d9570cff..547a599611 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -40,21 +40,22 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
)
-type stateTest struct {
+type stateEnv struct {
db ethdb.Database
state *StateDB
}
-func newStateTest() *stateTest {
+func newStateEnv() *stateEnv {
db := rawdb.NewMemoryDatabase()
sdb, _ := New(types.EmptyRootHash, NewDatabase(db), nil)
- return &stateTest{db: db, state: sdb}
+ return &stateEnv{db: db, state: sdb}
}
func TestIterativeDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- sdb, _ := New(types.EmptyRootHash, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
- s := &stateTest{db: db, state: sdb}
+ tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+ sdb, _ := New(types.EmptyRootHash, tdb, nil)
+ s := &stateEnv{db: db, state: sdb}
// generate a few entries
obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
@@ -69,7 +70,8 @@ func TestIterativeDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
- s.state.Commit(false, false)
+ root, _ := s.state.Commit(0, false, false)
+ s.state, _ = New(root, tdb, nil)
b := &bytes.Buffer{}
s.state.IterativeDump(nil, json.NewEncoder(b))
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 3481669989..6fc489c228 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -42,6 +42,7 @@ import (
"github.com/ava-labs/subnet-evm/predicate"
"github.com/ava-labs/subnet-evm/trie"
"github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -67,27 +68,38 @@ func (n *proofList) Delete(key []byte) error {
// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
+//
// * Contracts
// * Accounts
+//
+// Once the state is committed, tries cached in stateDB (including account
+// trie, storage tries) will no longer be functional. A new state instance
+// must be created with new root and updated database for accessing post-
+// commit states.
type StateDB struct {
db Database
prefetcher *triePrefetcher
trie Trie
hasher crypto.KeccakState
+ snap snapshot.Snapshot // Nil if snapshot is not available
// originalRoot is the pre-state root, before any changes were made.
// It will be updated when the Commit is called.
originalRoot common.Hash
- snap snapshot.Snapshot
- snapAccounts map[common.Hash][]byte
- snapStorage map[common.Hash]map[common.Hash][]byte
+ // These maps hold the state changes (including the corresponding
+ // original value) that occurred in this **block**.
+ accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding
+ storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format
+ accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding
+ storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format
- // This map holds 'live' objects, which will get modified while processing a state transition.
+ // This map holds 'live' objects, which will get modified while processing
+ // a state transition.
stateObjects map[common.Address]*stateObject
- stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
- stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
- stateObjectsDestruct map[common.Address]struct{} // State objects destructed in the block
+ stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
+ stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value
// DB error.
// State objects are used by the consensus core and VM which are
@@ -101,11 +113,13 @@ type StateDB struct {
// The refund counter, also used by state transitioning.
refund uint64
+ // The tx context and all logs that occurred in the scope of the transaction.
thash common.Hash
txIndex int
logs map[common.Hash][]*types.Log
logSize uint
+ // Preimages seen by the VM in the scope of the block.
preimages map[common.Hash][]byte
// Per-transaction access list
@@ -165,10 +179,14 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St
db: db,
trie: tr,
originalRoot: root,
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountsOrigin: make(map[common.Address][]byte),
+ storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
stateObjects: make(map[common.Address]*stateObject),
stateObjectsPending: make(map[common.Address]struct{}),
stateObjectsDirty: make(map[common.Address]struct{}),
- stateObjectsDestruct: make(map[common.Address]struct{}),
+ stateObjectsDestruct: make(map[common.Address]*types.StateAccount),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
journal: newJournal(),
@@ -182,8 +200,6 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St
return nil, fmt.Errorf("cannot create new statedb for root: %s, using snapshot with mismatched root: %s", root, snap.Root().Hex())
}
sdb.snap = snap
- sdb.snapAccounts = make(map[common.Hash][]byte)
- sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
}
return sdb, nil
}
@@ -296,7 +312,7 @@ func (s *StateDB) AddRefund(gas uint64) {
}
// SubRefund removes gas from the refund counter.
-// This method will panic if the refund counter goes below zero
+// This method will set the refund counter to 0 if the gas is greater than the current refund.
func (s *StateDB) SubRefund(gas uint64) {
s.journal.append(refundChange{prev: s.refund})
if gas > s.refund {
@@ -308,7 +324,7 @@ func (s *StateDB) SubRefund(gas uint64) {
}
// Exist reports whether the given account address exists in the state.
-// Notably this also returns true for suicided accounts.
+// Notably this also returns true for self-destructed accounts.
func (s *StateDB) Exist(addr common.Address) bool {
return s.getStateObject(addr) != nil
}
@@ -346,7 +362,7 @@ func (s *StateDB) TxIndex() int {
func (s *StateDB) GetCode(addr common.Address) []byte {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.Code(s.db)
+ return stateObject.Code()
}
return nil
}
@@ -354,7 +370,7 @@ func (s *StateDB) GetCode(addr common.Address) []byte {
func (s *StateDB) GetCodeSize(addr common.Address) int {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.CodeSize(s.db)
+ return stateObject.CodeSize()
}
return 0
}
@@ -371,7 +387,7 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.GetState(s.db, hash)
+ return stateObject.GetState(hash)
}
return common.Hash{}
}
@@ -384,7 +400,7 @@ func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) {
// GetProofByHash returns the Merkle proof for a given account.
func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) {
var proof proofList
- err := s.trie.Prove(addrHash[:], 0, &proof)
+ err := s.trie.Prove(addrHash[:], &proof)
return proof, err
}
@@ -398,7 +414,7 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte,
return nil, errors.New("storage trie for requested address does not exist")
}
var proof proofList
- err = trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+ err = trie.Prove(crypto.Keccak256(key.Bytes()), &proof)
if err != nil {
return nil, err
}
@@ -409,7 +425,7 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte,
func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.GetCommittedState(s.db, hash)
+ return stateObject.GetCommittedState(hash)
}
return common.Hash{}
}
@@ -428,16 +444,16 @@ func (s *StateDB) StorageTrie(addr common.Address) (Trie, error) {
return nil, nil
}
cpy := stateObject.deepCopy(s)
- if _, err := cpy.updateTrie(s.db); err != nil {
+ if _, err := cpy.updateTrie(); err != nil {
return nil, err
}
- return cpy.getTrie(s.db)
+ return cpy.getTrie()
}
-func (s *StateDB) HasSuicided(addr common.Address) bool {
+func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.suicided
+ return stateObject.selfDestructed
}
return false
}
@@ -486,44 +502,59 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) {
func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- stateObject.SetState(s.db, key, value)
+ stateObject.SetState(key, value)
}
}
// SetStorage replaces the entire storage for the specified account with given
-// storage. This function should only be used for debugging.
+// storage. This function should only be used for debugging and the mutations
+// must be discarded afterwards.
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
// SetStorage needs to wipe existing storage. We achieve this by pretending
// that the account self-destructed earlier in this block, by flagging
// it in stateObjectsDestruct. The effect of doing so is that storage lookups
// will not hit disk, since it is assumed that the disk-data is belonging
// to a previous incarnation of the object.
- s.stateObjectsDestruct[addr] = struct{}{}
+ //
+ // TODO(rjl493456442) this function should only be supported by 'unwritable'
+ // state and all mutations made should be discarded afterwards.
+ if _, ok := s.stateObjectsDestruct[addr]; !ok {
+ s.stateObjectsDestruct[addr] = nil
+ }
stateObject := s.GetOrNewStateObject(addr)
for k, v := range storage {
- stateObject.SetState(s.db, k, v)
+ stateObject.SetState(k, v)
}
}
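
Because SetStorage now records the account in stateObjectsDestruct, any slot not explicitly supplied reads as empty afterwards, even if an older incarnation of the account had it on disk. A hedged usage sketch against the package's exported API, mirroring how the tests construct an in-memory state (the address and values are made up):

package main

import (
	"fmt"

	"github.com/ava-labs/subnet-evm/core/rawdb"
	"github.com/ava-labs/subnet-evm/core/state"
	"github.com/ava-labs/subnet-evm/core/types"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Fresh in-memory state, as in the package's own tests.
	sdb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	addr := common.HexToAddress("0x01")

	// Debug-only: replace the account's entire storage with a single slot.
	sdb.SetStorage(addr, map[common.Hash]common.Hash{
		common.HexToHash("0x01"): common.HexToHash("0xaa"),
	})

	fmt.Println(sdb.GetState(addr, common.HexToHash("0x01"))) // the supplied value
	fmt.Println(sdb.GetState(addr, common.HexToHash("0x02"))) // zero hash: old slots are hidden
}
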
-// Suicide marks the given account as suicided.
+// SelfDestruct marks the given account as selfdestructed.
// This clears the account balance.
//
// The account's state object is still available until the state is committed,
-// getStateObject will return a non-nil account after Suicide.
-func (s *StateDB) Suicide(addr common.Address) bool {
+// getStateObject will return a non-nil account after SelfDestruct.
+func (s *StateDB) SelfDestruct(addr common.Address) {
stateObject := s.getStateObject(addr)
if stateObject == nil {
- return false
+ return
}
- s.journal.append(suicideChange{
+ s.journal.append(selfDestructChange{
account: &addr,
- prev: stateObject.suicided,
+ prev: stateObject.selfDestructed,
prevbalance: new(big.Int).Set(stateObject.Balance()),
})
- stateObject.markSuicided()
+ stateObject.markSelfdestructed()
stateObject.data.Balance = new(big.Int)
+}
+
+func (s *StateDB) Selfdestruct6780(addr common.Address) {
+ stateObject := s.getStateObject(addr)
+ if stateObject == nil {
+ return
+ }
- return true
+ if stateObject.created {
+ s.SelfDestruct(addr)
+ }
}
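
Selfdestruct6780 is the EIP-6780 flavour: the account is only really marked for deletion when it was created in the current scope (the new created flag, which Finalise clears at the transaction boundary); for pre-existing accounts the call is effectively a no-op here. A minimal sketch of that gate with simplified, illustrative types:

package main

import "fmt"

// acct is a toy stand-in for stateObject's created/selfDestructed flags.
type acct struct {
	created        bool // set when the object is (re)created in the current scope
	selfDestructed bool
}

// selfdestruct6780 mirrors the gate above: only accounts created in the same
// scope are actually marked as self-destructed.
func selfdestruct6780(a *acct) {
	if a == nil {
		return
	}
	if a.created {
		a.selfDestructed = true
	}
}

func main() {
	fresh := &acct{created: true}
	old := &acct{}
	selfdestruct6780(fresh)
	selfdestruct6780(old)
	fmt.Println(fresh.selfDestructed, old.selfDestructed) // true false
}
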
// SetTransientState sets transient storage for a given account. It
@@ -568,13 +599,24 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}
-
- // If state snapshotting is active, cache the data til commit. Note, this
- // update mechanism is not symmetric to the deletion, because whereas it is
- // enough to track account updates at commit time, deletions need tracking
- // at transaction boundary level to ensure we capture state clearing.
- if s.snap != nil {
- s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ if obj.dirtyCode {
+ s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
+ }
+ // Cache the data until commit. Note, this update mechanism is not symmetric
+ // to the deletion, because whereas it is enough to track account updates
+ // at commit time, deletions need tracking at transaction boundary level to
+ // ensure we capture state clearing.
+ s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
+
+ // Track the original value of mutated account, nil means it was not present.
+ // Skip if it has been tracked (because updateStateObject may be called
+ // multiple times in a block).
+ if _, ok := s.accountsOrigin[obj.address]; !ok {
+ if obj.origin == nil {
+ s.accountsOrigin[obj.address] = nil
+ } else {
+ s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin)
+ }
}
}
@@ -653,7 +695,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
}
// Insert into the live set
- obj := newObject(s, addr, *data)
+ obj := newObject(s, addr, data)
s.setStateObject(obj)
return obj
}
@@ -675,20 +717,40 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
// the given address, it is overwritten and returned as the second return value.
func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
-
- var prevdestruct bool
- if prev != nil {
- _, prevdestruct = s.stateObjectsDestruct[prev.address]
- if !prevdestruct {
- s.stateObjectsDestruct[prev.address] = struct{}{}
- }
- }
- newobj = newObject(s, addr, types.StateAccount{})
+ newobj = newObject(s, addr, nil)
if prev == nil {
s.journal.append(createObjectChange{account: &addr})
} else {
- s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ // The original account should be marked as destructed and all cached
+ // account and storage data should be cleared as well. Note, it must
+ // be done here, otherwise the destruction event of "original account"
+ // will be lost.
+ _, prevdestruct := s.stateObjectsDestruct[prev.address]
+ if !prevdestruct {
+ s.stateObjectsDestruct[prev.address] = prev.origin
+ }
+ // There may already be some cached account/storage data, since IntermediateRoot
+ // is called for each transaction before the Byzantium fork and always caches
+ // the latest account/storage data.
+ prevAccount, ok := s.accountsOrigin[prev.address]
+ s.journal.append(resetObjectChange{
+ account: &addr,
+ prev: prev,
+ prevdestruct: prevdestruct,
+ prevAccount: s.accounts[prev.addrHash],
+ prevStorage: s.storages[prev.addrHash],
+ prevAccountOriginExist: ok,
+ prevAccountOrigin: prevAccount,
+ prevStorageOrigin: s.storagesOrigin[prev.address],
+ })
+ delete(s.accounts, prev.addrHash)
+ delete(s.storages, prev.addrHash)
+ delete(s.accountsOrigin, prev.address)
+ delete(s.storagesOrigin, prev.address)
}
+
+ newobj.created = true
+
s.setStateObject(newobj)
if prev != nil && !prev.deleted {
return newobj, prev
@@ -713,19 +775,23 @@ func (s *StateDB) CreateAccount(addr common.Address) {
}
}
-func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
- so := db.getStateObject(addr)
+func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
+ so := s.getStateObject(addr)
if so == nil {
return nil
}
- tr, err := so.getTrie(db.db)
+ tr, err := so.getTrie()
+ if err != nil {
+ return err
+ }
+ trieIt, err := tr.NodeIterator(nil)
if err != nil {
return err
}
- it := trie.NewIterator(tr.NodeIterator(nil))
+ it := trie.NewIterator(trieIt)
for it.Next() {
- key := common.BytesToHash(db.trie.GetKey(it.Key))
+ key := common.BytesToHash(s.trie.GetKey(it.Key))
if value, dirty := so.dirtyStorage[key]; dirty {
if !cb(key, value) {
return nil
@@ -767,16 +833,26 @@ func (s *StateDB) Copy() *StateDB {
db: s.db,
trie: s.db.CopyTrie(s.trie),
originalRoot: s.originalRoot,
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountsOrigin: make(map[common.Address][]byte),
+ storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
- stateObjectsDestruct: make(map[common.Address]struct{}, len(s.stateObjectsDestruct)),
+ stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)),
refund: s.refund,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
preimages: make(map[common.Hash][]byte, len(s.preimages)),
journal: newJournal(),
hasher: crypto.NewKeccakState(),
+
+ // In order for the block producer to be able to use and make additions
+ // to the snapshot tree, we need to copy that as well. Otherwise, any
+ // block mined by ourselves will cause gaps in the tree, and force the
+ // miner to operate trie-backed only.
+ snap: s.snap,
}
// Copy the dirty states, logs, and preimages
for addr := range s.journal.dirties {
@@ -810,10 +886,18 @@ func (s *StateDB) Copy() *StateDB {
}
state.stateObjectsDirty[addr] = struct{}{}
}
- // Deep copy the destruction flag.
- for addr := range s.stateObjectsDestruct {
- state.stateObjectsDestruct[addr] = struct{}{}
+ // Deep copy the destruction markers.
+ for addr, value := range s.stateObjectsDestruct {
+ state.stateObjectsDestruct[addr] = value
}
+ // Deep copy the state changes made in the scope of the block
+ // along with their original values.
+ state.accounts = copySet(s.accounts)
+ state.storages = copy2DSet(s.storages)
+ state.accountsOrigin = copySet(s.accountsOrigin)
+ state.storagesOrigin = copy2DSet(s.storagesOrigin)
+
+ // Deep copy the logs that occurred in the scope of the block
for hash, logs := range s.logs {
cpy := make([]*types.Log, len(logs))
for i, l := range logs {
@@ -822,6 +906,7 @@ func (s *StateDB) Copy() *StateDB {
}
state.logs[hash] = cpy
}
+ // Deep copy the preimages that occurred in the scope of the block
for hash, preimage := range s.preimages {
state.preimages[hash] = preimage
}
@@ -841,27 +926,6 @@ func (s *StateDB) Copy() *StateDB {
if s.prefetcher != nil {
state.prefetcher = s.prefetcher.copy()
}
- if s.snap != nil {
- // In order for the miner to be able to use and make additions
- // to the snapshot tree, we need to copy that as well.
- // Otherwise, any block mined by ourselves will cause gaps in the tree,
- // and force the miner to operate trie-backed only
- state.snap = s.snap
-
- // deep copy needed
- state.snapAccounts = make(map[common.Hash][]byte, len(s.snapAccounts))
- for k, v := range s.snapAccounts {
- state.snapAccounts[k] = v
- }
- state.snapStorage = make(map[common.Hash]map[common.Hash][]byte, len(s.snapStorage))
- for k, v := range s.snapStorage {
- temp := make(map[common.Hash][]byte, len(v))
- for kk, vv := range v {
- temp[kk] = vv
- }
- state.snapStorage[k] = temp
- }
- }
return state
}
@@ -910,24 +974,26 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// Thus, we can safely ignore it here
continue
}
- if obj.suicided || (deleteEmptyObjects && obj.empty()) {
+ if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
obj.deleted = true
// We need to maintain account deletions explicitly (will remain
- // set indefinitely).
- s.stateObjectsDestruct[obj.address] = struct{}{}
-
- // If state snapshotting is active, also mark the destruction there.
+ // set indefinitely). Note that only the first self-destruct event
+ // is tracked.
+ if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
+ s.stateObjectsDestruct[obj.address] = obj.origin
+ }
// Note, we can't do this only at the end of a block because multiple
// transactions within the same block might self destruct and then
// resurrect an account; but the snapshotter needs both events.
- if s.snap != nil {
- delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
- delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
- }
+ delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
+ delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
} else {
obj.finalise(true) // Prefetch slots in the background
}
+ obj.created = false
s.stateObjectsPending[addr] = struct{}{}
s.stateObjectsDirty[addr] = struct{}{}
@@ -971,7 +1037,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// to pull useful data from disk.
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted {
- obj.updateRoot(s.db)
+ obj.updateRoot()
}
}
// Now we're about to start to write changes to the trie. The trie is so far
@@ -1022,19 +1088,156 @@ func (s *StateDB) clearJournalAndRefund() {
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}
+// deleteStorage iterates the storage trie belonging to the account and marks
+// all slots inside as deleted.
+func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
+ start := time.Now()
+ tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root)
+ if err != nil {
+ return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+ }
+ it, err := tr.NodeIterator(nil)
+ if err != nil {
+ return false, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
+ }
+ var (
+ set = trienode.NewNodeSet(addrHash)
+ slots = make(map[common.Hash][]byte)
+ stateSize common.StorageSize
+ nodeSize common.StorageSize
+ )
+ for it.Next(true) {
+ // arbitrary stateSize limit, make it configurable
+ if stateSize+nodeSize > 512*1024*1024 {
+ log.Info("Skip large storage deletion", "address", addr.Hex(), "states", stateSize, "nodes", nodeSize)
+ if metrics.EnabledExpensive {
+ slotDeletionSkip.Inc(1)
+ }
+ return true, nil, nil, nil
+ }
+ if it.Leaf() {
+ slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
+ stateSize += common.StorageSize(common.HashLength + len(it.LeafBlob()))
+ continue
+ }
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ nodeSize += common.StorageSize(len(it.Path()))
+ set.AddNode(it.Path(), trienode.NewDeleted())
+ }
+ if err := it.Error(); err != nil {
+ return false, nil, nil, err
+ }
+ if metrics.EnabledExpensive {
+ if int64(len(slots)) > slotDeletionMaxCount.Value() {
+ slotDeletionMaxCount.Update(int64(len(slots)))
+ }
+ if int64(stateSize+nodeSize) > slotDeletionMaxSize.Value() {
+ slotDeletionMaxSize.Update(int64(stateSize + nodeSize))
+ }
+ slotDeletionTimer.UpdateSince(start)
+ slotDeletionCount.Mark(int64(len(slots)))
+ slotDeletionSize.Mark(int64(stateSize + nodeSize))
+ }
+ return false, slots, set, nil
+}
+
+// handleDestruction processes all destruction markers and deletes the account
+// and associated storage slots if necessary. There are four possible situations
+// here:
+//
+// - (a) the account did not exist and is marked as destructed
+//
+// - (b) the account did not exist and is marked as destructed,
+// however, it is resurrected later in the same block.
+//
+// - (c) the account existed and is marked as destructed
+//
+// - (d) the account existed and is marked as destructed,
+// however, it is resurrected later in the same block.
+//
+// In case (a), nothing needs to be deleted; the nil-to-nil transition can be ignored.
+//
+// In case (b), nothing needs to be deleted; nil is used as the original value for
+// the newly created account and storages.
+//
+// In case (c), the **original** account along with its storages should be deleted,
+// with their values tracked as the original values.
+//
+// In case (d), the **original** account along with its storages should be deleted,
+// with their values tracked as the original values.
+func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) {
+ incomplete := make(map[common.Address]struct{})
+ for addr, prev := range s.stateObjectsDestruct {
+ // The original account was non-existing, and it's marked as destructed
+ // in the scope of block. It can be case (a) or (b).
+ // - for (a), skip it without doing anything.
+ // - for (b), track account's original value as nil. It may overwrite
+ // the data cached in s.accountsOrigin set by 'updateStateObject'.
+ addrHash := crypto.Keccak256Hash(addr[:])
+ if prev == nil {
+ if _, ok := s.accounts[addrHash]; ok {
+ s.accountsOrigin[addr] = nil // case (b)
+ }
+ continue
+ }
+ // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'.
+ s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d)
+
+ // Short circuit if the storage was empty.
+ if prev.Root == types.EmptyRootHash {
+ continue
+ }
+ // Remove storage slots belong to the account.
+ aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
+ if err != nil {
+ return nil, fmt.Errorf("failed to delete storage, err: %w", err)
+ }
+ // The storage is too huge to handle, skip it but mark as incomplete.
+ // For case (d), the account might be resurrected with a few slots
+ // created. In this case, wipe the entire storage state diff because
+ // of the aborted deletion.
+ if aborted {
+ incomplete[addr] = struct{}{}
+ delete(s.storagesOrigin, addr)
+ continue
+ }
+ if s.storagesOrigin[addr] == nil {
+ s.storagesOrigin[addr] = slots
+ } else {
+ // It can overwrite the data in s.storagesOrigin[addrHash] set by
+ // 'object.updateTrie'.
+ for key, val := range slots {
+ s.storagesOrigin[addr][key] = val
+ }
+ }
+ if err := nodes.Merge(set); err != nil {
+ return nil, err
+ }
+ }
+ return incomplete, nil
+}
+
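
The four cases in the comment above boil down to two questions: did the account exist before the block (prev != nil), and was it recreated later in the same block (an entry in s.accounts). A tiny classification sketch of that decision, using plain booleans instead of the real maps (illustrative only):

package main

import "fmt"

// classify mirrors the case analysis in handleDestruction: 'existed' is whether
// the destructed account had a pre-block value, 'resurrected' is whether it was
// recreated later in the same block.
func classify(existed, resurrected bool) string {
	switch {
	case !existed && !resurrected:
		return "(a) nothing to delete, nil-to-nil transition"
	case !existed && resurrected:
		return "(b) nothing to delete, record nil as the original value"
	case existed && !resurrected:
		return "(c) delete the original account and storage, track originals"
	default:
		return "(d) delete the original account and storage, track originals (or mark incomplete if too large)"
	}
}

func main() {
	for _, c := range [][2]bool{{false, false}, {false, true}, {true, false}, {true, true}} {
		fmt.Println(classify(c[0], c[1]))
	}
}
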
// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
- return s.commit(deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
+ return s.commit(block, deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
}
// CommitWithSnap writes the state to the underlying in-memory trie database and
// generates a snapshot layer for the newly committed state.
-func (s *StateDB) CommitWithSnap(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
- return s.commit(deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
+func (s *StateDB) CommitWithSnap(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
+ return s.commit(block, deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
}
-// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
+// Once the state is committed, tries cached in stateDB (including account
+// trie, storage tries) will no longer be functional. A new state instance
+// must be created with new root and updated database for accessing post-
+// commit states.
+//
+// The associated block number of the state transition is also provided
+// for more chain context.
+func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1051,37 +1254,38 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
nodes = trienode.NewMergedNodeSet()
codeWriter = s.db.DiskDB().NewBatch()
)
+ // Handle all state deletions first
+ incomplete, err := s.handleDestruction(nodes)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Handle all state updates afterwards
for addr := range s.stateObjectsDirty {
- if obj := s.stateObjects[addr]; !obj.deleted {
- // Write any contract code associated with the state object
- if obj.code != nil && obj.dirtyCode {
- rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
- obj.dirtyCode = false
- }
- // Write any storage changes in the state object to its storage trie
- set, err := obj.commitTrie(s.db)
- if err != nil {
+ obj := s.stateObjects[addr]
+ if obj.deleted {
+ continue
+ }
+ // Write any contract code associated with the state object
+ if obj.code != nil && obj.dirtyCode {
+ rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+ obj.dirtyCode = false
+ }
+ // Write any storage changes in the state object to its storage trie
+ set, err := obj.commit()
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Merge the dirty nodes of storage trie into global set. It is possible
+ // that the account was destructed and then resurrected in the same block.
+ // In this case, the node set is shared by both accounts.
+ if set != nil {
+ if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
}
- // Merge the dirty nodes of storage trie into global set.
- if set != nil {
- if err := nodes.Merge(set); err != nil {
- return common.Hash{}, err
- }
- updates, deleted := set.Size()
- storageTrieNodesUpdated += updates
- storageTrieNodesDeleted += deleted
- }
+ updates, deleted := set.Size()
+ storageTrieNodesUpdated += updates
+ storageTrieNodesDeleted += deleted
}
- // If the contract is destructed, the storage is still left in the
- // database as dangling data. Theoretically it's should be wiped from
- // database as well, but in hash-based-scheme it's extremely hard to
- // determine that if the trie nodes are also referenced by other storage,
- // and in path-based-scheme some technical challenges are still unsolved.
- // Although it won't affect the correctness but please fix it TODO(rjl493456442).
- }
- if len(s.stateObjectsDirty) > 0 {
- s.stateObjectsDirty = make(map[common.Address]struct{})
}
if codeWriter.ValueSize() > 0 {
if err := codeWriter.Write(); err != nil {
@@ -1093,7 +1297,10 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
if metrics.EnabledExpensive {
start = time.Now()
}
- root, set := s.trie.Commit(true)
+ root, set, err := s.trie.Commit(true)
+ if err != nil {
+ return common.Hash{}, err
+ }
// Merge the dirty nodes of account trie into global set
if set != nil {
if err := nodes.Merge(set); err != nil {
@@ -1121,16 +1328,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
if s.snap == nil {
log.Error(fmt.Sprintf("cannot commit with snaps without a pre-existing snap layer, parentHash: %s, blockHash: %s", parentHash, blockHash))
}
- if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.snapAccounts, s.snapStorage); err != nil {
+ if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
log.Warn("Failed to update snapshot tree", "to", root, "err", err)
}
if metrics.EnabledExpensive {
s.SnapshotCommits += time.Since(start)
}
- s.snap, s.snapAccounts, s.snapStorage = nil, nil, nil
- }
- if len(s.stateObjectsDestruct) > 0 {
- s.stateObjectsDestruct = make(map[common.Address]struct{})
+ s.snap = nil
}
if root == (common.Hash{}) {
root = types.EmptyRootHash
@@ -1142,11 +1346,11 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
if root != origin {
start := time.Now()
if referenceRoot {
- if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, nodes); err != nil {
+ if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil {
return common.Hash{}, err
}
} else {
- if err := s.db.TrieDB().Update(root, origin, nodes); err != nil {
+ if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil {
return common.Hash{}, err
}
}
@@ -1155,6 +1359,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
s.TrieDBCommits += time.Since(start)
}
}
+ // Clear all internal flags at the end of commit operation.
+ s.accounts = make(map[common.Hash][]byte)
+ s.storages = make(map[common.Hash]map[common.Hash][]byte)
+ s.accountsOrigin = make(map[common.Address][]byte)
+ s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
+ s.stateObjectsDirty = make(map[common.Address]struct{})
+ s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount)
return root, nil
}
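
Every caller now has to pass the block number of the state transition, which commit forwards to the trie database together with the original account/storage values (via triestate.New). A hedged usage sketch of the new call shape, following the pattern the updated tests use (the balance and height are made up):

package main

import (
	"fmt"
	"math/big"

	"github.com/ava-labs/subnet-evm/core/rawdb"
	"github.com/ava-labs/subnet-evm/core/state"
	"github.com/ava-labs/subnet-evm/core/types"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	db := state.NewDatabase(rawdb.NewMemoryDatabase())
	sdb, _ := state.New(types.EmptyRootHash, db, nil)
	addr := common.HexToAddress("0x01")
	sdb.SetBalance(addr, big.NewInt(42))

	// The first argument is the block number of the transition (previously absent).
	root, err := sdb.Commit(1, false, false)
	if err != nil {
		panic(err)
	}
	// Post-commit, the old StateDB's cached tries are no longer functional;
	// reopen at the new root to read the committed state.
	sdb, _ = state.New(root, db, nil)
	fmt.Println(sdb.GetBalance(addr)) // 42
}
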
@@ -1263,7 +1474,7 @@ func (s *StateDB) GetPredicateStorageSlots(address common.Address, index int) ([
}
// convertAccountSet converts a provided account set from address keyed to hash keyed.
-func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common.Hash]struct{} {
+func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} {
ret := make(map[common.Hash]struct{}, len(set))
for addr := range set {
obj, exist := s.stateObjects[addr]
@@ -1280,3 +1491,24 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common.
func (s *StateDB) SetPredicateStorageSlots(address common.Address, predicates [][]byte) {
s.predicateStorageSlots[address] = predicates
}
+
+// copySet returns a deep-copied set.
+func copySet[k comparable](set map[k][]byte) map[k][]byte {
+ copied := make(map[k][]byte, len(set))
+ for key, val := range set {
+ copied[key] = common.CopyBytes(val)
+ }
+ return copied
+}
+
+// copy2DSet returns a two-dimensional deep-copied set.
+func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte {
+ copied := make(map[k]map[common.Hash][]byte, len(set))
+ for addr, subset := range set {
+ copied[addr] = make(map[common.Hash][]byte, len(subset))
+ for key, val := range subset {
+ copied[addr][key] = common.CopyBytes(val)
+ }
+ }
+ return copied
+}
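
copySet and copy2DSet deep-copy the byte slices as well as the map headers, so a copied StateDB cannot alias the originals' change sets. A small in-package test sketch of that property (hypothetical test, assumed to live in package state next to the helpers above):

func TestCopySetIsDeep(t *testing.T) {
	src := map[common.Hash][]byte{{0x01}: {0xaa}}
	cpy := copySet(src)

	// Mutating the source slice must not leak into the copy.
	src[common.Hash{0x01}][0] = 0xbb
	if cpy[common.Hash{0x01}][0] != 0xaa {
		t.Fatal("copySet aliased the underlying slice")
	}
}
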
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
new file mode 100644
index 0000000000..60c2d1df82
--- /dev/null
+++ b/core/state/statedb_fuzz_test.go
@@ -0,0 +1,386 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "testing/quick"
+
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// A stateTest checks that the state changes are correctly captured. Instances
+// of this test with pseudorandom content are created by Generate.
+//
+// The test works as follows:
+//
+// A list of states is created by applying actions. The state changes between
+// each state instance are tracked and verified.
+type stateTest struct {
+ addrs []common.Address // all account addresses
+ actions [][]testAction // modifications to the state, grouped by block
+ chunk int // The number of actions per chunk
+ err error // failure details are reported through this field
+}
+
+// newStateTestAction creates a random action that changes state.
+func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction {
+ actions := []testAction{
+ {
+ name: "SetBalance",
+ fn: func(a testAction, s *StateDB) {
+ s.SetBalance(addr, big.NewInt(a.args[0]))
+ },
+ args: make([]int64, 1),
+ },
+ {
+ name: "SetNonce",
+ fn: func(a testAction, s *StateDB) {
+ s.SetNonce(addr, uint64(a.args[0]))
+ },
+ args: make([]int64, 1),
+ },
+ {
+ name: "SetState",
+ fn: func(a testAction, s *StateDB) {
+ var key, val common.Hash
+ binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
+ binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
+ s.SetState(addr, key, val)
+ },
+ args: make([]int64, 2),
+ },
+ {
+ name: "SetCode",
+ fn: func(a testAction, s *StateDB) {
+ code := make([]byte, 16)
+ binary.BigEndian.PutUint64(code, uint64(a.args[0]))
+ binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
+ s.SetCode(addr, code)
+ },
+ args: make([]int64, 2),
+ },
+ {
+ name: "CreateAccount",
+ fn: func(a testAction, s *StateDB) {
+ s.CreateAccount(addr)
+ },
+ },
+ {
+ name: "Selfdestruct",
+ fn: func(a testAction, s *StateDB) {
+ s.SelfDestruct(addr)
+ },
+ },
+ }
+ var nonRandom = index != -1
+ if index == -1 {
+ index = r.Intn(len(actions))
+ }
+ action := actions[index]
+ var names []string
+ if !action.noAddr {
+ names = append(names, addr.Hex())
+ }
+ for i := range action.args {
+ if nonRandom {
+ action.args[i] = rand.Int63n(10000) + 1 // set balance to non-zero
+ } else {
+ action.args[i] = rand.Int63n(10000)
+ }
+ names = append(names, fmt.Sprint(action.args[i]))
+ }
+ action.name += " " + strings.Join(names, ", ")
+ return action
+}
+
+// Generate returns a new state test of the given size. All randomness is
+// derived from r.
+func (*stateTest) Generate(r *rand.Rand, size int) reflect.Value {
+ addrs := make([]common.Address, 5)
+ for i := range addrs {
+ addrs[i][0] = byte(i)
+ }
+ actions := make([][]testAction, rand.Intn(5)+1)
+
+ for i := 0; i < len(actions); i++ {
+ actions[i] = make([]testAction, size)
+ for j := range actions[i] {
+ if j == 0 {
+ // Always include a set balance action to make sure
+ // the state changes are not empty.
+ actions[i][j] = newStateTestAction(common.HexToAddress("0xdeadbeef"), r, 0)
+ continue
+ }
+ actions[i][j] = newStateTestAction(addrs[r.Intn(len(addrs))], r, -1)
+ }
+ }
+ chunk := int(math.Sqrt(float64(size)))
+ if size > 0 && chunk == 0 {
+ chunk = 1
+ }
+ return reflect.ValueOf(&stateTest{
+ addrs: addrs,
+ actions: actions,
+ chunk: chunk,
+ })
+}
+
+func (test *stateTest) String() string {
+ out := new(bytes.Buffer)
+ for i, actions := range test.actions {
+ fmt.Fprintf(out, "---- block %d ----\n", i)
+ for j, action := range actions {
+ if j%test.chunk == 0 {
+ fmt.Fprintf(out, "---- transaction %d ----\n", j/test.chunk)
+ }
+ fmt.Fprintf(out, "%4d: %s\n", j%test.chunk, action.name)
+ }
+ }
+ return out.String()
+}
+
+func (test *stateTest) run() bool {
+ var (
+ roots []common.Hash
+ accountList []map[common.Address][]byte
+ storageList []map[common.Address]map[common.Hash][]byte
+ onCommit = func(states *triestate.Set) {
+ accountList = append(accountList, copySet(states.Accounts))
+ storageList = append(storageList, copy2DSet(states.Storages))
+ }
+ disk = rawdb.NewMemoryDatabase()
+ tdb = trie.NewDatabaseWithConfig(disk, &trie.Config{OnCommit: onCommit})
+ sdb = NewDatabaseWithNodeDB(disk, tdb)
+ byzantium = rand.Intn(2) == 0
+ )
+ for i, actions := range test.actions {
+ root := types.EmptyRootHash
+ if i != 0 {
+ root = roots[len(roots)-1]
+ }
+ state, err := New(root, sdb, nil)
+ if err != nil {
+ panic(err)
+ }
+ for i, action := range actions {
+ if i%test.chunk == 0 && i != 0 {
+ if byzantium {
+ state.Finalise(true) // call finalise at the transaction boundary
+ } else {
+ state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
+ }
+ }
+ action.fn(action, state)
+ }
+ if byzantium {
+ state.Finalise(true) // call finalise at the transaction boundary
+ } else {
+ state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
+ }
+ nroot, err := state.Commit(0, true, false) // call commit at the block boundary
+ if err != nil {
+ panic(err)
+ }
+ if nroot == root {
+ return true // filter out non-change state transition
+ }
+ roots = append(roots, nroot)
+ }
+ for i := 0; i < len(test.actions); i++ {
+ root := types.EmptyRootHash
+ if i != 0 {
+ root = roots[i-1]
+ }
+ test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i])
+ if test.err != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// verifyAccountCreation is called once the state diff says that a specific
+// account was not present. A series of checks will be performed to ensure
+// the state diff is correct, including:
+//
+// - the account was indeed not present in the old trie
+// - the account is present in the new trie; a nil->nil transition is regarded as invalid
+// - the slots transition is correct
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
+ // Verify account change
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ oBlob, err := otr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ nBlob, err := ntr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ if len(oBlob) != 0 {
+ return fmt.Errorf("unexpected account in old trie, %x", addrHash)
+ }
+ if len(nBlob) == 0 {
+ return fmt.Errorf("missing account in new trie, %x", addrHash)
+ }
+
+ // Verify storage changes
+ var nAcct types.StateAccount
+ if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+ return err
+ }
+ // Account has no slot, empty slot set is expected
+ if nAcct.Root == types.EmptyRootHash {
+ if len(slots) != 0 {
+ return fmt.Errorf("unexpected slot changes %x", addrHash)
+ }
+ return nil
+ }
+ // Account has slots, ensure all new slots are contained
+ st, err := trie.New(trie.StorageTrieID(next, addrHash, nAcct.Root), db)
+ if err != nil {
+ return err
+ }
+ for key, val := range slots {
+ st.Update(key.Bytes(), val)
+ }
+ if st.Hash() != types.EmptyRootHash {
+ return errors.New("invalid slot changes")
+ }
+ return nil
+}
+
+// verifyAccountUpdate is called once the state diff says that a specific
+// account was present. A series of checks will be performed to ensure
+// the state diff is correct, including:
+//
+// - the account was indeed present in the old trie
+// - the account in the old trie matches the provided value
+// - the slots transition is correct
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
+ // Verify account change
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ oBlob, err := otr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ nBlob, err := ntr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ if len(oBlob) == 0 {
+ return fmt.Errorf("missing account in old trie, %x", addrHash)
+ }
+ full, err := types.FullAccountRLP(origin)
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(full, oBlob) {
+ return fmt.Errorf("account value is not matched, %x", addrHash)
+ }
+
+ // Decode accounts
+ var (
+ oAcct types.StateAccount
+ nAcct types.StateAccount
+ nRoot common.Hash
+ )
+ if err := rlp.DecodeBytes(oBlob, &oAcct); err != nil {
+ return err
+ }
+ if len(nBlob) == 0 {
+ nRoot = types.EmptyRootHash
+ } else {
+ if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+ return err
+ }
+ nRoot = nAcct.Root
+ }
+
+ // Verify storage
+ st, err := trie.New(trie.StorageTrieID(next, addrHash, nRoot), db)
+ if err != nil {
+ return err
+ }
+ for key, val := range slots {
+ st.Update(key.Bytes(), val)
+ }
+ if st.Hash() != oAcct.Root {
+ return errors.New("invalid slot changes")
+ }
+ return nil
+}
+
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
+ otr, err := trie.New(trie.StateTrieID(root), db)
+ if err != nil {
+ return err
+ }
+ ntr, err := trie.New(trie.StateTrieID(next), db)
+ if err != nil {
+ return err
+ }
+ for addr, account := range accountsOrigin {
+ var err error
+ if len(account) == 0 {
+ err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr])
+ } else {
+ err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr])
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func TestStateChanges(t *testing.T) {
+ config := &quick.Config{MaxCount: 1000}
+ err := quick.Check((*stateTest).run, config)
+ if cerr, ok := err.(*quick.CheckError); ok {
+ test := cerr.In[0].(*stateTest)
+ t.Errorf("%v:\n%s", test.err, test)
+ } else if err != nil {
+ t.Error(err)
+ }
+}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index c1240ba46e..b999a6274a 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -29,6 +29,7 @@ package state
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"math"
"math/big"
@@ -40,8 +41,11 @@ import (
"testing/quick"
"github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/state/snapshot"
"github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
)
// Tests that updating a state trie does not leak any database writes prior to
@@ -112,7 +116,7 @@ func TestIntermediateLeaks(t *testing.T) {
}
// Commit and cross check the databases.
- transRoot, err := transState.Commit(false, false)
+ transRoot, err := transState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
@@ -120,7 +124,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
- finalRoot, err := finalState.Commit(false, false)
+ finalRoot, err := finalState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
@@ -307,9 +311,9 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
},
},
{
- name: "Suicide",
+ name: "SelfDestruct",
fn: func(a testAction, s *StateDB) {
- s.Suicide(addr)
+ s.SelfDestruct(addr)
},
},
{
@@ -459,7 +463,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
}
// Check basic accessor methods.
checkeq("Exist", state.Exist(addr), checkstate.Exist(addr))
- checkeq("HasSuicided", state.HasSuicided(addr), checkstate.HasSuicided(addr))
+ checkeq("HasSelfdestructed", state.HasSelfDestructed(addr), checkstate.HasSelfDestructed(addr))
checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr))
checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr))
checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr))
@@ -491,9 +495,9 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
}
func TestTouchDelete(t *testing.T) {
- s := newStateTest()
+ s := newStateEnv()
s.state.GetOrNewStateObject(common.Address{})
- root, _ := s.state.Commit(false, false)
+ root, _ := s.state.Commit(0, false, false)
s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap)
snapshot := s.state.Snapshot()
@@ -528,7 +532,8 @@ func TestCopyOfCopy(t *testing.T) {
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCommitCopy(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ tdb := NewDatabase(rawdb.NewMemoryDatabase())
+ state, _ := New(types.EmptyRootHash, tdb, nil)
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
@@ -565,20 +570,6 @@ func TestCopyCommitCopy(t *testing.T) {
if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
-
- copyOne.Commit(false, false)
- if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
- t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyOne.GetState(addr, skey); val != sval {
- t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyOne.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
- }
// Copy the copy and check the balance once more
copyTwo := copyOne.Copy()
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
@@ -590,8 +581,23 @@ func TestCopyCommitCopy(t *testing.T) {
if val := copyTwo.GetState(addr, skey); val != sval {
t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
- if val := copyTwo.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
+ if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ // Commit state, ensure states can be loaded from disk
+ root, _ := state.Commit(0, false, false)
+ state, _ = New(root, tdb, nil)
+ if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+ t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
+ }
+ if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ t.Fatalf("state post-commit code mismatch: have %x, want %x", code, []byte("hello"))
+ }
+ if val := state.GetState(addr, skey); val != sval {
+ t.Fatalf("state post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ if val := state.GetCommittedState(addr, skey); val != sval {
+ t.Fatalf("state post-commit committed storage slot mismatch: have %x, want %x", val, sval)
}
}
@@ -651,19 +657,6 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
- copyTwo.Commit(false, false)
- if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
- t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyTwo.GetState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyTwo.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
- }
// Copy the copy-copy and check the balance once more
copyThree := copyTwo.Copy()
if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
@@ -675,11 +668,56 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if val := copyThree.GetState(addr, skey); val != sval {
t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
- if val := copyThree.GetCommittedState(addr, skey); val != sval {
+ if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval)
}
}
+// TestCommitCopy tests that the copy of a committed state is not functional.
+func TestCommitCopy(t *testing.T) {
+ state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+
+ // Create an account and check if the retrieved balance is correct
+ addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
+ skey := common.HexToHash("aaa")
+ sval := common.HexToHash("bbb")
+
+ state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+ state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetState(addr, skey, sval) // Change the storage trie
+
+ if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+ t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
+ }
+ if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
+ }
+ if val := state.GetState(addr, skey); val != sval {
+ t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
+ }
+ // Copy the committed state database, the copied one is not functional.
+ state.Commit(0, true, false)
+ copied := state.Copy()
+ if balance := copied.GetBalance(addr); balance.Cmp(big.NewInt(0)) != 0 {
+ t.Fatalf("unexpected balance: have %v", balance)
+ }
+ if code := copied.GetCode(addr); code != nil {
+ t.Fatalf("unexpected code: have %x", code)
+ }
+ if val := copied.GetState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("unexpected storage slot: have %x", val)
+ }
+ if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("unexpected storage slot: have %x", val)
+ }
+ if !errors.Is(copied.Error(), trie.ErrCommitted) {
+ t.Fatalf("unexpected state error, %v", copied.Error())
+ }
+}
+
// TestDeleteCreateRevert tests a weird state transition corner case that we hit
// while changing the internals of StateDB. The workflow is that a contract is
// self-destructed, then in a follow-up transaction (but same block) it's created
@@ -695,11 +733,11 @@ func TestDeleteCreateRevert(t *testing.T) {
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, big.NewInt(1))
- root, _ := state.Commit(false, false)
+ root, _ := state.Commit(0, false, false)
state, _ = NewWithSnapshot(root, state.db, state.snap)
// Simulate self-destructing in one transaction, then create-reverting in another
- state.Suicide(addr)
+ state.SelfDestruct(addr)
state.Finalise(true)
id := state.Snapshot()
@@ -707,7 +745,7 @@ func TestDeleteCreateRevert(t *testing.T) {
state.RevertToSnapshot(id)
// Commit the entire state and make sure we don't crash and have the correct state
- root, _ = state.Commit(true, false)
+ root, _ = state.Commit(0, true, false)
state, _ = NewWithSnapshot(root, state.db, state.snap)
if state.getStateObject(addr) != nil {
@@ -731,7 +769,7 @@ func TestMissingTrieNodes(t *testing.T) {
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, big.NewInt(100))
state.SetCode(a2, []byte{1, 2, 4})
- root, _ = state.Commit(false, false)
+ root, _ = state.Commit(0, false, false)
t.Logf("root: %x", root)
// force-flush
state.Database().TrieDB().Cap(0)
@@ -755,7 +793,7 @@ func TestMissingTrieNodes(t *testing.T) {
}
// Modify the state
state.SetBalance(addr, big.NewInt(2))
- root, err := state.Commit(false, false)
+ root, err := state.Commit(0, false, false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}
@@ -950,7 +988,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
}
}
- root, err := state.Commit(false, false)
+ root, err := state.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
@@ -1008,3 +1046,37 @@ func TestStateDBTransientStorage(t *testing.T) {
t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
}
}
+
+func TestResetObject(t *testing.T) {
+ var (
+ disk = rawdb.NewMemoryDatabase()
+ tdb = trie.NewDatabase(disk)
+ db = NewDatabaseWithNodeDB(disk, tdb)
+ snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash)
+ state, _ = New(types.EmptyRootHash, db, snaps)
+ addr = common.HexToAddress("0x1")
+ slotA = common.HexToHash("0x1")
+ slotB = common.HexToHash("0x2")
+ )
+ // Initialize account with balance and storage in first transaction.
+ state.SetBalance(addr, big.NewInt(1))
+ state.SetState(addr, slotA, common.BytesToHash([]byte{0x1}))
+ state.IntermediateRoot(true)
+
+ // Reset account and mutate balance and storages
+ state.CreateAccount(addr)
+ state.SetBalance(addr, big.NewInt(2))
+ state.SetState(addr, slotB, common.BytesToHash([]byte{0x2}))
+ root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false)
+
+ // Ensure the original account is wiped properly
+ snap := snaps.Snapshot(root)
+ slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes()))
+ if len(slot) != 0 {
+ t.Fatalf("Unexpected storage slot")
+ }
+ slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes()))
+ if !bytes.Equal(slot, []byte{0x2}) {
+ t.Fatalf("Unexpected storage slot value %v", slot)
+ }
+}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 5a83a9ac91..96912962fc 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -21,6 +21,7 @@ import (
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
@@ -38,7 +39,7 @@ type testAccount struct {
func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
// Create an empty state
db := rawdb.NewMemoryDatabase()
- sdb := NewDatabase(db)
+ sdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
state, _ := New(types.EmptyRootHash, sdb, nil)
// Fill it with some arbitrary data
@@ -60,13 +61,13 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
if i%5 == 0 {
for j := byte(0); j < 5; j++ {
hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
- obj.SetState(sdb, hash, hash)
+ obj.SetState(hash, hash)
}
}
state.updateStateObject(obj)
accounts = append(accounts, acc)
}
- root, _ := state.Commit(false, false)
+ root, _ := state.Commit(0, false, false)
// Return the generated state
return db, sdb, root, accounts
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 7c65fd12c4..3a3c29baa3 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -424,7 +424,7 @@ func newTrieOrchestrator(sf *subfetcher) *trieOrchestrator {
return nil
}
} else {
- base, err = sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root)
+ base, err = sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root)
if err != nil {
log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
return nil
diff --git a/core/state_processor.go b/core/state_processor.go
index 240764da38..b0c308a3e3 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -169,7 +169,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, blockContext
return nil, err
}
// Create a new context to be used in the EVM environment
- vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{BlobHashes: tx.BlobHashes()}, statedb, config, cfg)
return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
}
@@ -190,7 +190,7 @@ func ApplyPrecompileActivations(c *params.ChainConfig, parentTimestamp *uint64,
// (or deconfigure it if it is being disabled.)
if activatingConfig.IsDisabled() {
log.Info("Disabling precompile", "name", module.ConfigKey)
- statedb.Suicide(module.Address)
+ statedb.SelfDestruct(module.Address)
// Calling Finalise here effectively commits Suicide call and wipes the contract state.
// This enables re-configuration of the same contract state in the same block.
// Without an immediate Finalise call after the Suicide, a reconfigured precompiled state can be wiped out
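The comment above captures the subtlety behind this rename: a self-destruct only takes effect once Finalise runs, so wiping and re-configuring a stateful precompile within the same block needs an explicit Finalise in between. A hedged sketch of that ordering against the StateDB type used here (the helper name, address, and values are invented for illustration):

```go
package example

import (
	"github.com/ava-labs/subnet-evm/core/state"
	"github.com/ethereum/go-ethereum/common"
)

// disableAndReconfigure wipes a stateful precompile and writes a fresh value
// in the same block. The intermediate Finalise is what lets the later write
// survive: it commits the self-destruct and empties the storage right away,
// instead of wiping it (and the new value) at the end of the block.
func disableAndReconfigure(statedb *state.StateDB, precompile common.Address, key, val common.Hash) {
	statedb.SelfDestruct(precompile)       // mark the old contract state for deletion
	statedb.Finalise(true)                 // commit the self-destruct, wiping storage now
	statedb.SetState(precompile, key, val) // reconfigure on top of the now-empty state
}
```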
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index d3776e7288..a0645bdba2 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -27,11 +27,13 @@
package core
import (
+ "crypto/ecdsa"
"math/big"
"testing"
"github.com/ava-labs/subnet-evm/consensus"
"github.com/ava-labs/subnet-evm/consensus/dummy"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/core/vm"
@@ -41,45 +43,10 @@ import (
"github.com/ava-labs/subnet-evm/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
-var (
- cpcfg = *params.TestChainConfig
- config = &cpcfg
- signer = types.LatestSigner(config)
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-)
-
-func makeTx(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction {
- tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, testKey)
- return tx
-}
-
-func mkDynamicTx(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction {
- tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{
- Nonce: nonce,
- GasTipCap: gasTipCap,
- GasFeeCap: gasFeeCap,
- Gas: gasLimit,
- To: &to,
- Value: big.NewInt(0),
- }), signer, testKey)
- return tx
-}
-
-func mkDynamicCreationTx(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, data []byte) *types.Transaction {
- tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{
- Nonce: nonce,
- GasTipCap: gasTipCap,
- GasFeeCap: gasFeeCap,
- Gas: gasLimit,
- Value: big.NewInt(0),
- Data: data,
- }), signer, testKey)
- return tx
-}
-
func u64(val uint64) *uint64 { return &val }
// TestStateProcessorErrors tests the output from the 'core' errors
@@ -87,7 +54,57 @@ func u64(val uint64) *uint64 { return &val }
// blockchain imports bad blocks, meaning blocks which have valid headers but
// contain invalid transactions
func TestStateProcessorErrors(t *testing.T) {
+ cpcfg := *params.TestChainConfig
+ config := &cpcfg
+ config.CancunTime = u64(0)
config.FeeConfig.MinBaseFee = big.NewInt(params.TestMaxBaseFee)
+
+ var (
+ signer = types.LatestSigner(config)
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ )
+ var makeTx = func(key *ecdsa.PrivateKey, nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction {
+ tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, key)
+ return tx
+ }
+ var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction {
+ tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{
+ Nonce: nonce,
+ GasTipCap: gasTipCap,
+ GasFeeCap: gasFeeCap,
+ Gas: gasLimit,
+ To: &to,
+ Value: big.NewInt(0),
+ }), signer, key1)
+ return tx
+ }
+ var mkDynamicCreationTx = func(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, data []byte) *types.Transaction {
+ tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{
+ Nonce: nonce,
+ GasTipCap: gasTipCap,
+ GasFeeCap: gasFeeCap,
+ Gas: gasLimit,
+ Value: big.NewInt(0),
+ Data: data,
+ }), signer, key1)
+ return tx
+ }
+ var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, hashes []common.Hash) *types.Transaction {
+ tx, err := types.SignTx(types.NewTx(&types.BlobTx{
+ Nonce: nonce,
+ GasTipCap: uint256.MustFromBig(gasTipCap),
+ GasFeeCap: uint256.MustFromBig(gasFeeCap),
+ Gas: gasLimit,
+ To: to,
+ BlobHashes: hashes,
+ Value: new(uint256.Int),
+ }), signer, key1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return tx
+ }
+
{ // Tests against a 'recent' chain definition
var (
db = rawdb.NewMemoryDatabase()
@@ -101,8 +118,10 @@ func TestStateProcessorErrors(t *testing.T) {
},
GasLimit: params.TestChainConfig.FeeConfig.GasLimit.Uint64(),
}
- blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false)
+ blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false)
+ tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
)
+
defer blockchain.Stop()
bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
tooBigNumber := new(big.Int).Set(bigNumber)
@@ -113,32 +132,32 @@ func TestStateProcessorErrors(t *testing.T) {
}{
{ // ErrNonceTooLow
txs: []*types.Transaction{
- makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil),
- makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil),
+ makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil),
+ makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil),
},
want: "could not apply tx 1 [0x734d821c990099c6ae42d78072aadd3931c35328cf03ef4cf5b2a4ac9c398522]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1",
},
{ // ErrNonceTooHigh
txs: []*types.Transaction{
- makeTx(100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil),
+ makeTx(key1, 100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil),
},
want: "could not apply tx 0 [0x0df36254cfbef8ed6961b38fc68aecc777177166144c8a56bc8919e23a559bf4]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0",
},
{ // ErrGasLimitReached
txs: []*types.Transaction{
- makeTx(0, common.Address{}, big.NewInt(0), 15000001, big.NewInt(225000000000), nil),
+ makeTx(key1, 0, common.Address{}, big.NewInt(0), 15000001, big.NewInt(225000000000), nil),
},
want: "could not apply tx 0 [0x1354370681d2ab68247073d889736f8be4a8d87e35956f0c02658d3670803a66]: gas limit reached",
},
{ // ErrInsufficientFundsForTransfer
txs: []*types.Transaction{
- makeTx(0, common.Address{}, big.NewInt(4000000000000000000), params.TxGas, big.NewInt(225000000000), nil),
+ makeTx(key1, 0, common.Address{}, big.NewInt(4000000000000000000), params.TxGas, big.NewInt(225000000000), nil),
},
want: "could not apply tx 0 [0x1632f2bffcce84a5c91dd8ab2016128fccdbcfbe0485d2c67457e1c793c72a4b]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 4004725000000000000",
},
{ // ErrInsufficientFunds
txs: []*types.Transaction{
- makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil),
+ makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil),
},
want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 18900000000000000000000",
},
@@ -148,13 +167,13 @@ func TestStateProcessorErrors(t *testing.T) {
// multiplication len(data) +gas_per_byte overflows uint64. Not testable at the moment
{ // ErrIntrinsicGas
txs: []*types.Transaction{
- makeTx(0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(225000000000), nil),
+ makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(225000000000), nil),
},
want: "could not apply tx 0 [0x2fc3e3b5cc26917d413e26983fe189475f47d4f0757e32aaa5561fcb9c9dc432]: intrinsic gas too low: have 20000, want 21000",
},
{ // ErrGasLimitReached
txs: []*types.Transaction{
- makeTx(0, common.Address{}, big.NewInt(0), params.TxGas*762, big.NewInt(225000000000), nil),
+ makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas*762, big.NewInt(225000000000), nil),
},
want: "could not apply tx 0 [0x76c07cc2b32007eb1a9c3fa066d579a3d77ec4ecb79bbc266624a601d7b08e46]: gas limit reached",
},
@@ -199,6 +218,24 @@ func TestStateProcessorErrors(t *testing.T) {
},
want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000",
},
+ { // ErrMaxInitCodeSizeExceeded
+ txs: []*types.Transaction{
+ mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.TestInitialBaseFee), tooBigInitCode[:]),
+ },
+ want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152",
+ },
+ { // ErrIntrinsicGas: Not enough gas to cover init code
+ txs: []*types.Transaction{
+ mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.TestInitialBaseFee), make([]byte, 320)),
+ },
+ want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300",
+ },
+ { // ErrBlobFeeCapTooLow
+ txs: []*types.Transaction{
+ mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), []common.Hash{(common.Hash{1})}),
+ },
+ want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 225000000000",
+ },
} {
block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config)
_, err := blockchain.InsertChain(types.Blocks{block})
@@ -301,69 +338,6 @@ func TestStateProcessorErrors(t *testing.T) {
}
}
}
-
- // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (Durango/EIP-3860) enabled.
- {
- var (
- db = rawdb.NewMemoryDatabase()
- gspec = &Genesis{
- Config: &params.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: big.NewInt(0),
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- MandatoryNetworkUpgrades: params.MandatoryNetworkUpgrades{
- SubnetEVMTimestamp: utils.NewUint64(0),
- DurangoTimestamp: utils.NewUint64(0),
- },
- FeeConfig: params.DefaultFeeConfig,
- },
- Alloc: GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
- Balance: big.NewInt(1000000000000000000), // 1 ether
- Nonce: 0,
- },
- },
- GasLimit: params.DefaultFeeConfig.GasLimit.Uint64(),
- }
- blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false)
- tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
- smallInitCode = [320]byte{}
- )
- defer blockchain.Stop()
- for i, tt := range []struct {
- txs []*types.Transaction
- want string
- }{
- { // ErrMaxInitCodeSizeExceeded
- txs: []*types.Transaction{
- mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.TestInitialBaseFee), tooBigInitCode[:]),
- },
- want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152",
- },
- { // ErrIntrinsicGas: Not enough gas to cover init code
- txs: []*types.Transaction{
- mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.TestInitialBaseFee), smallInitCode[:]),
- },
- want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300",
- },
- } {
- block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config)
- _, err := blockchain.InsertChain(types.Blocks{block})
- if err == nil {
- t.Fatal("block imported without errors")
- }
- if have, want := err.Error(), tt.want; have != want {
- t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
- }
- }
- }
}
// TestBadTxAllowListBlock tests the output generated when the
@@ -475,6 +449,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
hasher := sha3.NewLegacyKeccak256()
hasher.Write(header.Number.Bytes())
var cumulativeGas uint64
+ var nBlobs int
for _, tx := range txs {
txh := tx.Hash()
hasher.Write(txh[:])
@@ -483,8 +458,20 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
receipt.GasUsed = tx.Gas()
receipts = append(receipts, receipt)
cumulativeGas += tx.Gas()
+ nBlobs += len(tx.BlobHashes())
}
header.Root = common.BytesToHash(hasher.Sum(nil))
+ if config.IsCancun(header.Number, header.Time) {
+ var pExcess, pUsed = uint64(0), uint64(0)
+ if parent.ExcessBlobGas() != nil {
+ pExcess = *parent.ExcessBlobGas()
+ pUsed = *parent.BlobGasUsed()
+ }
+ excess := eip4844.CalcExcessBlobGas(pExcess, pUsed)
+ used := uint64(nBlobs * params.BlobTxBlobGasPerBlob)
+ header.ExcessBlobGas = &excess
+ header.BlobGasUsed = &used
+ }
// Assemble and return the final block for sealing
return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
}
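For reference, `eip4844.CalcExcessBlobGas` used above implements the EIP-4844 excess-accounting rule: the parent's excess and usage are summed and the per-block target is subtracted, flooring at zero. A hedged stand-alone sketch of that rule, assuming the upstream EIP-4844 parameter values (the real constants live in the params package):

```go
package example

// Assumed upstream EIP-4844 parameters: 131072 (2^17) blob gas per blob and a
// per-block target of 3 blobs.
const (
	blobGasPerBlob        = 1 << 17
	targetBlobGasPerBlock = 3 * blobGasPerBlob
)

// calcExcessBlobGas mirrors the accounting used above: whatever the parent
// consumed beyond the target carries forward as "excess", floored at zero.
func calcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed uint64) uint64 {
	if parentExcessBlobGas+parentBlobGasUsed < targetBlobGasPerBlock {
		return 0
	}
	return parentExcessBlobGas + parentBlobGasUsed - targetBlobGasPerBlock
}

// Example: parentExcess = 0 and four blobs used (524288 blob gas) leaves an
// excess of 131072, pushing the blob fee up for the next block.
```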
diff --git a/core/state_transition.go b/core/state_transition.go
index 74b17f53e7..80c2013056 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -27,10 +27,12 @@
package core
import (
+ "errors"
"fmt"
"math"
"math/big"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/core/vm"
"github.com/ava-labs/subnet-evm/params"
@@ -184,16 +186,18 @@ func toWordSize(size uint64) uint64 {
// A Message contains the data derived from a single transaction that is relevant to state
// processing.
type Message struct {
- To *common.Address
- From common.Address
- Nonce uint64
- Value *big.Int
- GasLimit uint64
- GasPrice *big.Int
- GasFeeCap *big.Int
- GasTipCap *big.Int
- Data []byte
- AccessList types.AccessList
+ To *common.Address
+ From common.Address
+ Nonce uint64
+ Value *big.Int
+ GasLimit uint64
+ GasPrice *big.Int
+ GasFeeCap *big.Int
+ GasTipCap *big.Int
+ Data []byte
+ AccessList types.AccessList
+ BlobGasFeeCap *big.Int
+ BlobHashes []common.Hash
// When SkipAccountChecks is true, the message nonce is not checked against the
// account nonce in state. It also disables checking that the sender is an EOA.
@@ -214,6 +218,8 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In
Data: tx.Data(),
AccessList: tx.AccessList(),
SkipAccountChecks: false,
+ BlobHashes: tx.BlobHashes(),
+ BlobGasFeeCap: tx.BlobGasFeeCap(),
}
// If baseFee provided, set gasPrice to effectiveGasPrice.
if baseFee != nil {
@@ -287,12 +293,24 @@ func (st *StateTransition) to() common.Address {
func (st *StateTransition) buyGas() error {
mgval := new(big.Int).SetUint64(st.msg.GasLimit)
mgval = mgval.Mul(mgval, st.msg.GasPrice)
- balanceCheck := mgval
+ balanceCheck := new(big.Int).Set(mgval)
if st.msg.GasFeeCap != nil {
- balanceCheck = new(big.Int).SetUint64(st.msg.GasLimit)
- balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap)
+ balanceCheck.SetUint64(st.msg.GasLimit)
+ balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap)
balanceCheck.Add(balanceCheck, st.msg.Value)
}
+ if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) {
+ if blobGas := st.blobGasUsed(); blobGas > 0 {
+ // Check that the user has enough funds to cover blobGasUsed * tx.BlobGasFeeCap
+ blobBalanceCheck := new(big.Int).SetUint64(blobGas)
+ blobBalanceCheck.Mul(blobBalanceCheck, st.msg.BlobGasFeeCap)
+ balanceCheck.Add(balanceCheck, blobBalanceCheck)
+ // Pay for blobGasUsed * actual blob fee
+ blobFee := new(big.Int).SetUint64(blobGas)
+ blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas))
+ mgval.Add(mgval, blobFee)
+ }
+ }
if have, want := st.state.GetBalance(st.msg.From), balanceCheck; have.Cmp(want) < 0 {
return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From.Hex(), have, want)
}
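The balance check above is deliberately pessimistic: it reserves gasLimit times the fee cap plus the transferred value, and for Cancun blob transactions additionally blobGas times the blob fee cap, while the amount actually deducted (mgval) is computed from the effective prices. A hedged sketch of that worst-case figure, with invented numbers in the example:

```go
package example

import "math/big"

// worstCaseCost reproduces the shape of balanceCheck above: the sender must
// be able to afford the fee caps (plus value), even though the amount that
// is actually deducted uses the effective gas price and the current blob fee.
func worstCaseCost(gasLimit uint64, gasFeeCap, value *big.Int, blobGas uint64, blobGasFeeCap *big.Int) *big.Int {
	cost := new(big.Int).SetUint64(gasLimit)
	cost.Mul(cost, gasFeeCap)
	cost.Add(cost, value)
	if blobGas > 0 {
		blobCost := new(big.Int).SetUint64(blobGas)
		blobCost.Mul(blobCost, blobGasFeeCap)
		cost.Add(cost, blobCost)
	}
	return cost
}

// Example: 21000 gas at a 100 gwei cap, zero value and two blobs (262144 blob
// gas) at a 1 gwei blob fee cap reserves 2.1e15 + 2.62144e14 wei up front.
```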
@@ -366,6 +384,29 @@ func (st *StateTransition) preCheck() error {
}
}
}
+ // Check the blob version validity
+ if msg.BlobHashes != nil {
+ if len(msg.BlobHashes) == 0 {
+ return errors.New("blob transaction missing blob hashes")
+ }
+ for i, hash := range msg.BlobHashes {
+ if hash[0] != params.BlobTxHashVersion {
+ return fmt.Errorf("blob %d hash version mismatch (have %d, supported %d)",
+ i, hash[0], params.BlobTxHashVersion)
+ }
+ }
+ }
+
+ if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) {
+ if st.blobGasUsed() > 0 {
+ // Check that the user is paying at least the current blob fee
+ blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)
+ if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 {
+ return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee)
+ }
+ }
+ }
+
return st.buyGas()
}
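The version check above relies on the EIP-4844 "versioned hash" convention: each blob hash is the SHA-256 of the KZG commitment with its first byte overwritten by the version number (0x01, which is what params.BlobTxHashVersion is assumed to hold here). A hedged sketch of building such a hash; the commitment bytes are a placeholder:

```go
package example

import (
	"crypto/sha256"

	"github.com/ethereum/go-ethereum/common"
)

const blobTxHashVersion = 0x01 // assumed to match params.BlobTxHashVersion

// versionedHash derives the EIP-4844 blob hash referenced by BlobHashes:
// sha256(commitment) with the leading byte replaced by the version.
func versionedHash(commitment []byte) common.Hash {
	h := sha256.Sum256(commitment)
	h[0] = blobTxHashVersion
	return common.Hash(h)
}
```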
@@ -406,7 +447,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
var (
msg = st.msg
sender = vm.AccountRef(msg.From)
- rules = st.evm.ChainConfig().AvalancheRules(st.evm.Context.BlockNumber, st.evm.Context.Time)
+ rules = st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Time)
contractCreation = msg.To == nil
)
@@ -480,3 +521,8 @@ func (st *StateTransition) refundGas(subnetEVM bool) {
func (st *StateTransition) gasUsed() uint64 {
return st.initialGas - st.gasRemaining
}
+
+// blobGasUsed returns the amount of blob gas used by the message.
+func (st *StateTransition) blobGasUsed() uint64 {
+ return uint64(len(st.msg.BlobHashes) * params.BlobTxBlobGasPerBlob)
+}
diff --git a/core/trie_stress_bench_test.go b/core/trie_stress_bench_test.go
index 8f7c0b9ce2..faaea2ca10 100644
--- a/core/trie_stress_bench_test.go
+++ b/core/trie_stress_bench_test.go
@@ -32,6 +32,7 @@ import (
"testing"
"github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/params"
"github.com/ava-labs/subnet-evm/precompile/contract"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -51,6 +52,10 @@ func BenchmarkTrie(t *testing.B) {
func stressTestTrieDb(t *testing.B, numContracts int, callsPerBlock int, elements int64, gasTxLimit uint64) func(int, *BlockGen) {
require := require.New(t)
+ config := params.TestChainConfig
+ signer := types.LatestSigner(config)
+ testKey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+
contractAddr := make([]common.Address, numContracts)
contractTxs := make([]*types.Transaction, numContracts)
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
new file mode 100644
index 0000000000..e4ab5dbfd7
--- /dev/null
+++ b/core/txpool/blobpool/blobpool.go
@@ -0,0 +1,1649 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package blobpool implements the EIP-4844 blob transaction pool.
+package blobpool
+
+import (
+ "container/heap"
+ "fmt"
+ "math"
+ "math/big"
+ "os"
+ "path/filepath"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ava-labs/subnet-evm/consensus/dummy"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
+ "github.com/ava-labs/subnet-evm/core"
+ "github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/metrics"
+ "github.com/ava-labs/subnet-evm/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+ "github.com/holiman/uint256"
+)
+
+const (
+ // blobSize is the protocol constrained byte size of a single blob in a
+ // transaction. There can be multiple of these embedded into a single tx.
+ blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement
+
+ // maxBlobsPerTransaction is the maximum number of blobs a single transaction
+ // is allowed to contain. Whilst the spec states it's unlimited, the block
+ // data slots are protocol bound, which implicitly also limit this.
+ maxBlobsPerTransaction = params.BlobTxMaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
+
+ // txAvgSize is an approximate byte size of a transaction metadata to avoid
+ // tiny overflows causing all txs to move a shelf higher, wasting disk space.
+ txAvgSize = 4 * 1024
+
+ // txMaxSize is the maximum size a single transaction can have, outside
+ // the included blobs. Since blob transactions are pulled instead of pushed,
+ // and only small metadata is kept in RAM while the rest lives on disk, there
+ // is no critical limit that needs to be enforced. Still, capping it to some sane
+ // limit can never hurt.
+ txMaxSize = 1024 * 1024
+
+ // maxTxsPerAccount is the maximum number of blob transactions admitted from
+ // a single account. The limit is enforced to minimize the DoS potential of
+ // a private tx cancelling publicly propagated blobs.
+ //
+ // Note, transactions resurrected by a reorg are also subject to this limit,
+ // so pushing it down too aggressively might make resurrections non-functional.
+ maxTxsPerAccount = 16
+
+ // pendingTransactionStore is the subfolder containing the currently queued
+ // blob transactions.
+ pendingTransactionStore = "queue"
+
+ // limboedTransactionStore is the subfolder containing the currently included
+ // but not yet finalized transaction blobs.
+ limboedTransactionStore = "limbo"
+)
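Plugging in the upstream EIP-4844 parameter values (assumed here: 4096 field elements of 32 bytes per blob, 131072 blob gas per blob, 786432 max blob gas per block) gives a 128 KiB blobSize and at most 6 blobs per transaction. A quick check of that arithmetic:

```go
package example

import "fmt"

func main() {
	// Assumed upstream EIP-4844 values; the real constants live in params.
	const (
		fieldElementsPerBlob = 4096
		bytesPerFieldElement = 32
		blobGasPerBlob       = 1 << 17 // 131072
		maxBlobGasPerBlock   = 6 << 17 // 786432
	)
	fmt.Println("blobSize bytes:", fieldElementsPerBlob*bytesPerFieldElement) // 131072 (128 KiB)
	fmt.Println("max blobs/tx:  ", maxBlobGasPerBlock/blobGasPerBlob)         // 6
}
```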
+
+// blobTx is a wrapper around types.BlobTx which also contains the literal blob
+// data along with all the transaction metadata.
+type blobTx struct {
+ Tx *types.Transaction
+
+ Blobs []kzg4844.Blob
+ Commits []kzg4844.Commitment
+ Proofs []kzg4844.Proof
+}
+
+// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
+// schedule the blob transactions into the following blocks. Only ever add the
+// bare minimum needed fields to keep the size down (and thus number of entries
+// larger with the same memory consumption).
+type blobTxMeta struct {
+ hash common.Hash // Transaction hash to maintain the lookup table
+ id uint64 // Storage ID in the pool's persistent store
+ size uint32 // Byte size in the pool's persistent store
+
+ nonce uint64 // Needed to prioritize inclusion order within an account
+ costCap *uint256.Int // Needed to validate cumulative balance sufficiency
+ execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
+ execFeeCap *uint256.Int // Needed to validate replacement price bump
+ blobFeeCap *uint256.Int // Needed to validate replacement price bump
+
+ basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
+ blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
+
+ evictionExecTip *uint256.Int // Worst gas tip across all previous nonces
+ evictionExecFeeJumps float64 // Worst base fee (converted to fee jumps) across all previous nonces
+ evictionBlobFeeJumps float64 // Worst blob fee (converted to fee jumps) across all previous nonces
+}
+
+// newBlobTxMeta retrieves the indexed metadata fields from a blob transaction
+// and assembles a helper struct to track in memory.
+func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
+ meta := &blobTxMeta{
+ hash: tx.Hash(),
+ id: id,
+ size: size,
+ nonce: tx.Nonce(),
+ costCap: uint256.MustFromBig(tx.Cost()),
+ execTipCap: uint256.MustFromBig(tx.GasTipCap()),
+ execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
+ blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
+ }
+ meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
+ meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
+
+ return meta
+}
+
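The basefeeJumps and blobfeeJumps fields computed above express a fee cap as the number of ~12.5% fee adjustments needed to reach it, i.e. the log base 1.125 of the cap. The pool's own dynamicFeeJumps helper performs this conversion; the stand-alone version below is only a hedged illustration of the idea:

```go
package example

import (
	"math"
	"math/big"
)

// feeJumps converts an absolute fee into "number of ~12.5% fee adjustments
// needed to reach it", i.e. log base 1.125 of the fee, which is the scale
// the 1559/4844 fee markets actually move on.
func feeJumps(fee *big.Int) float64 {
	if fee.Sign() <= 0 {
		return 0 // nothing sensible to report for a zero cap
	}
	f, _ := new(big.Float).SetInt(fee).Float64()
	return math.Log(f) / math.Log(1.125)
}

// Example: doubling a fee cap is worth log(2)/log(1.125) ≈ 5.9 jumps, so two
// caps one doubling apart differ by about six fee adjustments.
```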
+// BlobPool is the transaction pool dedicated to EIP-4844 blob transactions.
+//
+// Blob transactions are special snowflakes that are designed for a very specific
+// purpose (rollups) and are expected to adhere to that specific use case. These
+// behavioural expectations allow us to design a transaction pool that is more robust
+// (i.e. resending issues) and more resilient to DoS attacks (e.g. replace-flush
+// attacks) than the generic tx pool. These improvements will also mean, however,
+// that we enforce a significantly more aggressive strategy on entering and exiting
+// the pool:
+//
+// - Blob transactions are large. With the initial design aiming for 128KB blobs,
+// we must ensure that these only traverse the network the absolute minimum
+// number of times. Broadcasting to sqrt(peers) is out of the question, rather
+// these should only ever be announced and the remote side should request it if
+// it wants to.
+//
+// - Block blob-space is limited. With blocks being capped to a few blob txs, we
+// can make use of the very low expected churn rate within the pool. Notably,
+// we should be able to use a persistent disk backend for the pool, solving
+// the tx resend issue that plagues the generic tx pool, as long as there's no
+// artificial churn (i.e. pool wars).
+//
+// - The purpose of blobs is layer-2s. Layer-2s are meant to use blob transactions to
+// commit to their own current state, which is independent of Ethereum mainnet
+// (state, txs). This means that there's no reason for blob tx cancellation or
+// replacement, apart from a potential basefee / miner tip adjustment.
+//
+// - Replacements are expensive. Given their size, propagating a replacement
+// blob transaction to an existing one should be aggressively discouraged.
+// Whilst generic transactions can start at 1 Wei gas cost and require a 10%
+// fee bump to replace, we suggest requiring a higher min cost (e.g. 1 gwei)
+// and a more aggressive bump (100%).
+//
+// - Cancellation is prohibitive. Evicting an already propagated blob tx is a huge
+// DoS vector. As such, a) replacement (higher-fee) blob txs mustn't invalidate
+// already propagated (future) blob txs (cumulative fee); b) nonce-gapped blob
+// txs are disallowed; c) the presence of blob transactions excludes non-blob
+// transactions.
+//
+// - Malicious cancellations are possible. Although the pool might prevent txs
+// that cancel blobs, blocks might contain such transactions (malicious miner
+// or flashbotter). The pool should cap the total number of blob transactions
+// per account as to prevent propagating too much data before cancelling it
+// via a normal transaction. It should nonetheless be high enough to support
+// resurrecting reorged transactions. Perhaps 4-16.
+//
+// - Local txs are meaningless. Mining pools historically used local transactions
+// for payouts or for backdoor deals. With 1559 in place, the basefee usually
+// dominates the final price, so 0 or non-0 tip doesn't change much. Blob txs
+// retain the 1559 2D gas pricing (and introduce on top a dynamic blob gas fee),
+// so locality is moot. With a disk backed blob pool avoiding the resend issue,
+// there's also no need to save own transactions for later.
+//
+// - No-blob blob-txs are bad. Theoretically there's no strong reason to disallow
+// blob txs containing 0 blobs. In practice, admitting such txs into the pool
+// breaks the low-churn invariant as blob constraints don't apply anymore. Even
+// though we could accept blocks containing such txs, a reorg would require moving
+// them back into the blob pool, which can break invariants.
+//
+// - Dropping blobs needs delay. When normal transactions are included, they
+// are immediately evicted from the pool since they are contained in the
+// including block. Blobs however are not included in the execution chain,
+// so a mini reorg cannot re-pool "lost" blob transactions. To support reorgs,
+// blobs are retained on disk until they are finalised.
+//
+// - Blobs can arrive via flashbots. Blocks might contain blob transactions we
+// have never seen on the network. Since we cannot recover them from blocks
+// either, the engine_newPayload needs to give them to us, and we cache them
+// until finality to support reorgs without tx losses.
+//
+// Whilst some constraints above might sound overly aggressive, the general idea is
+// that the blob pool should work robustly for its intended use case and whilst
+// anyone is free to use blob transactions for arbitrary non-rollup use cases,
+// they should not be allowed to run amok on the network.
+//
+// Implementation wise there are a few interesting design choices:
+//
+// - Adding a transaction to the pool blocks until persisted to disk. This is
+// viable because TPS is low (2-4 blobs per block initially, maybe 8-16 at
+// peak), so natural churn is a couple MB per block. Replacements doing O(n)
+// updates are forbidden and transaction propagation is pull based (i.e. no
+// pileup of pending data).
+//
+// - When transactions are chosen for inclusion, the primary criteria is the
+// signer tip (and having a basefee/data fee high enough of course). However,
+// same-tip transactions will be split by their basefee/datafee, preferring
+// those that are closer to the current network limits. The idea being that
+// very relaxed ones can be included even if the fees go up, when the closer
+// ones could already be invalid.
+//
+// When the pool eventually reaches saturation, some old transactions - that may
+// never execute - will need to be evicted in favor of newer ones. The eviction
+// strategy is quite complex:
+//
+// - Exceeding capacity evicts the highest-nonce of the account with the lowest
+// paying blob transaction anywhere in the pooled nonce-sequence, as that tx
+// would be executed the furthest in the future and is thus blocking anything
+// after it. The smallest is deliberately not evicted to avoid a nonce-gap.
+//
+// - Analogously, if the pool is full, the consideration price of a new tx for
+// evicting an old one is the smallest price in the entire nonce-sequence of
+// the account. This avoids malicious users DoSing the pool with seemingly
+// high paying transactions hidden behind a low-paying blocked one.
+//
+// - Since blob transactions have 3 price parameters: execution tip, execution
+// fee cap and data fee cap, there's no singular parameter to create a total
+// price ordering on. What's more, since the base fee and blob fee can move
+// independently of one another, there's no pre-defined way to combine them
+// into a stable order either. This leads to a multi-dimensional problem to
+// solve after every block.
+//
+// - The first observation is that comparing 1559 base fees or 4844 blob fees
+// needs to happen in the context of their dynamism. Since these fees jump
+// up or down in ~1.125 multipliers (at max) across blocks, comparing fees
+// in two transactions should be based on log1.125(fee) to eliminate noise.
+//
+// - The second observation is that the basefee and blobfee move independently,
+// so there's no way to split mixed txs on their own (A has higher base fee,
+// B has higher blob fee). Rather than look at the absolute fees, the useful
+// metric is the max time it can take to exceed the transaction's fee caps.
+// Specifically, we're interested in the number of jumps needed to go from
+// the current fee to the transaction's cap:
+//
+// jumps = log1.125(txfee) - log1.125(basefee)
+//
+// - The third observation is that the base fee tends to hover around rather
+// than swing wildly. The number of jumps needed from the current fee starts
+// to get less relevant the higher it is. To remove the noise here too, the
+// pool will use log(jumps) as the delta for comparing transactions.
+//
+// delta = sign(jumps) * log(abs(jumps))
+//
+// - To establish a total order, we need to reduce the dimensionality of the
+// two base fees (log jumps) to a single value. The interesting aspect from
+// the pool's perspective is how fast will a tx get executable (fees going
+// down, crossing the smaller negative jump counter) or non-executable (fees
+// going up, crossing the smaller positive jump counter). As such, the pool
+// cares only about the min of the two delta values for eviction priority.
+//
+// priority = min(delta-basefee, delta-blobfee)
+//
+// - The above very aggressive dimensionality and noise reduction should result
+// in transactions being grouped into a small number of buckets, the further
+// the fees the larger the buckets. This is good because it allows us to use
+// the miner tip meaningfully as a splitter.
+//
+// - For the scenario where the pool does not contain non-executable blob txs
+// anymore, it does not make sense to grant a later eviction priority to txs
+// with high fee caps since it could enable pool wars. As such, any positive
+// priority will be grouped together.
+//
+// priority = min(delta-basefee, delta-blobfee, 0)
+//
+// Optimisation tradeoffs:
+//
+// - Eviction relies on 3 fee minimums per account (exec tip, exec cap and blob
+// cap). Maintaining these values across all transactions from the account is
+// problematic as each transaction replacement or inclusion would require a
+// rescan of all other transactions to recalculate the minimum. Instead, the
+// pool maintains a rolling minimum across the nonce range. Updating all the
+// minimums will need to be done only starting at the swapped in/out nonce
+// and leading up to the first no-change.
+type BlobPool struct {
+ config Config // Pool configuration
+ reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools
+
+ store billy.Database // Persistent data store for the tx metadata and blobs
+ stored uint64 // Useful data size of all transactions on disk
+ limbo *limbo // Persistent data store for the non-finalized blobs
+
+ signer types.Signer // Transaction signer to use for sender recovery
+ chain BlockChain // Chain object to access the state through
+
+ head *types.Header // Current head of the chain
+ state *state.StateDB // Current state at the head of the chain
+ gasTip *uint256.Int // Currently accepted minimum gas tip
+
+ lookup map[common.Hash]uint64 // Lookup table mapping hashes to tx billy entries
+ index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
+ spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
+ evict *evictHeap // Heap of cheapest accounts for eviction when full
+
+ eventFeed event.Feed // Event feed to send out new tx events on pool inclusion
+ eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination
+
+ lock sync.RWMutex // Mutex protecting the pool during reorg handling
+}
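To make the eviction formulas in the comment above concrete, the sketch below combines the two jump deltas exactly as described: the sign-preserving log transform to suppress noise, then the minimum with zero so already-executable transactions collapse into one bucket. The pool has its own priority helpers for this; these functions are purely illustrative:

```go
package example

import "math"

// jumpDelta compresses a raw jump count: keep the sign, log-scale the size,
// so far-away fee caps stop dominating the ordering.
func jumpDelta(jumps float64) float64 {
	if jumps == 0 {
		return 0
	}
	sign := 1.0
	if jumps < 0 {
		sign = -1.0
	}
	return sign * math.Log(math.Abs(jumps))
}

// evictionPriority follows priority = min(delta-basefee, delta-blobfee, 0):
// the dimension closest to turning the tx non-executable decides, and any
// positive headroom is flattened to zero to avoid pool wars.
func evictionPriority(baseFeeJumps, txBaseFeeJumps, blobFeeJumps, txBlobFeeJumps float64) float64 {
	deltaBase := jumpDelta(txBaseFeeJumps - baseFeeJumps)
	deltaBlob := jumpDelta(txBlobFeeJumps - blobFeeJumps)
	return math.Min(math.Min(deltaBase, deltaBlob), 0)
}
```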
+
+// New creates a new blob transaction pool to gather, sort and filter inbound
+// blob transactions from the network.
+func New(config Config, chain BlockChain) *BlobPool {
+ // Sanitize the input to ensure no vulnerable gas prices are set
+ config = (&config).sanitize()
+
+ // Create the transaction pool with its initial settings
+ return &BlobPool{
+ config: config,
+ signer: types.LatestSigner(chain.Config()),
+ chain: chain,
+ lookup: make(map[common.Hash]uint64),
+ index: make(map[common.Address][]*blobTxMeta),
+ spent: make(map[common.Address]*uint256.Int),
+ }
+}
+
+// Filter returns whether the given transaction can be consumed by the blob pool.
+func (p *BlobPool) Filter(tx *types.Transaction) bool {
+ return tx.Type() == types.BlobTxType
+}
+
+// Init sets the gas price needed to keep a transaction in the pool and the chain
+// head to allow balance / nonce checks. The transaction journal will be loaded
+// from disk and filtered based on the provided starting settings.
+func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
+ p.reserve = reserve
+
+ var (
+ queuedir string
+ limbodir string
+ )
+ if p.config.Datadir != "" {
+ queuedir = filepath.Join(p.config.Datadir, pendingTransactionStore)
+ if err := os.MkdirAll(queuedir, 0700); err != nil {
+ return err
+ }
+ limbodir = filepath.Join(p.config.Datadir, limboedTransactionStore)
+ if err := os.MkdirAll(limbodir, 0700); err != nil {
+ return err
+ }
+ }
+ state, err := p.chain.StateAt(head.Root)
+ if err != nil {
+ return err
+ }
+ p.head, p.state = head, state
+
+ // Index all transactions on disk and delete anything unprocessable
+ var fails []uint64
+ index := func(id uint64, size uint32, blob []byte) {
+ if p.parseTransaction(id, size, blob) != nil {
+ fails = append(fails, id)
+ }
+ }
+ store, err := billy.Open(billy.Options{Path: queuedir}, newSlotter(), index)
+ if err != nil {
+ return err
+ }
+ p.store = store
+
+ if len(fails) > 0 {
+ log.Warn("Dropping invalidated blob transactions", "ids", fails)
+ for _, id := range fails {
+ if err := p.store.Delete(id); err != nil {
+ p.Close()
+ return err
+ }
+ }
+ }
+ // Sort the indexed transactions by nonce and delete anything gapped, create
+ // the eviction heap of anyone still standing
+ for addr := range p.index {
+ p.recheck(addr, nil)
+ }
+ feeConfig, _, err := p.chain.GetFeeConfigAt(p.head)
+ if err != nil {
+ p.Close()
+ return err
+ }
+ _, baseFee, err := dummy.EstimateNextBaseFee(
+ p.chain.Config(),
+ feeConfig,
+ p.head,
+ uint64(time.Now().Unix()),
+ )
+ if err != nil {
+ p.Close()
+ return err
+ }
+ var (
+ // basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
+ basefee = uint256.MustFromBig(baseFee)
+ blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
+ )
+ if p.head.ExcessBlobGas != nil {
+ blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas))
+ }
+ p.evict = newPriceHeap(basefee, blobfee, &p.index)
+
+ // Pool initialized, attach the blob limbo to it to track blobs included
+ // recently but not yet finalized
+ p.limbo, err = newLimbo(limbodir)
+ if err != nil {
+ p.Close()
+ return err
+ }
+ // Set the configured gas tip, triggering a filtering of anything just loaded
+ basefeeGauge.Update(int64(basefee.Uint64()))
+ blobfeeGauge.Update(int64(blobfee.Uint64()))
+
+ p.SetGasTip(gasTip)
+
+ // Since the user might have modified their pool's capacity, evict anything
+ // above the current allowance
+ for p.stored > p.config.Datacap {
+ p.drop()
+ }
+ // Update the metrics and return the constructed pool
+ datacapGauge.Update(int64(p.config.Datacap))
+ p.updateStorageMetrics()
+ return nil
+}
+
+// Close closes down the underlying persistent store.
+func (p *BlobPool) Close() error {
+ var errs []error
+ if p.limbo != nil {
+ if err := p.limbo.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if err := p.store.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ p.eventScope.Close()
+
+ switch {
+ case errs == nil:
+ return nil
+ case len(errs) == 1:
+ return errs[0]
+ default:
+ return fmt.Errorf("%v", errs)
+ }
+}
+
+// parseTransaction is a callback method on pool creation that gets called for
+// each transaction on disk to create the in-memory metadata index.
+func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
+ item := new(blobTx)
+ if err := rlp.DecodeBytes(blob, item); err != nil {
+ // This path is impossible unless the disk data representation changes
+ // across restarts. For that ever improbable case, recover gracefully
+ // by ignoring this data entry.
+ log.Error("Failed to decode blob pool entry", "id", id, "err", err)
+ return err
+ }
+ meta := newBlobTxMeta(id, size, item.Tx)
+
+ sender, err := p.signer.Sender(item.Tx)
+ if err != nil {
+ // This path is impossible unless the signature validity changes across
+ // restarts. For that ever improbable case, recover gracefully by ignoring
+ // this data entry.
+ log.Error("Failed to recover blob tx sender", "id", id, "hash", item.Tx.Hash(), "err", err)
+ return err
+ }
+ if _, ok := p.index[sender]; !ok {
+ if err := p.reserve(sender, true); err != nil {
+ return err
+ }
+ p.index[sender] = []*blobTxMeta{}
+ p.spent[sender] = new(uint256.Int)
+ }
+ p.index[sender] = append(p.index[sender], meta)
+ p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
+
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size)
+
+ return nil
+}
+
+// recheck verifies the pool's content for a specific account and drops anything
+// that does not fit anymore (dangling or filled nonce, overdraft).
+func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint64) {
+ // Sort the transactions belonging to the account so reinjects can be simpler
+ txs := p.index[addr]
+ if inclusions != nil && txs == nil { // during reorgs, we might find new accounts
+ return
+ }
+ sort.Slice(txs, func(i, j int) bool {
+ return txs[i].nonce < txs[j].nonce
+ })
+ // If there is a gap between the chain state and the blob pool, drop
+ // all the transactions as they are non-executable. Similarly, if the
+ // entire tx range was included, drop all.
+ var (
+ next = p.state.GetNonce(addr)
+ gapped = txs[0].nonce > next
+ filled = txs[len(txs)-1].nonce < next
+ )
+ if gapped || filled {
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for i := 0; i < len(txs); i++ {
+ ids = append(ids, txs[i].id)
+ nonces = append(nonces, txs[i].nonce)
+
+ p.stored -= uint64(txs[i].size)
+ delete(p.lookup, txs[i].hash)
+
+ // Included transaction blobs need to be moved to the limbo
+ if filled && inclusions != nil {
+ p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
+ }
+ }
+ delete(p.index, addr)
+ delete(p.spent, addr)
+ if inclusions != nil { // only during reorgs will the heap be initialized
+ heap.Remove(p.evict, p.evict.index[addr])
+ }
+ p.reserve(addr, false)
+
+ if gapped {
+ log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
+ } else {
+ log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
+ }
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ return
+ }
+ // If there is overlap between the chain state and the blob pool, drop
+ // anything below the current state
+ if txs[0].nonce < next {
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for txs[0].nonce < next {
+ ids = append(ids, txs[0].id)
+ nonces = append(nonces, txs[0].nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
+ p.stored -= uint64(txs[0].size)
+ delete(p.lookup, txs[0].hash)
+
+ // Included transaction blobs need to be moved to the limbo
+ if inclusions != nil {
+ p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
+ }
+ txs = txs[1:]
+ }
+ log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ p.index[addr] = txs
+ }
+ // Iterate over the transactions to initialize their eviction thresholds
+ // and to detect any nonce gaps
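+ // The thresholds are rolling minimums over the nonce-sorted sequence: for
+ // example, execution tip caps of [5, 3, 7] yield eviction tips of [5, 3, 3],
+ // so each entry reflects the worst-paying transaction up to that nonce.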
+ txs[0].evictionExecTip = txs[0].execTipCap
+ txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
+ txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
+
+ for i := 1; i < len(txs); i++ {
+ // If there's no nonce gap, initialize the eviction thresholds as the
+ // minimum between the cumulative thresholds and the current tx fees
+ if txs[i].nonce == txs[i-1].nonce+1 {
+ txs[i].evictionExecTip = txs[i-1].evictionExecTip
+ if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
+ txs[i].evictionExecTip = txs[i].execTipCap
+ }
+ txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
+ if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
+ txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
+ }
+ txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
+ if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
+ txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ continue
+ }
+ // Sanity check that there's no double nonce. This case would be a coding
+ // error, but better to know about it
+ if txs[i].nonce == txs[i-1].nonce {
+ log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce)
+ }
+ // Otherwise, if there's a nonce gap, evict all later transactions
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for j := i; j < len(txs); j++ {
+ ids = append(ids, txs[j].id)
+ nonces = append(nonces, txs[j].nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
+ p.stored -= uint64(txs[j].size)
+ delete(p.lookup, txs[j].hash)
+ }
+ txs = txs[:i]
+
+ log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ p.index[addr] = txs
+ break
+ }
+ // Ensure that there's no overdraft; this is expected to happen when some
+ // transactions get included without being published on the network
+ var (
+ balance = uint256.MustFromBig(p.state.GetBalance(addr))
+ spent = p.spent[addr]
+ )
+ if spent.Cmp(balance) > 0 {
+ // Evict the highest nonce transactions until the pending set falls under
+ // the account's available balance
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for p.spent[addr].Cmp(balance) > 0 {
+ last := txs[len(txs)-1]
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ ids = append(ids, last.id)
+ nonces = append(nonces, last.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
+ p.stored -= uint64(last.size)
+ delete(p.lookup, last.hash)
+ }
+ if len(txs) == 0 {
+ delete(p.index, addr)
+ delete(p.spent, addr)
+ if inclusions != nil { // only during reorgs will the heap be initialized
+ heap.Remove(p.evict, p.evict.index[addr])
+ }
+ p.reserve(addr, false)
+ } else {
+ p.index[addr] = txs
+ }
+ log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ }
+ // Sanity check that no account can have more queued transactions than the
+ // DoS protection threshold.
+ if len(txs) > maxTxsPerAccount {
+ // Evict the highest nonce transactions until the pending set falls under
+ // the account's transaction cap
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for len(txs) > maxTxsPerAccount {
+ last := txs[len(txs)-1]
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ ids = append(ids, last.id)
+ nonces = append(nonces, last.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
+ p.stored -= uint64(last.size)
+ delete(p.lookup, last.hash)
+ }
+ p.index[addr] = txs
+
+ log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ }
+ // Included cheap transactions might have left the remaining ones better off
+ // from an eviction point of view; fix any potential inconsistencies in the heap.
+ if _, ok := p.index[addr]; ok && inclusions != nil {
+ heap.Fix(p.evict, p.evict.index[addr])
+ }
+}
+
+// offload removes a tracked blob transaction from the pool and moves it into the
+// limbo for tracking until finality.
+//
+// The method may log errors for various unexpected scenarios but will not return
+// any of them since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusions map[common.Hash]uint64) {
+ data, err := p.store.Get(id)
+ if err != nil {
+ log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ item := new(blobTx)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+ log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ block, ok := inclusions[item.Tx.Hash()]
+ if !ok {
+ log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
+ return
+ }
+ if err := p.limbo.push(item.Tx.Hash(), block, item.Blobs, item.Commits, item.Proofs); err != nil {
+ log.Warn("Failed to offload blob tx into limbo", "err", err)
+ return
+ }
+}
+
+// Reset implements txpool.SubPool, allowing the blob pool's internal state to be
+// kept in sync with the main transaction pool's internal state.
+func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
+ waitStart := time.Now()
+ p.lock.Lock()
+ resetwaitHist.Update(time.Since(waitStart).Nanoseconds())
+ defer p.lock.Unlock()
+
+ defer func(start time.Time) {
+ resettimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ statedb, err := p.chain.StateAt(newHead.Root)
+ if err != nil {
+ log.Error("Failed to reset blobpool state", "err", err)
+ return
+ }
+ p.head = newHead
+ p.state = statedb
+
+ // Run the reorg between the old and new head and figure out which accounts
+ // need to be rechecked and which transactions need to be readded
+ if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
+ for addr, txs := range reinject {
+ // Blindly push all the lost transactions back into the pool
+ for _, tx := range txs {
+ p.reinject(addr, tx)
+ }
+ // Recheck the account's pooled transactions to drop included and
+ // invalidated ones
+ p.recheck(addr, inclusions)
+ }
+ }
+ // Flush out any blobs from limbo that are older than the latest finality
+ if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
+ p.limbo.finalize(p.chain.CurrentFinalBlock())
+ }
+ feeConfig, _, err := p.chain.GetFeeConfigAt(p.head)
+ if err != nil {
+ log.Error("Failed to get fee config to reset blobpool fees", "err", err)
+ return
+ }
+ _, baseFee, err := dummy.EstimateNextBaseFee(
+ p.chain.Config(),
+ feeConfig,
+ p.head,
+ uint64(time.Now().Unix()),
+ )
+ if err != nil {
+ log.Error("Failed to estimate next base fee to reset blobpool fees", "err", err)
+ return
+ }
+ // Reset the price heap for the new set of basefee/blobfee pairs
+ var (
+ // basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), newHead))
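+ // Note: upstream go-ethereum computes the heap's basefee directly from the
+ // new head via eip1559.CalcBaseFee; here the next base fee is estimated
+ // through the dummy engine using the chain's current fee config instead.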
+ basefee = uint256.MustFromBig(baseFee)
+ blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
+ )
+ if newHead.ExcessBlobGas != nil {
+ blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*newHead.ExcessBlobGas))
+ }
+ p.evict.reinit(basefee, blobfee, false)
+
+ basefeeGauge.Update(int64(basefee.Uint64()))
+ blobfeeGauge.Update(int64(blobfee.Uint64()))
+ p.updateStorageMetrics()
+}
+
+// reorg assembles all the transactors and missing transactions between an old
+// and new head to figure out which account's tx set needs to be rechecked and
+// which transactions need to be requeued.
+//
+// The per-transaction block inclusion info is also returned to allow tracking
+// just-included blobs in the limbo by block number.
+func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*types.Transaction, map[common.Hash]uint64) {
+ // If the pool was not yet initialized, don't do anything
+ if oldHead == nil {
+ return nil, nil
+ }
+ // If the reorg is too deep, avoid doing it (will happen during snap sync)
+ oldNum := oldHead.Number.Uint64()
+ newNum := newHead.Number.Uint64()
+
+ if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
+ return nil, nil
+ }
+ // Reorg seems shallow enough to pull in all transactions into memory
+ var (
+ transactors = make(map[common.Address]struct{})
+ discarded = make(map[common.Address][]*types.Transaction)
+ included = make(map[common.Address][]*types.Transaction)
+ inclusions = make(map[common.Hash]uint64)
+
+ rem = p.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
+ add = p.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
+ )
+ if add == nil {
+ // if the new head is nil, it means that something happened between
+ // the firing of newhead-event and _now_: most likely a
+ // reorg caused by sync-reversion or explicit sethead back to an
+ // earlier block.
+ log.Warn("Blobpool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
+ return nil, nil
+ }
+ if rem == nil {
+ // This can happen if a setHead is performed, where we simply discard
+ // the old head from the chain. If that is the case, we don't have the
+ // lost transactions anymore, and there's nothing to add.
+ if newNum >= oldNum {
+ // If we reorged to a same or higher number, then it's not a case
+ // of setHead
+ log.Warn("Blobpool reset with missing old head",
+ "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+ return nil, nil
+ }
+ // If the reorg ended up on a lower number, it's indicative of setHead
+ // being the cause
+ log.Debug("Skipping blobpool reset caused by setHead",
+ "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+ return nil, nil
+ }
+ // Both old and new blocks exist, traverse through the progression chain
+ // and accumulate the transactors and transactions
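+ // The traversal happens in three phases: rewind the old chain down to the new
+ // head's height (collecting discarded txs), rewind the new chain down to the
+ // old head's height (collecting included txs), then walk both sides in
+ // lockstep until a common ancestor is reached.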
+ for rem.NumberU64() > add.NumberU64() {
+ for _, tx := range rem.Transactions() {
+ from, _ := p.signer.Sender(tx)
+
+ discarded[from] = append(discarded[from], tx)
+ transactors[from] = struct{}{}
+ }
+ if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+ log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
+ return nil, nil
+ }
+ }
+ for add.NumberU64() > rem.NumberU64() {
+ for _, tx := range add.Transactions() {
+ from, _ := p.signer.Sender(tx)
+
+ included[from] = append(included[from], tx)
+ inclusions[tx.Hash()] = add.NumberU64()
+ transactors[from] = struct{}{}
+ }
+ if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+ log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
+ return nil, nil
+ }
+ }
+ for rem.Hash() != add.Hash() {
+ for _, tx := range rem.Transactions() {
+ from, _ := p.signer.Sender(tx)
+
+ discarded[from] = append(discarded[from], tx)
+ transactors[from] = struct{}{}
+ }
+ if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+ log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
+ return nil, nil
+ }
+ for _, tx := range add.Transactions() {
+ from, _ := p.signer.Sender(tx)
+
+ included[from] = append(included[from], tx)
+ inclusions[tx.Hash()] = add.NumberU64()
+ transactors[from] = struct{}{}
+ }
+ if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+ log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
+ return nil, nil
+ }
+ }
+ // Generate the set of transactions per address to pull back into the pool,
+ // also updating the rest along the way
+ reinject := make(map[common.Address][]*types.Transaction)
+ for addr := range transactors {
+ // Generate the set that was lost to reinject into the pool
+ lost := make([]*types.Transaction, 0, len(discarded[addr]))
+ for _, tx := range types.TxDifference(discarded[addr], included[addr]) {
+ if p.Filter(tx) {
+ lost = append(lost, tx)
+ }
+ }
+ reinject[addr] = lost
+
+ // Update the set that was already reincluded to track the blocks in limbo
+ for _, tx := range types.TxDifference(included[addr], discarded[addr]) {
+ if p.Filter(tx) {
+ p.limbo.update(tx.Hash(), inclusions[tx.Hash()])
+ }
+ }
+ }
+ return reinject, inclusions
+}
+
+// reinject blindly pushes a transaction previously included in the chain - and
+// just reorged out - into the pool. The transaction is assumed valid (having
+// been in the chain), thus the only validation needed is nonce sorting and
+// overdraft checks after injection.
+//
+// Note, the method will not initialize the eviction cache values as those will
+// be done once for all transactions belonging to an account after all individual
+// transactions are injected back into the pool.
+func (p *BlobPool) reinject(addr common.Address, tx *types.Transaction) {
+ // Retrieve the associated blob from the limbo. Without the blobs, we cannot
+ // add the transaction back into the pool as it is not mineable.
+ blobs, commits, proofs, err := p.limbo.pull(tx.Hash())
+ if err != nil {
+ log.Error("Blobs unavailable, dropping reorged tx", "err", err)
+ return
+ }
+ // Serialize the transaction back into the primary datastore
+ blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+ if err != nil {
+ log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+ return
+ }
+ id, err := p.store.Put(blob)
+ if err != nil {
+ log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
+ return
+ }
+ // Update the indices and metrics
+ meta := newBlobTxMeta(id, p.store.Size(id), tx)
+
+ if _, ok := p.index[addr]; !ok {
+ if err := p.reserve(addr, true); err != nil {
+ log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
+ return
+ }
+ p.index[addr] = []*blobTxMeta{meta}
+ p.spent[addr] = meta.costCap
+ p.evict.Push(addr)
+ } else {
+ p.index[addr] = append(p.index[addr], meta)
+ p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
+ }
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size)
+}
+
+// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
+// to be kept in sync with the main transaction pool's gas requirements.
+func (p *BlobPool) SetGasTip(tip *big.Int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ // Store the new minimum gas tip
+ old := p.gasTip
+ p.gasTip = uint256.MustFromBig(tip)
+
+ // If the min miner fee increased, remove transactions below the new threshold
+ if old == nil || p.gasTip.Cmp(old) > 0 {
+ for addr, txs := range p.index {
+ for i, tx := range txs {
+ if tx.execTipCap.Cmp(p.gasTip) < 0 {
+ // Drop the offending transaction
+ var (
+ ids = []uint64{tx.id}
+ nonces = []uint64{tx.nonce}
+ )
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
+ p.stored -= uint64(tx.size)
+ delete(p.lookup, tx.hash)
+ txs[i] = nil
+
+ // Drop everything afterwards, no gaps allowed
+ for j, tx := range txs[i+1:] {
+ ids = append(ids, tx.id)
+ nonces = append(nonces, tx.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
+ p.stored -= uint64(tx.size)
+ delete(p.lookup, tx.hash)
+ txs[i+1+j] = nil
+ }
+ // Clear out the dropped transactions from the index
+ if i > 0 {
+ p.index[addr] = txs[:i]
+ heap.Fix(p.evict, p.evict.index[addr])
+ } else {
+ delete(p.index, addr)
+ delete(p.spent, addr)
+
+ heap.Remove(p.evict, p.evict.index[addr])
+ p.reserve(addr, false)
+ }
+ // Clear out the transactions from the data store
+ log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete dropped transaction", "id", id, "err", err)
+ }
+ }
+ break
+ }
+ }
+ }
+ }
+ log.Debug("Blobpool tip threshold updated", "tip", tip)
+ pooltipGauge.Update(tip.Int64())
+ p.updateStorageMetrics()
+}
+
+// validateTx checks whether a transaction is valid according to the consensus
+// rules and adheres to some heuristic limits of the local node (price and size).
+func (p *BlobPool) validateTx(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+ // Ensure the transaction adheres to basic pool filters (type, size, tip) and
+ // consensus rules
+ baseOpts := &txpool.ValidationOptions{
+ Config: p.chain.Config(),
+ Accept: 1 << types.BlobTxType,
+ MaxSize: txMaxSize,
+ MinTip: p.gasTip.ToBig(),
+ }
+ if err := txpool.ValidateTransaction(tx, blobs, commits, proofs, p.head, p.signer, baseOpts); err != nil {
+ return err
+ }
+ // Ensure the transaction adheres to the stateful pool filters (nonce, balance)
+ stateOpts := &txpool.ValidationOptionsWithState{
+ State: p.state,
+
+ FirstNonceGap: func(addr common.Address) uint64 {
+ // Nonce gaps are not permitted in the blob pool; the first gap is
+ // the next state nonce shifted by however many transactions are
+ // already pooled for the account.
+ return p.state.GetNonce(addr) + uint64(len(p.index[addr]))
+ },
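+ // UsedAndLeftSlots reports how many transaction slots the sender already
+ // occupies in the pool and how many remain before maxTxsPerAccount is hit.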
+ UsedAndLeftSlots: func(addr common.Address) (int, int) {
+ have := len(p.index[addr])
+ if have >= maxTxsPerAccount {
+ return have, 0
+ }
+ return have, maxTxsPerAccount - have
+ },
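+ // ExistingExpenditure reports the total cost cap already committed by the
+ // sender's pooled transactions (zero if the sender is not yet tracked).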
+ ExistingExpenditure: func(addr common.Address) *big.Int {
+ if spent := p.spent[addr]; spent != nil {
+ return spent.ToBig()
+ }
+ return new(big.Int)
+ },
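+ // ExistingCost returns the cost cap of the already-pooled transaction at the
+ // given nonce, if there is one that the new transaction would replace.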
+ ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
+ next := p.state.GetNonce(addr)
+ if uint64(len(p.index[addr])) > nonce-next {
+ return p.index[addr][int(tx.Nonce()-next)].costCap.ToBig()
+ }
+ return nil
+ },
+ }
+ if err := txpool.ValidateTransactionWithState(tx, p.signer, stateOpts); err != nil {
+ return err
+ }
+ // If the transaction replaces an existing one, ensure that price bumps are
+ // adhered to.
+ var (
+ from, _ = p.signer.Sender(tx) // already validated above
+ next = p.state.GetNonce(from)
+ )
+ if uint64(len(p.index[from])) > tx.Nonce()-next {
+ // Account can support the replacement, but the price bump must also be met
+ prev := p.index[from][int(tx.Nonce()-next)]
+ switch {
+ case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
+ return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
+ case tx.GasTipCapIntCmp(prev.execTipCap.ToBig()) <= 0:
+ return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap)
+ case tx.BlobGasFeeCapIntCmp(prev.blobFeeCap.ToBig()) <= 0:
+ return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap)
+ }
+ var (
+ multiplier = uint256.NewInt(100 + p.config.PriceBump)
+ onehundred = uint256.NewInt(100)
+
+ minGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execFeeCap), onehundred)
+ minGasTipCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execTipCap), onehundred)
+ minBlobGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.blobFeeCap), onehundred)
+ )
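+ // For example, with a PriceBump of 25, each of the replacement's three caps
+ // must be at least 125% of the corresponding cap of the queued transaction.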
+ switch {
+ case tx.GasFeeCapIntCmp(minGasFeeCap.ToBig()) < 0:
+ return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap, p.config.PriceBump)
+ case tx.GasTipCapIntCmp(minGasTipCap.ToBig()) < 0:
+ return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap, p.config.PriceBump)
+ case tx.BlobGasFeeCapIntCmp(minBlobGasFeeCap.ToBig()) < 0:
+ return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap, p.config.PriceBump)
+ }
+ }
+ return nil
+}
+
+// Has returns an indicator whether the subpool has a transaction cached with the
+// given hash.
+func (p *BlobPool) Has(hash common.Hash) bool {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ _, ok := p.lookup[hash]
+ return ok
+}
+
+func (p *BlobPool) HasLocal(hash common.Hash) bool {
+ // TODO: add support to check local transactions
+ return p.Has(hash)
+}
+
+// Get returns a transaction if it is contained in the pool, or nil otherwise.
+func (p *BlobPool) Get(hash common.Hash) *txpool.Transaction {
+ // Track the amount of time waiting to retrieve a fully resolved blob tx from
+ // the pool and the amount of time actually spent on pulling the data from disk.
+ getStart := time.Now()
+ p.lock.RLock()
+ getwaitHist.Update(time.Since(getStart).Nanoseconds())
+ defer p.lock.RUnlock()
+
+ defer func(start time.Time) {
+ gettimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ // Pull the blob from disk and return an assembled response
+ id, ok := p.lookup[hash]
+ if !ok {
+ return nil
+ }
+ data, err := p.store.Get(id)
+ if err != nil {
+ log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
+ return nil
+ }
+ item := new(blobTx)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+ log.Error("Blobs corrupted for traced transaction", "hash", hash, "id", id, "err", err)
+ return nil
+ }
+ return &txpool.Transaction{
+ Tx: item.Tx,
+ BlobTxBlobs: item.Blobs,
+ BlobTxCommits: item.Commits,
+ BlobTxProofs: item.Proofs,
+ }
+}
+
+// Add inserts a set of blob transactions into the pool if they pass validation (both
+// consensus validity and pool restrictions).
+func (p *BlobPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
+ errs := make([]error, len(txs))
+ for i, tx := range txs {
+ errs[i] = p.add(tx.Tx, tx.BlobTxBlobs, tx.BlobTxCommits, tx.BlobTxProofs)
+ }
+ return errs
+}
+
+// add inserts a new blob transaction into the pool if it passes validation (both
+// consensus validity and pool restrictions).
+func (p *BlobPool) add(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) (err error) {
+ // The blob pool blocks on adding a transaction. This is because blob txs are
+ // only ever pulled from the network, so this method will act as the overload
+ // protection for fetches.
+ waitStart := time.Now()
+ p.lock.Lock()
+ addwaitHist.Update(time.Since(waitStart).Nanoseconds())
+ defer p.lock.Unlock()
+
+ defer func(start time.Time) {
+ addtimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ // Ensure the transaction is valid from all perspectives
+ if err := p.validateTx(tx, blobs, commits, proofs); err != nil {
+ log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
+ return err
+ }
+ // If the address is not yet known, request exclusivity to track the account
+ // only by this subpool until all transactions are evicted
+ from, _ := types.Sender(p.signer, tx) // already validated above
+ if _, ok := p.index[from]; !ok {
+ if err := p.reserve(from, true); err != nil {
+ return err
+ }
+ defer func() {
+ // If the transaction is rejected by some post-validation check, remove
+ // the lock on the reservation set.
+ //
+ // Note, `err` here is the named error return, which will be initialized
+ // by a return statement before running deferred methods. Take care with
+ // removing or subscoping err as it will break this clause.
+ if err != nil {
+ p.reserve(from, false)
+ }
+ }()
+ }
+ // Transaction permitted into the pool from a nonce and cost perspective,
+ // insert it into the database and update the indices
+ blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+ if err != nil {
+ log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+ return err
+ }
+ id, err := p.store.Put(blob)
+ if err != nil {
+ return err
+ }
+ meta := newBlobTxMeta(id, p.store.Size(id), tx)
+
+ var (
+ next = p.state.GetNonce(from)
+ offset = int(tx.Nonce() - next)
+ newacc = false
+ )
+ var oldEvictionExecFeeJumps, oldEvictionBlobFeeJumps float64
+ if txs, ok := p.index[from]; ok {
+ oldEvictionExecFeeJumps = txs[len(txs)-1].evictionExecFeeJumps
+ oldEvictionBlobFeeJumps = txs[len(txs)-1].evictionBlobFeeJumps
+ }
+ if len(p.index[from]) > offset {
+ // Transaction replaces a previously queued one
+ prev := p.index[from][offset]
+ if err := p.store.Delete(prev.id); err != nil {
+ // Shitty situation, but try to recover gracefully instead of going boom
+ log.Error("Failed to delete replaced transaction", "id", prev.id, "err", err)
+ }
+ // Update the transaction index
+ p.index[from][offset] = meta
+ p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap)
+ p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+
+ delete(p.lookup, prev.hash)
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size) - uint64(prev.size)
+ } else {
+ // Transaction extends previously scheduled ones
+ p.index[from] = append(p.index[from], meta)
+ if _, ok := p.spent[from]; !ok {
+ p.spent[from] = new(uint256.Int)
+ newacc = true
+ }
+ p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size)
+ }
+ // Recompute the rolling eviction fields. In case of a replacement, this will
+ // recompute all subsequent fields. In case of an append, this will only do
+ // the fresh calculation.
+ txs := p.index[from]
+
+ for i := offset; i < len(txs); i++ {
+ // The first transaction will always use itself
+ if i == 0 {
+ txs[0].evictionExecTip = txs[0].execTipCap
+ txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
+ txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
+
+ continue
+ }
+ // Subsequent transactions will use a rolling calculation
+ txs[i].evictionExecTip = txs[i-1].evictionExecTip
+ if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
+ txs[i].evictionExecTip = txs[i].execTipCap
+ }
+ txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
+ if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
+ txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
+ }
+ txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
+ if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
+ txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ }
+ // Update the eviction heap with the new information:
+ // - If the transaction is from a new account, add it to the heap
+ // - If the account had a singleton tx replaced, update the heap (new price caps)
+ // - If the account has a transaction replaced or appended, update the heap if significantly changed
+ switch {
+ case newacc:
+ heap.Push(p.evict, from)
+
+ case len(txs) == 1: // 1 tx and not a new acc, must be replacement
+ heap.Fix(p.evict, p.evict.index[from])
+
+ default: // replacement or new append
+ evictionExecFeeDiff := oldEvictionExecFeeJumps - txs[len(txs)-1].evictionExecFeeJumps
+ evictionBlobFeeDiff := oldEvictionBlobFeeJumps - txs[len(txs)-1].evictionBlobFeeJumps
+
+ if math.Abs(evictionExecFeeDiff) > 0.001 || math.Abs(evictionBlobFeeDiff) > 0.001 { // need math.Abs, can go up and down
+ heap.Fix(p.evict, p.evict.index[from])
+ }
+ }
+ // If the pool went over the allowed data limit, evict transactions until
+ // we're again below the threshold
+ for p.stored > p.config.Datacap {
+ p.drop()
+ }
+ p.updateStorageMetrics()
+
+ return nil
+}
+
+// drop removes the worst transaction from the pool. It is primarily used when a
+// freshly added transaction overflows the pool and needs to evict something. The
+// method is also called on startup if the user resizes their storage; it might be
+// an expensive run, but it should be fine-ish.
+func (p *BlobPool) drop() {
+ // Peek at the account with the worst transaction set to evict from (Go's heap
+ // stores the minimum at index zero of the heap slice) and retrieve its last
+ // transaction.
+ var (
+ from = p.evict.addrs[0] // cannot call drop on empty pool
+
+ txs = p.index[from]
+ drop = txs[len(txs)-1]
+ last = len(txs) == 1
+ )
+ // Remove the transaction from the pool's index
+ if last {
+ delete(p.index, from)
+ delete(p.spent, from)
+ p.reserve(from, false)
+ } else {
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ p.index[from] = txs
+ p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
+ }
+ p.stored -= uint64(drop.size)
+ delete(p.lookup, drop.hash)
+
+ // Remove the transaction from the pool's eviction heap:
+ // - If the entire account was dropped, pop off the address
+ // - Otherwise, if the new tail has better eviction caps, fix the heap
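+ //
+ // Since the eviction thresholds are rolling minimums along increasing nonces,
+ // removing the tail can only leave equal or better values on the new tail, so
+ // the differences computed below are guaranteed to be non-negative.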
+ if last {
+ heap.Pop(p.evict)
+ } else {
+ tail := txs[len(txs)-1] // new tail, surely exists
+
+ evictionExecFeeDiff := tail.evictionExecFeeJumps - drop.evictionExecFeeJumps
+ evictionBlobFeeDiff := tail.evictionBlobFeeJumps - drop.evictionBlobFeeJumps
+
+ if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
+ heap.Fix(p.evict, 0)
+ }
+ }
+ // Remove the transaction from the data store
+ log.Warn("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+ if err := p.store.Delete(drop.id); err != nil {
+ log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
+ }
+}
+
+// Pending retrieves all currently processable transactions, grouped by origin
+// account and sorted by nonce.
+func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+ // Track the amount of time waiting to retrieve the list of pending blob txs
+ // from the pool and the amount of time actually spent on assembling the data.
+ // The latter will be pretty much moot, but we've kept it to stay symmetric
+ // across all user operations.
+ pendStart := time.Now()
+ p.lock.RLock()
+ pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
+ defer p.lock.RUnlock()
+
+ defer func(start time.Time) {
+ pendtimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ pending := make(map[common.Address][]*txpool.LazyTransaction)
+ for addr, txs := range p.index {
+ var lazies []*txpool.LazyTransaction
+ for _, tx := range txs {
+ lazies = append(lazies, &txpool.LazyTransaction{
+ Pool: p,
+ Hash: tx.hash,
+ Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
+ GasFeeCap: tx.execFeeCap.ToBig(),
+ GasTipCap: tx.execTipCap.ToBig(),
+ })
+ }
+ if len(lazies) > 0 {
+ pending[addr] = lazies
+ }
+ }
+ return pending
+}
+
+func (p *BlobPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*txpool.LazyTransaction {
+ return p.Pending(enforceTips)
+}
+
+// PendingFrom returns the same set of transactions that would be returned from Pending restricted to only
+// transactions from [addrs].
+func (p *BlobPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+ // Track the amount of time waiting to retrieve the list of pending blob txs
+ // from the pool and the amount of time actually spent on assembling the data.
+ // The latter will be pretty much moot, but we've kept it to stay symmetric
+ // across all user operations.
+ pendStart := time.Now()
+ p.lock.RLock()
+ pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
+ defer p.lock.RUnlock()
+
+ defer func(start time.Time) {
+ pendtimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ pending := make(map[common.Address][]*txpool.LazyTransaction)
+ for _, addr := range addrs {
+ txs, ok := p.index[addr]
+ if !ok {
+ continue
+ }
+ var lazies []*txpool.LazyTransaction
+ for _, tx := range txs {
+ lazies = append(lazies, &txpool.LazyTransaction{
+ Pool: p,
+ Hash: tx.hash,
+ Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
+ GasFeeCap: tx.execFeeCap.ToBig(),
+ GasTipCap: tx.execTipCap.ToBig(),
+ })
+ }
+ if len(lazies) > 0 {
+ pending[addr] = lazies
+ }
+ }
+ return pending
+}
+
+// IteratePending iterates over the transactions currently tracked by the pool
+// until [f] returns false. The caller must not modify [tx]. Returns false if
+// the iteration was interrupted.
+func (pool *BlobPool) IteratePending(f func(tx *txpool.Transaction) bool) bool {
+ pool.lock.RLock()
+ defer pool.lock.RUnlock()
+
+ for _, list := range pool.index {
+ for _, txId := range list {
+ tx := pool.Get(txId.hash)
+ if tx == nil {
+ continue
+ }
+ if !f(tx) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (p *BlobPool) SetMinFee(minFee *big.Int) {}
+
+// updateStorageMetrics retrieves a bunch of stats from the data store and pushes
+// them out as metrics.
+func (p *BlobPool) updateStorageMetrics() {
+ stats := p.store.Infos()
+
+ var (
+ dataused uint64
+ datareal uint64
+ slotused uint64
+
+ oversizedDataused uint64
+ oversizedDatagaps uint64
+ oversizedSlotused uint64
+ oversizedSlotgaps uint64
+ )
+ for _, shelf := range stats.Shelves {
+ slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+ slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+ dataused += slotDataused
+ datareal += slotDataused + slotDatagaps
+ slotused += shelf.FilledSlots
+
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+
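+ // Slots larger than the biggest useful blob transaction (more than
+ // maxBlobsPerTransaction blobs per slot) are expected to stay unused, so
+ // track them separately to surface any unexpected usage.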
+ if shelf.SlotSize/blobSize > maxBlobsPerTransaction {
+ oversizedDataused += slotDataused
+ oversizedDatagaps += slotDatagaps
+ oversizedSlotused += shelf.FilledSlots
+ oversizedSlotgaps += shelf.GappedSlots
+ }
+ }
+ datausedGauge.Update(int64(dataused))
+ datarealGauge.Update(int64(datareal))
+ slotusedGauge.Update(int64(slotused))
+
+ oversizedDatausedGauge.Update(int64(oversizedDataused))
+ oversizedDatagapsGauge.Update(int64(oversizedDatagaps))
+ oversizedSlotusedGauge.Update(int64(oversizedSlotused))
+ oversizedSlotgapsGauge.Update(int64(oversizedSlotgaps))
+
+ p.updateLimboMetrics()
+}
+
+// updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
+// them out as metrics.
+func (p *BlobPool) updateLimboMetrics() {
+ stats := p.limbo.store.Infos()
+
+ var (
+ dataused uint64
+ datareal uint64
+ slotused uint64
+ )
+ for _, shelf := range stats.Shelves {
+ slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+ slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+ dataused += slotDataused
+ datareal += slotDataused + slotDatagaps
+ slotused += shelf.FilledSlots
+
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+ }
+ limboDatausedGauge.Update(int64(dataused))
+ limboDatarealGauge.Update(int64(datareal))
+ limboSlotusedGauge.Update(int64(slotused))
+}
+
+// SubscribeTransactions registers a subscription of NewTxsEvent and
+// starts sending events to the given channel.
+func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
+ return p.eventScope.Track(p.eventFeed.Subscribe(ch))
+}
+
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
+func (p *BlobPool) Nonce(addr common.Address) uint64 {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if txs, ok := p.index[addr]; ok {
+ return txs[len(txs)-1].nonce + 1
+ }
+ return p.state.GetNonce(addr)
+}
+
+// Stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (p *BlobPool) Stats() (int, int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ var pending int
+ for _, txs := range p.index {
+ pending += len(txs)
+ }
+ return pending, 0 // No non-executable txs in the blob pool
+}
+
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and sorted by nonce.
+//
+// For the blob pool, this method will return nothing for now.
+// TODO(karalabe): Abstract out the returned metadata.
+func (p *BlobPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
+ return make(map[common.Address][]*types.Transaction), make(map[common.Address][]*types.Transaction)
+}
+
+// ContentFrom retrieves the data content of the transaction pool, returning the
+// pending as well as queued transactions of this address, grouped by nonce.
+//
+// For the blob pool, this method will return nothing for now.
+// TODO(karalabe): Abstract out the returned metadata.
+func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
+ return []*types.Transaction{}, []*types.Transaction{}
+}
+
+// Locals retrieves the accounts currently considered local by the pool.
+//
+// There is no notion of local accounts in the blob pool.
+func (p *BlobPool) Locals() []common.Address {
+ return []common.Address{}
+}
+
+// Status returns the known status (unknown/pending/queued) of a transaction
+// identified by its hash.
+func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
+ if p.Has(hash) {
+ return txpool.TxStatusPending
+ }
+ return txpool.TxStatusUnknown
+}
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
new file mode 100644
index 0000000000..07422590c8
--- /dev/null
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -0,0 +1,1273 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "errors"
+ "math"
+ "math/big"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ava-labs/subnet-evm/commontype"
+ "github.com/ava-labs/subnet-evm/consensus/dummy"
+ "github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
+ "github.com/ava-labs/subnet-evm/core"
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+ "github.com/holiman/uint256"
+)
+
+var (
+ emptyBlob = kzg4844.Blob{}
+ emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+ emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+ emptyBlobVHash = blobHash(emptyBlobCommit)
+)
+
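+// blobHash computes the versioned hash of a KZG commitment for the test blobs:
+// the version byte followed by the tail of the commitment's SHA-256 hash.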
+func blobHash(commit kzg4844.Commitment) common.Hash {
+ hasher := sha256.New()
+ hasher.Write(commit[:])
+ hash := hasher.Sum(nil)
+
+ var vhash common.Hash
+ vhash[0] = params.BlobTxHashVersion
+ copy(vhash[1:], hash[1:])
+
+ return vhash
+}
+
+// Chain configuration with Cancun enabled.
+//
+// TODO(karalabe): replace with params.MainnetChainConfig after Cancun.
+var testChainConfig *params.ChainConfig
+
+func init() {
+ testChainConfig = new(params.ChainConfig)
+ *testChainConfig = *params.TestChainConfig
+ testChainConfig.FeeConfig.MinBaseFee = new(big.Int).SetUint64(1)
+
+ testChainConfig.CancunTime = new(uint64)
+ *testChainConfig.CancunTime = uint64(time.Now().Unix())
+}
+
+// testBlockChain is a mock of the live chain for testing the pool.
+type testBlockChain struct {
+ config *params.ChainConfig
+ basefee *uint256.Int
+ blobfee *uint256.Int
+ statedb *state.StateDB
+}
+
+func (bc *testBlockChain) Config() *params.ChainConfig {
+ return bc.config
+}
+
+func (bc *testBlockChain) CurrentBlock() *types.Header {
+ // Yolo, life is too short to invert dummy.CalcBaseFee and eip4844.CalcBlobFee,
+ // just binary search them.
+
+ // A base fee of 5714 ETH per gas, multiplied by the 21000 base gas cost,
+ // exceeds the total ether in existence on mainnet; use that as a cap for the
+ // tests.
+ var (
+ blockNumber = big.NewInt(1) // Note: London fork is not based on number in Avalanche
+ blockTime = *bc.config.CancunTime + 1
+ gasLimit = uint64(30_000_000)
+ )
+ lo := new(big.Int)
+ hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
+
+ for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+ mid := new(big.Int).Add(lo, hi)
+ mid.Div(mid, big.NewInt(2))
+ parent := &types.Header{
+ Number: blockNumber,
+ Time: blockTime,
+ GasLimit: gasLimit,
+ GasUsed: 0,
+ BaseFee: mid,
+ Extra: make([]byte, params.DynamicFeeExtraDataSize),
+ }
+ _, baseFee, err := dummy.CalcBaseFee(
+ bc.config, bc.config.FeeConfig, parent, blockTime,
+ )
+ if err != nil {
+ panic(err)
+ }
+ if baseFee.Cmp(bc.basefee.ToBig()) > 0 {
+ hi = mid
+ } else {
+ lo = mid
+ }
+ }
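+ // lo now holds the largest parent base fee whose computed next base fee does
+ // not exceed the target (assuming the calculation is monotonic in BaseFee).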
+ baseFee := lo
+
+ // The excess blob gas at 2^27 translates into a blob fee higher than the total
+ // mainnet ether in existence; use that as a cap for the tests.
+ lo = new(big.Int)
+ hi = new(big.Int).Exp(big.NewInt(2), big.NewInt(27), nil)
+
+ for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+ mid := new(big.Int).Add(lo, hi)
+ mid.Div(mid, big.NewInt(2))
+
+ if eip4844.CalcBlobFee(mid.Uint64()).Cmp(bc.blobfee.ToBig()) > 0 {
+ hi = mid
+ } else {
+ lo = mid
+ }
+ }
+ excessBlobGas := lo.Uint64()
+
+ return &types.Header{
+ Number: blockNumber,
+ Time: blockTime,
+ GasLimit: gasLimit,
+ BaseFee: baseFee,
+ ExcessBlobGas: &excessBlobGas,
+ Extra: make([]byte, params.DynamicFeeExtraDataSize),
+ }
+}
+
+func (bc *testBlockChain) CurrentFinalBlock() *types.Header {
+ return &types.Header{
+ Number: big.NewInt(0),
+ }
+}
+
+func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+ return nil
+}
+
+func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
+ return bc.statedb, nil
+}
+
+func (bc *testBlockChain) GetFeeConfigAt(header *types.Header) (commontype.FeeConfig, *big.Int, error) {
+ return bc.config.FeeConfig, nil, nil
+}
+
+// makeAddressReserver is a utility method to sanity check that accounts are
+// properly reserved by the blobpool (no duplicate reserves or unreserves).
+func makeAddressReserver() txpool.AddressReserver {
+ var (
+ reserved = make(map[common.Address]struct{})
+ lock sync.Mutex
+ )
+ return func(addr common.Address, reserve bool) error {
+ lock.Lock()
+ defer lock.Unlock()
+
+ _, exists := reserved[addr]
+ if reserve {
+ if exists {
+ panic("already reserved")
+ }
+ reserved[addr] = struct{}{}
+ return nil
+ }
+ if !exists {
+ panic("not reserved")
+ }
+ delete(reserved, addr)
+ return nil
+ }
+}
+
+// makeTx is a utility method to construct a random blob transaction and sign it
+// with a valid key, only setting the interesting fields from the perspective of
+// the blob pool.
+func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
+ tx, _ := types.SignNewTx(key, types.LatestSigner(testChainConfig), makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap))
+ return tx
+}
+
+// makeUnsignedTx is a utility method to construct a random blob transaction
+// without signing it.
+func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
+ return &types.BlobTx{
+ ChainID: uint256.MustFromBig(testChainConfig.ChainID),
+ Nonce: nonce,
+ GasTipCap: uint256.NewInt(gasTipCap),
+ GasFeeCap: uint256.NewInt(gasFeeCap),
+ Gas: 21000,
+ BlobFeeCap: uint256.NewInt(blobFeeCap),
+ BlobHashes: []common.Hash{emptyBlobVHash},
+ Value: uint256.NewInt(100),
+ }
+}
+
+// verifyPoolInternals iterates over all the transactions in the pool and checks
+// that sort order, calculated fields and cumulated fields are correct.
+func verifyPoolInternals(t *testing.T, pool *BlobPool) {
+ // Mark this method as a helper to remove from stack traces
+ t.Helper()
+
+ // Verify that all items in the index are present in the lookup and nothing more
+ seen := make(map[common.Hash]struct{})
+ for addr, txs := range pool.index {
+ for _, tx := range txs {
+ if _, ok := seen[tx.hash]; ok {
+ t.Errorf("duplicate hash #%x in transaction index: address %s, nonce %d", tx.hash, addr, tx.nonce)
+ }
+ seen[tx.hash] = struct{}{}
+ }
+ }
+ for hash, id := range pool.lookup {
+ if _, ok := seen[hash]; !ok {
+ t.Errorf("lookup entry missing from transaction index: hash #%x, id %d", hash, id)
+ }
+ delete(seen, hash)
+ }
+ for hash := range seen {
+ t.Errorf("indexed transaction hash #%x missing from lookup table", hash)
+ }
+ // Verify that transactions are sorted per account and contain no nonce gaps
+ for addr, txs := range pool.index {
+ for i := 1; i < len(txs); i++ {
+ if txs[i].nonce != txs[i-1].nonce+1 {
+ t.Errorf("addr %v, tx %d nonce mismatch: have %d, want %d", addr, i, txs[i].nonce, txs[i-1].nonce+1)
+ }
+ }
+ }
+ // Verify that calculated eviction thresholds are correct
+ for addr, txs := range pool.index {
+ if !txs[0].evictionExecTip.Eq(txs[0].execTipCap) {
+ t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, 0, txs[0].evictionExecTip, txs[0].execTipCap)
+ }
+ if math.Abs(txs[0].evictionExecFeeJumps-txs[0].basefeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionExecFeeJumps, txs[0].basefeeJumps)
+ }
+ if math.Abs(txs[0].evictionBlobFeeJumps-txs[0].blobfeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionBlobFeeJumps, txs[0].blobfeeJumps)
+ }
+ for i := 1; i < len(txs); i++ {
+ wantExecTip := txs[i-1].evictionExecTip
+ if wantExecTip.Gt(txs[i].execTipCap) {
+ wantExecTip = txs[i].execTipCap
+ }
+ if !txs[i].evictionExecTip.Eq(wantExecTip) {
+ t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
+ }
+
+ wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
+ if wantExecFeeJumps > txs[i].basefeeJumps {
+ wantExecFeeJumps = txs[i].basefeeJumps
+ }
+ if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
+ }
+
+ wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
+ if wantBlobFeeJumps > txs[i].blobfeeJumps {
+ wantBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
+ }
+ }
+ }
+ // Verify that account balance accumulations are correct
+ for addr, txs := range pool.index {
+ spent := new(uint256.Int)
+ for _, tx := range txs {
+ spent.Add(spent, tx.costCap)
+ }
+ if !pool.spent[addr].Eq(spent) {
+ t.Errorf("addr %v expenditure mismatch: have %d, want %d", addr, pool.spent[addr], spent)
+ }
+ }
+ // Verify that pool storage size is correct
+ var stored uint64
+ for _, txs := range pool.index {
+ for _, tx := range txs {
+ stored += uint64(tx.size)
+ }
+ }
+ if pool.stored != stored {
+ t.Errorf("pool storage mismatch: have %d, want %d", pool.stored, stored)
+ }
+ // Verify the price heap internals
+ verifyHeapInternals(t, pool.evict)
+}
+
+// Tests that transactions can be loaded from disk on startup and that they are
+// correctly discarded if invalid.
+//
+// - 1. A transaction that cannot be decoded must be dropped
+// - 2. A transaction that cannot be recovered (bad signature) must be dropped
+// - 3. All transactions after a nonce gap must be dropped
+// - 4. All transactions after an underpriced one (including it) must be dropped
+func TestOpenDrops(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a malformed transaction to verify that decoding errors (or format
+ // changes) are handled gracefully (case 1)
+ malformed, _ := store.Put([]byte("this is a badly encoded transaction"))
+
+ // Insert a transaction with a bad signature to verify that stale junk after
+ // potential hard-forks can get evicted (case 2)
+ tx := types.NewTx(&types.BlobTx{
+ ChainID: uint256.MustFromBig(testChainConfig.ChainID),
+ GasTipCap: new(uint256.Int),
+ GasFeeCap: new(uint256.Int),
+ Gas: 0,
+ Value: new(uint256.Int),
+ Data: nil,
+ BlobFeeCap: new(uint256.Int),
+ V: new(uint256.Int),
+ R: new(uint256.Int),
+ S: new(uint256.Int),
+ })
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ badsig, _ := store.Put(blob)
+
+ // Insert a sequence of transactions with a nonce gap in between to verify
+ // that anything gapped will get evicted (case 3)
+ var (
+ gapper, _ = crypto.GenerateKey()
+
+ valids = make(map[uint64]struct{})
+ gapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5
+ tx := makeTx(nonce, 1, 1, 1, gapper)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce < 2 {
+ valids[id] = struct{}{}
+ } else {
+ gapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with a gapped starting nonce to verify
+ // that the entire set will get dropped.
+ var (
+ dangler, _ = crypto.GenerateKey()
+ dangling = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
+ tx := makeTx(nonce, 1, 1, 1, dangler)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ dangling[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with already passed nonces to verify
+ // that the entire set will get dropped.
+ var (
+ filler, _ = crypto.GenerateKey()
+ filled = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
+ tx := makeTx(nonce, 1, 1, 1, filler)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ filled[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with partially passed nonces to verify
+ // that the included part of the set will get dropped.
+ var (
+ overlapper, _ = crypto.GenerateKey()
+ overlapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
+ tx := makeTx(nonce, 1, 1, 1, overlapper)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce >= 2 {
+ valids[id] = struct{}{}
+ } else {
+ overlapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with an underpriced first to verify that
+ // the entire set will get dropped (case 4).
+ var (
+ underpayer, _ = crypto.GenerateKey()
+ underpaid = make(map[uint64]struct{})
+ )
+ for i := 0; i < 5; i++ { // make #0 underpriced
+ var tx *types.Transaction
+ if i == 0 {
+ tx = makeTx(uint64(i), 0, 0, 0, underpayer)
+ } else {
+ tx = makeTx(uint64(i), 1, 1, 1, underpayer)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ underpaid[id] = struct{}{}
+ }
+
+ // Insert a sequence of transactions with an underpriced in between to verify
+ // that it and anything newly gapped will get evicted (case 4).
+ var (
+ outpricer, _ = crypto.GenerateKey()
+ outpriced = make(map[uint64]struct{})
+ )
+ for i := 0; i < 5; i++ { // make #2 underpriced
+ var tx *types.Transaction
+ if i == 2 {
+ tx = makeTx(uint64(i), 0, 0, 0, outpricer)
+ } else {
+ tx = makeTx(uint64(i), 1, 1, 1, outpricer)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if i < 2 {
+ valids[id] = struct{}{}
+ } else {
+ outpriced[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions fully overdrafted to verify that the
+ // entire set will get invalidated.
+ var (
+ exceeder, _ = crypto.GenerateKey()
+ exceeded = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 0 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 0 {
+ tx = makeTx(nonce, 1, 100, 1, exceeder)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, exceeder)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ exceeded[id] = struct{}{}
+ }
+ // Insert a sequence of transactions partially overdrafted to verify that part
+ // of the set will get invalidated.
+ var (
+ overdrafter, _ = crypto.GenerateKey()
+ overdrafted = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 1 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 1 {
+ tx = makeTx(nonce, 1, 100, 1, overdrafter)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, overdrafter)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce < 1 {
+ valids[id] = struct{}{}
+ } else {
+ overdrafted[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions overflowing the account cap to verify
+ // that part of the set will get invalidated.
+ var (
+ overcapper, _ = crypto.GenerateKey()
+ overcapped = make(map[uint64]struct{})
+ )
+ for nonce := uint64(0); nonce < maxTxsPerAccount+3; nonce++ {
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: makeTx(nonce, 1, 1, 1, overcapper)})
+
+ id, _ := store.Put(blob)
+ if nonce < maxTxsPerAccount {
+ valids[id] = struct{}{}
+ } else {
+ overcapped[id] = struct{}{}
+ }
+ }
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), big.NewInt(1000000))
+ statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3)
+ statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), big.NewInt(1000000))
+ statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2)
+ statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), big.NewInt(10000000))
+ statedb.Commit(0, true, false)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(uint64(params.TestInitialBaseFee)),
+ blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the malformed (case 1), badly signed (case 2) and gapped (case
+ // 3) txs have been deleted from the pool
+ alive := make(map[uint64]struct{})
+ for _, txs := range pool.index {
+ for _, tx := range txs {
+ switch tx.id {
+ case malformed:
+ t.Errorf("malformed RLP transaction remained in storage")
+ case badsig:
+ t.Errorf("invalidly signed transaction remained in storage")
+ default:
+ if _, ok := dangling[tx.id]; ok {
+ t.Errorf("dangling transaction remained in storage: %d", tx.id)
+ } else if _, ok := filled[tx.id]; ok {
+ t.Errorf("filled transaction remained in storage: %d", tx.id)
+ } else if _, ok := overlapped[tx.id]; ok {
+ t.Errorf("overlapped transaction remained in storage: %d", tx.id)
+ } else if _, ok := gapped[tx.id]; ok {
+ t.Errorf("gapped transaction remained in storage: %d", tx.id)
+ } else if _, ok := underpaid[tx.id]; ok {
+ t.Errorf("underpaid transaction remained in storage: %d", tx.id)
+ } else if _, ok := outpriced[tx.id]; ok {
+ t.Errorf("outpriced transaction remained in storage: %d", tx.id)
+ } else if _, ok := exceeded[tx.id]; ok {
+ t.Errorf("fully overdrafted transaction remained in storage: %d", tx.id)
+ } else if _, ok := overdrafted[tx.id]; ok {
+ t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
+ } else if _, ok := overcapped[tx.id]; ok {
+ t.Errorf("overcapped transaction remained in storage: %d", tx.id)
+ } else {
+ alive[tx.id] = struct{}{}
+ }
+ }
+ }
+ }
+ // Verify that the rest of the transactions remained alive
+ if len(alive) != len(valids) {
+ t.Errorf("valid transaction count mismatch: have %d, want %d", len(alive), len(valids))
+ }
+ for id := range alive {
+ if _, ok := valids[id]; !ok {
+ t.Errorf("extra transaction %d", id)
+ }
+ }
+ for id := range valids {
+ if _, ok := alive[id]; !ok {
+ t.Errorf("missing transaction %d", id)
+ }
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks; it actually validates the verifier
+ // against the already hard-coded checks above.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that transactions loaded from disk are indexed correctly.
+//
+// - 1. Transactions must be grouped by sender and sorted by nonce
+// - 2. Eviction thresholds are calculated correctly for the sequences
+// - 3. The balance usage of an account is the total across all its transactions
+func TestOpenIndex(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a sequence of transactions with varying price points to check that
+ // the cumulative minimums are maintained.
+ var (
+ key, _ = crypto.GenerateKey()
+ addr = crypto.PubkeyToAddress(key.PublicKey)
+
+ txExecTipCaps = []uint64{10, 25, 5, 7, 1, 100}
+ txExecFeeCaps = []uint64{100, 90, 200, 10, 80, 300}
+ txBlobFeeCaps = []uint64{55, 66, 77, 33, 22, 11}
+
+ //basefeeJumps = []float64{39.098, 38.204, 44.983, 19.549, 37.204, 48.426} // log 1.125 (exec fee cap)
+ //blobfeeJumps = []float64{34.023, 35.570, 36.879, 29.686, 26.243, 20.358} // log 1.125 (blob fee cap)
+
+ evictExecTipCaps = []uint64{10, 10, 5, 5, 1, 1}
+ evictExecFeeJumps = []float64{39.098, 38.204, 38.204, 19.549, 19.549, 19.549} // min(log 1.125 (exec fee cap))
+ evictBlobFeeJumps = []float64{34.023, 34.023, 34.023, 29.686, 26.243, 20.358} // min(log 1.125 (blob fee cap))
+
+ totalSpent = uint256.NewInt(21000*(100+90+200+10+80+300) + blobSize*(55+66+77+33+22+11) + 100*6) // 21000 gas x price + 128KB x blobprice + value
+ )
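+ // Sanity check on the hard-coded eviction jumps above, assuming dynamicFeeJumps
+ // is the base-1.125 logarithm of the fee cap (as the commented-out slices
+ // suggest): log1.125(100) = ln(100)/ln(1.125) ≈ 39.098 and log1.125(300) ≈ 48.426,
+ // matching the first and last of the basefee jump values noted above.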
+ for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load
+ tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ store.Put(blob)
+ }
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr, big.NewInt(1_000_000_000))
+ statedb.Commit(0, true, false)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(uint64(params.TestInitialBaseFee)),
+ blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the transactions have been sorted by nonce (case 1)
+ for i := 0; i < len(pool.index[addr]); i++ {
+ if pool.index[addr][i].nonce != uint64(i) {
+ t.Errorf("tx %d nonce mismatch: have %d, want %d", i, pool.index[addr][i].nonce, uint64(i))
+ }
+ }
+ // Verify that the cumulative fee minimums have been correctly calculated (case 2)
+ for i, cap := range evictExecTipCaps {
+ if !pool.index[addr][i].evictionExecTip.Eq(uint256.NewInt(cap)) {
+ t.Errorf("eviction tip cap %d mismatch: have %d, want %d", i, pool.index[addr][i].evictionExecTip, cap)
+ }
+ }
+ for i, jumps := range evictExecFeeJumps {
+ if math.Abs(pool.index[addr][i].evictionExecFeeJumps-jumps) > 0.001 {
+ t.Errorf("eviction fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionExecFeeJumps, jumps)
+ }
+ }
+ for i, jumps := range evictBlobFeeJumps {
+ if math.Abs(pool.index[addr][i].evictionBlobFeeJumps-jumps) > 0.001 {
+ t.Errorf("eviction blob fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionBlobFeeJumps, jumps)
+ }
+ }
+ // Verify that the balance usage has been correctly calculated (case 3)
+ if !pool.spent[addr].Eq(totalSpent) {
+ t.Errorf("expenditure mismatch: have %d, want %d", pool.spent[addr], totalSpent)
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks; it actually validates the verifier
+ // against the already hard-coded checks above.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that after indexing all the loaded transactions from disk, a price heap
+// is correctly constructed based on the head basefee and blobfee.
+func TestOpenHeap(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a few transactions from a few accounts. To remove randomness from
+ // the heap initialization, use a deterministic account/tx/priority ordering.
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+ )
+ if bytes.Compare(addr1[:], addr2[:]) > 0 {
+ key1, addr1, key2, addr2 = key2, addr2, key1, addr1
+ }
+ if bytes.Compare(addr1[:], addr3[:]) > 0 {
+ key1, addr1, key3, addr3 = key3, addr3, key1, addr1
+ }
+ if bytes.Compare(addr2[:], addr3[:]) > 0 {
+ key2, addr2, key3, addr3 = key3, addr3, key2, addr2
+ }
+ var (
+ tx1 = makeTx(0, 1, 1000, 90, key1)
+ tx2 = makeTx(0, 1, 800, 70, key2)
+ tx3 = makeTx(0, 1, 1500, 110, key3)
+
+ blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1})
+ blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2})
+ blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3})
+
+ heapOrder = []common.Address{addr2, addr1, addr3}
+ heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2}
+ )
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Put(blob3)
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+ statedb.Commit(0, true, false)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the heap's internal state matches the expectations
+ for i, addr := range pool.evict.addrs {
+ if addr != heapOrder[i] {
+ t.Errorf("slot %d mismatch: have %v, want %v", i, addr, heapOrder[i])
+ }
+ }
+ for addr, i := range pool.evict.index {
+ if i != heapIndex[addr] {
+ t.Errorf("index for %v mismatch: have %d, want %d", addr, i, heapIndex[addr])
+ }
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks; it actually validates the verifier
+ // against the already hard-coded checks above.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that after the pool's previous state is loaded back, any transactions
+// over the new storage cap will get dropped.
+func TestOpenCap(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a few transactions from a few accounts
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+ tx1 = makeTx(0, 1, 1000, 100, key1)
+ tx2 = makeTx(0, 1, 800, 70, key2)
+ tx3 = makeTx(0, 1, 1500, 110, key3)
+
+ blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+ blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+ blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+
+ keep = []common.Address{addr1, addr3}
+ drop = []common.Address{addr2}
+ size = uint64(2 * (txAvgSize + blobSize))
+ )
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Put(blob3)
+ store.Close()
+
+ // Verify pool capping twice: first by reducing the data cap, then restarting
+ // with a high cap to ensure everything was persisted previously
+ for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
+ // Create a blob pool out of the pre-seeded data, but cap it to 2 blob transactions
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+ statedb.Commit(0, true, false)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage, Datacap: datacap}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ // Verify that enough transactions have been dropped to get the pool's size
+ // under the requested limit
+ if len(pool.index) != len(keep) {
+ t.Errorf("tracked account count mismatch: have %d, want %d", len(pool.index), len(keep))
+ }
+ for _, addr := range keep {
+ if _, ok := pool.index[addr]; !ok {
+ t.Errorf("expected account %v missing from pool", addr)
+ }
+ }
+ for _, addr := range drop {
+ if _, ok := pool.index[addr]; ok {
+ t.Errorf("unexpected account %v present in pool", addr)
+ }
+ }
+ if pool.stored != size {
+ t.Errorf("pool stored size mismatch: have %v, want %v", pool.stored, size)
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks; it actually validates the verifier
+ // against the already hard-coded checks above.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+
+ pool.Close()
+ }
+}
+
+// Tests that adding transaction will correctly store it in the persistent store
+// and update all the indices.
+//
+// Note, this test mostly checks the pool's transaction shuffling logic and
+// things specific to the blob pool. It does not do an exhaustive transaction
+// validity check.
+func TestAdd(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // seed is a helper tuple to seed an initial state db and pool
+ type seed struct {
+ balance uint64
+ nonce uint64
+ txs []*types.BlobTx
+ }
+
+ // addtx is a helper sender/tx tuple to represent a new tx addition
+ type addtx struct {
+ from string
+ tx *types.BlobTx
+ err error
+ }
+
+ tests := []struct {
+ seeds map[string]seed
+ adds []addtx
+ }{
+ // Transactions from new accounts should be accepted if their initial
+ // nonce matches the expected one from the statedb. Higher or lower must
+ // be rejected.
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 21100 + blobSize},
+ "bob": {balance: 21100 + blobSize, nonce: 1},
+ "claire": {balance: 21100 + blobSize},
+ "dave": {balance: 21100 + blobSize, nonce: 1},
+ },
+ adds: []addtx{
+ { // New account, no previous txs: accept nonce 0
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: nil,
+ },
+ { // Old account, 1 tx in chain, 0 pending: accept nonce 1
+ from: "bob",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, no previous txs: reject nonce 1
+ from: "claire",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: core.ErrNonceTooHigh,
+ },
+ { // Old account, 1 tx in chain, 0 pending: reject nonce 0
+ from: "dave",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: core.ErrNonceTooLow,
+ },
+ { // Old account, 1 tx in chain, 0 pending: reject nonce 2
+ from: "dave",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: core.ErrNonceTooHigh,
+ },
+ },
+ },
+ // Transactions from already pooled accounts should only be accepted if
+ // the nonces are contiguous (ignore prices for now, will check later)
+ {
+ seeds: map[string]seed{
+ "alice": {
+ balance: 1000000,
+ txs: []*types.BlobTx{
+ makeUnsignedTx(0, 1, 1, 1),
+ },
+ },
+ "bob": {
+ balance: 1000000,
+ nonce: 1,
+ txs: []*types.BlobTx{
+ makeUnsignedTx(1, 1, 1, 1),
+ },
+ },
+ },
+ adds: []addtx{
+ { // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 1 tx pending: accept nonce 1
+ from: "alice",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 2 txs pending: reject nonce 3
+ from: "alice",
+ tx: makeUnsignedTx(3, 1, 1, 1),
+ err: core.ErrNonceTooHigh,
+ },
+ { // New account, 2 txs pending: accept nonce 2
+ from: "alice",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 3 txs pending: accept nonce 3 now
+ from: "alice",
+ tx: makeUnsignedTx(3, 1, 1, 1),
+ err: nil,
+ },
+ { // Old account, 1 tx in chain, 1 tx pending: reject replacement nonce 1 (ignore price for now)
+ from: "bob",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
+ from: "bob",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: nil,
+ },
+ },
+ },
+ // Transactions should only be accepted into the pool if the cumulative
+ // expenditure doesn't overflow the account balance
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 63299 + 3*blobSize}, // 3 tx - 1 wei
+ },
+ adds: []addtx{
+ { // New account, no previous txs: accept nonce 0 with 21100 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 1 pooled tx with 21100 wei spent: accept nonce 1 with 21100 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 2 pooled tx with 42200 wei spent: reject nonce 2 with 21100 wei spend (1 wei overflow)
+ from: "alice",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: core.ErrInsufficientFunds,
+ },
+ },
+ },
+ // Transactions should only be accepted into the pool if the total count
+ // from the same account doesn't overflow the pool limits
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 10000000},
+ },
+ adds: []addtx{
+ { // New account, no previous txs, 16 slots left: accept nonce 0
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 1 pooled tx, 15 slots left: accept nonce 1
+ from: "alice",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 2 pooled tx, 14 slots left: accept nonce 2
+ from: "alice",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 3 pooled tx, 13 slots left: accept nonce 3
+ from: "alice",
+ tx: makeUnsignedTx(3, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 4 pooled tx, 12 slots left: accept nonce 4
+ from: "alice",
+ tx: makeUnsignedTx(4, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 5 pooled tx, 11 slots left: accept nonce 5
+ from: "alice",
+ tx: makeUnsignedTx(5, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 6 pooled tx, 10 slots left: accept nonce 6
+ from: "alice",
+ tx: makeUnsignedTx(6, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 7 pooled tx, 9 slots left: accept nonce 7
+ from: "alice",
+ tx: makeUnsignedTx(7, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 8 pooled tx, 8 slots left: accept nonce 8
+ from: "alice",
+ tx: makeUnsignedTx(8, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 9 pooled tx, 7 slots left: accept nonce 9
+ from: "alice",
+ tx: makeUnsignedTx(9, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 10 pooled tx, 6 slots left: accept nonce 10
+ from: "alice",
+ tx: makeUnsignedTx(10, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 11 pooled tx, 5 slots left: accept nonce 11
+ from: "alice",
+ tx: makeUnsignedTx(11, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 12 pooled tx, 4 slots left: accept nonce 12
+ from: "alice",
+ tx: makeUnsignedTx(12, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 13 pooled tx, 3 slots left: accept nonce 13
+ from: "alice",
+ tx: makeUnsignedTx(13, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 14 pooled tx, 2 slots left: accept nonce 14
+ from: "alice",
+ tx: makeUnsignedTx(14, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 15 pooled tx, 1 slot left: accept nonce 15
+ from: "alice",
+ tx: makeUnsignedTx(15, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 16 pooled tx, 0 slots left: accept nonce 15 replacement
+ from: "alice",
+ tx: makeUnsignedTx(15, 10, 10, 10),
+ err: nil,
+ },
+ { // New account, 16 pooled tx, 0 slots left: reject nonce 16 with overcap
+ from: "alice",
+ tx: makeUnsignedTx(16, 1, 1, 1),
+ err: txpool.ErrAccountLimitExceeded,
+ },
+ },
+ },
+ // Previously existing transactions should be allowed to be replaced iff
+ // the new cumulative expenditure can be covered by the account and the
+ // prices are bumped all around (no percentage check here).
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 2*100 + 5*21000 + 3*blobSize},
+ },
+ adds: []addtx{
+ { // New account, no previous txs: reject nonce 0 with 341172 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 20, 1),
+ err: core.ErrInsufficientFunds,
+ },
+ { // New account, no previous txs: accept nonce 0 with 173172 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 2, 1),
+ err: nil,
+ },
+ { // New account, 1 pooled tx with 173172 wei spent: accept nonce 1 with 152172 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with 599684 wei spend (173072 extra) (would overflow balance at nonce 1)
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 5, 2),
+ err: core.ErrInsufficientFunds,
+ },
+ { // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gastip-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 3, 2),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gascap-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 2, 2),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-blobcap-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 4, 1),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 2 pooled tx with 325344 wei spent: accept nonce 0 with 84100 wei spend (42000 extra)
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 4, 2),
+ err: nil,
+ },
+ },
+ },
+ // Previously existing transactions should be allowed to be replaced iff
+ // the new prices are bumped by a sufficient amount.
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 100 + 8*21000 + 4*blobSize},
+ },
+ adds: []addtx{
+ { // New account, no previous txs: accept nonce 0
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 4, 2),
+ err: nil,
+ },
+ { // New account, 1 pooled tx: reject nonce 0 with low-gastip-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 3, 8, 4),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 1 pooled tx: reject nonce 0 with low-gascap-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 4, 6, 4),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 1 pooled tx: reject nonce 0 with low-blobcap-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 4, 8, 3),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 1 pooled tx: accept nonce 0 with all-bumps
+ from: "alice",
+ tx: makeUnsignedTx(0, 4, 8, 4),
+ err: nil,
+ },
+ },
+ },
+ }
+ for i, tt := range tests {
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage) // late defer, still ok
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert the seed transactions for the pool startup
+ var (
+ keys = make(map[string]*ecdsa.PrivateKey)
+ addrs = make(map[string]common.Address)
+ )
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ for acc, seed := range tt.seeds {
+ // Generate a new random key/address for the seed account
+ keys[acc], _ = crypto.GenerateKey()
+ addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
+
+ // Seed the state database with this account
+ statedb.AddBalance(addrs[acc], new(big.Int).SetUint64(seed.balance))
+ statedb.SetNonce(addrs[acc], seed.nonce)
+
+ // Sign the seed transactions and store them in the data store
+ for _, tx := range seed.txs {
+ var (
+ signed, _ = types.SignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
+ blob, _ = rlp.EncodeToBytes(&blobTx{Tx: signed, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+ )
+ store.Put(blob)
+ }
+ }
+ statedb.Commit(0, true, false)
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("test %d: failed to create blob pool: %v", i, err)
+ }
+ verifyPoolInternals(t, pool)
+
+ // Add each transaction one by one, verifying the pool internals in between
+ for j, add := range tt.adds {
+ signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
+ if err := pool.add(signed, []kzg4844.Blob{emptyBlob}, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}); !errors.Is(err, add.err) {
+ t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
+ }
+ verifyPoolInternals(t, pool)
+ }
+ // Verify the pool internals and close down the test
+ verifyPoolInternals(t, pool)
+ pool.Close()
+ }
+}
diff --git a/core/txpool/blobpool/config.go b/core/txpool/blobpool/config.go
new file mode 100644
index 0000000000..6015b1baf6
--- /dev/null
+++ b/core/txpool/blobpool/config.go
@@ -0,0 +1,60 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// Config are the configuration parameters of the blob transaction pool.
+type Config struct {
+ Datadir string // Data directory containing the currently executable blobs
+ Datacap uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
+ PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
+}
+
+// DefaultConfig contains the default configurations for the transaction pool.
+var DefaultConfig = Config{
+ Datadir: "blobpool",
+ Datacap: 10 * 1024 * 1024 * 1024,
+ PriceBump: 100, // either have patience or be aggressive, no mushy ground
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *Config) sanitize() Config {
+ conf := *config
+ if conf.Datacap < 1 {
+ log.Warn("Sanitizing invalid blobpool storage cap", "provided", conf.Datacap, "updated", DefaultConfig.Datacap)
+ conf.Datacap = DefaultConfig.Datacap
+ }
+ if conf.PriceBump < 1 {
+ log.Warn("Sanitizing invalid blobpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
+ conf.PriceBump = DefaultConfig.PriceBump
+ }
+ return conf
+}
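+
+// As an illustrative sketch (not part of the pool API), a partially populated
+// config falls back to the defaults above after sanitization:
+//
+//	cfg := (&Config{Datadir: "blobpool"}).sanitize()
+//	// cfg.Datacap == DefaultConfig.Datacap, cfg.PriceBump == DefaultConfig.PriceBump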
diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go
new file mode 100644
index 0000000000..13e1f1f6ef
--- /dev/null
+++ b/core/txpool/blobpool/evictheap.go
@@ -0,0 +1,156 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "bytes"
+ "container/heap"
+ "math"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/holiman/uint256"
+)
+
+// evictHeap is a helper data structure to keep track of the cheapest bottleneck
+// transaction from each account to determine which account to evict from.
+//
+// The heap internally tracks a slice of cheapest transactions from each account
+// and a mapping from addresses to indices for direct removals/updates.
+//
+// The goal of the heap is to decide which account has the worst bottleneck to
+// evict transactions from.
+type evictHeap struct {
+ metas *map[common.Address][]*blobTxMeta // Pointer to the blob pool's index for price retrievals
+
+ basefeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the base fee
+ blobfeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the blob fee
+
+ addrs []common.Address // Heap of addresses to retrieve the cheapest out of
+ index map[common.Address]int // Indices into the heap for replacements
+}
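+
+// A minimal usage sketch (illustrative only): the structure is always driven
+// through the container/heap package, e.g.
+//
+//	evict := newPriceHeap(basefee, blobfee, &index)
+//	victim := heap.Pop(evict).(common.Address) // account with the cheapest bottleneck, evict from it first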
+
+// newPriceHeap creates a new heap of the cheapest accounts in the blob pool to
+// evict from in case of oversaturation.
+func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
+ heap := &evictHeap{
+ metas: index,
+ index: make(map[common.Address]int),
+ }
+ // Populate the heap in account sort order. Not really needed in practice,
+ // but it makes the heap initialization deterministic and less annoying to
+ // test in unit tests.
+ addrs := make([]common.Address, 0, len(*index))
+ for addr := range *index {
+ addrs = append(addrs, addr)
+ }
+ sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i][:], addrs[j][:]) < 0 })
+
+ for _, addr := range addrs {
+ heap.index[addr] = len(heap.addrs)
+ heap.addrs = append(heap.addrs, addr)
+ }
+ heap.reinit(basefee, blobfee, true)
+ return heap
+}
+
+// reinit updates the pre-calculated dynamic fee jumps in the price heap and runs
+// the sorting algorithm from scratch on the entire heap.
+func (h *evictHeap) reinit(basefee *uint256.Int, blobfee *uint256.Int, force bool) {
+ // If the update is mostly the same as the old, don't sort pointlessly
+ basefeeJumps := dynamicFeeJumps(basefee)
+ blobfeeJumps := dynamicFeeJumps(blobfee)
+
+ if !force && math.Abs(h.basefeeJumps-basefeeJumps) < 0.01 && math.Abs(h.blobfeeJumps-blobfeeJumps) < 0.01 { // TODO(karalabe): 0.01 enough, maybe should be smaller? Maybe this optimization is moot?
+ return
+ }
+ // One or both of the dynamic fees jumped, resort the pool
+ h.basefeeJumps = basefeeJumps
+ h.blobfeeJumps = blobfeeJumps
+
+ heap.Init(h)
+}
+
+// Len implements sort.Interface as part of heap.Interface, returning the number
+// of accounts in the pool which can be considered for eviction.
+func (h *evictHeap) Len() int {
+ return len(h.addrs)
+}
+
+// Less implements sort.Interface as part of heap.Interface, returning which of
+// the two requested accounts has a cheaper bottleneck.
+func (h *evictHeap) Less(i, j int) bool {
+ txsI := (*(h.metas))[h.addrs[i]]
+ txsJ := (*(h.metas))[h.addrs[j]]
+
+ lastI := txsI[len(txsI)-1]
+ lastJ := txsJ[len(txsJ)-1]
+
+ prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps)
+ if prioI > 0 {
+ prioI = 0
+ }
+ prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps)
+ if prioJ > 0 {
+ prioJ = 0
+ }
+ if prioI == prioJ {
+ return lastI.evictionExecTip.Lt(lastJ.evictionExecTip)
+ }
+ return prioI < prioJ
+}
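+
+// For intuition (mirroring TestPriceHeapSorting): with the network at basefee
+// 1000 and blobfee 100, an account whose bottleneck tx caps out at (630, 63)
+// is roughly 4 fee jumps underwater and pops before one capped at (800, 80),
+// which is only about 2 jumps under; equal priorities are split by the smaller
+// eviction tip.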
+
+// Swap implements sort.Interface as part of heap.Interface, maintaining both the
+// order of the accounts according to the heap, and the account->item slot mapping
+// for replacements.
+func (h *evictHeap) Swap(i, j int) {
+ h.index[h.addrs[i]], h.index[h.addrs[j]] = h.index[h.addrs[j]], h.index[h.addrs[i]]
+ h.addrs[i], h.addrs[j] = h.addrs[j], h.addrs[i]
+}
+
+// Push implements heap.Interface, appending an item to the end of the account
+// ordering as well as the address to item slot mapping.
+func (h *evictHeap) Push(x any) {
+ h.index[x.(common.Address)] = len(h.addrs)
+ h.addrs = append(h.addrs, x.(common.Address))
+}
+
+// Pop implements heap.Interface, removing and returning the last element of the
+// heap.
+//
+// Note, use `heap.Pop`, not `evictHeap.Pop`. This method is used by Go's heap
+// package to provide the functionality; it does not embed it.
+func (h *evictHeap) Pop() any {
+ // Remove the last element from the heap
+ size := len(h.addrs)
+ addr := h.addrs[size-1]
+ h.addrs = h.addrs[:size-1]
+
+ // Unindex the removed element and return
+ delete(h.index, addr)
+ return addr
+}
diff --git a/core/txpool/blobpool/evictheap_test.go b/core/txpool/blobpool/evictheap_test.go
new file mode 100644
index 0000000000..622a3869ea
--- /dev/null
+++ b/core/txpool/blobpool/evictheap_test.go
@@ -0,0 +1,330 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "container/heap"
+ mrand "math/rand"
+ "testing"
+
+ "github.com/ava-labs/subnet-evm/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/holiman/uint256"
+)
+
+var rand = mrand.New(mrand.NewSource(1))
+
+// verifyHeapInternals verifies that all accounts present in the index are also
+// present in the heap and internals are consistent across various indices.
+func verifyHeapInternals(t *testing.T, evict *evictHeap) {
+ t.Helper()
+
+ // Ensure that all accounts are present in the heap and no extras
+ seen := make(map[common.Address]struct{})
+ for i, addr := range evict.addrs {
+ seen[addr] = struct{}{}
+ if _, ok := (*evict.metas)[addr]; !ok {
+ t.Errorf("heap contains unexpected address at slot %d: %v", i, addr)
+ }
+ }
+ for addr := range *evict.metas {
+ if _, ok := seen[addr]; !ok {
+ t.Errorf("heap is missing required address %v", addr)
+ }
+ }
+ if len(evict.addrs) != len(*evict.metas) {
+ t.Errorf("heap size %d mismatches metadata size %d", len(evict.addrs), len(*evict.metas))
+ }
+ // Ensure that all accounts are present in the heap order index and no extras
+ have := make([]common.Address, len(evict.index))
+ for addr, i := range evict.index {
+ have[i] = addr
+ }
+ if len(have) != len(evict.addrs) {
+ t.Errorf("heap index size %d mismatches heap size %d", len(have), len(evict.addrs))
+ }
+ for i := 0; i < len(have) && i < len(evict.addrs); i++ {
+ if have[i] != evict.addrs[i] {
+ t.Errorf("heap index for slot %d mismatches: have %v, want %v", i, have[i], evict.addrs[i])
+ }
+ }
+}
+
+// Tests that the price heap can correctly sort its set of transactions based on
+// an input base- and blob fee.
+func TestPriceHeapSorting(t *testing.T) {
+ tests := []struct {
+ execTips []uint64
+ execFees []uint64
+ blobFees []uint64
+
+ basefee uint64
+ blobfee uint64
+
+ order []int
+ }{
+ // If everything is above the basefee and blobfee, order by miner tip
+ {
+ execTips: []uint64{1, 0, 2},
+ execFees: []uint64{1, 2, 3},
+ blobFees: []uint64{3, 2, 1},
+ basefee: 0,
+ blobfee: 0,
+ order: []int{1, 0, 2},
+ },
+ // If only basefees are used (blob fee matches with network), return the
+ // ones furthest below the current basefee, splitting equal ones by the
+ // tip. Anything above the basefee should be split by tip.
+ {
+ execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+ execFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+ blobFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+ basefee: 1999,
+ blobfee: 0,
+ order: []int{3, 2, 1, 0, 4, 5, 6},
+ },
+ // If only blobfees are used (base fee matches with network), return the
+ // ones furthest below the current blobfee, splitting equal ones by the
+ // tip. Anything above the blobfee should be split by tip.
+ {
+ execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+ execFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+ blobFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+ basefee: 0,
+ blobfee: 1999,
+ order: []int{3, 2, 1, 0, 4, 5, 6},
+ },
+ // If both basefee and blobfee are specified, sort by the larger distance
+ // of the two from the current network conditions, splitting same (loglog)
+ // ones via the tip.
+ //
+ // Basefee: 1000
+ // Blobfee: 100
+ //
+ // Tx #0: (800, 80) - 2 jumps below both => priority -1
+ // Tx #1: (630, 63) - 4 jumps below both => priority -2
+ // Tx #2: (800, 63) - 2 jumps below basefee, 4 jumps below blobfee => priority -2 (blob penalty dominates)
+ // Tx #3: (630, 80) - 4 jumps below basefee, 2 jumps below blobfee => priority -2 (base penalty dominates)
+ //
+ // Txs 1, 2 and 3 share the same priority and get split by their tips;
+ // tx 0 has the best priority and is popped last.
+ {
+ execTips: []uint64{1, 2, 3, 4},
+ execFees: []uint64{800, 630, 800, 630},
+ blobFees: []uint64{80, 63, 63, 80},
+ basefee: 1000,
+ blobfee: 100,
+ order: []int{1, 2, 3, 0},
+ },
+ }
+ for i, tt := range tests {
+ // Create an index of the transactions
+ index := make(map[common.Address][]*blobTxMeta)
+ for j := byte(0); j < byte(len(tt.execTips)); j++ {
+ addr := common.Address{j}
+
+ var (
+ execTip = uint256.NewInt(tt.execTips[j])
+ execFee = uint256.NewInt(tt.execFees[j])
+ blobFee = uint256.NewInt(tt.blobFees[j])
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ index[addr] = []*blobTxMeta{{
+ id: uint64(j),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }}
+ }
+ // Create a price heap and check the pop order
+ priceheap := newPriceHeap(uint256.NewInt(tt.basefee), uint256.NewInt(tt.blobfee), &index)
+ verifyHeapInternals(t, priceheap)
+
+ for j := 0; j < len(tt.order); j++ {
+ if next := heap.Pop(priceheap); int(next.(common.Address)[0]) != tt.order[j] {
+ t.Errorf("test %d, item %d: order mismatch: have %d, want %d", i, j, next.(common.Address)[0], tt.order[j])
+ } else {
+ delete(index, next.(common.Address)) // remove to simulate a correct pool for the test
+ }
+ verifyHeapInternals(t, priceheap)
+ }
+ }
+}
+
+// Benchmarks reheaping the entire set of accounts in the blob pool.
+func BenchmarkPriceHeapReinit1MB(b *testing.B) { benchmarkPriceHeapReinit(b, 1024*1024) }
+func BenchmarkPriceHeapReinit10MB(b *testing.B) { benchmarkPriceHeapReinit(b, 10*1024*1024) }
+func BenchmarkPriceHeapReinit100MB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024) }
+func BenchmarkPriceHeapReinit1GB(b *testing.B) { benchmarkPriceHeapReinit(b, 1024*1024*1024) }
+func BenchmarkPriceHeapReinit10GB(b *testing.B) { benchmarkPriceHeapReinit(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapReinit25GB(b *testing.B) { benchmarkPriceHeapReinit(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapReinit50GB(b *testing.B) { benchmarkPriceHeapReinit(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapReinit100GB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) {
+ // Calculate how many unique transactions we can fit into the provided disk
+ // data cap
+ blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
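+ // (assuming the usual 32-byte field elements and 4096 elements per blob, that
+ // is 128KiB per transaction, so e.g. the 1MB benchmark seeds 8 accounts)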
+
+ // Create a random set of transactions with random fees. Use a separate account
+ // for each transaction to make it the worst case.
+ index := make(map[common.Address][]*blobTxMeta)
+ for i := 0; i < int(blobs); i++ {
+ var addr common.Address
+ rand.Read(addr[:])
+
+ var (
+ execTip = uint256.NewInt(rand.Uint64())
+ execFee = uint256.NewInt(rand.Uint64())
+ blobFee = uint256.NewInt(rand.Uint64())
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ index[addr] = []*blobTxMeta{{
+ id: uint64(i),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }}
+ }
+ // Create a price heap and reinit it over and over
+ heap := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+
+ basefees := make([]*uint256.Int, b.N)
+ blobfees := make([]*uint256.Int, b.N)
+ for i := 0; i < b.N; i++ {
+ basefees[i] = uint256.NewInt(rand.Uint64())
+ blobfees[i] = uint256.NewInt(rand.Uint64())
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ heap.reinit(basefees[i], blobfees[i], true)
+ }
+}
+
+// Benchmarks overflowing the heap over and over (add and then drop).
+func BenchmarkPriceHeapOverflow1MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 1024*1024) }
+func BenchmarkPriceHeapOverflow10MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 10*1024*1024) }
+func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
+func BenchmarkPriceHeapOverflow1GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
+func BenchmarkPriceHeapOverflow10GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow25GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow50GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
+ // Calculate how many unique transactions we can fit into the provided disk
+ // data cap
+ blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
+
+ // Create a random set of transactions with random fees. Use a separate account
+ // for each transaction to make it the worst case.
+ index := make(map[common.Address][]*blobTxMeta)
+ for i := 0; i < int(blobs); i++ {
+ var addr common.Address
+ rand.Read(addr[:])
+
+ var (
+ execTip = uint256.NewInt(rand.Uint64())
+ execFee = uint256.NewInt(rand.Uint64())
+ blobFee = uint256.NewInt(rand.Uint64())
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ index[addr] = []*blobTxMeta{{
+ id: uint64(i),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }}
+ }
+ // Create a price heap and overflow it over and over
+ evict := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+ var (
+ addrs = make([]common.Address, b.N)
+ metas = make([]*blobTxMeta, b.N)
+ )
+ for i := 0; i < b.N; i++ {
+ rand.Read(addrs[i][:])
+
+ var (
+ execTip = uint256.NewInt(rand.Uint64())
+ execFee = uint256.NewInt(rand.Uint64())
+ blobFee = uint256.NewInt(rand.Uint64())
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ metas[i] = &blobTxMeta{
+ id: uint64(int(blobs) + i),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ index[addrs[i]] = []*blobTxMeta{metas[i]}
+ heap.Push(evict, addrs[i])
+
+ drop := heap.Pop(evict)
+ delete(index, drop.(common.Address))
+ }
+}
diff --git a/core/txpool/blobpool/interface.go b/core/txpool/blobpool/interface.go
new file mode 100644
index 0000000000..d5603cf566
--- /dev/null
+++ b/core/txpool/blobpool/interface.go
@@ -0,0 +1,59 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "math/big"
+
+ "github.com/ava-labs/subnet-evm/commontype"
+ "github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/params"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// BlockChain defines the minimal set of methods needed to back a blob pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+ // Config retrieves the chain's fork configuration.
+ Config() *params.ChainConfig
+
+ // CurrentBlock returns the current head of the chain.
+ CurrentBlock() *types.Header
+
+ // CurrentFinalBlock returns the current block below which blobs should not
+ // be maintained anymore for reorg purposes.
+ CurrentFinalBlock() *types.Header
+
+ // GetBlock retrieves a specific block, used during pool resets.
+ GetBlock(hash common.Hash, number uint64) *types.Block
+
+ // StateAt returns a state database for a given root hash (generally the head).
+ StateAt(root common.Hash) (*state.StateDB, error)
+
+ GetFeeConfigAt(header *types.Header) (commontype.FeeConfig, *big.Int, error)
+}
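+
+// In the unit tests this interface is satisfied by the testBlockChain mock,
+// constructed with a fixed basefee, blobfee and state database, e.g.
+//
+//	chain := &testBlockChain{config: testChainConfig, basefee: basefee, blobfee: blobfee, statedb: statedb}
+//	pool := New(Config{Datadir: storage}, chain)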
diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go
new file mode 100644
index 0000000000..c8e7eed476
--- /dev/null
+++ b/core/txpool/blobpool/limbo.go
@@ -0,0 +1,268 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "errors"
+
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+)
+
+// limboBlob is a wrapper around an opaque blobset that also contains the tx hash
+// to which it belongs as well as the block number in which it was included for
+// finality eviction.
+type limboBlob struct {
+ Owner common.Hash // Owner transaction's hash to support resurrecting reorged txs
+ Block uint64 // Block in which the blob transaction was included
+
+ Blobs []kzg4844.Blob // The opaque blobs originally part of the transaction
+ Commits []kzg4844.Commitment // The commitments for the original blobs
+ Proofs []kzg4844.Proof // The proofs verifying the commitments
+}
+
+// limbo is a light, indexed database to temporarily store recently included
+// blobs until they are finalized. The purpose is to support small reorgs, which
+// would require pulling back up old blobs (which aren't part of the chain).
+//
+// TODO(karalabe): Currently updating the inclusion block of a blob needs a full db rewrite. Can we do without?
+type limbo struct {
+ store billy.Database // Persistent data store for limboed blobs
+
+ index map[common.Hash]uint64 // Mappings from tx hashes to datastore ids
+ groups map[uint64]map[uint64]common.Hash // Set of txs included in past blocks
+}
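+
+// Lifecycle sketch (illustrative): when a blob transaction is included, its
+// sidecar is push()-ed under the inclusion block number; a reorg pull()-s it
+// back out for re-injection into the pool; and once that block is finalized,
+// finalize() drops the entry for good.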
+
+// newLimbo opens and indexes a set of limboed blob transactions.
+func newLimbo(datadir string) (*limbo, error) {
+ l := &limbo{
+ index: make(map[common.Hash]uint64),
+ groups: make(map[uint64]map[uint64]common.Hash),
+ }
+ // Index all limboed blobs on disk and delete anything unprocessable
+ var fails []uint64
+ index := func(id uint64, size uint32, data []byte) {
+ if l.parseBlob(id, data) != nil {
+ fails = append(fails, id)
+ }
+ }
+ store, err := billy.Open(billy.Options{Path: datadir}, newSlotter(), index)
+ if err != nil {
+ return nil, err
+ }
+ l.store = store
+
+ if len(fails) > 0 {
+ log.Warn("Dropping invalidated limboed blobs", "ids", fails)
+ for _, id := range fails {
+ if err := l.store.Delete(id); err != nil {
+ l.Close()
+ return nil, err
+ }
+ }
+ }
+ return l, nil
+}
+
+// Close closes down the underlying persistent store.
+func (l *limbo) Close() error {
+ return l.store.Close()
+}
+
+// parseBlob is a callback method on limbo creation that gets called for each
+// limboed blob on disk to create the in-memory metadata index.
+func (l *limbo) parseBlob(id uint64, data []byte) error {
+ item := new(limboBlob)
+ if err := rlp.DecodeBytes(data, item); err != nil {
+ // This path is impossible unless the disk data representation changes
+ // across restarts. For that ever-improbable case, recover gracefully
+ // by ignoring this data entry.
+ log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
+ return err
+ }
+ if _, ok := l.index[item.Owner]; ok {
+ // This path is impossible, unless due to a programming error a blob gets
+ // inserted into the limbo which was already part of it. Recover gracefully
+ // by ignoring this data entry.
+ log.Error("Dropping duplicate blob limbo entry", "owner", item.Owner, "id", id)
+ return errors.New("duplicate blob")
+ }
+ l.index[item.Owner] = id
+
+ if _, ok := l.groups[item.Block]; !ok {
+ l.groups[item.Block] = make(map[uint64]common.Hash)
+ }
+ l.groups[item.Block][id] = item.Owner
+
+ return nil
+}
+
+// finalize evicts all blobs belonging to a recently finalized block or older.
+func (l *limbo) finalize(final *types.Header) {
+ // Just in case there's no final block yet (network not yet merged, weird
+ // restart, sethead, etc), fail gracefully.
+ if final == nil {
+ log.Error("Nil finalized block cannot evict old blobs")
+ return
+ }
+ for block, ids := range l.groups {
+ if block > final.Number.Uint64() {
+ continue
+ }
+ for id, owner := range ids {
+ if err := l.store.Delete(id); err != nil {
+ log.Error("Failed to drop finalized blob", "block", block, "id", id, "err", err)
+ }
+ delete(l.index, owner)
+ }
+ delete(l.groups, block)
+ }
+}
+
+// push stores a new blob transaction into the limbo, waiting until finality for
+// it to be automatically evicted.
+func (l *limbo) push(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+ // If the blobs are already tracked by the limbo, consider it a programming
+ // error. There's not much to do against it, but be loud.
+ if _, ok := l.index[tx]; ok {
+ log.Error("Limbo cannot push already tracked blobs", "tx", tx)
+ return errors.New("already tracked blob transaction")
+ }
+ if err := l.setAndIndex(tx, block, blobs, commits, proofs); err != nil {
+ log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err)
+ return err
+ }
+ return nil
+}
+
+// pull retrieves a previously pushed set of blobs back from the limbo, removing
+// it at the same time. This method should be used when a previously included blob
+// transaction gets reorged out.
+func (l *limbo) pull(tx common.Hash) ([]kzg4844.Blob, []kzg4844.Commitment, []kzg4844.Proof, error) {
+ // If the blobs are not tracked by the limbo, there's not much to do. This
+ // can happen for example if a blob transaction is mined without pushing it
+ // into the network first.
+ id, ok := l.index[tx]
+ if !ok {
+ log.Trace("Limbo cannot pull non-tracked blobs", "tx", tx)
+ return nil, nil, nil, errors.New("unseen blob transaction")
+ }
+ item, err := l.getAndDrop(id)
+ if err != nil {
+ log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+ return nil, nil, nil, err
+ }
+ return item.Blobs, item.Commits, item.Proofs, nil
+}
+
+// update changes the block number under which a blob transaction is tracked. This
+// method should be used when a reorg changes a transaction's inclusion block.
+//
+// The method may log errors for various unexpected scenarios but will not return
+// any of them since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (l *limbo) update(tx common.Hash, block uint64) {
+ // If the blobs are not tracked by the limbo, there's not much to do. This
+ // can happen for example if a blob transaction is mined without pushing it
+ // into the network first.
+ id, ok := l.index[tx]
+ if !ok {
+ log.Trace("Limbo cannot update non-tracked blobs", "tx", tx)
+ return
+ }
+ // If there was no change in the blob's inclusion block, don't mess around
+ // with heavy database operations.
+ if _, ok := l.groups[block][id]; ok {
+ log.Trace("Blob transaction unchanged in limbo", "tx", tx, "block", block)
+ return
+ }
+ // Retrieve the old blobs from the data store and write them back with a new
+ // block number. If anything fails, there's not much to do; move on.
+ item, err := l.getAndDrop(id)
+ if err != nil {
+ log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+ return
+ }
+ if err := l.setAndIndex(tx, block, item.Blobs, item.Commits, item.Proofs); err != nil {
+ log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
+ return
+ }
+ log.Trace("Blob transaction updated in limbo", "tx", tx, "old-block", item.Block, "new-block", block)
+}
+
+// getAndDrop retrieves a blob item from the limbo store and deletes it both from
+// the store and indices.
+func (l *limbo) getAndDrop(id uint64) (*limboBlob, error) {
+ data, err := l.store.Get(id)
+ if err != nil {
+ return nil, err
+ }
+ item := new(limboBlob)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+ return nil, err
+ }
+ delete(l.index, item.Owner)
+ delete(l.groups[item.Block], id)
+ if len(l.groups[item.Block]) == 0 {
+ delete(l.groups, item.Block)
+ }
+ if err := l.store.Delete(id); err != nil {
+ return nil, err
+ }
+ return item, nil
+}
+
+// setAndIndex assembles a limbo blob database entry and stores it, also updating
+// the in-memory indices.
+func (l *limbo) setAndIndex(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+ item := &limboBlob{
+ Owner: tx,
+ Block: block,
+ Blobs: blobs,
+ Commits: commits,
+ Proofs: proofs,
+ }
+ data, err := rlp.EncodeToBytes(item)
+ if err != nil {
+ panic(err) // cannot happen at runtime, dev error
+ }
+ id, err := l.store.Put(data)
+ if err != nil {
+ return err
+ }
+ l.index[tx] = id
+ if _, ok := l.groups[block]; !ok {
+ l.groups[block] = make(map[uint64]common.Hash)
+ }
+ l.groups[block][id] = tx
+ return nil
+}
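+
+// exampleLimboFlow is an illustrative sketch (hypothetical helper, not part of
+// the upstream blobpool) of how a pool could drive the limbo over a blob
+// transaction's life: park the sidecar on inclusion, retag it after a reorg,
+// recover it if the transaction drops out, and evict everything on finality.
+func exampleLimboFlow(l *limbo, tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof, final *types.Header) {
+ // The transaction was included in block: keep its blobs around until finality.
+ if err := l.push(tx, block, blobs, commits, proofs); err != nil {
+ log.Warn("Example limbo push failed", "err", err)
+ }
+ // A reorg re-included it one block later: only the tracking block changes.
+ l.update(tx, block+1)
+ // It was reorged out entirely: pull the sidecar back for re-injection.
+ if _, _, _, err := l.pull(tx); err != nil {
+ log.Warn("Example limbo pull failed", "err", err)
+ }
+ // A new finalized block arrived: drop every sidecar tracked at or below it.
+ l.finalize(final)
+}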
diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go
new file mode 100644
index 0000000000..0b9c687cce
--- /dev/null
+++ b/core/txpool/blobpool/metrics.go
@@ -0,0 +1,88 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "github.com/ava-labs/subnet-evm/metrics"
+
+var (
+ // datacapGauge tracks the user's configured capacity for the blob pool. It
+ // is mostly a way to expose/debug issues.
+ datacapGauge = metrics.NewRegisteredGauge("blobpool/datacap", nil)
+
+ // The below metrics track the per-datastore metrics for the primary blob
+ // store and the temporary limbo store.
+ datausedGauge = metrics.NewRegisteredGauge("blobpool/dataused", nil)
+ datarealGauge = metrics.NewRegisteredGauge("blobpool/datareal", nil)
+ slotusedGauge = metrics.NewRegisteredGauge("blobpool/slotused", nil)
+
+ limboDatausedGauge = metrics.NewRegisteredGauge("blobpool/limbo/dataused", nil)
+ limboDatarealGauge = metrics.NewRegisteredGauge("blobpool/limbo/datareal", nil)
+ limboSlotusedGauge = metrics.NewRegisteredGauge("blobpool/limbo/slotused", nil)
+
+ // The below metrics track the per-shelf metrics for the primary blob store
+ // and the temporary limbo store.
+ shelfDatausedGaugeName = "blobpool/shelf_%d/dataused"
+ shelfDatagapsGaugeName = "blobpool/shelf_%d/datagaps"
+ shelfSlotusedGaugeName = "blobpool/shelf_%d/slotused"
+ shelfSlotgapsGaugeName = "blobpool/shelf_%d/slotgaps"
+
+ limboShelfDatausedGaugeName = "blobpool/limbo/shelf_%d/dataused"
+ limboShelfDatagapsGaugeName = "blobpool/limbo/shelf_%d/datagaps"
+ limboShelfSlotusedGaugeName = "blobpool/limbo/shelf_%d/slotused"
+ limboShelfSlotgapsGaugeName = "blobpool/limbo/shelf_%d/slotgaps"
+
+ // The oversized metrics aggregate the shelf stats above the max blob count
+ // limits to track transactions that are just huge, but don't contain blobs.
+ //
+ // There is no oversized data in the limbo; it only contains blobs and some
+ // constant metadata.
+ oversizedDatausedGauge = metrics.NewRegisteredGauge("blobpool/oversized/dataused", nil)
+ oversizedDatagapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/datagaps", nil)
+ oversizedSlotusedGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotused", nil)
+ oversizedSlotgapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotgaps", nil)
+
+ // basefeeGauge and blobfeeGauge track the current network 1559 base fee and
+ // 4844 blob fee respectively.
+ basefeeGauge = metrics.NewRegisteredGauge("blobpool/basefee", nil)
+ blobfeeGauge = metrics.NewRegisteredGauge("blobpool/blobfee", nil)
+
+ // pooltipGauge is the configurable miner tip to permit a transaction into
+ // the pool.
+ pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
+
+ // addwait/time, resetwait/time and getwait/time track the rough health of
+ // the pool and whether or not it's capable of keeping up with the load from
+ // the network.
+ addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ getwaitHist = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ gettimeHist = metrics.NewRegisteredHistogram("blobpool/gettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pendwaitHist = metrics.NewRegisteredHistogram("blobpool/pendwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pendtimeHist = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+)
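+
+// The shelf gauge names above are kept as format strings rather than being
+// pre-registered, presumably because the number of shelves is only known once
+// the backing datastore has been opened. A hypothetical registration sketch
+// (the shelf index 3 and the update value are arbitrary):
+//
+//  gauge := metrics.NewRegisteredGauge(fmt.Sprintf(shelfDatausedGaugeName, 3), nil)
+//  gauge.Update(123456) // bytes currently stored on that shelf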
diff --git a/core/txpool/blobpool/priority.go b/core/txpool/blobpool/priority.go
new file mode 100644
index 0000000000..dd39927361
--- /dev/null
+++ b/core/txpool/blobpool/priority.go
@@ -0,0 +1,100 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "math"
+ "math/bits"
+
+ "github.com/holiman/uint256"
+)
+
+// log2_1_125 is used in the eviction priority calculation.
+var log2_1_125 = math.Log2(1.125)
+
+// evictionPriority calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for both fee components.
+//
+// This method takes about 8ns on a very recent laptop CPU, recalculating about
+// 125 million transaction priority values per second.
+func evictionPriority(basefeeJumps float64, txBasefeeJumps, blobfeeJumps, txBlobfeeJumps float64) int {
+ var (
+ basefeePriority = evictionPriority1D(basefeeJumps, txBasefeeJumps)
+ blobfeePriority = evictionPriority1D(blobfeeJumps, txBlobfeeJumps)
+ )
+ if basefeePriority < blobfeePriority {
+ return basefeePriority
+ }
+ return blobfeePriority
+}
+
+// evictionPriority1D calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for a single fee component.
+func evictionPriority1D(basefeeJumps float64, txfeeJumps float64) int {
+ jumps := txfeeJumps - basefeeJumps
+ if int(jumps) == 0 {
+ return 0 // can't log2 0
+ }
+ if jumps < 0 {
+ return -intLog2(uint(-math.Floor(jumps)))
+ }
+ return intLog2(uint(math.Ceil(jumps)))
+}
+
+// dynamicFeeJumps calculates the log1.125(fee), namely the number of fee jumps
+// needed to reach the requested one. We only use it when calculating the jumps
+// between 2 fees, so it doesn't matter what exact reference the result is
+// measured from; the value is relative to the sequence (0, 1, 1.125, ...).
+//
+// This method is very expensive, taking about 75ns on a very recent laptop CPU,
+// but the result does not change with the lifetime of a transaction, so it can
+// be cached.
+func dynamicFeeJumps(fee *uint256.Int) float64 {
+ if fee.IsZero() {
+ return 0 // can't log2 zero, should never happen outside tests, but don't choke
+ }
+ return math.Log2(fee.Float64()) / log2_1_125
+}
+
+// intLog2 is a helper to calculate the integral part of a log2 of an unsigned
+// integer. It is a very specific calculation that's not particularly useful in
+// general, but it's what we need here (it's fast).
+func intLog2(n uint) int {
+ switch {
+ case n == 0:
+ panic("log2(0) is undefined")
+
+ case n < 2048:
+ return bits.UintSize - bits.LeadingZeros(n) - 1
+
+ default:
+ // The input is log1.125(uint256) = log2(uint256) / log2(1.125). At the
+ // most extreme, log2(uint256) will be a bit below 257, and the constant
+ // log2(1.125) ~= 0.17. The largest input thus is ~257 / ~0.17 ~= ~1511.
+ panic("dynamic fee jump diffs cannot reach this")
+ }
+}
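+
+// examplePriority is an illustrative sketch (hypothetical helper, mirroring the
+// first case in priority_test.go) of how the helpers above compose: going from
+// a 7 wei basefee to a 10 wei fee cap is about 3 fee jumps, which ceils to 4
+// and has an integral log2 of 2.
+func examplePriority() int {
+ baseJumps := dynamicFeeJumps(uint256.NewInt(7)) // ~16.5 jumps above zero
+ txJumps := dynamicFeeJumps(uint256.NewInt(10)) // ~19.5 jumps above zero
+ return evictionPriority1D(baseJumps, txJumps) // intLog2(ceil(~3.0)) = 2
+}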
diff --git a/core/txpool/blobpool/priority_test.go b/core/txpool/blobpool/priority_test.go
new file mode 100644
index 0000000000..3c9523d512
--- /dev/null
+++ b/core/txpool/blobpool/priority_test.go
@@ -0,0 +1,97 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "testing"
+
+ "github.com/holiman/uint256"
+)
+
+// Tests that the priority fees are calculated correctly as the log2 of the fee
+// jumps needed to go from the base fee to the tx's fee cap.
+func TestPriorityCalculation(t *testing.T) {
+ tests := []struct {
+ basefee uint64
+ txfee uint64
+ result int
+ }{
+ {basefee: 7, txfee: 10, result: 2}, // 3.02 jumps, 4 ceil, 2 log2
+ {basefee: 17_200_000_000, txfee: 17_200_000_000, result: 0}, // 0 jumps, special case 0 log2
+ {basefee: 9_853_941_692, txfee: 11_085_092_510, result: 0}, // 0.99 jumps, 1 ceil, 0 log2
+ {basefee: 11_544_106_391, txfee: 10_356_781_100, result: 0}, // -0.92 jumps, -1 floor, 0 log2
+ {basefee: 17_200_000_000, txfee: 7, result: -7}, // -183.57 jumps, -184 floor, -7 log2
+ {basefee: 7, txfee: 17_200_000_000, result: 7}, // 183.57 jumps, 184 ceil, 7 log2
+ }
+ for i, tt := range tests {
+ var (
+ baseJumps = dynamicFeeJumps(uint256.NewInt(tt.basefee))
+ feeJumps = dynamicFeeJumps(uint256.NewInt(tt.txfee))
+ )
+ if prio := evictionPriority1D(baseJumps, feeJumps); prio != tt.result {
+ t.Errorf("test %d priority mismatch: have %d, want %d", i, prio, tt.result)
+ }
+ }
+}
+
+// Benchmarks how many dynamic fee jump values can be done.
+func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
+ fees := make([]*uint256.Int, b.N)
+ for i := 0; i < b.N; i++ {
+ fees[i] = uint256.NewInt(rand.Uint64())
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ dynamicFeeJumps(fees[i])
+ }
+}
+
+// Benchmarks how many priority recalculations can be done.
+func BenchmarkPriorityCalculation(b *testing.B) {
+ // The basefee and blob fee are constant for all transactions across a block,
+ // so we can assume their absolute jump counts can be pre-computed.
+ basefee := uint256.NewInt(17_200_000_000) // 17.2 Gwei is the 22.03.2023 zero-emission basefee, random number
+ blobfee := uint256.NewInt(123_456_789_000) // Completely random, no idea what this will be
+
+ basefeeJumps := dynamicFeeJumps(basefee)
+ blobfeeJumps := dynamicFeeJumps(blobfee)
+
+ // The transaction's fee cap and blob fee cap are constant across the life
+ // of the transaction, so we can pre-calculate and cache them.
+ txBasefeeJumps := make([]float64, b.N)
+ txBlobfeeJumps := make([]float64, b.N)
+ for i := 0; i < b.N; i++ {
+ txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+ txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ evictionPriority(basefeeJumps, txBasefeeJumps[i], blobfeeJumps, txBlobfeeJumps[i])
+ }
+}
diff --git a/core/state/snapshot/sort.go b/core/txpool/blobpool/slotter.go
similarity index 53%
rename from core/state/snapshot/sort.go
rename to core/txpool/blobpool/slotter.go
index 6254d37943..656dc51d8c 100644
--- a/core/state/snapshot/sort.go
+++ b/core/txpool/blobpool/slotter.go
@@ -1,4 +1,4 @@
-// (c) 2019-2020, Ava Labs, Inc.
+// (c) 2024, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
@@ -8,7 +8,7 @@
//
// Much love to the original authors for their work.
// **********
-// Copyright 2019 The go-ethereum Authors
+// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -24,23 +24,25 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package snapshot
+package blobpool
-import (
- "bytes"
-
- "github.com/ethereum/go-ethereum/common"
-)
-
-// hashes is a helper to implement sort.Interface.
-type hashes []common.Hash
-
-// Len is the number of elements in the collection.
-func (hs hashes) Len() int { return len(hs) }
+// newSlotter creates a helper method for the Billy datastore that returns the
+// individual shelf sizes used to store transactions in.
+//
+// The slotter will create shelves for each possible blob count + some tx metadata
+// wiggle room, up to the max permitted limits.
+//
+// The slotter also creates a shelf for 0-blob transactions. Whilst those are not
+// allowed in the current protocol, having an empty shelf is not a relevant use
+// of resources, but it makes stress testing with junk transactions simpler.
+func newSlotter() func() (uint32, bool) {
+ slotsize := uint32(txAvgSize)
+ slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return
-// Less reports whether the element with index i should sort before the element
-// with index j.
-func (hs hashes) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 }
+ return func() (size uint32, done bool) {
+ slotsize += blobSize
+ finished := slotsize > maxBlobsPerTransaction*blobSize+txMaxSize
-// Swap swaps the elements with indexes i and j.
-func (hs hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] }
+ return slotsize, finished
+ }
+}
diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go
new file mode 100644
index 0000000000..9a89f42d7d
--- /dev/null
+++ b/core/txpool/blobpool/slotter_test.go
@@ -0,0 +1,68 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "testing"
+
+// Tests that the slotter creates the expected database shelves.
+func TestNewSlotter(t *testing.T) {
+ // Generate the database shelf sizes
+ slotter := newSlotter()
+
+ var shelves []uint32
+ for {
+ shelf, done := slotter()
+ shelves = append(shelves, shelf)
+ if done {
+ break
+ }
+ }
+ // Compare the database shelves to the expected ones
+ want := []uint32{
+ 0*blobSize + txAvgSize, // 0 blob + some expected tx infos
+ 1*blobSize + txAvgSize, // 1 blob + some expected tx infos
+ 2*blobSize + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 3*blobSize + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 4*blobSize + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 5*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 6*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 7*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 8*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 9*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 10*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 11*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 12*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos >= 4 blobs + max tx metadata size
+ }
+ if len(shelves) != len(want) {
+ t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want))
+ }
+ for i := 0; i < len(shelves) && i < len(want); i++ {
+ if shelves[i] != want[i] {
+ t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i])
+ }
+ }
+}
diff --git a/core/txpool/errors.go b/core/txpool/errors.go
new file mode 100644
index 0000000000..7ecbfef35f
--- /dev/null
+++ b/core/txpool/errors.go
@@ -0,0 +1,67 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import "errors"
+
+var (
+ // ErrAlreadyKnown is returned if the transaction is already contained
+ // within the pool.
+ ErrAlreadyKnown = errors.New("already known")
+
+ // ErrInvalidSender is returned if the transaction contains an invalid signature.
+ ErrInvalidSender = errors.New("invalid sender")
+
+ // ErrUnderpriced is returned if a transaction's gas price is below the minimum
+ // configured for the transaction pool.
+ ErrUnderpriced = errors.New("transaction underpriced")
+
+ // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
+ // with a different one without the required price bump.
+ ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
+
+ // ErrAccountLimitExceeded is returned if a transaction would exceed the number
+ // allowed by a pool for a single account.
+ ErrAccountLimitExceeded = errors.New("account limit exceeded")
+
+ // ErrGasLimit is returned if a transaction's requested gas limit exceeds the
+ // maximum allowance of the current block.
+ ErrGasLimit = errors.New("exceeds block gas limit")
+
+ // ErrNegativeValue is a sanity error to ensure no one is able to specify a
+ // transaction with a negative value.
+ ErrNegativeValue = errors.New("negative value")
+
+ // ErrOversizedData is returned if the input data of a transaction is greater
+ // than some meaningful limit a user might use. This is not a consensus error
+ // making the transaction invalid, rather a DOS protection.
+ ErrOversizedData = errors.New("oversized data")
+
+ // ErrFutureReplacePending is returned if a future transaction replaces a pending
+ // transaction. Future transactions should only be able to replace other future transactions.
+ ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
+)
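+
+// exampleIsRetryable is an illustrative sketch (hypothetical helper, not part
+// of the pool API) showing that the sentinels above are plain error values and
+// are meant to be matched with errors.Is rather than by string comparison.
+func exampleIsRetryable(err error) bool {
+ switch {
+ case errors.Is(err, ErrAlreadyKnown):
+ return false // duplicate submission, nothing to retry
+ case errors.Is(err, ErrUnderpriced), errors.Is(err, ErrReplaceUnderpriced):
+ return true // bump the fee and resubmit
+ default:
+ return false
+ }
+}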
diff --git a/core/txpool/journal.go b/core/txpool/legacypool/journal.go
similarity index 99%
rename from core/txpool/journal.go
rename to core/txpool/legacypool/journal.go
index 11ec2ccd36..2065fb36e0 100644
--- a/core/txpool/journal.go
+++ b/core/txpool/legacypool/journal.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"errors"
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
new file mode 100644
index 0000000000..8c74f1efbb
--- /dev/null
+++ b/core/txpool/legacypool/legacypool.go
@@ -0,0 +1,2141 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package legacypool implements the normal EVM execution transaction pool.
+package legacypool
+
+import (
+ "errors"
+ "math"
+ "math/big"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/subnet-evm/commontype"
+ "github.com/ava-labs/subnet-evm/consensus/dummy"
+ "github.com/ava-labs/subnet-evm/core"
+ "github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/metrics"
+ "github.com/ava-labs/subnet-evm/params"
+ "github.com/ava-labs/subnet-evm/precompile/contracts/feemanager"
+ "github.com/ava-labs/subnet-evm/utils"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+const (
+ // txSlotSize is used to calculate how many data slots a single transaction
+ // takes up based on its size. The slots are used as DoS protection, ensuring
+ // that validating a new transaction remains a constant operation (in reality
+ // O(maxslots), where max slots are 4 currently).
+ txSlotSize = 32 * 1024
+
+ // txMaxSize is the maximum size a single transaction can have. This field has
+ // non-trivial consequences: larger transactions are significantly harder and
+ // more expensive to propagate; larger transactions also take more resources
+ // to validate whether they fit into the pool or not.
+ //
+ // Note: the max contract size is 24KB
+ txMaxSize = 4 * txSlotSize // 128KB
+)
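+
+// As a worked example of the slot arithmetic above (illustrative only): a
+// 100KB transaction occupies ceil(102400/32768) = 4 slots, i.e. the current
+// maximum, while anything up to 32KB costs a single slot.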
+
+var (
+ // ErrAlreadyKnown is returned if the transaction is already contained
+ // within the pool.
+ ErrAlreadyKnown = errors.New("already known")
+
+ // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
+ // another remote transaction.
+ ErrTxPoolOverflow = errors.New("txpool is full")
+)
+
+var (
+ evictionInterval = time.Minute // Time interval to check for evictable transactions
+ statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
+ baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after SubnetEVM is enabled
+)
+
+var (
+ // Metrics for the pending pool
+ pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
+ pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
+ pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
+ pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds
+
+ // Metrics for the queued pool
+ queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
+ queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
+ queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
+ queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds
+ queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime
+
+ // General tx metrics
+ knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil)
+ validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil)
+ invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil)
+ underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
+ overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil)
+
+ // throttleTxMeter counts how many transactions are rejected due to too-many-changes between
+ // txpool reorgs.
+ throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
+ // reorgDurationTimer measures how long a txpool reorg takes.
+ reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
+ // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
+ // that this number is pretty low, since txpool reorgs happen very frequently.
+ dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
+ queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
+ localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
+ slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
+
+ reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
+)
+
+// BlockChain defines the minimal set of methods needed to back a tx pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+ // Config retrieves the chain's fork configuration.
+ Config() *params.ChainConfig
+
+ // CurrentBlock returns the current head of the chain.
+ CurrentBlock() *types.Header
+
+ // GetBlock retrieves a specific block, used during pool resets.
+ GetBlock(hash common.Hash, number uint64) *types.Block
+
+ // StateAt returns a state database for a given root hash (generally the head).
+ StateAt(root common.Hash) (*state.StateDB, error)
+
+ SenderCacher() *core.TxSenderCacher
+ GetFeeConfigAt(parent *types.Header) (commontype.FeeConfig, *big.Int, error)
+}
+
+// Config are the configuration parameters of the transaction pool.
+type Config struct {
+ Locals []common.Address // Addresses that should be treated by default as local
+ NoLocals bool // Whether local transaction handling should be disabled
+ Journal string // Journal of local transactions to survive node restarts
+ Rejournal time.Duration // Time interval to regenerate the local transaction journal
+
+ PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
+ PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
+
+ AccountSlots uint64 // Number of executable transaction slots guaranteed per account
+ GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
+ AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
+ GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
+
+ Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
+}
+
+// DefaultConfig contains the default configurations for the transaction pool.
+var DefaultConfig = Config{
+ // If we re-enable txpool journaling, we should also add the saved local
+ // transactions to the p2p gossip on startup.
+ Journal: "",
+ Rejournal: time.Hour,
+
+ PriceLimit: 1,
+ PriceBump: 10,
+
+ AccountSlots: 16,
+ GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
+ AccountQueue: 64,
+ GlobalQueue: 1024,
+
+ Lifetime: 10 * time.Minute,
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *Config) sanitize() Config {
+ conf := *config
+ if conf.Rejournal < time.Second {
+ log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
+ conf.Rejournal = time.Second
+ }
+ if conf.PriceLimit < 1 {
+ log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
+ conf.PriceLimit = DefaultConfig.PriceLimit
+ }
+ if conf.PriceBump < 1 {
+ log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
+ conf.PriceBump = DefaultConfig.PriceBump
+ }
+ if conf.AccountSlots < 1 {
+ log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
+ conf.AccountSlots = DefaultConfig.AccountSlots
+ }
+ if conf.GlobalSlots < 1 {
+ log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
+ conf.GlobalSlots = DefaultConfig.GlobalSlots
+ }
+ if conf.AccountQueue < 1 {
+ log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
+ conf.AccountQueue = DefaultConfig.AccountQueue
+ }
+ if conf.GlobalQueue < 1 {
+ log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
+ conf.GlobalQueue = DefaultConfig.GlobalQueue
+ }
+ if conf.Lifetime < 1 {
+ log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
+ conf.Lifetime = DefaultConfig.Lifetime
+ }
+ return conf
+}
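+
+// An illustrative configuration sketch (hypothetical values; chain is assumed
+// to satisfy the BlockChain interface above): callers typically start from
+// DefaultConfig, override a few knobs and rely on sanitize, which New invokes
+// internally, to clamp anything unworkable.
+//
+//  cfg := DefaultConfig
+//  cfg.PriceLimit = 0    // invalid, sanitized back to DefaultConfig.PriceLimit
+//  cfg.AccountSlots = 32 // allow more executable slots per account
+//  pool := New(cfg, chain)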
+
+// LegacyPool contains all currently known transactions. Transactions
+// enter the pool when they are received from the network or submitted
+// locally. They exit the pool when they are included in the blockchain.
+//
+// The pool separates processable transactions (which can be applied to the
+// current state) and future transactions. Transactions move between those
+// two states over time as they are received and processed.
+type LegacyPool struct {
+ config Config
+ chainconfig *params.ChainConfig
+ chain BlockChain
+ gasTip atomic.Pointer[big.Int]
+ minimumFee *big.Int
+ txFeed event.Feed
+ scope event.SubscriptionScope
+ signer types.Signer
+ mu sync.RWMutex
+
+ // [currentStateLock] is required to allow concurrent access to address nonces
+ // and balances during reorgs and gossip handling.
+ currentStateLock sync.Mutex
+ // closed when the transaction pool is stopped. Any goroutine can listen
+ // to this to be notified if it should shut down.
+ generalShutdownChan chan struct{}
+
+ currentHead atomic.Pointer[types.Header] // Current head of the blockchain
+ currentState *state.StateDB // Current state in the blockchain head
+ pendingNonces *noncer // Pending state tracking virtual nonces
+
+ locals *accountSet // Set of local transactions to exempt from eviction rules
+ journal *journal // Journal of local transactions to back up to disk
+
+ reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools
+ pending map[common.Address]*list // All currently processable transactions
+ queue map[common.Address]*list // Queued but non-processable transactions
+ beats map[common.Address]time.Time // Last heartbeat from each known account
+ all *lookup // All transactions to allow lookups
+ priced *pricedList // All transactions sorted by price
+
+ reqResetCh chan *txpoolResetRequest
+ reqPromoteCh chan *accountSet
+ queueTxEventCh chan *types.Transaction
+ reorgDoneCh chan chan struct{}
+ reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop
+ wg sync.WaitGroup // tracks loop, scheduleReorgLoop
+ initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
+
+ changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
+}
+
+type txpoolResetRequest struct {
+ oldHead, newHead *types.Header
+}
+
+// New creates a new transaction pool to gather, sort and filter inbound
+// transactions from the network.
+func New(config Config, chain BlockChain) *LegacyPool {
+ // Sanitize the input to ensure no vulnerable gas prices are set
+ config = (&config).sanitize()
+
+ // Create the transaction pool with its initial settings
+ pool := &LegacyPool{
+ config: config,
+ chain: chain,
+ chainconfig: chain.Config(),
+ signer: types.LatestSigner(chain.Config()),
+ pending: make(map[common.Address]*list),
+ queue: make(map[common.Address]*list),
+ beats: make(map[common.Address]time.Time),
+ all: newLookup(),
+ reqResetCh: make(chan *txpoolResetRequest),
+ reqPromoteCh: make(chan *accountSet),
+ queueTxEventCh: make(chan *types.Transaction),
+ reorgDoneCh: make(chan chan struct{}),
+ reorgShutdownCh: make(chan struct{}),
+ initDoneCh: make(chan struct{}),
+ generalShutdownChan: make(chan struct{}),
+ }
+ pool.locals = newAccountSet(pool.signer)
+ for _, addr := range config.Locals {
+ log.Info("Setting new local account", "address", addr)
+ pool.locals.add(addr)
+ }
+ pool.priced = newPricedList(pool.all)
+
+ if !config.NoLocals && config.Journal != "" {
+ pool.journal = newTxJournal(config.Journal)
+ }
+ return pool
+}
+
+// Filter returns whether the given transaction can be consumed by the legacy
+// pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction.
+func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
+ switch tx.Type() {
+ case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType:
+ return true
+ default:
+ return false
+ }
+}
+
+// Init sets the gas price needed to keep a transaction in the pool and the chain
+// head to allow balance / nonce checks. The transaction journal will be loaded
+// from disk and filtered based on the provided starting settings. The internal
+// goroutines will be spun up and the pool deemed operational afterwards.
+func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
+ // Set the address reserver to request exclusive access to pooled accounts
+ pool.reserve = reserve
+
+ // Set the basic pool parameters
+ pool.gasTip.Store(gasTip)
+ pool.reset(nil, head)
+
+ // Start the reorg loop early, so it can handle requests generated during
+ // journal loading.
+ pool.wg.Add(1)
+ go pool.scheduleReorgLoop()
+
+ // If local transactions and journaling is enabled, load from disk
+ if pool.journal != nil {
+ if err := pool.journal.load(pool.addLocals); err != nil {
+ log.Warn("Failed to load transaction journal", "err", err)
+ }
+ if err := pool.journal.rotate(pool.local()); err != nil {
+ log.Warn("Failed to rotate transaction journal", "err", err)
+ }
+ }
+ pool.wg.Add(1)
+ go pool.loop()
+
+ pool.startPeriodicFeeUpdate()
+
+ return nil
+}
+
+// loop is the transaction pool's main event loop, waiting for and reacting to
+// outside blockchain events as well as for various reporting and transaction
+// eviction events.
+func (pool *LegacyPool) loop() {
+ defer pool.wg.Done()
+
+ var (
+ prevPending, prevQueued, prevStales int
+
+ // Start the stats reporting and transaction eviction tickers
+ report = time.NewTicker(statsReportInterval)
+ evict = time.NewTicker(evictionInterval)
+ journal = time.NewTicker(pool.config.Rejournal)
+ )
+ defer report.Stop()
+ defer evict.Stop()
+ defer journal.Stop()
+
+ // Notify tests that the init phase is done
+ close(pool.initDoneCh)
+ for {
+ select {
+ // Handle pool shutdown
+ case <-pool.reorgShutdownCh:
+ return
+
+ // Handle stats reporting ticks
+ case <-report.C:
+ pool.mu.RLock()
+ pending, queued := pool.stats()
+ pool.mu.RUnlock()
+ stales := int(pool.priced.stales.Load())
+
+ if pending != prevPending || queued != prevQueued || stales != prevStales {
+ log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
+ prevPending, prevQueued, prevStales = pending, queued, stales
+ }
+
+ // Handle inactive account transaction eviction
+ case <-evict.C:
+ pool.mu.Lock()
+ for addr := range pool.queue {
+ // Skip local transactions from the eviction mechanism
+ if pool.locals.contains(addr) {
+ continue
+ }
+ // Any non-locals old enough should be removed
+ if time.Since(pool.beats[addr]) > pool.config.Lifetime {
+ list := pool.queue[addr].Flatten()
+ for _, tx := range list {
+ pool.removeTx(tx.Hash(), true, true)
+ }
+ queuedEvictionMeter.Mark(int64(len(list)))
+ }
+ }
+ pool.mu.Unlock()
+
+ // Handle local transaction journal rotation
+ case <-journal.C:
+ if pool.journal != nil {
+ pool.mu.Lock()
+ if err := pool.journal.rotate(pool.local()); err != nil {
+ log.Warn("Failed to rotate local tx journal", "err", err)
+ }
+ pool.mu.Unlock()
+ }
+ }
+ }
+}
+
+// Close terminates the transaction pool.
+func (pool *LegacyPool) Close() error {
+ // Unsubscribe all subscriptions registered from txpool
+ pool.scope.Close()
+
+ close(pool.generalShutdownChan)
+
+ // Terminate the pool reorger and return
+ close(pool.reorgShutdownCh)
+ pool.wg.Wait()
+
+ if pool.journal != nil {
+ pool.journal.close()
+ }
+ log.Info("Transaction pool stopped")
+ return nil
+}
+
+// Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
+// kept in sync with the main transaction pool's internal state.
+func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
+ wait := pool.requestReset(oldHead, newHead)
+ <-wait
+}
+
+// SubscribeTransactions registers a subscription of NewTxsEvent and
+// starts sending events to the given channel.
+func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
+ return pool.scope.Track(pool.txFeed.Subscribe(ch))
+}
+
+// SetGasTip updates the minimum gas tip required by the transaction pool for a
+// new transaction, and drops all transactions below this threshold.
+func (pool *LegacyPool) SetGasTip(tip *big.Int) {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ old := pool.gasTip.Load()
+ pool.gasTip.Store(new(big.Int).Set(tip))
+
+ // If the min miner fee increased, remove transactions below the new threshold
+ if tip.Cmp(old) > 0 {
+ // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
+ drop := pool.all.RemotesBelowTip(tip)
+ for _, tx := range drop {
+ pool.removeTx(tx.Hash(), false, true)
+ }
+ pool.priced.Removed(len(drop))
+ }
+ log.Info("Legacy pool tip threshold updated", "tip", tip)
+}
+
+func (pool *LegacyPool) SetMinFee(minFee *big.Int) {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ pool.minimumFee = minFee
+}
+
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
+func (pool *LegacyPool) Nonce(addr common.Address) uint64 {
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ return pool.pendingNonces.get(addr)
+}
+
+// Stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (pool *LegacyPool) Stats() (int, int) {
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ return pool.stats()
+}
+
+// stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (pool *LegacyPool) stats() (int, int) {
+ pending := 0
+ for _, list := range pool.pending {
+ pending += list.Len()
+ }
+ queued := 0
+ for _, list := range pool.queue {
+ queued += list.Len()
+ }
+ return pending, queued
+}
+
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and sorted by nonce.
+func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
+ for addr, list := range pool.pending {
+ pending[addr] = list.Flatten()
+ }
+ queued := make(map[common.Address][]*types.Transaction, len(pool.queue))
+ for addr, list := range pool.queue {
+ queued[addr] = list.Flatten()
+ }
+ return pending, queued
+}
+
+// ContentFrom retrieves the data content of the transaction pool, returning the
+// pending as well as queued transactions of this address, grouped by nonce.
+func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ var pending []*types.Transaction
+ if list, ok := pool.pending[addr]; ok {
+ pending = list.Flatten()
+ }
+ var queued []*types.Transaction
+ if list, ok := pool.queue[addr]; ok {
+ queued = list.Flatten()
+ }
+ return pending, queued
+}
+
+// Pending retrieves all currently processable transactions, grouped by origin
+// account and sorted by nonce. The returned transaction set is a copy and can be
+// freely modified by calling code.
+//
+// The enforceTips parameter can be used to do an extra filtering on the pending
+// transactions and only return those whose **effective** tip is large enough in
+// the next pending execution environment.
+func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+ return pool.PendingWithBaseFee(enforceTips, nil)
+}
+
+// If baseFee is nil, then pool.priced.urgent.baseFee is used.
+func (pool *LegacyPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*txpool.LazyTransaction {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ if baseFee == nil {
+ baseFee = pool.priced.urgent.baseFee
+ }
+
+ pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
+ for addr, list := range pool.pending {
+ txs := list.Flatten()
+
+ // If the miner requests tip enforcement, cap the lists now
+ if enforceTips && !pool.locals.contains(addr) {
+ for i, tx := range txs {
+ if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), baseFee) < 0 {
+ txs = txs[:i]
+ break
+ }
+ }
+ }
+ if len(txs) > 0 {
+ lazies := make([]*txpool.LazyTransaction, len(txs))
+ for i := 0; i < len(txs); i++ {
+ lazies[i] = &txpool.LazyTransaction{
+ Pool: pool,
+ Hash: txs[i].Hash(),
+ Tx: &txpool.Transaction{Tx: txs[i]},
+ Time: txs[i].Time(),
+ GasFeeCap: txs[i].GasFeeCap(),
+ GasTipCap: txs[i].GasTipCap(),
+ }
+ }
+ pending[addr] = lazies
+ }
+ }
+ return pending
+}
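+
+// A minimal consumption sketch (hypothetical block-builder side caller): the
+// returned lazies are grouped by sender and sorted by nonce, so a builder can
+// walk each account's slice in order; the wrapped transaction itself is
+// available as lazy.Tx.Tx.
+//
+//  for addr, lazies := range pool.PendingWithBaseFee(true, baseFee) {
+//   for _, lazy := range lazies {
+//    log.Trace("Pending candidate", "from", addr, "hash", lazy.Hash)
+//   }
+//  }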
+
+// PendingFrom returns the same set of transactions that would be returned from Pending restricted to only
+// transactions from [addrs].
+func (pool *LegacyPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
+ for _, addr := range addrs {
+ list, ok := pool.pending[addr]
+ if !ok {
+ continue
+ }
+ txs := list.Flatten()
+
+ // If the miner requests tip enforcement, cap the lists now
+ if enforceTips && !pool.locals.contains(addr) {
+ for i, tx := range txs {
+ if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), pool.priced.urgent.baseFee) < 0 {
+ txs = txs[:i]
+ break
+ }
+ }
+ }
+ if len(txs) > 0 {
+ lazies := make([]*txpool.LazyTransaction, len(txs))
+ for i := 0; i < len(txs); i++ {
+ lazies[i] = &txpool.LazyTransaction{
+ Pool: pool,
+ Hash: txs[i].Hash(),
+ Tx: &txpool.Transaction{Tx: txs[i]},
+ Time: txs[i].Time(),
+ GasFeeCap: txs[i].GasFeeCap(),
+ GasTipCap: txs[i].GasTipCap(),
+ }
+ }
+ pending[addr] = lazies
+ }
+ }
+ return pending
+}
+
+// IteratePending iterates over [pool.pending] until [f] returns false.
+// The caller must not modify [tx]. Returns false if iteration was interrupted.
+func (pool *LegacyPool) IteratePending(f func(tx *txpool.Transaction) bool) bool {
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ for _, list := range pool.pending {
+ for _, tx := range list.txs.items {
+ if !f(&txpool.Transaction{Tx: tx}) {
+ return false
+ }
+ }
+ }
+ return true
+}
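+
+// A minimal usage sketch (hypothetical caller): count the pending transactions
+// that have a recipient; returning false from the callback stops the iteration
+// early.
+//
+//  count := 0
+//  pool.IteratePending(func(tx *txpool.Transaction) bool {
+//   if tx.Tx.To() != nil {
+//    count++
+//   }
+//   return true // keep iterating
+//  })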
+
+// Locals retrieves the accounts currently considered local by the pool.
+func (pool *LegacyPool) Locals() []common.Address {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ return pool.locals.flatten()
+}
+
+// local retrieves all currently known local transactions, grouped by origin
+// account and sorted by nonce. The returned transaction set is a copy and can be
+// freely modified by calling code.
+func (pool *LegacyPool) local() map[common.Address]types.Transactions {
+ txs := make(map[common.Address]types.Transactions)
+ for addr := range pool.locals.accounts {
+ if pending := pool.pending[addr]; pending != nil {
+ txs[addr] = append(txs[addr], pending.Flatten()...)
+ }
+ if queued := pool.queue[addr]; queued != nil {
+ txs[addr] = append(txs[addr], queued.Flatten()...)
+ }
+ }
+ return txs
+}
+
+// validateTxBasics checks whether a transaction is valid according to the consensus
+// rules, but does not check state-dependent validation such as sufficient balance.
+// This check is meant as an early check which only needs to be performed once,
+// and does not require the pool mutex to be held.
+func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error {
+ opts := &txpool.ValidationOptions{
+ Config: pool.chainconfig,
+ Accept: 0 |
+ 1< pool.config.GlobalSlots+pool.config.GlobalQueue {
+ // If the new transaction is underpriced, don't accept it
+ if !isLocal && pool.priced.Underpriced(tx) {
+ log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ underpricedTxMeter.Mark(1)
+ return false, txpool.ErrUnderpriced
+ }
+
+ // We're about to replace a transaction. The reorg does a more thorough
+ // analysis of what to remove and how, but it runs async. We don't want to
+ // do too many replacements between reorg-runs, so we cap the number of
+ // replacements to 25% of the slots
+ if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
+ throttleTxMeter.Mark(1)
+ return false, ErrTxPoolOverflow
+ }
+
+ // New transaction is better than our worse ones, make room for it.
+ // If it's a local transaction, forcibly discard all available transactions.
+ // Otherwise if we can't make enough room for new one, abort the operation.
+ drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
+
+ // Special case, we still can't make the room for the new remote one.
+ if !isLocal && !success {
+ log.Trace("Discarding overflown transaction", "hash", hash)
+ overflowedTxMeter.Mark(1)
+ return false, ErrTxPoolOverflow
+ }
+
+ // If the new transaction is a future transaction it should never churn pending transactions
+ if !isLocal && pool.isGapped(from, tx) {
+ var replacesPending bool
+ for _, dropTx := range drop {
+ dropSender, _ := types.Sender(pool.signer, dropTx)
+ if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
+ replacesPending = true
+ break
+ }
+ }
+ // Add all transactions back to the priced queue
+ if replacesPending {
+ for _, dropTx := range drop {
+ pool.priced.Put(dropTx, false)
+ }
+ log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
+ return false, txpool.ErrFutureReplacePending
+ }
+ }
+
+ // Kick out the underpriced remote transactions.
+ for _, tx := range drop {
+ log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ underpricedTxMeter.Mark(1)
+
+ sender, _ := types.Sender(pool.signer, tx)
+ dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if last from the acc
+
+ pool.changesSinceReorg += dropped
+ }
+ }
+
+ // Try to replace an existing transaction in the pending pool
+ if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
+ // Nonce already pending, check if required price bump is met
+ inserted, old := list.Add(tx, pool.config.PriceBump)
+ if !inserted {
+ pendingDiscardMeter.Mark(1)
+ return false, txpool.ErrReplaceUnderpriced
+ }
+ // New transaction is better, replace old one
+ if old != nil {
+ pool.all.Remove(old.Hash())
+ pool.priced.Removed(1)
+ pendingReplaceMeter.Mark(1)
+ }
+ pool.all.Add(tx, isLocal)
+ pool.priced.Put(tx, isLocal)
+ pool.journalTx(from, tx)
+ pool.queueTxEvent(tx)
+ log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+
+ // Successful promotion, bump the heartbeat
+ pool.beats[from] = time.Now()
+ return old != nil, nil
+ }
+ // New transaction isn't replacing a pending one, push into queue
+ replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
+ if err != nil {
+ return false, err
+ }
+ // Mark local addresses and journal local transactions
+ if local && !pool.locals.contains(from) {
+ log.Info("Setting new local account", "address", from)
+ pool.locals.add(from)
+ pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
+ }
+ if isLocal {
+ localGauge.Inc(1)
+ }
+ pool.journalTx(from, tx)
+
+ log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
+ return replaced, nil
+}
+
+// isGapped reports whether the given transaction has a nonce gap with the
+// pending state that cannot be filled by already-queued transactions, i.e.
+// whether it is not immediately executable.
+func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
+ // Short circuit if transaction falls within the scope of the pending list
+ // or matches the next pending nonce which can be promoted as an executable
+ // transaction afterwards. Note, the tx staleness is already checked in
+ // 'validateTx' function previously.
+ next := pool.pendingNonces.get(from)
+ if tx.Nonce() <= next {
+ return false
+ }
+ // The transaction has a nonce gap with pending list, it's only considered
+ // as executable if transactions in queue can fill up the nonce gap.
+ queue, ok := pool.queue[from]
+ if !ok {
+ return true
+ }
+ for nonce := next; nonce < tx.Nonce(); nonce++ {
+ if !queue.Contains(nonce) {
+ return true // txs in queue can't fill up the nonce gap
+ }
+ }
+ return false
+}
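+
+// Worked example for isGapped (illustrative): with a pending nonce of 5 for
+// the sender, a new transaction with nonce 8 is only treated as executable if
+// the queue already holds nonces 5, 6 and 7; if any of those is missing,
+// isGapped returns true and the transaction stays in the future queue.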
+
+// enqueueTx inserts a new transaction into the non-executable transaction queue.
+//
+// Note, this method assumes the pool lock is held!
+func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
+ // Try to insert the transaction into the future queue
+ from, _ := types.Sender(pool.signer, tx) // already validated
+ if pool.queue[from] == nil {
+ pool.queue[from] = newList(false)
+ }
+ inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
+ if !inserted {
+ // An older transaction was better, discard this
+ queuedDiscardMeter.Mark(1)
+ return false, txpool.ErrReplaceUnderpriced
+ }
+ // Discard any previous transaction and mark this
+ if old != nil {
+ pool.all.Remove(old.Hash())
+ pool.priced.Removed(1)
+ queuedReplaceMeter.Mark(1)
+ } else {
+ // Nothing was replaced, bump the queued counter
+ queuedGauge.Inc(1)
+ }
+ // If the transaction isn't in lookup set but it's expected to be there,
+ // show the error log.
+ if pool.all.Get(hash) == nil && !addAll {
+ log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
+ }
+ if addAll {
+ pool.all.Add(tx, local)
+ pool.priced.Put(tx, local)
+ }
+ // If we never record the heartbeat, do it right now.
+ if _, exist := pool.beats[from]; !exist {
+ pool.beats[from] = time.Now()
+ }
+ return old != nil, nil
+}
+
+// journalTx adds the specified transaction to the local disk journal if it is
+// deemed to have been sent from a local account.
+func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) {
+ // Only journal if it's enabled and the transaction is local
+ if pool.journal == nil || !pool.locals.contains(from) {
+ return
+ }
+ if err := pool.journal.insert(tx); err != nil {
+ log.Warn("Failed to journal local transaction", "err", err)
+ }
+}
+
+// promoteTx adds a transaction to the pending (processable) list of transactions
+// and returns whether it was inserted or an older was better.
+//
+// Note, this method assumes the pool lock is held!
+func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
+ // Try to insert the transaction into the pending queue
+ if pool.pending[addr] == nil {
+ pool.pending[addr] = newList(true)
+ }
+ list := pool.pending[addr]
+
+ inserted, old := list.Add(tx, pool.config.PriceBump)
+ if !inserted {
+ // An older transaction was better, discard this
+ pool.all.Remove(hash)
+ pool.priced.Removed(1)
+ pendingDiscardMeter.Mark(1)
+ return false
+ }
+ // Otherwise discard any previous transaction and mark this
+ if old != nil {
+ pool.all.Remove(old.Hash())
+ pool.priced.Removed(1)
+ pendingReplaceMeter.Mark(1)
+ } else {
+ // Nothing was replaced, bump the pending counter
+ pendingGauge.Inc(1)
+ }
+ // Set the potentially new pending nonce and notify any subsystems of the new tx
+ pool.pendingNonces.set(addr, tx.Nonce()+1)
+
+ // Successful promotion, bump the heartbeat
+ pool.beats[addr] = time.Now()
+ return true
+}
+
+// Add enqueues a batch of transactions into the pool if they are valid. Depending
+// on the local flag, full pricing constraints will or will not be applied.
+//
+// If sync is set, the method will block until all internal maintenance related
+// to the add is finished. Only use this during tests for determinism!
+func (pool *LegacyPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
+ unwrapped := make([]*types.Transaction, len(txs))
+ for i, tx := range txs {
+ unwrapped[i] = tx.Tx
+ }
+ return pool.addTxs(unwrapped, local, sync)
+}
+
+// addLocals enqueues a batch of transactions into the pool if they are valid, marking the
+// senders as local ones, ensuring they go around the local pricing constraints.
+//
+// This method is used to add transactions from the RPC API and performs synchronous pool
+// reorganization and event propagation.
+func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
+ return pool.addTxs(txs, !pool.config.NoLocals, true)
+}
+
+// addLocal enqueues a single local transaction into the pool if it is valid. This is
+// a convenience wrapper around addLocals.
+func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
+ errs := pool.addLocals([]*types.Transaction{tx})
+ return errs[0]
+}
+
+// addRemotes enqueues a batch of transactions into the pool if they are valid. If the
+// senders are not among the locally tracked ones, full pricing constraints will apply.
+//
+// This method is used to add transactions from the p2p network and does not wait for pool
+// reorganization and internal event propagation.
+func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
+ return pool.addTxs(txs, false, false)
+}
+
+// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
+// wrapper around addRemotes.
+func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
+ errs := pool.addRemotes([]*types.Transaction{tx})
+ return errs[0]
+}
+
+// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
+func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
+ return pool.addTxs(txs, false, true)
+}
+
+// addRemoteSync is like addRemote, but waits for pool reorganization. Tests use this method.
+func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
+ return pool.addTxs([]*types.Transaction{tx}, false, true)[0]
+}
+
+// addTxs attempts to queue a batch of transactions if they are valid.
+func (pool *LegacyPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
+ // Filter out known ones without obtaining the pool lock or recovering signatures
+ var (
+ errs = make([]error, len(txs))
+ news = make([]*types.Transaction, 0, len(txs))
+ )
+ for i, tx := range txs {
+ // If the transaction is known, pre-set the error slot
+ if pool.all.Get(tx.Hash()) != nil {
+ errs[i] = ErrAlreadyKnown
+ knownTxMeter.Mark(1)
+ continue
+ }
+ // Exclude transactions with basic errors, e.g. invalid signatures and
+ // insufficient intrinsic gas, as soon as possible and cache the senders
+ // in the transactions before obtaining the lock.
+ if err := pool.validateTxBasics(tx, local); err != nil {
+ errs[i] = err
+ invalidTxMeter.Mark(1)
+ continue
+ }
+ // Accumulate all unknown transactions for deeper processing
+ news = append(news, tx)
+ }
+ if len(news) == 0 {
+ return errs
+ }
+
+ // Process all the new transactions and merge any errors into the original slice
+ pool.mu.Lock()
+ newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
+ pool.mu.Unlock()
+
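+ // The errors returned by addTxsLocked only cover the filtered 'news' slice,
+ // so slot them back into the first free (nil) positions of the original
+ // error slice to keep indices aligned with the caller's input.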
+ var nilSlot = 0
+ for _, err := range newErrs {
+ for errs[nilSlot] != nil {
+ nilSlot++
+ }
+ errs[nilSlot] = err
+ nilSlot++
+ }
+ // Reorg the pool internals if needed and return
+ done := pool.requestPromoteExecutables(dirtyAddrs)
+ if sync {
+ <-done
+ }
+ return errs
+}
+
+// addTxsLocked attempts to queue a batch of transactions if they are valid.
+// The transaction pool lock must be held.
+func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
+ dirty := newAccountSet(pool.signer)
+ errs := make([]error, len(txs))
+ for i, tx := range txs {
+ replaced, err := pool.add(tx, local)
+ errs[i] = err
+ if err == nil && !replaced {
+ dirty.addTx(tx)
+ }
+ }
+ validTxMeter.Mark(int64(len(dirty.accounts)))
+ return errs, dirty
+}
+
+// Status returns the status (unknown/pending/queued) of a transaction
+// identified by its hash.
+func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
+ tx := pool.get(hash)
+ if tx == nil {
+ return txpool.TxStatusUnknown
+ }
+ from, _ := types.Sender(pool.signer, tx) // already validated
+
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
+ return txpool.TxStatusPending
+ } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
+ return txpool.TxStatusQueued
+ }
+ return txpool.TxStatusUnknown
+}
+
+// Get returns a transaction if it is contained in the pool and nil otherwise.
+func (pool *LegacyPool) Get(hash common.Hash) *txpool.Transaction {
+ tx := pool.get(hash)
+ if tx == nil {
+ return nil
+ }
+ return &txpool.Transaction{Tx: tx}
+}
+
+// get returns a transaction if it is contained in the pool and nil otherwise.
+func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
+ return pool.all.Get(hash)
+}
+
+// Has returns an indicator whether txpool has a transaction cached with the
+// given hash.
+func (pool *LegacyPool) Has(hash common.Hash) bool {
+ return pool.all.Get(hash) != nil
+}
+
+func (pool *LegacyPool) HasLocal(hash common.Hash) bool {
+ return pool.all.GetLocal(hash) != nil
+}
+
+// removeTx removes a single transaction from the queue, moving all subsequent
+// transactions back to the future queue.
+//
+// If unreserve is false, the account will not be relinquished to the main txpool
+// even if there are no more references to it. This is used to handle a race when
+// a tx is being added and it evicts a previously scheduled tx from the same account,
+// which could otherwise lead to a premature release of the address reservation.
+//
+// Returns the number of transactions removed from the pending queue.
+func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int {
+ // Fetch the transaction we wish to delete
+ tx := pool.all.Get(hash)
+ if tx == nil {
+ return 0
+ }
+ addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
+
+ // If after deletion there are no more transactions belonging to this account,
+ // relinquish the address reservation. It's a bit convoluted to do this via a
+ // defer, but it's safer given the many return pathways.
+ if unreserve {
+ defer func() {
+ var (
+ _, hasPending = pool.pending[addr]
+ _, hasQueued = pool.queue[addr]
+ )
+ if !hasPending && !hasQueued {
+ pool.reserve(addr, false)
+ }
+ }()
+ }
+ // Remove it from the list of known transactions
+ pool.all.Remove(hash)
+ if outofbound {
+ pool.priced.Removed(1)
+ }
+ if pool.locals.contains(addr) {
+ localGauge.Dec(1)
+ }
+ // Remove the transaction from the pending lists and reset the account nonce
+ if pending := pool.pending[addr]; pending != nil {
+ if removed, invalids := pending.Remove(tx); removed {
+ // If no more pending transactions are left, remove the list
+ if pending.Empty() {
+ delete(pool.pending, addr)
+ }
+ // Postpone any invalidated transactions
+ for _, tx := range invalids {
+ // Internal shuffle shouldn't touch the lookup set.
+ pool.enqueueTx(tx.Hash(), tx, false, false)
+ }
+ // Update the account nonce if needed
+ pool.pendingNonces.setIfLower(addr, tx.Nonce())
+ // Reduce the pending counter
+ pendingGauge.Dec(int64(1 + len(invalids)))
+ return 1 + len(invalids)
+ }
+ }
+ // Transaction is in the future queue
+ if future := pool.queue[addr]; future != nil {
+ if removed, _ := future.Remove(tx); removed {
+ // Reduce the queued counter
+ queuedGauge.Dec(1)
+ }
+ if future.Empty() {
+ delete(pool.queue, addr)
+ delete(pool.beats, addr)
+ }
+ }
+ return 0
+}
+
+// requestReset requests a pool reset to the new head block.
+// The returned channel is closed when the reset has occurred.
+func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
+ select {
+ case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
+ return <-pool.reorgDoneCh
+ case <-pool.reorgShutdownCh:
+ return pool.reorgShutdownCh
+ }
+}
+
+// requestPromoteExecutables requests transaction promotion checks for the given addresses.
+// The returned channel is closed when the promotion checks have occurred.
+func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} {
+ select {
+ case pool.reqPromoteCh <- set:
+ return <-pool.reorgDoneCh
+ case <-pool.reorgShutdownCh:
+ return pool.reorgShutdownCh
+ }
+}
+
+// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
+func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) {
+ select {
+ case pool.queueTxEventCh <- tx:
+ case <-pool.reorgShutdownCh:
+ }
+}
+
+// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
+// call those methods directly, but request them to be run using requestReset and
+// requestPromoteExecutables instead.
+func (pool *LegacyPool) scheduleReorgLoop() {
+ defer pool.wg.Done()
+
+ var (
+ curDone chan struct{} // non-nil while runReorg is active
+ nextDone = make(chan struct{})
+ launchNextRun bool
+ reset *txpoolResetRequest
+ dirtyAccounts *accountSet
+ queuedEvents = make(map[common.Address]*sortedMap)
+ )
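+ // nextDone is handed out to requesters via reorgDoneCh and is only closed
+ // once the reorg it belongs to has actually run (or the pool shuts down),
+ // so callers can safely block on the returned channel.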
+ for {
+ // Launch next background reorg if needed
+ if curDone == nil && launchNextRun {
+ // Run the background reorg and announcements
+ go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
+
+ // Prepare everything for the next round of reorg
+ curDone, nextDone = nextDone, make(chan struct{})
+ launchNextRun = false
+
+ reset, dirtyAccounts = nil, nil
+ queuedEvents = make(map[common.Address]*sortedMap)
+ }
+
+ select {
+ case req := <-pool.reqResetCh:
+ // Reset request: update head if request is already pending.
+ if reset == nil {
+ reset = req
+ } else {
+ reset.newHead = req.newHead
+ }
+ launchNextRun = true
+ pool.reorgDoneCh <- nextDone
+
+ case req := <-pool.reqPromoteCh:
+ // Promote request: update address set if request is already pending.
+ if dirtyAccounts == nil {
+ dirtyAccounts = req
+ } else {
+ dirtyAccounts.merge(req)
+ }
+ launchNextRun = true
+ pool.reorgDoneCh <- nextDone
+
+ case tx := <-pool.queueTxEventCh:
+ // Queue up the event, but don't schedule a reorg. It's up to the caller to
+ // request one later if they want the events sent.
+ addr, _ := types.Sender(pool.signer, tx)
+ if _, ok := queuedEvents[addr]; !ok {
+ queuedEvents[addr] = newSortedMap()
+ }
+ queuedEvents[addr].Put(tx)
+
+ case <-curDone:
+ curDone = nil
+
+ case <-pool.reorgShutdownCh:
+ // Wait for current run to finish.
+ if curDone != nil {
+ <-curDone
+ }
+ close(nextDone)
+ return
+ }
+ }
+}
+
+// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
+func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) {
+ defer func(t0 time.Time) {
+ reorgDurationTimer.Update(time.Since(t0))
+ }(time.Now())
+ defer close(done)
+
+ var promoteAddrs []common.Address
+ if dirtyAccounts != nil && reset == nil {
+ // Only dirty accounts need to be promoted, unless we're resetting.
+ // For resets, all addresses in the tx queue will be promoted and
+ // the flatten operation can be avoided.
+ promoteAddrs = dirtyAccounts.flatten()
+ }
+ pool.mu.Lock()
+ if reset != nil {
+ // Reset from the old head to the new, rescheduling any reorged transactions
+ pool.reset(reset.oldHead, reset.newHead)
+
+ // Nonces were reset, discard any events that became stale
+ for addr := range events {
+ events[addr].Forward(pool.pendingNonces.get(addr))
+ if events[addr].Len() == 0 {
+ delete(events, addr)
+ }
+ }
+ // Reset needs promote for all addresses
+ promoteAddrs = make([]common.Address, 0, len(pool.queue))
+ for addr := range pool.queue {
+ promoteAddrs = append(promoteAddrs, addr)
+ }
+ }
+ // Check for pending transactions for every account that sent new ones
+ promoted := pool.promoteExecutables(promoteAddrs)
+
+ // If a new block appeared, validate the pool of pending transactions. This will
+ // remove any transaction that has been included in the block or was invalidated
+ // because of another transaction (e.g. higher gas price).
+ if reset != nil {
+ pool.demoteUnexecutables()
+ if reset.newHead != nil {
+ if pool.chainconfig.IsSubnetEVM(reset.newHead.Time) {
+ if err := pool.updateBaseFeeAt(reset.newHead); err != nil {
+ log.Error("error at updating base fee in tx pool", "error", err)
+ }
+ } else {
+ pool.priced.Reheap()
+ }
+ }
+ // Update all accounts to the latest known pending nonce
+ nonces := make(map[common.Address]uint64, len(pool.pending))
+ for addr, list := range pool.pending {
+ highestPending := list.LastElement()
+ nonces[addr] = highestPending.Nonce() + 1
+ }
+ pool.pendingNonces.setAll(nonces)
+ }
+ // Ensure pool.queue and pool.pending sizes stay within the configured limits.
+ pool.truncatePending()
+ pool.truncateQueue()
+
+ dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
+ pool.changesSinceReorg = 0 // Reset change counter
+ pool.mu.Unlock()
+
+ // Notify subsystems for newly added transactions
+ for _, tx := range promoted {
+ addr, _ := types.Sender(pool.signer, tx)
+ if _, ok := events[addr]; !ok {
+ events[addr] = newSortedMap()
+ }
+ events[addr].Put(tx)
+ }
+ if len(events) > 0 {
+ var txs []*types.Transaction
+ for _, set := range events {
+ txs = append(txs, set.Flatten()...)
+ }
+ pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
+ }
+}
+
+// reset retrieves the current state of the blockchain and ensures the content
+// of the transaction pool is valid with regard to the chain state.
+func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
+ // If we're reorging an old state, reinject all dropped transactions
+ var reinject types.Transactions
+
+ if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
+ // If the reorg is too deep, avoid doing it (will happen during fast sync)
+ oldNum := oldHead.Number.Uint64()
+ newNum := newHead.Number.Uint64()
+
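+ // Reorgs deeper than 64 blocks are considered too expensive to replay, so
+ // the transactions dropped by them are simply not reinjected.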
+ if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
+ log.Debug("Skipping deep transaction reorg", "depth", depth)
+ } else {
+ // Reorg seems shallow enough to pull in all transactions into memory
+ var (
+ rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
+ add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
+ )
+ if rem == nil {
+ // This can happen if a setHead is performed, where we simply discard the old
+ // head from the chain.
+ // If that is the case, we don't have the lost transactions anymore, and
+ // there's nothing to add
+ if newNum >= oldNum {
+ // If we reorged to the same or a higher number, then it's not a case of setHead
+ log.Warn("Transaction pool reset with missing old head",
+ "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+ return
+ }
+ // If the reorg ended up on a lower number, it's indicative of setHead being the cause
+ log.Debug("Skipping transaction reset caused by setHead",
+ "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+ // We still need to update the current state so that the lost transactions can be re-added by the user
+ } else {
+ if add == nil {
+ // if the new head is nil, it means that something happened between
+ // the firing of newhead-event and _now_: most likely a
+ // reorg caused by sync-reversion or explicit sethead back to an
+ // earlier block.
+ log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
+ return
+ }
+ var discarded, included types.Transactions
+ for rem.NumberU64() > add.NumberU64() {
+ discarded = append(discarded, rem.Transactions()...)
+ if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+ log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+ return
+ }
+ }
+ for add.NumberU64() > rem.NumberU64() {
+ included = append(included, add.Transactions()...)
+ if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+ log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+ return
+ }
+ }
+ for rem.Hash() != add.Hash() {
+ discarded = append(discarded, rem.Transactions()...)
+ if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+ log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+ return
+ }
+ included = append(included, add.Transactions()...)
+ if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+ log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+ return
+ }
+ }
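+ // Of everything that fell out of the old chain, only reinject what was not
+ // re-included in the new chain and what this pool instance accepts (Filter).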
+ lost := make([]*types.Transaction, 0, len(discarded))
+ for _, tx := range types.TxDifference(discarded, included) {
+ if pool.Filter(tx) {
+ lost = append(lost, tx)
+ }
+ }
+ reinject = lost
+ }
+ }
+ }
+ // Initialize the internal state to the current head
+ if newHead == nil {
+ newHead = pool.chain.CurrentBlock() // Special case during testing
+ }
+ statedb, err := pool.chain.StateAt(newHead.Root)
+ if err != nil {
+ log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
+ return
+ }
+ pool.currentHead.Store(newHead)
+ pool.currentStateLock.Lock()
+ pool.currentState = statedb
+ pool.currentStateLock.Unlock()
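+ // Rebuild the virtual pending-nonce tracker on top of the fresh state.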
+ pool.pendingNonces = newNoncer(statedb)
+
+ // When we reset the txpool we should explicitly check whether the fee config's
+ // minimum base fee has changed, so that we can correctly drop txs priced below
+ // minBaseFee from the pool.
+ if pool.chainconfig.IsPrecompileEnabled(feemanager.ContractAddress, newHead.Time) {
+ feeConfig, _, err := pool.chain.GetFeeConfigAt(newHead)
+ if err != nil {
+ log.Error("Failed to get fee config state", "err", err, "root", newHead.Root)
+ return
+ }
+ pool.minimumFee = feeConfig.MinBaseFee
+ }
+
+ // Inject any transactions discarded due to reorgs
+ log.Debug("Reinjecting stale transactions", "count", len(reinject))
+ pool.chain.SenderCacher().Recover(pool.signer, reinject)
+ pool.addTxsLocked(reinject, false)
+}
+
+// promoteExecutables moves transactions that have become processable from the
+// future queue to the set of pending transactions. During this process, all
+// invalidated transactions (low nonce, low balance) are deleted.
+func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
+ pool.currentStateLock.Lock()
+ defer pool.currentStateLock.Unlock()
+
+ // Track the promoted transactions to broadcast them at once
+ var promoted []*types.Transaction
+
+ // Iterate over all accounts and promote any executable transactions
+ gasLimit := pool.currentHead.Load().GasLimit
+ for _, addr := range accounts {
+ list := pool.queue[addr]
+ if list == nil {
+ continue // Just in case someone calls with a non existing account
+ }
+ // Drop all transactions that are deemed too old (low nonce)
+ forwards := list.Forward(pool.currentState.GetNonce(addr))
+ for _, tx := range forwards {
+ hash := tx.Hash()
+ pool.all.Remove(hash)
+ }
+ log.Trace("Removed old queued transactions", "count", len(forwards))
+ // Drop all transactions that are too costly (low balance or out of gas)
+ drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
+ for _, tx := range drops {
+ hash := tx.Hash()
+ pool.all.Remove(hash)
+ }
+ log.Trace("Removed unpayable queued transactions", "count", len(drops))
+ queuedNofundsMeter.Mark(int64(len(drops)))
+
+ // Gather all executable transactions and promote them
+ readies := list.Ready(pool.pendingNonces.get(addr))
+ for _, tx := range readies {
+ hash := tx.Hash()
+ if pool.promoteTx(addr, hash, tx) {
+ promoted = append(promoted, tx)
+ }
+ }
+ log.Trace("Promoted queued transactions", "count", len(promoted))
+ queuedGauge.Dec(int64(len(readies)))
+
+ // Drop all transactions over the allowed limit
+ var caps types.Transactions
+ if !pool.locals.contains(addr) {
+ caps = list.Cap(int(pool.config.AccountQueue))
+ for _, tx := range caps {
+ hash := tx.Hash()
+ pool.all.Remove(hash)
+ log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
+ }
+ queuedRateLimitMeter.Mark(int64(len(caps)))
+ }
+ // Mark all the items dropped as removed
+ pool.priced.Removed(len(forwards) + len(drops) + len(caps))
+ queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+ if pool.locals.contains(addr) {
+ localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+ }
+ // Delete the entire queue entry if it became empty.
+ if list.Empty() {
+ delete(pool.queue, addr)
+ delete(pool.beats, addr)
+ if _, ok := pool.pending[addr]; !ok {
+ pool.reserve(addr, false)
+ }
+ }
+ }
+ return promoted
+}
+
+// truncatePending removes transactions from the pending queue if the pool is above the
+// pending limit. The algorithm tries to reduce transaction counts by an approximately
+// equal number for all accounts with many pending transactions.
+func (pool *LegacyPool) truncatePending() {
+ pending := uint64(0)
+ for _, list := range pool.pending {
+ pending += uint64(list.Len())
+ }
+ if pending <= pool.config.GlobalSlots {
+ return
+ }
+
+ pendingBeforeCap := pending
+ // Assemble a spam order to penalize large transactors first
+ spammers := prque.New[int64, common.Address](nil)
+ for addr, list := range pool.pending {
+ // Only evict transactions from high rollers
+ if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
+ spammers.Push(addr, int64(list.Len()))
+ }
+ }
+ // Gradually drop transactions from offenders
+ offenders := []common.Address{}
+ for pending > pool.config.GlobalSlots && !spammers.Empty() {
+ // Retrieve the next offender if not local address
+ offender, _ := spammers.Pop()
+ offenders = append(offenders, offender)
+
+ // Equalize balances until all the same or below threshold
+ if len(offenders) > 1 {
+ // Calculate the equalization threshold for all current offenders
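+ // Offenders are popped in descending order of pending count, so the most
+ // recently popped offender's current length is the level the earlier,
+ // larger offenders get trimmed down to.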
+ threshold := pool.pending[offender].Len()
+
+ // Iteratively reduce all offenders until below limit or threshold reached
+ for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
+ for i := 0; i < len(offenders)-1; i++ {
+ list := pool.pending[offenders[i]]
+
+ caps := list.Cap(list.Len() - 1)
+ for _, tx := range caps {
+ // Drop the transaction from the global pools too
+ hash := tx.Hash()
+ pool.all.Remove(hash)
+
+ // Update the account nonce to the dropped transaction
+ pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
+ log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+ }
+ pool.priced.Removed(len(caps))
+ pendingGauge.Dec(int64(len(caps)))
+ if pool.locals.contains(offenders[i]) {
+ localGauge.Dec(int64(len(caps)))
+ }
+ pending--
+ }
+ }
+ }
+ }
+
+ // If still above threshold, reduce to limit or min allowance
+ if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+ for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
+ for _, addr := range offenders {
+ list := pool.pending[addr]
+
+ caps := list.Cap(list.Len() - 1)
+ for _, tx := range caps {
+ // Drop the transaction from the global pools too
+ hash := tx.Hash()
+ pool.all.Remove(hash)
+
+ // Update the account nonce to the dropped transaction
+ pool.pendingNonces.setIfLower(addr, tx.Nonce())
+ log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+ }
+ pool.priced.Removed(len(caps))
+ pendingGauge.Dec(int64(len(caps)))
+ if pool.locals.contains(addr) {
+ localGauge.Dec(int64(len(caps)))
+ }
+ pending--
+ }
+ }
+ }
+ pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
+}
+
+// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
+func (pool *LegacyPool) truncateQueue() {
+ queued := uint64(0)
+ for _, list := range pool.queue {
+ queued += uint64(list.Len())
+ }
+ if queued <= pool.config.GlobalQueue {
+ return
+ }
+
+ // Sort all accounts with queued transactions by heartbeat
+ addresses := make(addressesByHeartbeat, 0, len(pool.queue))
+ for addr := range pool.queue {
+ if !pool.locals.contains(addr) { // don't drop locals
+ addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
+ }
+ }
+ sort.Sort(sort.Reverse(addresses))
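+ // After the reverse sort the slice ends with the accounts having the oldest
+ // heartbeat; the loop below consumes the slice from its tail, so the least
+ // recently active accounts are evicted first.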
+
+ // Drop transactions until the total is below the limit or only locals remain
+ for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
+ addr := addresses[len(addresses)-1]
+ list := pool.queue[addr.address]
+
+ addresses = addresses[:len(addresses)-1]
+
+ // Drop the account's entire queue if it is no larger than the remaining overflow
+ if size := uint64(list.Len()); size <= drop {
+ for _, tx := range list.Flatten() {
+ pool.removeTx(tx.Hash(), true, true)
+ }
+ drop -= size
+ queuedRateLimitMeter.Mark(int64(size))
+ continue
+ }
+ // Otherwise drop only last few transactions
+ txs := list.Flatten()
+ for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
+ pool.removeTx(txs[i].Hash(), true, true)
+ drop--
+ queuedRateLimitMeter.Mark(1)
+ }
+ }
+}
+
+// demoteUnexecutables removes invalid and processed transactions from the pool's
+// executable/pending queue, and any subsequent transactions that become unexecutable
+// are moved back into the future queue.
+//
+// Note: transactions are not marked as removed in the priced list because re-heaping
+// is always explicitly triggered by SetBaseFee, and it would be unnecessary and wasteful
+// to trigger a re-heap in this function.
+func (pool *LegacyPool) demoteUnexecutables() {
+ pool.currentStateLock.Lock()
+ defer pool.currentStateLock.Unlock()
+
+ // Iterate over all accounts and demote any non-executable transactions
+ gasLimit := pool.currentHead.Load().GasLimit
+ for addr, list := range pool.pending {
+ nonce := pool.currentState.GetNonce(addr)
+
+ // Drop all transactions that are deemed too old (low nonce)
+ olds := list.Forward(nonce)
+ for _, tx := range olds {
+ hash := tx.Hash()
+ pool.all.Remove(hash)
+ log.Trace("Removed old pending transaction", "hash", hash)
+ }
+ // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
+ drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
+ for _, tx := range drops {
+ hash := tx.Hash()
+ log.Trace("Removed unpayable pending transaction", "hash", hash)
+ pool.all.Remove(hash)
+ }
+ pendingNofundsMeter.Mark(int64(len(drops)))
+
+ for _, tx := range invalids {
+ hash := tx.Hash()
+ log.Trace("Demoting pending transaction", "hash", hash)
+
+ // Internal shuffle shouldn't touch the lookup set.
+ pool.enqueueTx(hash, tx, false, false)
+ }
+ pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+ if pool.locals.contains(addr) {
+ localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+ }
+ // If there's a gap in front, alert (should never happen) and postpone all transactions
+ if list.Len() > 0 && list.txs.Get(nonce) == nil {
+ gapped := list.Cap(0)
+ for _, tx := range gapped {
+ hash := tx.Hash()
+ log.Error("Demoting invalidated transaction", "hash", hash)
+
+ // Internal shuffle shouldn't touch the lookup set.
+ pool.enqueueTx(hash, tx, false, false)
+ }
+ pendingGauge.Dec(int64(len(gapped)))
+ }
+ // Delete the entire pending entry if it became empty.
+ if list.Empty() {
+ delete(pool.pending, addr)
+ if _, ok := pool.queue[addr]; !ok {
+ pool.reserve(addr, false)
+ }
+ }
+ }
+}
+
+func (pool *LegacyPool) startPeriodicFeeUpdate() {
+ if pool.chainconfig.SubnetEVMTimestamp == nil {
+ return
+ }
+
+ // Call updateBaseFee here to ensure there is no [baseFeeUpdateInterval] delay
+ // before the base fee is first updated when starting up after Subnet EVM activation.
+ if time.Now().After(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp)) {
+ pool.updateBaseFee()
+ }
+
+ pool.wg.Add(1)
+ go pool.periodicBaseFeeUpdate()
+}
+
+func (pool *LegacyPool) periodicBaseFeeUpdate() {
+ defer pool.wg.Done()
+
+ // Sleep until it's time to start the periodic base fee update or the tx pool is shutting down
+ select {
+ case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp))):
+ case <-pool.generalShutdownChan:
+ return // Return early if shutting down
+ }
+
+ // Update the base fee every [baseFeeUpdateInterval]
+ // and shutdown when [generalShutdownChan] is closed by Stop()
+ for {
+ select {
+ case <-time.After(baseFeeUpdateInterval):
+ pool.updateBaseFee()
+ case <-pool.generalShutdownChan:
+ return
+ }
+ }
+}
+
+func (pool *LegacyPool) updateBaseFee() {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ err := pool.updateBaseFeeAt(pool.currentHead.Load())
+ if err != nil {
+ log.Error("failed to update base fee", "currentHead", pool.currentHead.Load().Hash(), "err", err)
+ }
+}
+
+// updateBaseFeeAt re-estimates the base fee at the given head and updates the
+// priced heap accordingly. It assumes the pool lock is already held.
+func (pool *LegacyPool) updateBaseFeeAt(head *types.Header) error {
+ feeConfig, _, err := pool.chain.GetFeeConfigAt(head)
+ if err != nil {
+ return err
+ }
+ _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, feeConfig, head, uint64(time.Now().Unix()))
+ if err != nil {
+ return err
+ }
+ pool.priced.SetBaseFee(baseFeeEstimate)
+ return nil
+}
+
+// addressByHeartbeat is an account address tagged with its last activity timestamp.
+type addressByHeartbeat struct {
+ address common.Address
+ heartbeat time.Time
+}
+
+type addressesByHeartbeat []addressByHeartbeat
+
+func (a addressesByHeartbeat) Len() int { return len(a) }
+func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
+func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// accountSet is simply a set of addresses to check for existence, and a signer
+// capable of deriving addresses from transactions.
+type accountSet struct {
+ accounts map[common.Address]struct{}
+ signer types.Signer
+ cache *[]common.Address
+}
+
+// newAccountSet creates a new address set with an associated signer for sender
+// derivations.
+func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
+ as := &accountSet{
+ accounts: make(map[common.Address]struct{}, len(addrs)),
+ signer: signer,
+ }
+ for _, addr := range addrs {
+ as.add(addr)
+ }
+ return as
+}
+
+// contains checks if a given address is contained within the set.
+func (as *accountSet) contains(addr common.Address) bool {
+ _, exist := as.accounts[addr]
+ return exist
+}
+
+// containsTx checks if the sender of a given tx is within the set. If the sender
+// cannot be derived, this method returns false.
+func (as *accountSet) containsTx(tx *types.Transaction) bool {
+ if addr, err := types.Sender(as.signer, tx); err == nil {
+ return as.contains(addr)
+ }
+ return false
+}
+
+// add inserts a new address into the set to track.
+func (as *accountSet) add(addr common.Address) {
+ as.accounts[addr] = struct{}{}
+ as.cache = nil
+}
+
+// addTx adds the sender of tx into the set.
+func (as *accountSet) addTx(tx *types.Transaction) {
+ if addr, err := types.Sender(as.signer, tx); err == nil {
+ as.add(addr)
+ }
+}
+
+// flatten returns the list of addresses within this set, also caching it for later
+// reuse. The returned slice should not be changed!
+func (as *accountSet) flatten() []common.Address {
+ if as.cache == nil {
+ accounts := make([]common.Address, 0, len(as.accounts))
+ for account := range as.accounts {
+ accounts = append(accounts, account)
+ }
+ as.cache = &accounts
+ }
+ return *as.cache
+}
+
+// merge adds all addresses from the 'other' set into 'as'.
+func (as *accountSet) merge(other *accountSet) {
+ for addr := range other.accounts {
+ as.accounts[addr] = struct{}{}
+ }
+ as.cache = nil
+}
+
+// lookup is used internally by LegacyPool to track transactions while allowing
+// lookup without mutex contention.
+//
+// Note, although this type is properly protected against concurrent access, it
+// is **not** a type that should ever be mutated or even exposed outside of the
+// transaction pool, since its internal state is tightly coupled with the pool's
+// internal mechanisms. The sole purpose of the type is to permit out-of-bound
+// peeking into the pool in LegacyPool.Get without having to acquire the widely scoped
+// LegacyPool.mu mutex.
+//
+// This lookup set also tracks the notion of "local transactions", which is useful
+// for building upper-level structures.
+type lookup struct {
+ slots int
+ lock sync.RWMutex
+ locals map[common.Hash]*types.Transaction
+ remotes map[common.Hash]*types.Transaction
+}
+
+// newLookup returns a new lookup structure.
+func newLookup() *lookup {
+ return &lookup{
+ locals: make(map[common.Hash]*types.Transaction),
+ remotes: make(map[common.Hash]*types.Transaction),
+ }
+}
+
+// Range calls f on each key and value present in the map. The callback should
+// return whether the iteration needs to be continued.
+// Callers need to specify which set (or both) should be iterated.
+func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ if local {
+ for key, value := range t.locals {
+ if !f(key, value, true) {
+ return
+ }
+ }
+ }
+ if remote {
+ for key, value := range t.remotes {
+ if !f(key, value, false) {
+ return
+ }
+ }
+ }
+}
+
+// Get returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) Get(hash common.Hash) *types.Transaction {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ if tx := t.locals[hash]; tx != nil {
+ return tx
+ }
+ return t.remotes[hash]
+}
+
+// GetLocal returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return t.locals[hash]
+}
+
+// GetRemote returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return t.remotes[hash]
+}
+
+// Count returns the current number of transactions in the lookup.
+func (t *lookup) Count() int {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return len(t.locals) + len(t.remotes)
+}
+
+// LocalCount returns the current number of local transactions in the lookup.
+func (t *lookup) LocalCount() int {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return len(t.locals)
+}
+
+// RemoteCount returns the current number of remote transactions in the lookup.
+func (t *lookup) RemoteCount() int {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return len(t.remotes)
+}
+
+// Slots returns the current number of slots used in the lookup.
+func (t *lookup) Slots() int {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return t.slots
+}
+
+// Add adds a transaction to the lookup.
+func (t *lookup) Add(tx *types.Transaction, local bool) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ t.slots += numSlots(tx)
+ slotsGauge.Update(int64(t.slots))
+
+ if local {
+ t.locals[tx.Hash()] = tx
+ } else {
+ t.remotes[tx.Hash()] = tx
+ }
+}
+
+// Remove removes a transaction from the lookup.
+func (t *lookup) Remove(hash common.Hash) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ tx, ok := t.locals[hash]
+ if !ok {
+ tx, ok = t.remotes[hash]
+ }
+ if !ok {
+ log.Error("No transaction found to be deleted", "hash", hash)
+ return
+ }
+ t.slots -= numSlots(tx)
+ slotsGauge.Update(int64(t.slots))
+
+ delete(t.locals, hash)
+ delete(t.remotes, hash)
+}
+
+// RemoteToLocals migrates the transactions belonging to the given locals into the
+// locals set. The given accountSet is assumed to be safe for concurrent use.
+func (t *lookup) RemoteToLocals(locals *accountSet) int {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ var migrated int
+ for hash, tx := range t.remotes {
+ if locals.containsTx(tx) {
+ t.locals[hash] = tx
+ delete(t.remotes, hash)
+ migrated += 1
+ }
+ }
+ return migrated
+}
+
+// RemotesBelowTip finds all remote transactions below the given tip threshold.
+func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
+ found := make(types.Transactions, 0, 128)
+ t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
+ if tx.GasTipCapIntCmp(threshold) < 0 {
+ found = append(found, tx)
+ }
+ return true
+ }, false, true) // Only iterate remotes
+ return found
+}
+
+// numSlots calculates the number of slots needed for a single transaction.
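+// The division rounds up, so a transaction even slightly larger than one
+// txSlotSize still occupies two slots.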
+func numSlots(tx *types.Transaction) int {
+ return int((tx.Size() + txSlotSize - 1) / txSlotSize)
+}
diff --git a/core/txpool/txpool2_test.go b/core/txpool/legacypool/legacypool2_test.go
similarity index 84%
rename from core/txpool/txpool2_test.go
rename to core/txpool/legacypool/legacypool2_test.go
index cb0251356f..57dfeff8cc 100644
--- a/core/txpool/txpool2_test.go
+++ b/core/txpool/legacypool/legacypool2_test.go
@@ -23,7 +23,7 @@
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"crypto/ecdsa"
@@ -43,7 +43,7 @@ func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gaspric
return tx
}
-func count(t *testing.T, pool *TxPool) (pending int, queued int) {
+func count(t *testing.T, pool *LegacyPool) (pending int, queued int) {
t.Helper()
pending, queued = pool.stats()
if err := validatePoolInternals(pool); err != nil {
@@ -52,7 +52,7 @@ func count(t *testing.T, pool *TxPool) (pending int, queued int) {
return pending, queued
}
-func fillPool(t testing.TB, pool *TxPool) {
+func fillPool(t testing.TB, pool *LegacyPool) {
t.Helper()
// Create a number of test accounts, fund them and make transactions
executableTxs := types.Transactions{}
@@ -66,8 +66,8 @@ func fillPool(t testing.TB, pool *TxPool) {
}
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(executableTxs)
- pool.AddRemotesSync(nonExecutableTxs)
+ pool.addRemotesSync(executableTxs)
+ pool.addRemotesSync(nonExecutableTxs)
pending, queued := pool.Stats()
slots := pool.all.Slots()
// sanity-check that the test prerequisites are ok (pending full)
@@ -89,12 +89,13 @@ func TestTransactionFutureAttack(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := NewTxPool(config, eip1559Config, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
fillPool(t, pool)
pending, _ := pool.Stats()
// Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops
@@ -106,7 +107,7 @@ func TestTransactionFutureAttack(t *testing.T) {
futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key))
}
for i := 0; i < 5; i++ {
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
newPending, newQueued := count(t, pool)
t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots())
}
@@ -125,9 +126,10 @@ func TestTransactionFuture1559(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
- defer pool.Stop()
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a number of test accounts, fund them and make transactions
fillPool(t, pool)
@@ -141,7 +143,7 @@ func TestTransactionFuture1559(t *testing.T) {
for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ {
futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key))
}
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
}
newPending, _ := pool.Stats()
// Pending should not have been touched
@@ -157,9 +159,10 @@ func TestTransactionZAttack(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
- defer pool.Stop()
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a number of test accounts, fund them and make transactions
fillPool(t, pool)
@@ -191,7 +194,7 @@ func TestTransactionZAttack(t *testing.T) {
key, _ := crypto.GenerateKey()
pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key))
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
}
overDraftTxs := types.Transactions{}
@@ -202,11 +205,11 @@ func TestTransactionZAttack(t *testing.T) {
overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key))
}
}
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
newPending, newQueued := count(t, pool)
newIvPending := countInvalidPending()
@@ -224,12 +227,13 @@ func TestTransactionZAttack(t *testing.T) {
func BenchmarkFutureAttack(b *testing.B) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := NewTxPool(config, eip1559Config, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
fillPool(b, pool)
key, _ := crypto.GenerateKey()
@@ -241,6 +245,6 @@ func BenchmarkFutureAttack(b *testing.B) {
}
b.ResetTimer()
for i := 0; i < 5; i++ {
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
}
}
diff --git a/core/txpool/txpool_test.go b/core/txpool/legacypool/legacypool_test.go
similarity index 86%
rename from core/txpool/txpool_test.go
rename to core/txpool/legacypool/legacypool_test.go
index 0e4d438f35..446ed045c5 100644
--- a/core/txpool/txpool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"crypto/ecdsa"
@@ -34,7 +34,6 @@ import (
"math/big"
"math/rand"
"os"
- "strings"
"sync"
"sync/atomic"
"testing"
@@ -44,6 +43,7 @@ import (
"github.com/ava-labs/subnet-evm/core"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/txpool"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/params"
"github.com/ava-labs/subnet-evm/trie"
@@ -82,25 +82,21 @@ func init() {
}
type testBlockChain struct {
- statedb *state.StateDB
+ config *params.ChainConfig
gasLimit atomic.Uint64
+ statedb *state.StateDB
chainHeadFeed *event.Feed
lock sync.Mutex
}
-func newTestBlockChain(gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain {
- bc := testBlockChain{statedb: statedb, chainHeadFeed: chainHeadFeed}
+func newTestBlockChain(config *params.ChainConfig, gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain {
+ bc := testBlockChain{config: config, statedb: statedb, chainHeadFeed: new(event.Feed)}
bc.gasLimit.Store(gasLimit)
return &bc
}
-func (bc *testBlockChain) reset(statedb *state.StateDB, gasLimit uint64, chainHeadFeed *event.Feed) {
- bc.lock.Lock()
- defer bc.lock.Unlock()
-
- bc.statedb = statedb
- bc.gasLimit.Store(gasLimit)
- bc.chainHeadFeed = chainHeadFeed
+func (bc *testBlockChain) Config() *params.ChainConfig {
+ return bc.config
}
func (bc *testBlockChain) CurrentBlock() *types.Header {
@@ -172,24 +168,51 @@ func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *big.Int, tip *big.Int,
return tx
}
-func setupPool() (*TxPool, *ecdsa.PrivateKey) {
+func makeAddressReserver() txpool.AddressReserver {
+ var (
+ reserved = make(map[common.Address]struct{})
+ lock sync.Mutex
+ )
+ return func(addr common.Address, reserve bool) error {
+ lock.Lock()
+ defer lock.Unlock()
+
+ _, exists := reserved[addr]
+ if reserve {
+ if exists {
+ panic("already reserved")
+ }
+ reserved[addr] = struct{}{}
+ return nil
+ }
+ if !exists {
+ panic("not reserved")
+ }
+ delete(reserved, addr)
+ return nil
+ }
+}
+
+func setupPool() (*LegacyPool, *ecdsa.PrivateKey) {
return setupPoolWithConfig(params.TestChainConfig)
}
-func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) {
+func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(10000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed))
key, _ := crypto.GenerateKey()
- pool := NewTxPool(testTxPoolConfig, config, blockchain)
-
+ pool := New(testTxPoolConfig, blockchain)
+ if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()); err != nil {
+ panic(err)
+ }
// wait for the pool to initialize
<-pool.initDoneCh
return pool, key
}
// validatePoolInternals checks various consistency invariants within the pool.
-func validatePoolInternals(pool *TxPool) error {
+func validatePoolInternals(pool *LegacyPool) error {
pool.mu.RLock()
defer pool.mu.RUnlock()
@@ -293,20 +316,21 @@ func TestStateChangeDuringReset(t *testing.T) {
// setup pool with 2 transaction in it
statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether))
- blockchain := &testChain{newTestBlockChain(1000000000, statedb, new(event.Feed)), address, &trigger}
+ blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger}
tx0 := transaction(0, 100000, key)
tx1 := transaction(1, 100000, key)
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
nonce := pool.Nonce(address)
if nonce != 0 {
t.Fatalf("Invalid nonce, want 0, got %d", nonce)
}
- pool.AddRemotesSync([]*types.Transaction{tx0, tx1})
+ pool.addRemotesSync([]*types.Transaction{tx0, tx1})
nonce = pool.Nonce(address)
if nonce != 2 {
@@ -323,13 +347,13 @@ func TestStateChangeDuringReset(t *testing.T) {
}
}
-func testAddBalance(pool *TxPool, addr common.Address, amount *big.Int) {
+func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) {
pool.mu.Lock()
pool.currentState.AddBalance(addr, amount)
pool.mu.Unlock()
}
-func testSetNonce(pool *TxPool, addr common.Address, nonce uint64) {
+func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) {
pool.mu.Lock()
pool.currentState.SetNonce(addr, nonce)
pool.mu.Unlock()
@@ -339,36 +363,36 @@ func TestInvalidTransactions(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx := transaction(0, 100, key)
from, _ := deriveSender(tx)
// Intrinsic gas too low
testAddBalance(pool, from, big.NewInt(1))
- if err, want := pool.AddRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) {
+ if err, want := pool.addRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
// Insufficient funds
tx = transaction(0, 100000, key)
- if err, want := pool.AddRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) {
+ if err, want := pool.addRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
testSetNonce(pool, from, 1)
testAddBalance(pool, from, big.NewInt(0xffffffffffffff))
tx = transaction(0, 100000, key)
- if err, want := pool.AddRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) {
+ if err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
tx = transaction(1, 100000, key)
- pool.gasPrice = big.NewInt(1000)
- if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) {
+ pool.gasTip.Store(big.NewInt(1000))
+ if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
- if err := pool.AddLocal(tx); err != nil {
+ if err := pool.addLocal(tx); err != nil {
t.Error("expected", nil, "got", err)
}
}
@@ -377,7 +401,7 @@ func TestQueue(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx := transaction(0, 100, key)
from, _ := deriveSender(tx)
@@ -408,7 +432,7 @@ func TestQueue2(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx1 := transaction(0, 100, key)
tx2 := transaction(10, 100, key)
@@ -434,13 +458,13 @@ func TestNegativeValue(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key)
from, _ := deriveSender(tx)
testAddBalance(pool, from, big.NewInt(1))
- if err := pool.AddRemote(tx); err != ErrNegativeValue {
- t.Error("expected", ErrNegativeValue, "got", err)
+ if err := pool.addRemote(tx); err != txpool.ErrNegativeValue {
+ t.Error("expected", txpool.ErrNegativeValue, "got", err)
}
}
@@ -448,11 +472,11 @@ func TestTipAboveFeeCap(t *testing.T) {
t.Parallel()
pool, key := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key)
- if err := pool.AddRemote(tx); err != core.ErrTipAboveFeeCap {
+ if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap {
t.Error("expected", core.ErrTipAboveFeeCap, "got", err)
}
}
@@ -461,18 +485,18 @@ func TestVeryHighValues(t *testing.T) {
t.Parallel()
pool, key := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
veryBigNumber := big.NewInt(1)
veryBigNumber.Lsh(veryBigNumber, 300)
tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key)
- if err := pool.AddRemote(tx); err != core.ErrTipVeryHigh {
+ if err := pool.addRemote(tx); err != core.ErrTipVeryHigh {
t.Error("expected", core.ErrTipVeryHigh, "got", err)
}
tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key)
- if err := pool.AddRemote(tx2); err != core.ErrFeeCapVeryHigh {
+ if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh {
t.Error("expected", core.ErrFeeCapVeryHigh, "got", err)
}
}
@@ -481,14 +505,14 @@ func TestChainFork(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(100000000000000))
- pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed))
+ pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
<-pool.requestReset(nil, nil)
}
resetState()
@@ -497,7 +521,7 @@ func TestChainFork(t *testing.T) {
if _, err := pool.add(tx, false); err != nil {
t.Error("didn't expect error", err)
}
- pool.removeTx(tx.Hash(), true)
+ pool.removeTx(tx.Hash(), true, true)
// reset the pool's internal state
resetState()
@@ -510,14 +534,14 @@ func TestDoubleNonce(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(100000000000000))
- pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed))
+ pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
<-pool.requestReset(nil, nil)
}
resetState()
@@ -561,7 +585,7 @@ func TestMissingNonce(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, addr, big.NewInt(100000000000000))
@@ -585,7 +609,7 @@ func TestNonceRecovery(t *testing.T) {
const n = 10
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
testSetNonce(pool, addr, n)
@@ -593,7 +617,7 @@ func TestNonceRecovery(t *testing.T) {
<-pool.requestReset(nil, nil)
tx := transaction(n, 100000, key)
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Error(err)
}
// simulate some weird re-order of transactions and missing nonce(s)
@@ -611,7 +635,7 @@ func TestDropping(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000))
@@ -687,8 +711,7 @@ func TestDropping(t *testing.T) {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
}
// Reduce the block gas limit, check that invalidated transactions are dropped
- tbc := pool.chain.(*testBlockChain)
- tbc.reset(tbc.statedb, 100, tbc.chainHeadFeed)
+ pool.chain.(*testBlockChain).gasLimit.Store(100)
<-pool.requestReset(nil, nil)
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
@@ -716,10 +739,11 @@ func TestPostponing(t *testing.T) {
// Create the pool to test the postponing with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create two test accounts to produce different gap profiles with
keys := make([]*ecdsa.PrivateKey, 2)
@@ -744,7 +768,7 @@ func TestPostponing(t *testing.T) {
txs = append(txs, tx)
}
}
- for i, err := range pool.AddRemotesSync(txs) {
+ for i, err := range pool.addRemotesSync(txs) {
if err != nil {
t.Fatalf("tx %d: failed to add transactions: %v", i, err)
}
@@ -829,7 +853,7 @@ func TestGapFilling(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -840,7 +864,7 @@ func TestGapFilling(t *testing.T) {
defer sub.Unsubscribe()
// Create a pending and a queued transaction with a nonce-gap in between
- pool.AddRemotesSync([]*types.Transaction{
+ pool.addRemotesSync([]*types.Transaction{
transaction(0, 100000, key),
transaction(2, 100000, key),
})
@@ -883,7 +907,7 @@ func TestQueueAccountLimiting(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -928,14 +952,15 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.NoLocals = nolocals
config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a number of test accounts and fund them (last one will be the local)
keys := make([]*ecdsa.PrivateKey, 5)
@@ -957,7 +982,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
nonces[addr]++
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
queued := 0
for addr, list := range pool.queue {
@@ -974,7 +999,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
for i := uint64(0); i < 3*config.GlobalQueue; i++ {
txs = append(txs, transaction(i+1, 100000, local))
}
- pool.AddLocals(txs)
+ pool.addLocals(txs)
// If locals are disabled, the previous eviction algorithm should apply here too
if nolocals {
@@ -1020,14 +1045,15 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
// Create the pool to test the non-expiration enforcement
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.Lifetime = time.Second
config.NoLocals = nolocals
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
@@ -1037,10 +1063,10 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
// Add the two transactions and ensure they both are queued up
- if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil {
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
pending, queued := pool.Stats()
@@ -1107,7 +1133,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
}
// Queue gapped transactions
- if err := pool.AddLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil {
@@ -1116,7 +1142,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
time.Sleep(5 * evictionInterval) // A half lifetime pass
// Queue executable transactions, the life cycle should be restarted.
- if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil {
@@ -1130,7 +1156,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 2 {
- t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
@@ -1164,7 +1190,7 @@ func TestPendingLimiting(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000000))
@@ -1205,13 +1231,14 @@ func TestPendingGlobalLimiting(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = config.AccountSlots * 10
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 5)
@@ -1231,7 +1258,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
}
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
pending := 0
for _, list := range pool.pending {
@@ -1253,7 +1280,7 @@ func TestAllowedTxSize(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000))
@@ -1262,7 +1289,7 @@ func TestAllowedTxSize(t *testing.T) {
//
// It is assumed the fields in the transaction (except of the data) are:
// - nonce <= 32 bytes
- // - gasPrice <= 32 bytes
+ // - gasTip <= 32 bytes
// - gasLimit <= 32 bytes
// - recipient == 20 bytes
// - value <= 32 bytes
@@ -1270,22 +1297,21 @@ func TestAllowedTxSize(t *testing.T) {
// All those fields are summed up to at most 213 bytes.
baseSize := uint64(213)
dataSize := txMaxSize - baseSize
- maxGas := pool.currentMaxGas.Load()
// Try adding a transaction with maximal allowed size
- tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize)
+ tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize)
if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
}
// Try adding a transaction with random allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
t.Fatalf("failed to add transaction of random allowed size: %v", err)
}
// Try adding a transaction of minimal not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil {
t.Fatalf("expected rejection on slightly oversize transaction")
}
// Try adding a transaction of random not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
t.Fatalf("expected rejection on oversize transaction")
}
// Run some sanity checks on the pool internals
@@ -1307,15 +1333,16 @@ func TestCapClearsFromAll(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.AccountSlots = 2
config.AccountQueue = 2
config.GlobalSlots = 8
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a number of test accounts and fund them
key, _ := crypto.GenerateKey()
@@ -1327,7 +1354,7 @@ func TestCapClearsFromAll(t *testing.T) {
txs = append(txs, transaction(uint64(j), 100000, key))
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotes(txs)
+ pool.addRemotes(txs)
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -1341,13 +1368,14 @@ func TestPendingMinimumAllowance(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = 1
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 5)
@@ -1367,7 +1395,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
}
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
for addr, list := range pool.pending {
if list.Len() != int(config.AccountSlots) {
@@ -1389,10 +1417,11 @@ func TestRepricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1423,8 +1452,8 @@ func TestRepricing(t *testing.T) {
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs)
- pool.AddLocal(ltx)
+ pool.addRemotesSync(txs)
+ pool.addLocal(ltx)
pending, queued := pool.Stats()
if pending != 7 {
@@ -1440,7 +1469,7 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Reprice the pool and check that underpriced transactions get dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats()
if pending != 2 {
@@ -1456,14 +1485,14 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Check that we can't add the old transactions back
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to conain %v", err, ErrUnderpriced)
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to conain %v", err, ErrUnderpriced)
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want error to conain %v", err, ErrUnderpriced)
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
if err := validateEvents(events, 0); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
@@ -1473,7 +1502,7 @@ func TestRepricing(t *testing.T) {
}
// However we can add local underpriced transactions
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
- if err := pool.AddLocal(tx); err != nil {
+ if err := pool.addLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
if pending, _ = pool.Stats(); pending != 3 {
@@ -1486,13 +1515,13 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// And we can fill gaps with properly priced transactions
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
+ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
if err := validateEvents(events, 5); err != nil {
@@ -1513,7 +1542,7 @@ func TestRepricingDynamicFee(t *testing.T) {
// Create the pool to test the pricing enforcement with
pool, _ := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1544,8 +1573,8 @@ func TestRepricingDynamicFee(t *testing.T) {
ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs)
- pool.AddLocal(ltx)
+ pool.addRemotesSync(txs)
+ pool.addLocal(ltx)
pending, queued := pool.Stats()
if pending != 7 {
@@ -1561,7 +1590,7 @@ func TestRepricingDynamicFee(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Reprice the pool and check that underpriced transactions get dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats()
if pending != 2 {
@@ -1578,16 +1607,16 @@ func TestRepricingDynamicFee(t *testing.T) {
}
// Check that we can't add the old transactions back
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0])
- if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])
- if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
if err := validateEvents(events, 0); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
@@ -1597,7 +1626,7 @@ func TestRepricingDynamicFee(t *testing.T) {
}
// However we can add local underpriced transactions
tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3])
- if err := pool.AddLocal(tx); err != nil {
+ if err := pool.addLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
if pending, _ = pool.Stats(); pending != 3 {
@@ -1611,15 +1640,15 @@ func TestRepricingDynamicFee(t *testing.T) {
}
// And we can fill gaps with properly priced transactions
tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0])
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1])
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2])
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
if err := validateEvents(events, 5); err != nil {
@@ -1637,10 +1666,11 @@ func TestRepricingKeepsLocals(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 3)
@@ -1652,23 +1682,23 @@ func TestRepricingKeepsLocals(t *testing.T) {
for i := uint64(0); i < 500; i++ {
// Add pending transaction.
pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2])
- if err := pool.AddLocal(pendingTx); err != nil {
+ if err := pool.addLocal(pendingTx); err != nil {
t.Fatal(err)
}
// Add queued transaction.
queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2])
- if err := pool.AddLocal(queuedTx); err != nil {
+ if err := pool.addLocal(queuedTx); err != nil {
t.Fatal(err)
}
// Add pending dynamic fee transaction.
pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
- if err := pool.AddLocal(pendingTx); err != nil {
+ if err := pool.addLocal(pendingTx); err != nil {
t.Fatal(err)
}
// Add queued dynamic fee transaction.
queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
- if err := pool.AddLocal(queuedTx); err != nil {
+ if err := pool.addLocal(queuedTx); err != nil {
t.Fatal(err)
}
}
@@ -1690,13 +1720,13 @@ func TestRepricingKeepsLocals(t *testing.T) {
validate()
// Reprice the pool and check that nothing is dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
validate()
- pool.SetGasPrice(big.NewInt(2))
- pool.SetGasPrice(big.NewInt(4))
- pool.SetGasPrice(big.NewInt(8))
- pool.SetGasPrice(big.NewInt(100))
+ pool.SetGasTip(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(4))
+ pool.SetGasTip(big.NewInt(8))
+ pool.SetGasTip(big.NewInt(100))
validate()
}
@@ -1710,14 +1740,15 @@ func TestUnderpricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = 2
config.GlobalQueue = 2
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1741,8 +1772,8 @@ func TestUnderpricing(t *testing.T) {
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs)
- pool.AddLocal(ltx)
+ pool.addRemotes(txs)
+ pool.addLocal(ltx)
pending, queued := pool.Stats()
if pending != 3 {
@@ -1758,8 +1789,8 @@ func TestUnderpricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Ensure that adding an underpriced transaction on block limit fails
- if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
// Replace a future transaction with a future transaction
if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
@@ -1772,12 +1803,12 @@ func TestUnderpricing(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2
t.Fatalf("failed to add well priced transaction: %v", err)
}
- if err := pool.addRemoteSync(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3
+ if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3
t.Fatalf("failed to add well priced transaction: %v", err)
}
// Ensure that replacing a pending transaction with a future transaction fails
- if err := pool.addRemoteSync(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != ErrFutureReplacePending {
- t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, ErrFutureReplacePending)
+ if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending {
+ t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending)
}
pending, queued = pool.Stats()
if pending != 2 {
@@ -1794,11 +1825,11 @@ func TestUnderpricing(t *testing.T) {
}
// Ensure that adding local transactions can push out even higher priced ones
ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to append underpriced local transaction: %v", err)
}
ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to add new underpriced local transaction: %v", err)
}
pending, queued = pool.Stats()
@@ -1824,14 +1855,15 @@ func TestStableUnderpricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = 128
config.GlobalQueue = 0
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1849,7 +1881,7 @@ func TestStableUnderpricing(t *testing.T) {
for i := uint64(0); i < config.GlobalSlots; i++ {
txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0]))
}
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
pending, queued := pool.Stats()
if pending != int(config.GlobalSlots) {
@@ -1892,7 +1924,7 @@ func TestUnderpricingDynamicFee(t *testing.T) {
t.Parallel()
pool, _ := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
pool.config.GlobalSlots = 2
pool.config.GlobalQueue = 2
@@ -1919,8 +1951,8 @@ func TestUnderpricingDynamicFee(t *testing.T) {
ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs) // Pend K0:0, K0:1; Que K1:1
- pool.AddLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
+ pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1
+ pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
pending, queued := pool.Stats()
if pending != 3 {
@@ -1938,13 +1970,13 @@ func TestUnderpricingDynamicFee(t *testing.T) {
// Ensure that adding an underpriced transaction fails
tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.addRemoteSync(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
// Ensure that adding high priced transactions drops cheap ones, but not own
tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1])
- if err := pool.addRemoteSync(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
+ if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
t.Fatalf("failed to add well priced transaction: %v", err)
}
@@ -1971,11 +2003,11 @@ func TestUnderpricingDynamicFee(t *testing.T) {
}
// Ensure that adding local transactions can push out even higher priced ones
ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to append underpriced local transaction: %v", err)
}
ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to add new underpriced local transaction: %v", err)
}
pending, queued = pool.Stats()
@@ -1999,7 +2031,7 @@ func TestDualHeapEviction(t *testing.T) {
t.Parallel()
pool, _ := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
pool.config.GlobalSlots = 10
pool.config.GlobalQueue = 10
@@ -2028,7 +2060,7 @@ func TestDualHeapEviction(t *testing.T) {
tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
highCap = tx
}
- pool.AddRemotesSync([]*types.Transaction{tx})
+ pool.addRemotesSync([]*types.Transaction{tx})
}
pending, queued := pool.Stats()
if pending+queued != 20 {
@@ -2058,10 +2090,11 @@ func TestDeduplication(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create a test account to add transactions with
key, _ := crypto.GenerateKey()
@@ -2076,7 +2109,7 @@ func TestDeduplication(t *testing.T) {
for i := 0; i < len(txs); i += 2 {
firsts = append(firsts, txs[i])
}
- errs := pool.AddRemotesSync(firsts)
+ errs := pool.addRemotesSync(firsts)
if len(errs) != len(firsts) {
t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts))
}
@@ -2093,7 +2126,7 @@ func TestDeduplication(t *testing.T) {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1)
}
// Try to add all of them now and ensure previous ones error out as knowns
- errs = pool.AddRemotesSync(txs)
+ errs = pool.addRemotesSync(txs)
if len(errs) != len(txs) {
t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs))
}
@@ -2124,10 +2157,11 @@ func TestReplacement(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -2145,10 +2179,10 @@ func TestReplacement(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original cheap pending transaction: %v", err)
}
if err := validateEvents(events, 2); err != nil {
@@ -2158,10 +2192,10 @@ func TestReplacement(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original proper pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil {
t.Fatalf("failed to replace original proper pending transaction: %v", err)
}
if err := validateEvents(events, 2); err != nil {
@@ -2169,23 +2203,23 @@ func TestReplacement(t *testing.T) {
}
// Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original cheap queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original proper queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil {
t.Fatalf("failed to replace original proper queued transaction: %v", err)
}
@@ -2204,7 +2238,7 @@ func TestReplacementDynamicFee(t *testing.T) {
// Create the pool to test the pricing enforcement with
pool, key := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
// Keep track of transaction events to ensure all executables get announced
@@ -2246,12 +2280,12 @@ func TestReplacementDynamicFee(t *testing.T) {
}
// 2. Don't bump tip or feecap => discard
tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 3. Bump both more than min => accept
tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key)
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err)
}
// 4. Check events match expected (2 new executable txs during pending, 0 during queue)
@@ -2269,27 +2303,27 @@ func TestReplacementDynamicFee(t *testing.T) {
}
// 6. Bump tip max allowed so it's still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 7. Bump fee cap max allowed so it's still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 8. Bump tip min for acceptance => accept
tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 9. Bump fee cap min for acceptance => accept
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 10. Check events match expected (3 new executable txs during pending, 0 during queue)
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key)
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err)
}
// 11. Check events match expected (3 new executable txs during pending, 0 during queue)
@@ -2329,14 +2363,15 @@ func testJournaling(t *testing.T, nolocals bool) {
// Create the original pool to inject transaction into the journal
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.NoLocals = nolocals
config.Journal = journal
config.Rejournal = time.Second
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
@@ -2346,13 +2381,13 @@ func testJournaling(t *testing.T, nolocals bool) {
testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
// Add three local and a remote transactions and ensure they are queued up
- if err := pool.AddLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
- if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
- if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil {
@@ -2369,11 +2404,12 @@ func testJournaling(t *testing.T, nolocals bool) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
- pool.Stop()
+ pool.Close()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
- blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool = NewTxPool(config, params.TestChainConfig, blockchain)
+ pool = New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats()
if queued != 0 {
@@ -2395,11 +2431,12 @@ func testJournaling(t *testing.T, nolocals bool) {
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
<-pool.requestReset(nil, nil)
time.Sleep(2 * config.Rejournal)
- pool.Stop()
+ pool.Close()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
- blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
- pool = NewTxPool(config, params.TestChainConfig, blockchain)
+ blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+ pool = New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats()
if pending != 0 {
@@ -2417,7 +2454,7 @@ func testJournaling(t *testing.T, nolocals bool) {
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
- pool.Stop()
+ pool.Close()
}
// TestStatusCheck tests that the pool can correctly retrieve the
@@ -2427,10 +2464,11 @@ func TestStatusCheck(t *testing.T) {
// Create the pool to test the status retrievals with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
// Create the test accounts to check various transaction statuses with
keys := make([]*ecdsa.PrivateKey, 3)
@@ -2447,7 +2485,7 @@ func TestStatusCheck(t *testing.T) {
txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only
// Import the transaction and ensure they are correctly added
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
pending, queued := pool.Stats()
if pending != 2 {
@@ -2465,13 +2503,11 @@ func TestStatusCheck(t *testing.T) {
hashes[i] = tx.Hash()
}
hashes = append(hashes, common.Hash{})
+ expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown}
- statuses := pool.Status(hashes)
- expect := []TxStatus{TxStatusPending, TxStatusPending, TxStatusQueued, TxStatusQueued, TxStatusUnknown}
-
- for i := 0; i < len(statuses); i++ {
- if statuses[i] != expect[i] {
- t.Errorf("transaction %d: status mismatch: have %v, want %v", i, statuses[i], expect[i])
+ for i := 0; i < len(hashes); i++ {
+ if status := pool.Status(hashes[i]); status != expect[i] {
+ t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i])
}
}
}
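Note: pool.Status now takes a single hash and returns one status, instead of accepting a slice (see the SubPool interface added later in this diff). A hedged, caller-side sketch assuming the subnet-evm txpool and go-ethereum common packages; statusBatch is a hypothetical helper, not part of the pool API:

package example // illustrative only; not part of this PR

import (
	"github.com/ava-labs/subnet-evm/core/txpool"
	"github.com/ethereum/go-ethereum/common"
)

// statusBatch rebuilds the old []TxStatus result shape on top of the new
// per-hash Status method.
func statusBatch(p txpool.SubPool, hashes []common.Hash) []txpool.TxStatus {
	out := make([]txpool.TxStatus, len(hashes))
	for i, h := range hashes {
		out[i] = p.Status(h) // one lookup per hash
	}
	return out
}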
@@ -2503,7 +2539,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1
func benchmarkPendingDemotion(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -2528,7 +2564,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1
func benchmarkFuturePromotion(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -2556,7 +2592,7 @@ func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 1000
func benchmarkBatchInsert(b *testing.B, size int, local bool) {
// Generate a batch of transactions to enqueue into the pool
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000000000000))
@@ -2572,9 +2608,9 @@ func benchmarkBatchInsert(b *testing.B, size int, local bool) {
b.ResetTimer()
for _, batch := range batches {
if local {
- pool.AddLocals(batch)
+ pool.addLocals(batch)
} else {
- pool.AddRemotes(batch)
+ pool.addRemotes(batch)
}
}
}
@@ -2602,15 +2638,15 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
pool, _ := setupPool()
testAddBalance(pool, account, big.NewInt(100000000))
for _, local := range locals {
- pool.AddLocal(local)
+ pool.addLocal(local)
}
b.StartTimer()
// Assign a high enough balance for testing
testAddBalance(pool, remoteAddr, big.NewInt(100000000))
for i := 0; i < len(remotes); i++ {
- pool.AddRemotes([]*types.Transaction{remotes[i]})
+ pool.addRemotes([]*types.Transaction{remotes[i]})
}
- pool.Stop()
+ pool.Close()
}
}
@@ -2618,7 +2654,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
func BenchmarkMultiAccountBatchInsert(b *testing.B) {
// Generate a batch of transactions to enqueue into the pool
pool, _ := setupPool()
- defer pool.Stop()
+ defer pool.Close()
b.ReportAllocs()
batches := make(types.Transactions, b.N)
for i := 0; i < b.N; i++ {
@@ -2631,6 +2667,6 @@ func BenchmarkMultiAccountBatchInsert(b *testing.B) {
// Benchmark importing the transactions into the queue
b.ResetTimer()
for _, tx := range batches {
- pool.AddRemotesSync([]*types.Transaction{tx})
+ pool.addRemotesSync([]*types.Transaction{tx})
}
}
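Note: throughout this test file the lifecycle changes from NewTxPool(config, chainconfig, chain) / Stop() to New / Init / Close. A condensed sketch of the pattern the updated tests exercise, reusing this file's existing imports and test helpers (newTestBlockChain, testTxPoolConfig, makeAddressReserver):

statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))

pool := New(testTxPoolConfig, blockchain) // construction alone no longer starts the pool
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), // minimum gas tip
	blockchain.CurrentBlock(), makeAddressReserver()) // current head + address reserver
defer pool.Close() // Close replaces the old Stop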
diff --git a/core/txpool/list.go b/core/txpool/legacypool/list.go
similarity index 99%
rename from core/txpool/list.go
rename to core/txpool/legacypool/list.go
index 44fd3e9eb1..92b4e673eb 100644
--- a/core/txpool/list.go
+++ b/core/txpool/legacypool/list.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"container/heap"
@@ -600,7 +600,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool {
func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop
for slots > 0 {
- if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio || floatingRatio == 0 {
+ if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio {
// Discard stale transactions if found during cleanup
tx := heap.Pop(&l.urgent).(*types.Transaction)
if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
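Note on the Discard change above: eviction pops from the urgent heap only while it holds more than its share relative to the floating heap; the redundant floatingRatio == 0 escape hatch is dropped. A sketch of the kept condition, assuming the 4:1 urgent-to-floating split (the real constants live in legacypool/list.go and are not shown in this hunk):

package example

// Assumed ratio constants; adjust to the values actually defined in list.go.
const (
	urgentRatio   = 4
	floatingRatio = 1
)

// popFromUrgent mirrors the retained condition: evict from the urgent heap
// while it is over its share of the total, otherwise evict from floating.
func popFromUrgent(urgentLen, floatingLen int) bool {
	return urgentLen*floatingRatio > floatingLen*urgentRatio
}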
diff --git a/core/txpool/list_test.go b/core/txpool/legacypool/list_test.go
similarity index 99%
rename from core/txpool/list_test.go
rename to core/txpool/legacypool/list_test.go
index fe8e8d5710..d7ca91844e 100644
--- a/core/txpool/list_test.go
+++ b/core/txpool/legacypool/list_test.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"math/big"
diff --git a/core/txpool/noncer.go b/core/txpool/legacypool/noncer.go
similarity index 99%
rename from core/txpool/noncer.go
rename to core/txpool/legacypool/noncer.go
index df416c0958..b0280882ff 100644
--- a/core/txpool/noncer.go
+++ b/core/txpool/legacypool/noncer.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"sync"
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
new file mode 100644
index 0000000000..fb00fb1abc
--- /dev/null
+++ b/core/txpool/subpool.go
@@ -0,0 +1,153 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import (
+ "math/big"
+ "time"
+
+ "github.com/ava-labs/subnet-evm/core"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/event"
+)
+
+// Transaction is a helper struct to group together a canonical transaction with
+// satellite data items that are needed by the pool but are not part of the chain.
+type Transaction struct {
+ Tx *types.Transaction // Canonical transaction
+
+ BlobTxBlobs []kzg4844.Blob // Blobs needed by the blob pool
+ BlobTxCommits []kzg4844.Commitment // Commitments needed by the blob pool
+ BlobTxProofs []kzg4844.Proof // Proofs needed by the blob pool
+}
+
+// LazyTransaction contains a small subset of the transaction properties that is
+// enough for the miner and other APIs to handle large batches of transactions;
+// and supports pulling up the entire transaction when really needed.
+type LazyTransaction struct {
+ Pool SubPool // Transaction subpool to pull the real transaction up
+ Hash common.Hash // Transaction hash to pull up if needed
+ Tx *Transaction // Transaction if already resolved
+
+ Time time.Time // Time when the transaction was first seen
+ GasFeeCap *big.Int // Maximum fee per gas the transaction may consume
+ GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay
+}
+
+// Resolve retrieves the full transaction belonging to a lazy handle if it is still
+// maintained by the transaction pool.
+func (ltx *LazyTransaction) Resolve() *Transaction {
+ if ltx.Tx == nil {
+ ltx.Tx = ltx.Pool.Get(ltx.Hash)
+ }
+ return ltx.Tx
+}
+
+// AddressReserver is passed by the main transaction pool to subpools, so they
+// may request (and relinquish) exclusive access to certain addresses.
+type AddressReserver func(addr common.Address, reserve bool) error
+
+// SubPool represents a specialized transaction pool that lives on its own (e.g.
+// blob pool). Since, independent of how many specialized pools we have, they
+// all need to be updated in lockstep and assembled into one coherent view for
+// block production, this interface defines the common methods that allow the
+// primary transaction pool to manage the subpools.
+type SubPool interface {
+ // Filter is a selector used to decide whether a transaction would be added
+ // to this particular subpool.
+ Filter(tx *types.Transaction) bool
+
+ // Init sets the base parameters of the subpool, allowing it to load any saved
+ // transactions from disk and also permitting internal maintenance routines to
+ // start up.
+ //
+ // These should not be passed as a constructor argument - nor should the pools
+ // start by themselves - in order to keep multiple subpools in lockstep with
+ // one another.
+ Init(gasTip *big.Int, head *types.Header, reserve AddressReserver) error
+
+ // Close terminates any background processing threads and releases any held
+ // resources.
+ Close() error
+
+ // Reset retrieves the current state of the blockchain and ensures the content
+ // of the transaction pool is valid with regard to the chain state.
+ Reset(oldHead, newHead *types.Header)
+
+ // SetGasTip updates the minimum price required by the subpool for a new
+ // transaction, and drops all transactions below this threshold.
+ SetGasTip(tip *big.Int)
+ SetMinFee(fee *big.Int)
+
+ // Has returns an indicator whether subpool has a transaction cached with the
+ // given hash.
+ Has(hash common.Hash) bool
+ HasLocal(hash common.Hash) bool
+
+ // Get returns a transaction if it is contained in the pool, or nil otherwise.
+ Get(hash common.Hash) *Transaction
+
+ // Add enqueues a batch of transactions into the pool if they are valid. Due
+ // to the large transaction churn, add may postpone fully integrating the tx
+ // to a later point to batch multiple ones together.
+ Add(txs []*Transaction, local bool, sync bool) []error
+
+ // Pending retrieves all currently processable transactions, grouped by origin
+ // account and sorted by nonce.
+ Pending(enforceTips bool) map[common.Address][]*LazyTransaction
+ PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*LazyTransaction
+ PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*LazyTransaction
+ IteratePending(f func(tx *Transaction) bool) bool // Returns false if iteration was interrupted.
+
+ // SubscribeTransactions subscribes to new transaction events.
+ SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription
+
+ // Nonce returns the next nonce of an account, with all transactions executable
+ // by the pool already applied on top.
+ Nonce(addr common.Address) uint64
+
+ // Stats retrieves the current pool stats, namely the number of pending and the
+ // number of queued (non-executable) transactions.
+ Stats() (int, int)
+
+ // Content retrieves the data content of the transaction pool, returning all the
+ // pending as well as queued transactions, grouped by account and sorted by nonce.
+ Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction)
+
+ // ContentFrom retrieves the data content of the transaction pool, returning the
+ // pending as well as queued transactions of this address, grouped by nonce.
+ ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction)
+
+ // Locals retrieves the accounts currently considered local by the pool.
+ Locals() []common.Address
+
+ // Status returns the known status (unknown/pending/queued) of a transaction
+ // identified by its hash.
+ Status(hash common.Hash) TxStatus
+}
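Note: the new SubPool interface above is what the coordinating pool in txpool.go drives. A hedged routing sketch using only methods declared on the interface; addToFirstMatch and fullTx are hypothetical names, not the actual coordinator code:

package example // illustrative only; not part of this PR

import (
	"errors"

	"github.com/ava-labs/subnet-evm/core/txpool"
	"github.com/ava-labs/subnet-evm/core/types"
)

// addToFirstMatch hands a transaction to the first subpool whose Filter
// accepts it, assuming Add returns one error per submitted transaction.
func addToFirstMatch(subpools []txpool.SubPool, tx *types.Transaction, local, sync bool) error {
	wrapped := &txpool.Transaction{Tx: tx}
	for _, sub := range subpools {
		if !sub.Filter(tx) {
			continue // this subpool does not handle this transaction type
		}
		return sub.Add([]*txpool.Transaction{wrapped}, local, sync)[0]
	}
	return errors.New("no subpool accepts this transaction type")
}

// fullTx resolves a lazy handle back into the full transaction when needed.
func fullTx(ltx *txpool.LazyTransaction) *types.Transaction {
	if resolved := ltx.Resolve(); resolved != nil {
		return resolved.Tx
	}
	return nil // already dropped by its subpool
}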
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 20f11ddc39..252aca0cf8 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -8,7 +8,7 @@
//
// Much love to the original authors for their work.
// **********
-// Copyright 2014 The go-ethereum Authors
+// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -29,136 +29,24 @@ package txpool
import (
"errors"
"fmt"
- "math"
"math/big"
- "sort"
"sync"
"sync/atomic"
- "time"
- "github.com/ava-labs/subnet-evm/commontype"
- "github.com/ava-labs/subnet-evm/consensus/dummy"
"github.com/ava-labs/subnet-evm/core"
- "github.com/ava-labs/subnet-evm/core/state"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/metrics"
- "github.com/ava-labs/subnet-evm/params"
- "github.com/ava-labs/subnet-evm/precompile/contracts/feemanager"
- "github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist"
- "github.com/ava-labs/subnet-evm/utils"
- "github.com/ava-labs/subnet-evm/vmerrs"
-
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
)
-const (
- // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
- chainHeadChanSize = 10
-
- // txSlotSize is used to calculate how many data slots a single transaction
- // takes up based on its size. The slots are used as DoS protection, ensuring
- // that validating a new transaction remains a constant operation (in reality
- // O(maxslots), where max slots are 4 currently).
- txSlotSize = 32 * 1024
-
- // txMaxSize is the maximum size a single transaction can have. This field has
- // non-trivial consequences: larger transactions are significantly harder and
- // more expensive to propagate; larger transactions also take more resources
- // to validate whether they fit into the pool or not.
- //
- // Note: the max contract size is 24KB
- txMaxSize = 4 * txSlotSize // 128KB
-)
-
var (
- // ErrAlreadyKnown is returned if the transactions is already contained
- // within the pool.
- ErrAlreadyKnown = errors.New("already known")
-
- // ErrInvalidSender is returned if the transaction contains an invalid signature.
- ErrInvalidSender = errors.New("invalid sender")
-
- // ErrUnderpriced is returned if a transaction's gas price is below the minimum
- // configured for the transaction pool.
- ErrUnderpriced = errors.New("transaction underpriced")
-
- // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
- // another remote transaction.
- ErrTxPoolOverflow = errors.New("txpool is full")
-
- // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
- // with a different one without the required price bump.
- ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
-
- // ErrGasLimit is returned if a transaction's requested gas limit exceeds the
- // maximum allowance of the current block.
- ErrGasLimit = errors.New("exceeds block gas limit")
-
- // ErrNegativeValue is a sanity error to ensure no one is able to specify a
- // transaction with a negative value.
- ErrNegativeValue = errors.New("negative value")
-
- // ErrOversizedData is returned if the input data of a transaction is greater
- // than some meaningful limit a user might use. This is not a consensus error
- // making the transaction invalid, rather a DOS protection.
- ErrOversizedData = errors.New("oversized data")
-
- // ErrFutureReplacePending is returned if a future transaction replaces a pending
- // transaction. Future transactions should only be able to replace other future transactions.
- ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
-
// ErrOverdraft is returned if a transaction would cause the senders balance to go negative
// thus invalidating a potential large number of transactions.
ErrOverdraft = errors.New("transaction would cause overdraft")
)
-var (
- evictionInterval = time.Minute // Time interval to check for evictable transactions
- statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
- baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after SubnetEVM is enabled
-)
-
-var (
- // Metrics for the pending pool
- pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
- pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
- pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
- pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds
-
- // Metrics for the queued pool
- queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
- queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
- queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
- queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds
- queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime
-
- // General tx metrics
- knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil)
- validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil)
- invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil)
- underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
- overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil)
-
- // throttleTxMeter counts how many transactions are rejected due to too-many-changes between
- // txpool reorgs.
- throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
- // reorgDurationTimer measures how long time a txpool reorg takes.
- reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
- // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
- // that this number is pretty low, since txpool reorgs happen very frequently.
- dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
-
- pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
- queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
- localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
- slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
-
- reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
-)
-
// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint
@@ -168,432 +56,308 @@ const (
TxStatusPending
)
-// blockChain provides the state of blockchain and current gas limit to do
-// some pre checks in tx pool and event subscribers.
-type blockChain interface {
+var (
+ // reservationsGaugeName is the prefix of a per-subpool address reservation
+ // metric.
+ //
+ // This is mostly a sanity metric to ensure there's no bug that would make
+ // some subpool hog all the reservations due to mis-accounting.
+ reservationsGaugeName = "txpool/reservations"
+)
+
+// BlockChain defines the minimal set of methods needed to back a tx pool with
+// a chain. It exists to allow mocking out the live chain in tests.
+type BlockChain interface {
+ // CurrentBlock returns the current head of the chain.
CurrentBlock() *types.Header
- GetBlock(hash common.Hash, number uint64) *types.Block
- StateAt(root common.Hash) (*state.StateDB, error)
- SenderCacher() *core.TxSenderCacher
- GetFeeConfigAt(parent *types.Header) (commontype.FeeConfig, *big.Int, error)
+ // SubscribeChainHeadEvent subscribes to new blocks being added to the chain.
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
-// Config are the configuration parameters of the transaction pool.
-type Config struct {
- Locals []common.Address // Addresses that should be treated by default as local
- NoLocals bool // Whether local transaction handling should be disabled
- Journal string // Journal of local transactions to survive node restarts
- Rejournal time.Duration // Time interval to regenerate the local transaction journal
-
- PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
- PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
-
- AccountSlots uint64 // Number of executable transaction slots guaranteed per account
- GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
- AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
- GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
-
- Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
-}
-
-// DefaultConfig contains the default configurations for the transaction
-// pool.
-var DefaultConfig = Config{
- // If we re-enable txpool journaling, we should also add the saved local
- // transactions to the p2p gossip on startup.
- Journal: "",
- Rejournal: time.Hour,
-
- PriceLimit: 1,
- PriceBump: 10,
-
- AccountSlots: 16,
- GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
- AccountQueue: 64,
- GlobalQueue: 1024,
-
- Lifetime: 10 * time.Minute,
-}
-
-// sanitize checks the provided user configurations and changes anything that's
-// unreasonable or unworkable.
-func (config *Config) sanitize() Config {
- conf := *config
- if conf.Rejournal < time.Second {
- log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
- conf.Rejournal = time.Second
- }
- if conf.PriceLimit < 1 {
- log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
- conf.PriceLimit = DefaultConfig.PriceLimit
- }
- if conf.PriceBump < 1 {
- log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
- conf.PriceBump = DefaultConfig.PriceBump
- }
- if conf.AccountSlots < 1 {
- log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
- conf.AccountSlots = DefaultConfig.AccountSlots
- }
- if conf.GlobalSlots < 1 {
- log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
- conf.GlobalSlots = DefaultConfig.GlobalSlots
- }
- if conf.AccountQueue < 1 {
- log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
- conf.AccountQueue = DefaultConfig.AccountQueue
- }
- if conf.GlobalQueue < 1 {
- log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
- conf.GlobalQueue = DefaultConfig.GlobalQueue
- }
- if conf.Lifetime < 1 {
- log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
- conf.Lifetime = DefaultConfig.Lifetime
- }
- return conf
-}
-
-// TxPool contains all currently known transactions. Transactions
-// enter the pool when they are received from the network or submitted
-// locally. They exit the pool when they are included in the blockchain.
-//
-// The pool separates processable transactions (which can be applied to the
-// current state) and future transactions. Transactions move between those
-// two states over time as they are received and processed.
+// TxPool is an aggregator for various transaction specific pools, collectively
+// tracking all the transactions deemed interesting by the node. Transactions
+// enter the pool when they are received from the network or submitted locally.
+// They exit the pool when they are included in the blockchain or evicted due to
+// resource constraints.
type TxPool struct {
- config Config
- chainconfig *params.ChainConfig
- chain blockChain
- gasPrice *big.Int
- minimumFee *big.Int
- txFeed event.Feed
- headFeed event.Feed
- reorgFeed event.Feed
- scope event.SubscriptionScope
- signer types.Signer
- mu sync.RWMutex
-
- rules atomic.Pointer[params.Rules] // Rules for the currentHead
- eip2718 atomic.Bool // Fork indicator whether we are using EIP-2718 type transactions.
- eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions.
- eip3860 atomic.Bool // Fork indicator whether EIP-3860 is activated. (activated in Shanghai Upgrade in Ethereum)
-
- currentHead *types.Header
- // [currentState] is the state of the blockchain head. It is reset whenever
- // head changes.
- currentState *state.StateDB
- // [currentStateLock] is required to allow concurrent access to address nonces
- // and balances during reorgs and gossip handling.
- currentStateLock sync.Mutex
-
- pendingNonces *noncer // Pending state tracking virtual nonces
- currentMaxGas atomic.Uint64 // Current gas limit for transaction caps
-
- locals *accountSet // Set of local transaction to exempt from eviction rules
- journal *journal // Journal of local transaction to back up to disk
+ subpools []SubPool // List of subpools for specialized transaction handling
- pending map[common.Address]*list // All currently processable transactions
- queue map[common.Address]*list // Queued but non-processable transactions
- beats map[common.Address]time.Time // Last heartbeat from each known account
- all *lookup // All transactions to allow lookups
- priced *pricedList // All transactions sorted by price
+ reservations map[common.Address]SubPool // Map with the account to pool reservations
+ reserveLock sync.Mutex // Lock protecting the account reservations
- chainHeadCh chan core.ChainHeadEvent
- chainHeadSub event.Subscription
- reqResetCh chan *txpoolResetRequest
- reqPromoteCh chan *accountSet
- queueTxEventCh chan *types.Transaction
- reorgDoneCh chan chan struct{}
- reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop
- generalShutdownChan chan struct{} // closed when the transaction pool is stopped. Any goroutine can listen
- // to this to be notified if it should shut down.
- wg sync.WaitGroup // tracks loop, scheduleReorgLoop
- initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
+	subs  event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
+ quit chan chan error // Quit channel to tear down the head updater
- changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
+ gasTip atomic.Pointer[big.Int] // Remember last value set so it can be retrieved
+ reorgFeed event.Feed
}
-type txpoolResetRequest struct {
- oldHead, newHead *types.Header
-}
-
-// NewTxPool creates a new transaction pool to gather, sort and filter inbound
+// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
-func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
- // Sanitize the input to ensure no vulnerable gas prices are set
- config = (&config).sanitize()
+func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) {
+ // Retrieve the current head so that all subpools and this main coordinator
+ // pool will have the same starting state, even if the chain moves forward
+ // during initialization.
+ head := chain.CurrentBlock()
- // Create the transaction pool with its initial settings
pool := &TxPool{
- config: config,
- chainconfig: chainconfig,
- chain: chain,
- signer: types.LatestSigner(chainconfig),
- pending: make(map[common.Address]*list),
- queue: make(map[common.Address]*list),
- beats: make(map[common.Address]time.Time),
- all: newLookup(),
- chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
- reqResetCh: make(chan *txpoolResetRequest),
- reqPromoteCh: make(chan *accountSet),
- queueTxEventCh: make(chan *types.Transaction),
- reorgDoneCh: make(chan chan struct{}),
- reorgShutdownCh: make(chan struct{}),
- initDoneCh: make(chan struct{}),
- generalShutdownChan: make(chan struct{}),
- gasPrice: new(big.Int).SetUint64(config.PriceLimit),
- }
- pool.locals = newAccountSet(pool.signer)
- for _, addr := range config.Locals {
- log.Info("Setting new local account", "address", addr)
- pool.locals.add(addr)
- }
- pool.priced = newPricedList(pool.all)
- pool.reset(nil, chain.CurrentBlock())
-
- // Start the reorg loop early so it can handle requests generated during journal loading.
- pool.wg.Add(1)
- go pool.scheduleReorgLoop()
-
- // If local transactions and journaling is enabled, load from disk
- if !config.NoLocals && config.Journal != "" {
- pool.journal = newTxJournal(config.Journal)
-
- if err := pool.journal.load(pool.AddLocals); err != nil {
- log.Warn("Failed to load transaction journal", "err", err)
+ subpools: subpools,
+ reservations: make(map[common.Address]SubPool),
+ quit: make(chan chan error),
+ }
+ for i, subpool := range subpools {
+ if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil {
+ for j := i - 1; j >= 0; j-- {
+ subpools[j].Close()
+ }
+ return nil, err
+ }
+ }
+ go pool.loop(head, chain)
+ return pool, nil
+}
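
For context, a hedged sketch of the subpool side of the reservation contract set up here (the reserver callback handed to Init, detailed just below): reserve an address before tracking its first transaction, release it when the last one is dropped. The demoPool type and its fields are hypothetical; only the AddressReserver calls follow the contract in this change.

package example // hypothetical subpool fragment

import (
	"github.com/ava-labs/subnet-evm/core/txpool"
	"github.com/ethereum/go-ethereum/common"
)

// demoPool stands in for a concrete subpool implementation; only the reserve
// calls mirror the exclusivity rules enforced by TxPool.reserver.
type demoPool struct {
	reserve txpool.AddressReserver // callback received via Init
	tracked map[common.Address]int // queued transaction count per account
}

// track claims the address on its first transaction; a non-nil error means
// another subpool already owns it and the transaction should be rejected.
func (p *demoPool) track(addr common.Address) error {
	if p.tracked[addr] == 0 {
		if err := p.reserve(addr, true); err != nil {
			return err
		}
	}
	p.tracked[addr]++
	return nil
}

// drop releases the address once its last transaction leaves the subpool.
func (p *demoPool) drop(addr common.Address) {
	p.tracked[addr]--
	if p.tracked[addr] <= 0 {
		delete(p.tracked, addr)
		_ = p.reserve(addr, false)
	}
}
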
+
+// reserver is a method to create an address reservation callback to exclusively
+// assign/deassign addresses to/from subpools. This can ensure that at any point
+// in time, only a single subpool is able to manage an account, avoiding cross
+// subpool eviction issues and nonce conflicts.
+func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
+ return func(addr common.Address, reserve bool) error {
+ p.reserveLock.Lock()
+ defer p.reserveLock.Unlock()
+
+ owner, exists := p.reservations[addr]
+ if reserve {
+ // Double reservations are forbidden even from the same pool to
+ // avoid subtle bugs in the long term.
+ if exists {
+ if owner == subpool {
+ log.Error("pool attempted to reserve already-owned address", "address", addr)
+ return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed
+ }
+ return errors.New("address already reserved")
+ }
+ p.reservations[addr] = subpool
+ if metrics.Enabled {
+ m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
+ metrics.GetOrRegisterGauge(m, nil).Inc(1)
+ }
+ return nil
+ }
+ // Ensure subpools only attempt to unreserve their own owned addresses,
+ // otherwise flag as a programming error.
+ if !exists {
+ log.Error("pool attempted to unreserve non-reserved address", "address", addr)
+ return errors.New("address not reserved")
}
- if err := pool.journal.rotate(pool.local()); err != nil {
- log.Warn("Failed to rotate transaction journal", "err", err)
+ if subpool != owner {
+ log.Error("pool attempted to unreserve non-owned address", "address", addr)
+ return errors.New("address not owned")
}
+ delete(p.reservations, addr)
+ if metrics.Enabled {
+ m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
+ metrics.GetOrRegisterGauge(m, nil).Dec(1)
+ }
+ return nil
}
+}
- // Subscribe events from blockchain and start the main event loop.
- pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
- pool.wg.Add(1)
- go pool.loop()
+// Close terminates the transaction pool and all its subpools.
+func (p *TxPool) Close() error {
+ p.subs.Close()
- pool.startPeriodicFeeUpdate()
+ var errs []error
+
+ // Terminate the reset loop and wait for it to finish
+ errc := make(chan error)
+ p.quit <- errc
+ if err := <-errc; err != nil {
+ errs = append(errs, err)
+ }
- return pool
+ // Terminate each subpool
+ for _, subpool := range p.subpools {
+ if err := subpool.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) > 0 {
+ return fmt.Errorf("subpool close errors: %v", errs)
+ }
+ return nil
}
// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
-func (pool *TxPool) loop() {
- defer pool.wg.Done()
-
+func (p *TxPool) loop(head *types.Header, chain BlockChain) {
+ // Subscribe to chain head events to trigger subpool resets
var (
- prevPending, prevQueued, prevStales int
- // Start the stats reporting and transaction eviction tickers
- report = time.NewTicker(statsReportInterval)
- evict = time.NewTicker(evictionInterval)
- journal = time.NewTicker(pool.config.Rejournal)
- // Track the previous head headers for transaction reorgs
- head = pool.chain.CurrentBlock()
+ newHeadCh = make(chan core.ChainHeadEvent)
+ newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh)
)
- defer report.Stop()
- defer evict.Stop()
- defer journal.Stop()
-
- // Notify tests that the init phase is done
- close(pool.initDoneCh)
- for {
- select {
- // Handle ChainHeadEvent
- case ev := <-pool.chainHeadCh:
- if ev.Block != nil {
- pool.requestReset(head, ev.Block.Header())
- head = ev.Block.Header()
- pool.headFeed.Send(core.NewTxPoolHeadEvent{Head: head})
- }
+ defer newHeadSub.Unsubscribe()
- // System shutdown.
- case <-pool.chainHeadSub.Err():
- close(pool.reorgShutdownCh)
- return
-
- // Handle stats reporting ticks
- case <-report.C:
- pool.mu.RLock()
- pending, queued := pool.stats()
- pool.mu.RUnlock()
- stales := int(pool.priced.stales.Load())
-
- if pending != prevPending || queued != prevQueued || stales != prevStales {
- log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
- prevPending, prevQueued, prevStales = pending, queued, stales
- }
-
- // Handle inactive account transaction eviction
- case <-evict.C:
- pool.mu.Lock()
- for addr := range pool.queue {
- // Skip local transactions from the eviction mechanism
- if pool.locals.contains(addr) {
- continue
- }
- // Any non-locals old enough should be removed
- if time.Since(pool.beats[addr]) > pool.config.Lifetime {
- list := pool.queue[addr].Flatten()
- for _, tx := range list {
- pool.removeTx(tx.Hash(), true)
+ // Track the previous and current head to feed to an idle reset
+ var (
+ oldHead = head
+ newHead = oldHead
+ )
+ // Consume chain head events and start resets when none is running
+ var (
+ resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently
+ resetDone = make(chan *types.Header)
+ )
+ var errc chan error
+ for errc == nil {
+ // Something interesting might have happened, run a reset if there is
+		// one needed but none is running. The resetter will run in its own
+		// goroutine to allow chain head events to be consumed continuously.
+ if newHead != oldHead {
+ // Try to inject a busy marker and start a reset if successful
+ select {
+ case resetBusy <- struct{}{}:
+ // Busy marker injected, start a new subpool reset
+ go func(oldHead, newHead *types.Header) {
+ for _, subpool := range p.subpools {
+ subpool.Reset(oldHead, newHead)
}
- queuedEvictionMeter.Mark(int64(len(list)))
- }
- }
- pool.mu.Unlock()
+ resetDone <- newHead
+ p.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: newHead})
+ }(oldHead, newHead)
- // Handle local transaction journal rotation
- case <-journal.C:
- if pool.journal != nil {
- pool.mu.Lock()
- if err := pool.journal.rotate(pool.local()); err != nil {
- log.Warn("Failed to rotate local tx journal", "err", err)
- }
- pool.mu.Unlock()
+ default:
+ // Reset already running, wait until it finishes
}
}
- }
-}
-
-// Stop terminates the transaction pool.
-func (pool *TxPool) Stop() {
- // Unsubscribe all subscriptions registered from txpool
- pool.scope.Close()
+		// Wait for the next chain head event or for a previous reset to finish
+ select {
+ case event := <-newHeadCh:
+ // Chain moved forward, store the head for later consumption
+ newHead = event.Block.Header()
- close(pool.generalShutdownChan)
- // Unsubscribe subscriptions registered from blockchain
- pool.chainHeadSub.Unsubscribe()
- pool.wg.Wait()
+ case head := <-resetDone:
+ // Previous reset finished, update the old head and allow a new reset
+ oldHead = head
+ <-resetBusy
- if pool.journal != nil {
- pool.journal.close()
+ case errc = <-p.quit:
+ // Termination requested, break out on the next loop round
+ }
}
- log.Info("Transaction pool stopped")
-}
-
-// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
-// starts sending event to the given channel.
-func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
- return pool.scope.Track(pool.txFeed.Subscribe(ch))
-}
-
-// SubscribeNewHeadEvent registers a subscription of NewHeadEvent and
-// starts sending event to the given channel.
-func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- core.NewTxPoolHeadEvent) event.Subscription {
- return pool.scope.Track(pool.headFeed.Subscribe(ch))
+ // Notify the closer of termination (no error possible for now)
+ errc <- nil
}
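
The resetBusy idiom above is a small, reusable pattern: a buffered channel of capacity one acts as a non-blocking try-lock, so at most one reset runs at a time while head events keep being drained. A standalone sketch of that pattern follows; the names are illustrative, not from this change.

package example

// runExclusive starts work in a new goroutine only if no previous run is still
// in flight, reporting whether it was started. busy must be a buffered channel
// of capacity one; the worker signals done, and the consumer of done is
// expected to drain busy afterwards, mirroring resetBusy/resetDone above.
func runExclusive(busy chan struct{}, done chan<- struct{}, work func()) bool {
	select {
	case busy <- struct{}{}: // claimed the single slot
		go func() {
			work()
			done <- struct{}{}
		}()
		return true
	default:
		return false // previous run still busy; try again later
	}
}
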
-// SubscribeNewReorgEvent registers a subscription of NewReorgEvent and
-// starts sending event to the given channel.
-func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- core.NewTxPoolReorgEvent) event.Subscription {
- return pool.scope.Track(pool.reorgFeed.Subscribe(ch))
-}
-
-// GasPrice returns the current gas price enforced by the transaction pool.
-func (pool *TxPool) GasPrice() *big.Int {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- return new(big.Int).Set(pool.gasPrice)
+// GasTip returns the current gas tip enforced by the transaction pool.
+func (p *TxPool) GasTip() *big.Int {
+ return new(big.Int).Set(p.gasTip.Load())
}
-// SetGasPrice updates the minimum price required by the transaction pool for a
+// SetGasTip updates the minimum gas tip required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
-func (pool *TxPool) SetGasPrice(price *big.Int) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
+func (p *TxPool) SetGasTip(tip *big.Int) {
+ p.gasTip.Store(new(big.Int).Set(tip))
- old := pool.gasPrice
- pool.gasPrice = price
- // if the min miner fee increased, remove transactions below the new threshold
- if price.Cmp(old) > 0 {
- // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
- drop := pool.all.RemotesBelowTip(price)
- for _, tx := range drop {
- pool.removeTx(tx.Hash(), false)
- }
- pool.priced.Removed(len(drop))
+ for _, subpool := range p.subpools {
+ subpool.SetGasTip(tip)
}
-
- log.Info("Transaction pool price threshold updated", "price", price)
}
-func (pool *TxPool) SetMinFee(minFee *big.Int) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pool.minimumFee = minFee
+// SetMinFee updates the minimum fee required by the transaction pool for a
+// new transaction, and drops all transactions below this threshold.
+func (p *TxPool) SetMinFee(fee *big.Int) {
+ for _, subpool := range p.subpools {
+ subpool.SetMinFee(fee)
+ }
}
-// Nonce returns the next nonce of an account, with all transactions executable
-// by the pool already applied on top.
-func (pool *TxPool) Nonce(addr common.Address) uint64 {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- return pool.pendingNonces.get(addr)
+// Has returns an indicator whether the pool has a transaction cached with the
+// given hash.
+func (p *TxPool) Has(hash common.Hash) bool {
+ for _, subpool := range p.subpools {
+ if subpool.Has(hash) {
+ return true
+ }
+ }
+ return false
}
-// Stats retrieves the current pool stats, namely the number of pending and the
-// number of queued (non-executable) transactions.
-func (pool *TxPool) Stats() (int, int) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- return pool.stats()
+// HasLocal returns an indicator whether the pool has a local transaction cached
+// with the given hash.
+func (p *TxPool) HasLocal(hash common.Hash) bool {
+ for _, subpool := range p.subpools {
+ if subpool.HasLocal(hash) {
+ return true
+ }
+ }
+ return false
}
-// stats retrieves the current pool stats, namely the number of pending and the
-// number of queued (non-executable) transactions.
-func (pool *TxPool) stats() (int, int) {
- pending := 0
- for _, list := range pool.pending {
- pending += list.Len()
- }
- queued := 0
- for _, list := range pool.queue {
- queued += list.Len()
+// Get returns a transaction if it is contained in the pool, or nil otherwise.
+func (p *TxPool) Get(hash common.Hash) *Transaction {
+ for _, subpool := range p.subpools {
+ if tx := subpool.Get(hash); tx != nil {
+ return tx
+ }
}
- return pending, queued
+ return nil
}
-// Content retrieves the data content of the transaction pool, returning all the
-// pending as well as queued transactions, grouped by account and sorted by nonce.
-func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
+// Add enqueues a batch of transactions into the pool if they are valid. Due
+// to the large transaction churn, add may postpone fully integrating the tx
+// to a later point to batch multiple ones together.
+func (p *TxPool) Add(txs []*Transaction, local bool, sync bool) []error {
+	// Split the input transactions between the subpools. It shouldn't really
+	// happen that we receive merged batches, but it is better to handle them
+	// gracefully than to fail with strange errors.
+ //
+ // We also need to track how the transactions were split across the subpools,
+ // so we can piece back the returned errors into the original order.
+ txsets := make([][]*Transaction, len(p.subpools))
+ splits := make([]int, len(txs))
- pending := make(map[common.Address]types.Transactions, len(pool.pending))
- for addr, list := range pool.pending {
- pending[addr] = list.Flatten()
+ for i, tx := range txs {
+		// Mark this transaction as belonging to no subpool yet
+ splits[i] = -1
+
+ // Try to find a subpool that accepts the transaction
+ for j, subpool := range p.subpools {
+ if subpool.Filter(tx.Tx) {
+ txsets[j] = append(txsets[j], tx)
+ splits[i] = j
+ break
+ }
+ }
+ }
+ // Add the transactions split apart to the individual subpools and piece
+ // back the errors into the original sort order.
+ errsets := make([][]error, len(p.subpools))
+ for i := 0; i < len(p.subpools); i++ {
+ errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
}
- queued := make(map[common.Address]types.Transactions, len(pool.queue))
- for addr, list := range pool.queue {
- queued[addr] = list.Flatten()
+ errs := make([]error, len(txs))
+ for i, split := range splits {
+ // If the transaction was rejected by all subpools, mark it unsupported
+ if split == -1 {
+ errs[i] = core.ErrTxTypeNotSupported
+ continue
+ }
+ // Find which subpool handled it and pull in the corresponding error
+ errs[i] = errsets[split][0]
+ errsets[split] = errsets[split][1:]
}
- return pending, queued
+ return errs
}
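
A standalone illustration of the error re-stitching above, under the same assumption the code relies on: each subpool returns its errors in the order its transactions arrived, so popping the head of the owning subpool's slice per original index restores the caller's ordering. The helper below is a simplified stand-in, not part of this change.

package example

import "errors"

// errTxTypeNotSupported stands in for core.ErrTxTypeNotSupported.
var errTxTypeNotSupported = errors.New("transaction type not supported")

// mergeErrors reassembles per-subpool error slices (errsets) into the caller's
// original order recorded in splits; split == -1 marks a transaction that no
// subpool accepted, exactly as in TxPool.Add.
func mergeErrors(splits []int, errsets [][]error) []error {
	errs := make([]error, len(splits))
	for i, split := range splits {
		if split == -1 {
			errs[i] = errTxTypeNotSupported
			continue
		}
		errs[i] = errsets[split][0]         // next unconsumed error from that subpool
		errsets[split] = errsets[split][1:] // consume it
	}
	return errs
}
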
-// ContentFrom retrieves the data content of the transaction pool, returning the
-// pending as well as queued transactions of this address, grouped by nonce.
-func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- var pending types.Transactions
- if list, ok := pool.pending[addr]; ok {
- pending = list.Flatten()
- }
- var queued types.Transactions
- if list, ok := pool.queue[addr]; ok {
- queued = list.Flatten()
+func (p *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
+ wrapped := make([]*Transaction, len(txs))
+ for i, tx := range txs {
+ wrapped[i] = &Transaction{Tx: tx}
}
- return pending, queued
+ return p.Add(wrapped, false, true)
}
// Pending retrieves all currently processable transactions, grouped by origin
@@ -603,37 +367,20 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
-func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
- return pool.PendingWithBaseFee(enforceTips, nil)
+// account and sorted by nonce.
+func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction {
+ return p.PendingWithBaseFee(enforceTips, nil)
}
// If baseFee is nil, then pool.priced.urgent.baseFee is used.
-func (pool *TxPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address]types.Transactions {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- if baseFee == nil {
- baseFee = pool.priced.urgent.baseFee
- }
-
- pending := make(map[common.Address]types.Transactions, len(pool.pending))
- for addr, list := range pool.pending {
- txs := list.Flatten()
-
- // If the miner requests tip enforcement, cap the lists now
- if enforceTips && !pool.locals.contains(addr) {
- for i, tx := range txs {
- if tx.EffectiveGasTipIntCmp(pool.gasPrice, baseFee) < 0 {
- txs = txs[:i]
- break
- }
- }
- }
- if len(txs) > 0 {
- pending[addr] = txs
+func (p *TxPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*LazyTransaction {
+ txs := make(map[common.Address][]*LazyTransaction)
+ for _, subpool := range p.subpools {
+ for addr, set := range subpool.PendingWithBaseFee(enforceTips, baseFee) {
+ txs[addr] = set
}
}
- return pending
+ return txs
}
// PendingSize returns the number of pending txs in the tx pool.
@@ -641,1542 +388,142 @@ func (pool *TxPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[c
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
-func (pool *TxPool) PendingSize(enforceTips bool) int {
- pending := pool.Pending(enforceTips)
+func (p *TxPool) PendingSize(enforceTips bool) int {
count := 0
- for _, txs := range pending {
- count += len(txs)
+ for _, subpool := range p.subpools {
+ for _, txs := range subpool.Pending(enforceTips) {
+ count += len(txs)
+ }
}
return count
}
// PendingFrom returns the same set of transactions that would be returned from Pending restricted to only
// transactions from [addrs].
-func (pool *TxPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address]types.Transactions {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pending := make(map[common.Address]types.Transactions)
- for _, addr := range addrs {
- list, ok := pool.pending[addr]
- if !ok {
- continue
- }
- txs := list.Flatten()
-
- // If the miner requests tip enforcement, cap the lists now
- if enforceTips && !pool.locals.contains(addr) {
- for i, tx := range txs {
- if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
- txs = txs[:i]
- break
- }
- }
- }
- if len(txs) > 0 {
- pending[addr] = txs
+func (p *TxPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*LazyTransaction {
+ txs := make(map[common.Address][]*LazyTransaction)
+ for _, subpool := range p.subpools {
+ for addr, set := range subpool.PendingFrom(addrs, enforceTips) {
+ txs[addr] = set
}
}
- return pending
+ return txs
}
// IteratePending iterates over [pool.pending] until [f] returns false.
// The caller must not modify [tx].
-func (pool *TxPool) IteratePending(f func(tx *types.Transaction) bool) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- for _, list := range pool.pending {
- for _, tx := range list.txs.items {
- if !f(tx) {
- return
- }
+func (p *TxPool) IteratePending(f func(tx *Transaction) bool) {
+ for _, subpool := range p.subpools {
+ if !subpool.IteratePending(f) {
+ return
}
}
}
-// Locals retrieves the accounts currently considered local by the pool.
-func (pool *TxPool) Locals() []common.Address {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- return pool.locals.flatten()
-}
-
-// local retrieves all currently known local transactions, grouped by origin
-// account and sorted by nonce. The returned transaction set is a copy and can be
-// freely modified by calling code.
-func (pool *TxPool) local() map[common.Address]types.Transactions {
- txs := make(map[common.Address]types.Transactions)
- for addr := range pool.locals.accounts {
- if pending := pool.pending[addr]; pending != nil {
- txs[addr] = append(txs[addr], pending.Flatten()...)
- }
- if queued := pool.queue[addr]; queued != nil {
- txs[addr] = append(txs[addr], queued.Flatten()...)
+// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending
+// events to the given channel.
+func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+ subs := make([]event.Subscription, 0, len(p.subpools))
+ for _, subpool := range p.subpools {
+ sub := subpool.SubscribeTransactions(ch)
+ if sub == nil {
+ continue
}
+ subs = append(subs, sub)
}
- return txs
+ return p.subs.Track(event.JoinSubscriptions(subs...))
}
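
A brief usage sketch for the joined subscription (illustrative, not part of this change): callers see one event.Subscription no matter how many subpools feed the channel, and the subscription's error channel fires once the pool's subscription scope is closed.

package example

import (
	"github.com/ava-labs/subnet-evm/core"
	"github.com/ava-labs/subnet-evm/core/txpool"
)

// watchTxs forwards NewTxsEvent notifications to handle until the joined
// subscription is torn down (for example by TxPool.Close).
func watchTxs(pool *txpool.TxPool, handle func(core.NewTxsEvent)) {
	ch := make(chan core.NewTxsEvent, 16)
	sub := pool.SubscribeNewTxsEvent(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			handle(ev)
		case <-sub.Err():
			return
		}
	}
}
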
-// checks transaction validity against the current state.
-func (pool *TxPool) checkTxState(from common.Address, tx *types.Transaction) error {
- pool.currentStateLock.Lock()
- defer pool.currentStateLock.Unlock()
-
- txNonce := tx.Nonce()
- // Ensure the transaction adheres to nonce ordering
- if currentNonce := pool.currentState.GetNonce(from); currentNonce > txNonce {
- return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)",
- core.ErrNonceTooLow, from.Hex(), currentNonce, txNonce)
- }
-
- // cost == V + GP * GL
- balance := pool.currentState.GetBalance(from)
- if balance.Cmp(tx.Cost()) < 0 {
- return fmt.Errorf("%w: address %s have (%d) want (%d)", core.ErrInsufficientFunds, from.Hex(), balance, tx.Cost())
- }
-
- // Verify that replacing transactions will not result in overdraft
- list := pool.pending[from]
- if list != nil { // Sender already has pending txs
- sum := new(big.Int).Add(tx.Cost(), list.totalcost)
- if repl := list.txs.Get(tx.Nonce()); repl != nil {
- // Deduct the cost of a transaction replaced by this
- sum.Sub(sum, repl.Cost())
- }
- if balance.Cmp(sum) < 0 {
- log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum)
- return ErrOverdraft
- }
- }
-
- // If the tx allow list is enabled, return an error if the from address is not allow listed.
- if pool.rules.Load().IsPrecompileEnabled(txallowlist.ContractAddress) {
- txAllowListRole := txallowlist.GetTxAllowListStatus(pool.currentState, from)
- if !txAllowListRole.IsEnabled() {
- return fmt.Errorf("%w: %s", vmerrs.ErrSenderAddressNotAllowListed, from)
- }
- }
-
- return nil
+// SubscribeNewReorgEvent registers a subscription of NewReorgEvent and
+// starts sending events to the given channel.
+func (p *TxPool) SubscribeNewReorgEvent(ch chan<- core.NewTxPoolReorgEvent) event.Subscription {
+ return p.subs.Track(p.reorgFeed.Subscribe(ch))
}
-// validateTxBasics checks whether a transaction is valid according to the consensus
-// rules, but does not check state-dependent validation such as sufficient balance.
-// This check is meant as an early check which only needs to be performed once,
-// and does not require the pool mutex to be held.
-func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error {
- // Accept only legacy transactions until EIP-2718/2930 activates.
- if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject dynamic fee transactions until EIP-1559 activates.
- if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject blob transactions forever, those will have their own pool.
- if tx.Type() == types.BlobTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject transactions over defined size to prevent DOS attacks
- if tx.Size() > txMaxSize {
- return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, tx.Size(), txMaxSize)
- }
- // Check whether the init code size has been exceeded.
- if pool.eip3860.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
- return fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
- }
- // Transactions can't be negative. This may never happen using RLP decoded
- // transactions but may occur if you create a transaction using the RPC.
- if tx.Value().Sign() < 0 {
- return ErrNegativeValue
- }
- // Ensure the transaction doesn't exceed the current block limit gas.
- if txGas := tx.Gas(); pool.currentMaxGas.Load() < txGas {
- return fmt.Errorf(
- "%w: tx gas (%d) > current max gas (%d)",
- ErrGasLimit,
- txGas,
- pool.currentMaxGas.Load(),
- )
- }
- // Sanity check for extremely large numbers
- if tx.GasFeeCap().BitLen() > 256 {
- return core.ErrFeeCapVeryHigh
- }
- if tx.GasTipCap().BitLen() > 256 {
- return core.ErrTipVeryHigh
- }
- // Ensure gasFeeCap is greater than or equal to gasTipCap.
- if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
- return core.ErrTipAboveFeeCap
- }
- // Make sure the transaction is signed properly.
- from, err := types.Sender(pool.signer, tx)
- if err != nil {
- return ErrInvalidSender
- }
- // Drop non-local transactions under our own minimal accepted gas price or tip
- if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
- return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice)
- }
- // Ensure the transaction has more gas than the basic tx fee.
- intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, *pool.rules.Load())
- if err != nil {
- return err
- }
- if txGas := tx.Gas(); txGas < intrGas {
- return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", core.ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas)
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
+func (p *TxPool) Nonce(addr common.Address) uint64 {
+ // Since (for now) accounts are unique to subpools, only one pool will have
+ // (at max) a non-state nonce. To avoid stateful lookups, just return the
+ // highest nonce for now.
+ var nonce uint64
+ for _, subpool := range p.subpools {
+ if next := subpool.Nonce(addr); nonce < next {
+ nonce = next
+ }
}
- return nil
+ return nonce
}
-// validateTx checks whether a transaction is valid according to the consensus
-// rules and adheres to some heuristic limits of the local node (price and size).
-func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
- // Signature has been checked already, this cannot error.
- from, _ := types.Sender(pool.signer, tx)
- // Drop the transaction if the gas fee cap is below the pool's minimum fee
- if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 {
- return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee)
- }
+// Stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (p *TxPool) Stats() (int, int) {
+ var runnable, blocked int
+ for _, subpool := range p.subpools {
+ run, block := subpool.Stats()
- // Ensure the transaction adheres to nonce ordering
- // Transactor should have enough funds to cover the costs
- if err := pool.checkTxState(from, tx); err != nil {
- return err
+ runnable += run
+ blocked += block
}
- return nil
+ return runnable, blocked
}
-// add validates a transaction and inserts it into the non-executable queue for later
-// pending promotion and execution. If the transaction is a replacement for an already
-// pending or queued one, it overwrites the previous transaction if its price is higher.
-//
-// If a newly added transaction is marked as local, its sending account will be
-// be added to the allowlist, preventing any associated transaction from being dropped
-// out of the pool due to pricing constraints.
-func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
- // If the transaction is already known, discard it
- hash := tx.Hash()
- if pool.all.Get(hash) != nil {
- log.Trace("Discarding already known transaction", "hash", hash)
- knownTxMeter.Mark(1)
- return false, ErrAlreadyKnown
- }
- // Make the local flag. If it's from local source or it's from the network but
- // the sender is marked as local previously, treat it as the local transaction.
- isLocal := local || pool.locals.containsTx(tx)
-
- // If the transaction fails basic validation, discard it
- if err := pool.validateTx(tx, isLocal); err != nil {
- log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
- invalidTxMeter.Mark(1)
- return false, err
- }
-
- // already validated by this point
- from, _ := types.Sender(pool.signer, tx)
-
- // If the transaction pool is full, discard underpriced transactions
- if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
- // If the new transaction is underpriced, don't accept it
- if !isLocal && pool.priced.Underpriced(tx) {
- log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
- underpricedTxMeter.Mark(1)
- return false, ErrUnderpriced
- }
-
- // We're about to replace a transaction. The reorg does a more thorough
- // analysis of what to remove and how, but it runs async. We don't want to
- // do too many replacements between reorg-runs, so we cap the number of
- // replacements to 25% of the slots
- if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
- throttleTxMeter.Mark(1)
- return false, ErrTxPoolOverflow
- }
-
- // New transaction is better than our worse ones, make room for it.
- // If it's a local transaction, forcibly discard all available transactions.
- // Otherwise if we can't make enough room for new one, abort the operation.
- drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
-
- // Special case, we still can't make the room for the new remote one.
- if !isLocal && !success {
- log.Trace("Discarding overflown transaction", "hash", hash)
- overflowedTxMeter.Mark(1)
- return false, ErrTxPoolOverflow
- }
-
- // If the new transaction is a future transaction it should never churn pending transactions
- if !isLocal && pool.isGapped(from, tx) {
- var replacesPending bool
- for _, dropTx := range drop {
- dropSender, _ := types.Sender(pool.signer, dropTx)
- if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
- replacesPending = true
- break
- }
- }
- // Add all transactions back to the priced queue
- if replacesPending {
- for _, dropTx := range drop {
- pool.priced.Put(dropTx, false)
- }
- log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
- return false, ErrFutureReplacePending
- }
- }
-
- // Kick out the underpriced remote transactions.
- for _, tx := range drop {
- log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
- underpricedTxMeter.Mark(1)
- dropped := pool.removeTx(tx.Hash(), false)
- pool.changesSinceReorg += dropped
- }
- }
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and sorted by nonce.
+func (p *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
+ var (
+ runnable = make(map[common.Address][]*types.Transaction)
+ blocked = make(map[common.Address][]*types.Transaction)
+ )
+ for _, subpool := range p.subpools {
+ run, block := subpool.Content()
- // Try to replace an existing transaction in the pending pool
- if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
- // Nonce already pending, check if required price bump is met
- inserted, old := list.Add(tx, pool.config.PriceBump)
- if !inserted {
- pendingDiscardMeter.Mark(1)
- return false, ErrReplaceUnderpriced
+ for addr, txs := range run {
+ runnable[addr] = txs
}
- // New transaction is better, replace old one
- if old != nil {
- pool.all.Remove(old.Hash())
- pool.priced.Removed(1)
- pendingReplaceMeter.Mark(1)
+ for addr, txs := range block {
+ blocked[addr] = txs
}
- pool.all.Add(tx, isLocal)
- pool.priced.Put(tx, isLocal)
- pool.journalTx(from, tx)
- pool.queueTxEvent(tx)
- log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
-
- // Successful promotion, bump the heartbeat
- pool.beats[from] = time.Now()
- return old != nil, nil
- }
- // New transaction isn't replacing a pending one, push into queue
- replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
- if err != nil {
- return false, err
- }
- // Mark local addresses and journal local transactions
- if local && !pool.locals.contains(from) {
- log.Info("Setting new local account", "address", from)
- pool.locals.add(from)
- pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
}
- if isLocal {
- localGauge.Inc(1)
- }
- pool.journalTx(from, tx)
-
- log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
- return replaced, nil
+ return runnable, blocked
}
-// isGapped reports whether the given transaction is immediately executable.
-func (pool *TxPool) isGapped(from common.Address, tx *types.Transaction) bool {
- // Short circuit if transaction matches pending nonce and can be promoted
- // to pending list as an executable transaction.
- next := pool.pendingNonces.get(from)
- if tx.Nonce() == next {
- return false
- }
- // The transaction has a nonce gap with pending list, it's only considered
- // as executable if transactions in queue can fill up the nonce gap.
- queue, ok := pool.queue[from]
- if !ok {
- return true
- }
- for nonce := next; nonce < tx.Nonce(); nonce++ {
- if !queue.Contains(nonce) {
- return true // txs in queue can't fill up the nonce gap
+// ContentFrom retrieves the data content of the transaction pool, returning the
+// pending as well as queued transactions of this address, grouped by nonce.
+func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
+ for _, subpool := range p.subpools {
+ run, block := subpool.ContentFrom(addr)
+ if len(run) != 0 || len(block) != 0 {
+ return run, block
}
}
- return false
+ return []*types.Transaction{}, []*types.Transaction{}
}
-// enqueueTx inserts a new transaction into the non-executable transaction queue.
-//
-// Note, this method assumes the pool lock is held!
-func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
- // Try to insert the transaction into the future queue
- from, _ := types.Sender(pool.signer, tx) // already validated
- if pool.queue[from] == nil {
- pool.queue[from] = newList(false)
- }
- inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
- if !inserted {
- // An older transaction was better, discard this
- queuedDiscardMeter.Mark(1)
- return false, ErrReplaceUnderpriced
- }
- // Discard any previous transaction and mark this
- if old != nil {
- pool.all.Remove(old.Hash())
- pool.priced.Removed(1)
- queuedReplaceMeter.Mark(1)
- } else {
- // Nothing was replaced, bump the queued counter
- queuedGauge.Inc(1)
- }
- // If the transaction isn't in lookup set but it's expected to be there,
- // show the error log.
- if pool.all.Get(hash) == nil && !addAll {
- log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
- }
- if addAll {
- pool.all.Add(tx, local)
- pool.priced.Put(tx, local)
+// Locals retrieves the accounts currently considered local by the pool.
+func (p *TxPool) Locals() []common.Address {
+ // Retrieve the locals from each subpool and deduplicate them
+ locals := make(map[common.Address]struct{})
+ for _, subpool := range p.subpools {
+ for _, local := range subpool.Locals() {
+ locals[local] = struct{}{}
+ }
}
- // If we never record the heartbeat, do it right now.
- if _, exist := pool.beats[from]; !exist {
- pool.beats[from] = time.Now()
+ // Flatten and return the deduplicated local set
+ flat := make([]common.Address, 0, len(locals))
+ for local := range locals {
+ flat = append(flat, local)
}
- return old != nil, nil
+ return flat
}
-// journalTx adds the specified transaction to the local disk journal if it is
-// deemed to have been sent from a local account.
-func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
- // Only journal if it's enabled and the transaction is local
- if pool.journal == nil || !pool.locals.contains(from) {
- return
+// Status returns the known status (unknown/pending/queued) of a transaction
+// identified by its hash.
+func (p *TxPool) Status(hash common.Hash) TxStatus {
+ for _, subpool := range p.subpools {
+ if status := subpool.Status(hash); status != TxStatusUnknown {
+ return status
+ }
}
- if err := pool.journal.insert(tx); err != nil {
- log.Warn("Failed to journal local transaction", "err", err)
- }
-}
-
-// promoteTx adds a transaction to the pending (processable) list of transactions
-// and returns whether it was inserted or an older was better.
-//
-// Note, this method assumes the pool lock is held!
-func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
- // Try to insert the transaction into the pending queue
- if pool.pending[addr] == nil {
- pool.pending[addr] = newList(true)
- }
- list := pool.pending[addr]
-
- inserted, old := list.Add(tx, pool.config.PriceBump)
- if !inserted {
- // An older transaction was better, discard this
- pool.all.Remove(hash)
- pool.priced.Removed(1)
- pendingDiscardMeter.Mark(1)
- return false
- }
- // Otherwise discard any previous transaction and mark this
- if old != nil {
- pool.all.Remove(old.Hash())
- pool.priced.Removed(1)
- pendingReplaceMeter.Mark(1)
- } else {
- // Nothing was replaced, bump the pending counter
- pendingGauge.Inc(1)
- }
- // Set the potentially new pending nonce and notify any subsystems of the new tx
- pool.pendingNonces.set(addr, tx.Nonce()+1)
-
- // Successful promotion, bump the heartbeat
- pool.beats[addr] = time.Now()
- return true
-}
-
-// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
-// senders as a local ones, ensuring they go around the local pricing constraints.
-//
-// This method is used to add transactions from the RPC API and performs synchronous pool
-// reorganization and event propagation.
-func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
- return pool.addTxs(txs, !pool.config.NoLocals, true)
-}
-
-// AddLocal enqueues a single local transaction into the pool if it is valid. This is
-// a convenience wrapper around AddLocals.
-func (pool *TxPool) AddLocal(tx *types.Transaction) error {
- errs := pool.AddLocals([]*types.Transaction{tx})
- return errs[0]
-}
-
-// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
-// senders are not among the locally tracked ones, full pricing constraints will apply.
-//
-// This method is used to add transactions from the p2p network and does not wait for pool
-// reorganization and internal event propagation.
-func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, false)
-}
-
-// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
-func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, true)
-}
-
-// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
-func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
- errs := pool.AddRemotesSync([]*types.Transaction{tx})
- return errs[0]
-}
-
-// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
-// wrapper around AddRemotes.
-//
-// Deprecated: use AddRemotes
-func (pool *TxPool) AddRemote(tx *types.Transaction) error {
- errs := pool.AddRemotes([]*types.Transaction{tx})
- return errs[0]
-}
-
-// addTxs attempts to queue a batch of transactions if they are valid.
-func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
- // Filter out known ones without obtaining the pool lock or recovering signatures
- var (
- errs = make([]error, len(txs))
- news = make([]*types.Transaction, 0, len(txs))
- )
- for i, tx := range txs {
- // If the transaction is known, pre-set the error slot
- if pool.all.Get(tx.Hash()) != nil {
- errs[i] = ErrAlreadyKnown
- knownTxMeter.Mark(1)
- continue
- }
- // Exclude transactions with basic errors, e.g invalid signatures and
- // insufficient intrinsic gas as soon as possible and cache senders
- // in transactions before obtaining lock
-
- if err := pool.validateTxBasics(tx, local); err != nil {
- errs[i] = err
- invalidTxMeter.Mark(1)
- continue
- }
- // Accumulate all unknown transactions for deeper processing
- news = append(news, tx)
- }
- if len(news) == 0 {
- return errs
- }
-
- // Process all the new transaction and merge any errors into the original slice
- pool.mu.Lock()
- newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
- pool.mu.Unlock()
-
- var nilSlot = 0
- for _, err := range newErrs {
- for errs[nilSlot] != nil {
- nilSlot++
- }
- errs[nilSlot] = err
- nilSlot++
- }
- // Reorg the pool internals if needed and return
- done := pool.requestPromoteExecutables(dirtyAddrs)
- if sync {
- <-done
- }
- return errs
-}
-
-// addTxsLocked attempts to queue a batch of transactions if they are valid.
-// The transaction pool lock must be held.
-func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
- dirty := newAccountSet(pool.signer)
- errs := make([]error, len(txs))
- for i, tx := range txs {
- replaced, err := pool.add(tx, local)
- errs[i] = err
- if err == nil && !replaced {
- dirty.addTx(tx)
- }
- }
- validTxMeter.Mark(int64(len(dirty.accounts)))
- return errs, dirty
-}
-
-// Status returns the status (unknown/pending/queued) of a batch of transactions
-// identified by their hashes.
-func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
- status := make([]TxStatus, len(hashes))
- for i, hash := range hashes {
- tx := pool.Get(hash)
- if tx == nil {
- continue
- }
- from, _ := types.Sender(pool.signer, tx) // already validated
- pool.mu.RLock()
- if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
- status[i] = TxStatusPending
- } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
- status[i] = TxStatusQueued
- }
- // implicit else: the tx may have been included into a block between
- // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
- pool.mu.RUnlock()
- }
- return status
-}
-
-// Get returns a transaction if it is contained in the pool and nil otherwise.
-func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
- return pool.all.Get(hash)
-}
-
-// Has returns an indicator whether txpool has a transaction cached with the
-// given hash.
-func (pool *TxPool) Has(hash common.Hash) bool {
- return pool.all.Get(hash) != nil
-}
-
-// Has returns an indicator whether txpool has a local transaction cached with
-// the given hash.
-func (pool *TxPool) HasLocal(hash common.Hash) bool {
- return pool.all.GetLocal(hash) != nil
-}
-
-// RemoveTx removes a single transaction from the queue, moving all subsequent
-// transactions back to the future queue.
-func (pool *TxPool) RemoveTx(hash common.Hash) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pool.removeTx(hash, true)
-}
-
-// removeTx removes a single transaction from the queue, moving all subsequent
-// transactions back to the future queue.
-// Returns the number of transactions removed from the pending queue.
-func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int {
- // Fetch the transaction we wish to delete
- tx := pool.all.Get(hash)
- if tx == nil {
- return 0
- }
- addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
-
- // Remove it from the list of known transactions
- pool.all.Remove(hash)
- if outofbound {
- pool.priced.Removed(1)
- }
- if pool.locals.contains(addr) {
- localGauge.Dec(1)
- }
- // Remove the transaction from the pending lists and reset the account nonce
- if pending := pool.pending[addr]; pending != nil {
- if removed, invalids := pending.Remove(tx); removed {
- // If no more pending transactions are left, remove the list
- if pending.Empty() {
- delete(pool.pending, addr)
- }
- // Postpone any invalidated transactions
- for _, tx := range invalids {
- // Internal shuffle shouldn't touch the lookup set.
- pool.enqueueTx(tx.Hash(), tx, false, false)
- }
- // Update the account nonce if needed
- pool.pendingNonces.setIfLower(addr, tx.Nonce())
- // Reduce the pending counter
- pendingGauge.Dec(int64(1 + len(invalids)))
- return 1 + len(invalids)
- }
- }
- // Transaction is in the future queue
- if future := pool.queue[addr]; future != nil {
- if removed, _ := future.Remove(tx); removed {
- // Reduce the queued counter
- queuedGauge.Dec(1)
- }
- if future.Empty() {
- delete(pool.queue, addr)
- delete(pool.beats, addr)
- }
- }
- return 0
-}
-
-// requestReset requests a pool reset to the new head block.
-// The returned channel is closed when the reset has occurred.
-func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
- select {
- case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
- return <-pool.reorgDoneCh
- case <-pool.reorgShutdownCh:
- return pool.reorgShutdownCh
- }
-}
-
-// requestPromoteExecutables requests transaction promotion checks for the given addresses.
-// The returned channel is closed when the promotion checks have occurred.
-func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
- select {
- case pool.reqPromoteCh <- set:
- return <-pool.reorgDoneCh
- case <-pool.reorgShutdownCh:
- return pool.reorgShutdownCh
- }
-}
-
-// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
-func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
- select {
- case pool.queueTxEventCh <- tx:
- case <-pool.reorgShutdownCh:
- }
-}
-
-// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
-// call those methods directly, but request them being run using requestReset and
-// requestPromoteExecutables instead.
-func (pool *TxPool) scheduleReorgLoop() {
- defer pool.wg.Done()
-
- var (
- curDone chan struct{} // non-nil while runReorg is active
- nextDone = make(chan struct{})
- launchNextRun bool
- reset *txpoolResetRequest
- dirtyAccounts *accountSet
- queuedEvents = make(map[common.Address]*sortedMap)
- )
- for {
- // Launch next background reorg if needed
- if curDone == nil && launchNextRun {
- // Run the background reorg and announcements
- go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
-
- // Prepare everything for the next round of reorg
- curDone, nextDone = nextDone, make(chan struct{})
- launchNextRun = false
-
- reset, dirtyAccounts = nil, nil
- queuedEvents = make(map[common.Address]*sortedMap)
- }
-
- select {
- case req := <-pool.reqResetCh:
- // Reset request: update head if request is already pending.
- if reset == nil {
- reset = req
- } else {
- reset.newHead = req.newHead
- }
- launchNextRun = true
- pool.reorgDoneCh <- nextDone
-
- case req := <-pool.reqPromoteCh:
- // Promote request: update address set if request is already pending.
- if dirtyAccounts == nil {
- dirtyAccounts = req
- } else {
- dirtyAccounts.merge(req)
- }
- launchNextRun = true
- pool.reorgDoneCh <- nextDone
-
- case tx := <-pool.queueTxEventCh:
- // Queue up the event, but don't schedule a reorg. It's up to the caller to
- // request one later if they want the events sent.
- addr, _ := types.Sender(pool.signer, tx)
- if _, ok := queuedEvents[addr]; !ok {
- queuedEvents[addr] = newSortedMap()
- }
- queuedEvents[addr].Put(tx)
-
- case <-curDone:
- curDone = nil
-
- case <-pool.reorgShutdownCh:
- // Wait for current run to finish.
- if curDone != nil {
- <-curDone
- }
- close(nextDone)
- return
- }
- }
-}
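
The scheduleReorgLoop removed above is built around a request-coalescing pattern: callers submit work, immediately receive the done channel of the run that will cover it, and anything submitted while a run is in flight is merged into the next run. A minimal, self-contained sketch of that pattern follows; the scheduler type, the int payload and the channel names are illustrative and not taken from the pool code.

    package main

    import (
        "fmt"
        "sync"
    )

    // scheduler coalesces requests: at most one run is active, and everything
    // submitted while it runs is batched into the next run.
    type scheduler struct {
        reqCh  chan int
        doneCh chan chan struct{}
    }

    func newScheduler() *scheduler {
        s := &scheduler{reqCh: make(chan int), doneCh: make(chan chan struct{})}
        go s.loop()
        return s
    }

    // request hands in an item and returns a channel that is closed once a run
    // covering the item has completed.
    func (s *scheduler) request(v int) chan struct{} {
        s.reqCh <- v
        return <-s.doneCh
    }

    func (s *scheduler) loop() {
        var (
            curDone  chan struct{} // non-nil while a run is active
            nextDone = make(chan struct{})
            launch   bool
            batch    []int
        )
        for {
            if curDone == nil && launch {
                go func(done chan struct{}, items []int) {
                    fmt.Println("processing batch", items) // stand-in for runReorg
                    close(done)
                }(nextDone, batch)
                curDone, nextDone = nextDone, make(chan struct{})
                launch, batch = false, nil
            }
            select {
            case v := <-s.reqCh:
                batch = append(batch, v) // coalesce with anything already queued
                launch = true
                s.doneCh <- nextDone
            case <-curDone:
                curDone = nil
            }
        }
    }

    func main() {
        s := newScheduler()
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func(i int) { defer wg.Done(); <-s.request(i) }(i)
        }
        wg.Wait()
    }
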
-
-// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
-func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) {
- defer func(t0 time.Time) {
- reorgDurationTimer.Update(time.Since(t0))
- }(time.Now())
- defer close(done)
-
- var promoteAddrs []common.Address
- if dirtyAccounts != nil && reset == nil {
- // Only dirty accounts need to be promoted, unless we're resetting.
- // For resets, all addresses in the tx queue will be promoted and
- // the flatten operation can be avoided.
- promoteAddrs = dirtyAccounts.flatten()
- }
- pool.mu.Lock()
- if reset != nil {
- // Reset from the old head to the new, rescheduling any reorged transactions
- pool.reset(reset.oldHead, reset.newHead)
-
- // Nonces were reset, discard any events that became stale
- for addr := range events {
- events[addr].Forward(pool.pendingNonces.get(addr))
- if events[addr].Len() == 0 {
- delete(events, addr)
- }
- }
- // Reset needs promote for all addresses
- promoteAddrs = make([]common.Address, 0, len(pool.queue))
- for addr := range pool.queue {
- promoteAddrs = append(promoteAddrs, addr)
- }
- }
- // Check for pending transactions for every account that sent new ones
- promoted := pool.promoteExecutables(promoteAddrs)
-
- // If a new block appeared, validate the pool of pending transactions. This will
- // remove any transaction that has been included in the block or was invalidated
- // because of another transaction (e.g. higher gas price).
- if reset != nil {
- pool.demoteUnexecutables()
- if reset.newHead != nil && pool.chainconfig.IsSubnetEVM(reset.newHead.Time) {
- if err := pool.updateBaseFeeAt(reset.newHead); err != nil {
- log.Error("error at updating base fee in tx pool", "error", err)
- }
- }
-
- // Update all accounts to the latest known pending nonce
- nonces := make(map[common.Address]uint64, len(pool.pending))
- for addr, list := range pool.pending {
- highestPending := list.LastElement()
- nonces[addr] = highestPending.Nonce() + 1
- }
- pool.pendingNonces.setAll(nonces)
- }
- // Ensure pool.queue and pool.pending sizes stay within the configured limits.
- pool.truncatePending()
- pool.truncateQueue()
-
- dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
- pool.changesSinceReorg = 0 // Reset change counter
- pool.mu.Unlock()
-
- if reset != nil && reset.newHead != nil {
- pool.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: reset.newHead})
- }
-
- // Notify subsystems for newly added transactions
- for _, tx := range promoted {
- addr, _ := types.Sender(pool.signer, tx)
- if _, ok := events[addr]; !ok {
- events[addr] = newSortedMap()
- }
- events[addr].Put(tx)
- }
- if len(events) > 0 {
- var txs []*types.Transaction
- for _, set := range events {
- txs = append(txs, set.Flatten()...)
- }
- pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
- }
-}
-
-// reset retrieves the current state of the blockchain and ensures the content
-// of the transaction pool is valid with regard to the chain state.
-func (pool *TxPool) reset(oldHead, newHead *types.Header) {
- // If we're reorging an old state, reinject all dropped transactions
- var reinject types.Transactions
-
- if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
- // If the reorg is too deep, avoid doing it (will happen during fast sync)
- oldNum := oldHead.Number.Uint64()
- newNum := newHead.Number.Uint64()
-
- if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
- log.Debug("Skipping deep transaction reorg", "depth", depth)
- } else {
- // Reorg seems shallow enough to pull in all transactions into memory
- var discarded, included types.Transactions
- var (
- rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
- add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
- )
- if rem == nil {
- // This can happen if a setHead is performed, where we simply discard the old
- // head from the chain.
- // If that is the case, we don't have the lost transactions anymore, and
- // there's nothing to add
- if newNum >= oldNum {
- // If we reorged to a same or higher number, then it's not a case of setHead
- log.Warn("Transaction pool reset with missing oldhead",
- "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
- return
- }
- // If the reorg ended up on a lower number, it's indicative of setHead being the cause
- log.Debug("Skipping transaction reset caused by setHead",
- "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
- // We still need to update the current state s.th. the lost transactions can be readded by the user
- } else {
- for rem.NumberU64() > add.NumberU64() {
- discarded = append(discarded, rem.Transactions()...)
- if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
- log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
- return
- }
- }
- for add.NumberU64() > rem.NumberU64() {
- included = append(included, add.Transactions()...)
- if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
- log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
- return
- }
- }
- for rem.Hash() != add.Hash() {
- discarded = append(discarded, rem.Transactions()...)
- if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
- log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
- return
- }
- included = append(included, add.Transactions()...)
- if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
- log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
- return
- }
- }
- reinject = types.TxDifference(discarded, included)
- }
- }
- }
- // Initialize the internal state to the current head
- if newHead == nil {
- newHead = pool.chain.CurrentBlock() // Special case during testing
- }
- statedb, err := pool.chain.StateAt(newHead.Root)
- if err != nil {
- log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
- return
- }
- pool.currentHead = newHead
- pool.currentStateLock.Lock()
- pool.currentState = statedb
- pool.currentStateLock.Unlock()
- pool.pendingNonces = newNoncer(statedb)
- pool.currentMaxGas.Store(newHead.GasLimit)
-
- // when we reset txPool we should explicitly check if fee struct for min base fee has changed
- // so that we can correctly drop txs with < minBaseFee from tx pool.
- if pool.chainconfig.IsPrecompileEnabled(feemanager.ContractAddress, newHead.Time) {
- feeConfig, _, err := pool.chain.GetFeeConfigAt(newHead)
- if err != nil {
- log.Error("Failed to get fee config state", "err", err, "root", newHead.Root)
- return
- }
- pool.minimumFee = feeConfig.MinBaseFee
- }
-
- // Inject any transactions discarded due to reorgs
- log.Debug("Reinjecting stale transactions", "count", len(reinject))
- pool.chain.SenderCacher().Recover(pool.signer, reinject)
- pool.addTxsLocked(reinject, false)
-
- // Update all fork indicator by next pending block number.
- next := new(big.Int).Add(newHead.Number, big.NewInt(1))
- rules := pool.chainconfig.AvalancheRules(next, newHead.Time)
-
- pool.rules.Store(&rules)
- pool.eip2718.Store(rules.IsSubnetEVM)
- pool.eip1559.Store(rules.IsSubnetEVM)
- pool.eip3860.Store(rules.IsDurango)
-}
-
-// promoteExecutables moves transactions that have become processable from the
-// future queue to the set of pending transactions. During this process, all
-// invalidated transactions (low nonce, low balance) are deleted.
-func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
- pool.currentStateLock.Lock()
- defer pool.currentStateLock.Unlock()
-
- // Track the promoted transactions to broadcast them at once
- var promoted []*types.Transaction
-
- // Iterate over all accounts and promote any executable transactions
- for _, addr := range accounts {
- list := pool.queue[addr]
- if list == nil {
- continue // Just in case someone calls with a non existing account
- }
- // Drop all transactions that are deemed too old (low nonce)
- forwards := list.Forward(pool.currentState.GetNonce(addr))
- for _, tx := range forwards {
- hash := tx.Hash()
- pool.all.Remove(hash)
- }
- log.Trace("Removed old queued transactions", "count", len(forwards))
- // Drop all transactions that are too costly (low balance or out of gas)
- drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
- for _, tx := range drops {
- hash := tx.Hash()
- pool.all.Remove(hash)
- }
- log.Trace("Removed unpayable queued transactions", "count", len(drops))
- queuedNofundsMeter.Mark(int64(len(drops)))
-
- // Gather all executable transactions and promote them
- readies := list.Ready(pool.pendingNonces.get(addr))
- for _, tx := range readies {
- hash := tx.Hash()
- if pool.promoteTx(addr, hash, tx) {
- promoted = append(promoted, tx)
- }
- }
- log.Trace("Promoted queued transactions", "count", len(promoted))
- queuedGauge.Dec(int64(len(readies)))
-
- // Drop all transactions over the allowed limit
- var caps types.Transactions
- if !pool.locals.contains(addr) {
- caps = list.Cap(int(pool.config.AccountQueue))
- for _, tx := range caps {
- hash := tx.Hash()
- pool.all.Remove(hash)
- log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
- }
- queuedRateLimitMeter.Mark(int64(len(caps)))
- }
- // Mark all the items dropped as removed
- pool.priced.Removed(len(forwards) + len(drops) + len(caps))
- queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
- if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
- }
- // Delete the entire queue entry if it became empty.
- if list.Empty() {
- delete(pool.queue, addr)
- delete(pool.beats, addr)
- }
- }
- return promoted
-}
-
-// truncatePending removes transactions from the pending queue if the pool is above the
-// pending limit. The algorithm tries to reduce transaction counts by an approximately
-// equal number for all for accounts with many pending transactions.
-func (pool *TxPool) truncatePending() {
- pending := uint64(0)
- for _, list := range pool.pending {
- pending += uint64(list.Len())
- }
- if pending <= pool.config.GlobalSlots {
- return
- }
-
- pendingBeforeCap := pending
- // Assemble a spam order to penalize large transactors first
- spammers := prque.New[int64, common.Address](nil)
- for addr, list := range pool.pending {
- // Only evict transactions from high rollers
- if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
- spammers.Push(addr, int64(list.Len()))
- }
- }
- // Gradually drop transactions from offenders
- offenders := []common.Address{}
- for pending > pool.config.GlobalSlots && !spammers.Empty() {
- // Retrieve the next offender if not local address
- offender, _ := spammers.Pop()
- offenders = append(offenders, offender)
-
- // Equalize balances until all the same or below threshold
- if len(offenders) > 1 {
- // Calculate the equalization threshold for all current offenders
- threshold := pool.pending[offender].Len()
-
- // Iteratively reduce all offenders until below limit or threshold reached
- for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
- for i := 0; i < len(offenders)-1; i++ {
- list := pool.pending[offenders[i]]
-
- caps := list.Cap(list.Len() - 1)
- for _, tx := range caps {
- // Drop the transaction from the global pools too
- hash := tx.Hash()
- pool.all.Remove(hash)
-
- // Update the account nonce to the dropped transaction
- pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
- log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
- }
- pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
- if pool.locals.contains(offenders[i]) {
- localGauge.Dec(int64(len(caps)))
- }
- pending--
- }
- }
- }
- }
-
- // If still above threshold, reduce to limit or min allowance
- if pending > pool.config.GlobalSlots && len(offenders) > 0 {
- for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
- for _, addr := range offenders {
- list := pool.pending[addr]
-
- caps := list.Cap(list.Len() - 1)
- for _, tx := range caps {
- // Drop the transaction from the global pools too
- hash := tx.Hash()
- pool.all.Remove(hash)
-
- // Update the account nonce to the dropped transaction
- pool.pendingNonces.setIfLower(addr, tx.Nonce())
- log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
- }
- pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
- if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(caps)))
- }
- pending--
- }
- }
- }
- pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
-}
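
The fairness trimming in the removed truncatePending above is easier to follow with a toy model: accounts holding more than AccountSlots pending transactions are trimmed from the largest down, one transaction at a time, until the pool-wide GlobalSlots budget is met. A rough, illustrative sketch with plain ints instead of transaction lists:

    package main

    import (
        "fmt"
        "sort"
    )

    // truncate trims the biggest spammers first until the global budget holds,
    // never cutting an account below the per-account allowance.
    func truncate(pending map[string]int, globalSlots, accountSlots int) {
        total := 0
        for _, n := range pending {
            total += n
        }
        for total > globalSlots {
            // Deterministic pick of the currently largest offender above the allowance.
            keys := make([]string, 0, len(pending))
            for k := range pending {
                keys = append(keys, k)
            }
            sort.Strings(keys)
            worst, max := "", accountSlots
            for _, k := range keys {
                if pending[k] > max {
                    worst, max = k, pending[k]
                }
            }
            if worst == "" {
                return // only accounts at or below the allowance are left
            }
            pending[worst]-- // drop one transaction from the worst offender
            total--
        }
    }

    func main() {
        pending := map[string]int{"A": 10, "B": 7, "C": 2}
        truncate(pending, 12, 3)
        fmt.Println(pending) // heavy senders are equalized down until total <= 12
    }
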
-
-// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
-func (pool *TxPool) truncateQueue() {
- queued := uint64(0)
- for _, list := range pool.queue {
- queued += uint64(list.Len())
- }
- if queued <= pool.config.GlobalQueue {
- return
- }
-
- // Sort all accounts with queued transactions by heartbeat
- addresses := make(addressesByHeartbeat, 0, len(pool.queue))
- for addr := range pool.queue {
- if !pool.locals.contains(addr) { // don't drop locals
- addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
- }
- }
- sort.Sort(sort.Reverse(addresses))
-
- // Drop transactions until the total is below the limit or only locals remain
- for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
- addr := addresses[len(addresses)-1]
- list := pool.queue[addr.address]
-
- addresses = addresses[:len(addresses)-1]
-
- // Drop all transactions if they are less than the overflow
- if size := uint64(list.Len()); size <= drop {
- for _, tx := range list.Flatten() {
- pool.removeTx(tx.Hash(), true)
- }
- drop -= size
- queuedRateLimitMeter.Mark(int64(size))
- continue
- }
- // Otherwise drop only last few transactions
- txs := list.Flatten()
- for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
- pool.removeTx(txs[i].Hash(), true)
- drop--
- queuedRateLimitMeter.Mark(1)
- }
- }
-}
-
-// demoteUnexecutables removes invalid and processed transactions from the pools
-// executable/pending queue and any subsequent transactions that become unexecutable
-// are moved back into the future queue.
-//
-// Note: transactions are not marked as removed in the priced list because re-heaping
-// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
-// to trigger a re-heap is this function
-func (pool *TxPool) demoteUnexecutables() {
- pool.currentStateLock.Lock()
- defer pool.currentStateLock.Unlock()
-
- // Iterate over all accounts and demote any non-executable transactions
- for addr, list := range pool.pending {
- nonce := pool.currentState.GetNonce(addr)
-
- // Drop all transactions that are deemed too old (low nonce)
- olds := list.Forward(nonce)
- for _, tx := range olds {
- hash := tx.Hash()
- pool.all.Remove(hash)
- log.Trace("Removed old pending transaction", "hash", hash)
- }
- // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
- drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
- for _, tx := range drops {
- hash := tx.Hash()
- log.Trace("Removed unpayable pending transaction", "hash", hash)
- pool.all.Remove(hash)
- }
- pendingNofundsMeter.Mark(int64(len(drops)))
-
- for _, tx := range invalids {
- hash := tx.Hash()
- log.Trace("Demoting pending transaction", "hash", hash)
-
- // Internal shuffle shouldn't touch the lookup set.
- pool.enqueueTx(hash, tx, false, false)
- }
- pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
- if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
- }
- // If there's a gap in front, alert (should never happen) and postpone all transactions
- if list.Len() > 0 && list.txs.Get(nonce) == nil {
- gapped := list.Cap(0)
- for _, tx := range gapped {
- hash := tx.Hash()
- log.Error("Demoting invalidated transaction", "hash", hash)
-
- // Internal shuffle shouldn't touch the lookup set.
- pool.enqueueTx(hash, tx, false, false)
- }
- pendingGauge.Dec(int64(len(gapped)))
- }
- // Delete the entire pending entry if it became empty.
- if list.Empty() {
- delete(pool.pending, addr)
- }
- }
-}
-
-func (pool *TxPool) startPeriodicFeeUpdate() {
- if pool.chainconfig.SubnetEVMTimestamp == nil {
- return
- }
-
- // Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
- // when starting up in Subnet EVM before the base fee is updated.
- if time.Now().After(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp)) {
- pool.updateBaseFee()
- }
-
- pool.wg.Add(1)
- go pool.periodicBaseFeeUpdate()
-}
-
-func (pool *TxPool) periodicBaseFeeUpdate() {
- defer pool.wg.Done()
-
- // Sleep until its time to start the periodic base fee update or the tx pool is shutting down
- select {
- case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp))):
- case <-pool.generalShutdownChan:
- return // Return early if shutting down
- }
-
- // Update the base fee every [baseFeeUpdateInterval]
- // and shutdown when [generalShutdownChan] is closed by Stop()
- for {
- select {
- case <-time.After(baseFeeUpdateInterval):
- pool.updateBaseFee()
- case <-pool.generalShutdownChan:
- return
- }
- }
-}
-
-func (pool *TxPool) updateBaseFee() {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- err := pool.updateBaseFeeAt(pool.currentHead)
- if err != nil {
- log.Error("failed to update base fee", "currentHead", pool.currentHead.Hash(), "err", err)
- }
-}
-
-// assumes lock is already held
-func (pool *TxPool) updateBaseFeeAt(head *types.Header) error {
- feeConfig, _, err := pool.chain.GetFeeConfigAt(head)
- if err != nil {
- return err
- }
- _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, feeConfig, head, uint64(time.Now().Unix()))
- if err != nil {
- return err
- }
- pool.priced.SetBaseFee(baseFeeEstimate)
- return nil
-}
-
-// addressByHeartbeat is an account address tagged with its last activity timestamp.
-type addressByHeartbeat struct {
- address common.Address
- heartbeat time.Time
-}
-
-type addressesByHeartbeat []addressByHeartbeat
-
-func (a addressesByHeartbeat) Len() int { return len(a) }
-func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
-func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-// accountSet is simply a set of addresses to check for existence, and a signer
-// capable of deriving addresses from transactions.
-type accountSet struct {
- accounts map[common.Address]struct{}
- signer types.Signer
- cache *[]common.Address
-}
-
-// newAccountSet creates a new address set with an associated signer for sender
-// derivations.
-func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
- as := &accountSet{
- accounts: make(map[common.Address]struct{}, len(addrs)),
- signer: signer,
- }
- for _, addr := range addrs {
- as.add(addr)
- }
- return as
-}
-
-// contains checks if a given address is contained within the set.
-func (as *accountSet) contains(addr common.Address) bool {
- _, exist := as.accounts[addr]
- return exist
-}
-
-// containsTx checks if the sender of a given tx is within the set. If the sender
-// cannot be derived, this method returns false.
-func (as *accountSet) containsTx(tx *types.Transaction) bool {
- if addr, err := types.Sender(as.signer, tx); err == nil {
- return as.contains(addr)
- }
- return false
-}
-
-// add inserts a new address into the set to track.
-func (as *accountSet) add(addr common.Address) {
- as.accounts[addr] = struct{}{}
- as.cache = nil
-}
-
-// addTx adds the sender of tx into the set.
-func (as *accountSet) addTx(tx *types.Transaction) {
- if addr, err := types.Sender(as.signer, tx); err == nil {
- as.add(addr)
- }
-}
-
-// flatten returns the list of addresses within this set, also caching it for later
-// reuse. The returned slice should not be changed!
-func (as *accountSet) flatten() []common.Address {
- if as.cache == nil {
- accounts := make([]common.Address, 0, len(as.accounts))
- for account := range as.accounts {
- accounts = append(accounts, account)
- }
- as.cache = &accounts
- }
- return *as.cache
-}
-
-// merge adds all addresses from the 'other' set into 'as'.
-func (as *accountSet) merge(other *accountSet) {
- for addr := range other.accounts {
- as.accounts[addr] = struct{}{}
- }
- as.cache = nil
-}
-
-// lookup is used internally by TxPool to track transactions while allowing
-// lookup without mutex contention.
-//
-// Note, although this type is properly protected against concurrent access, it
-// is **not** a type that should ever be mutated or even exposed outside of the
-// transaction pool, since its internal state is tightly coupled with the pools
-// internal mechanisms. The sole purpose of the type is to permit out-of-bound
-// peeking into the pool in TxPool.Get without having to acquire the widely scoped
-// TxPool.mu mutex.
-//
-// This lookup set combines the notion of "local transactions", which is useful
-// to build upper-level structure.
-type lookup struct {
- slots int
- lock sync.RWMutex
- locals map[common.Hash]*types.Transaction
- remotes map[common.Hash]*types.Transaction
-}
-
-// newLookup returns a new lookup structure.
-func newLookup() *lookup {
- return &lookup{
- locals: make(map[common.Hash]*types.Transaction),
- remotes: make(map[common.Hash]*types.Transaction),
- }
-}
-
-// Range calls f on each key and value present in the map. The callback passed
-// should return the indicator whether the iteration needs to be continued.
-// Callers need to specify which set (or both) to be iterated.
-func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- if local {
- for key, value := range t.locals {
- if !f(key, value, true) {
- return
- }
- }
- }
- if remote {
- for key, value := range t.remotes {
- if !f(key, value, false) {
- return
- }
- }
- }
-}
-
-// Get returns a transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) Get(hash common.Hash) *types.Transaction {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- if tx := t.locals[hash]; tx != nil {
- return tx
- }
- return t.remotes[hash]
-}
-
-// GetLocal returns a transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return t.locals[hash]
-}
-
-// GetRemote returns a transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return t.remotes[hash]
-}
-
-// Count returns the current number of transactions in the lookup.
-func (t *lookup) Count() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return len(t.locals) + len(t.remotes)
-}
-
-// LocalCount returns the current number of local transactions in the lookup.
-func (t *lookup) LocalCount() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return len(t.locals)
-}
-
-// RemoteCount returns the current number of remote transactions in the lookup.
-func (t *lookup) RemoteCount() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return len(t.remotes)
-}
-
-// Slots returns the current number of slots used in the lookup.
-func (t *lookup) Slots() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return t.slots
-}
-
-// Add adds a transaction to the lookup.
-func (t *lookup) Add(tx *types.Transaction, local bool) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- t.slots += numSlots(tx)
- slotsGauge.Update(int64(t.slots))
-
- if local {
- t.locals[tx.Hash()] = tx
- } else {
- t.remotes[tx.Hash()] = tx
- }
-}
-
-// Remove removes a transaction from the lookup.
-func (t *lookup) Remove(hash common.Hash) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- tx, ok := t.locals[hash]
- if !ok {
- tx, ok = t.remotes[hash]
- }
- if !ok {
- log.Error("No transaction found to be deleted", "hash", hash)
- return
- }
- t.slots -= numSlots(tx)
- slotsGauge.Update(int64(t.slots))
-
- delete(t.locals, hash)
- delete(t.remotes, hash)
-}
-
-// RemoteToLocals migrates the transactions belongs to the given locals to locals
-// set. The assumption is held the locals set is thread-safe to be used.
-func (t *lookup) RemoteToLocals(locals *accountSet) int {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- var migrated int
- for hash, tx := range t.remotes {
- if locals.containsTx(tx) {
- t.locals[hash] = tx
- delete(t.remotes, hash)
- migrated += 1
- }
- }
- return migrated
-}
-
-// RemotesBelowTip finds all remote transactions below the given tip threshold.
-func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
- found := make(types.Transactions, 0, 128)
- t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
- if tx.GasTipCapIntCmp(threshold) < 0 {
- found = append(found, tx)
- }
- return true
- }, false, true) // Only iterate remotes
- return found
-}
-
-// numSlots calculates the number of slots needed for a single transaction.
-func numSlots(tx *types.Transaction) int {
- return int((tx.Size() + txSlotSize - 1) / txSlotSize)
+ return TxStatusUnknown
}
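
For context on the slot bookkeeping the deleted lookup type maintained: each transaction occupies ceil(size/txSlotSize) slots, so a single large transaction is charged proportionally more of the pool's capacity. A small sketch of the arithmetic; the 32 KiB slot size mirrors the legacy pool constant.

    package main

    import "fmt"

    const txSlotSize = 32 * 1024 // one slot is 32 KiB, as in the legacy pool

    // numSlots rounds the transaction size up to whole slots.
    func numSlots(size uint64) int {
        return int((size + txSlotSize - 1) / txSlotSize)
    }

    func main() {
        for _, size := range []uint64{200, 32 * 1024, 32*1024 + 1, 100 * 1024} {
            fmt.Printf("%6d bytes -> %d slot(s)\n", size, numSlots(size))
        }
    }
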
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
new file mode 100644
index 0000000000..7b355bcd7c
--- /dev/null
+++ b/core/txpool/validation.go
@@ -0,0 +1,272 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "math/big"
+
+ "github.com/ava-labs/subnet-evm/core"
+ "github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/params"
+ "github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist"
+ "github.com/ava-labs/subnet-evm/vmerrs"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// ValidationOptions define certain differences between transaction validation
+// across the different pools without having to duplicate those checks.
+type ValidationOptions struct {
+ Config *params.ChainConfig // Chain configuration to selectively validate based on current fork rules
+
+ Accept uint8 // Bitmap of transaction types that should be accepted for the calling pool
+ MaxSize uint64 // Maximum size of a transaction that the caller can meaningfully handle
+ MinTip *big.Int // Minimum gas tip needed to allow a transaction into the caller pool
+}
+
+// ValidateTransaction is a helper method to check whether a transaction is valid
+// according to the consensus rules, but does not check state-dependent validation
+// (balance, nonce, etc).
+//
+// This check is public to allow different transaction pools to check the basic
+// rules without duplicating code and running the risk of missed updates.
+func ValidateTransaction(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof, head *types.Header, signer types.Signer, opts *ValidationOptions) error {
+ // Ensure transactions not implemented by the calling pool are rejected
+ if opts.Accept&(1<<tx.Type()) == 0 {
+ return fmt.Errorf("%w: tx type %v not supported by this pool", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ // Before performing any expensive validations, sanity check that the tx is
+ // smaller than the maximum size the pool can meaningfully handle
+ if tx.Size() > opts.MaxSize {
+ return fmt.Errorf("%w: transaction size %v, limit %v", ErrOversizedData, tx.Size(), opts.MaxSize)
+ }
+ // Ensure only transactions that have been enabled are accepted
+ if !opts.Config.IsSubnetEVM(head.Time) && tx.Type() != types.LegacyTxType {
+ return fmt.Errorf("%w: type %d rejected, pool not yet in Berlin", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ if !opts.Config.IsSubnetEVM(head.Time) && tx.Type() == types.DynamicFeeTxType {
+ return fmt.Errorf("%w: type %d rejected, pool not yet in London", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ if !opts.Config.IsCancun(head.Number, head.Time) && tx.Type() == types.BlobTxType {
+ return fmt.Errorf("%w: type %d rejected, pool not yet in Cancun", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ // Check whether the init code size has been exceeded
+ if opts.Config.IsDurango(head.Time) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
+ return fmt.Errorf("%w: code size %v, limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
+ }
+ // Transactions can't be negative. This may never happen using RLP decoded
+ // transactions but may occur for transactions created using the RPC.
+ if tx.Value().Sign() < 0 {
+ return ErrNegativeValue
+ }
+ // Ensure the transaction doesn't exceed the current block limit gas
+ if txGas := tx.Gas(); head.GasLimit < txGas {
+ return fmt.Errorf(
+ "%w: tx gas (%d) > current max gas (%d)",
+ ErrGasLimit,
+ txGas,
+ head.GasLimit,
+ )
+ }
+ // Sanity check for extremely large numbers (supported by RLP or RPC)
+ if tx.GasFeeCap().BitLen() > 256 {
+ return core.ErrFeeCapVeryHigh
+ }
+ if tx.GasTipCap().BitLen() > 256 {
+ return core.ErrTipVeryHigh
+ }
+ // Ensure gasFeeCap is greater than or equal to gasTipCap
+ if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
+ return core.ErrTipAboveFeeCap
+ }
+ // Make sure the transaction is signed properly
+ from, err := types.Sender(signer, tx)
+ if err != nil {
+ return ErrInvalidSender
+ }
+ // Ensure the transaction has more gas than the bare minimum needed to cover
+ // the transaction metadata
+ intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, opts.Config.Rules(head.Number, head.Time))
+ if err != nil {
+ return err
+ }
+ if txGas := tx.Gas(); txGas < intrGas {
+ return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", core.ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas)
+ }
+ // Ensure the gasprice is high enough to cover the requirement of the calling
+ // pool and/or block producer
+ if tx.GasTipCapIntCmp(opts.MinTip) < 0 {
+ return fmt.Errorf("%w: tip needed %v, tip permitted %v", ErrUnderpriced, opts.MinTip, tx.GasTipCap())
+ }
+ // Ensure blob transactions have valid commitments
+ if tx.Type() == types.BlobTxType {
+ // Ensure the number of items in the blob transaction and various side
+ // data match up before doing any expensive validations
+ hashes := tx.BlobHashes()
+ if len(hashes) == 0 {
+ return fmt.Errorf("blobless blob transaction")
+ }
+ if len(hashes) > params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
+ return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)
+ }
+ if len(blobs) != len(hashes) {
+ return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(blobs), len(hashes))
+ }
+ if len(commits) != len(hashes) {
+ return fmt.Errorf("invalid number of %d blob commitments compared to %d blob hashes", len(commits), len(hashes))
+ }
+ if len(proofs) != len(hashes) {
+ return fmt.Errorf("invalid number of %d blob proofs compared to %d blob hashes", len(proofs), len(hashes))
+ }
+ // Blob quantities match up, validate that the provers match with the
+ // transaction hash before getting to the cryptography
+ hasher := sha256.New()
+ for i, want := range hashes {
+ hasher.Write(commits[i][:])
+ hash := hasher.Sum(nil)
+ hasher.Reset()
+
+ var vhash common.Hash
+ vhash[0] = params.BlobTxHashVersion
+ copy(vhash[1:], hash[1:])
+
+ if vhash != want {
+ return fmt.Errorf("blob %d: computed hash %#x mismatches transaction one %#x", i, vhash, want)
+ }
+ }
+ // Blob commitments match with the hashes in the transaction, verify the
+ // blobs themselves via KZG
+ for i := range blobs {
+ if err := kzg4844.VerifyBlobProof(blobs[i], commits[i], proofs[i]); err != nil {
+ return fmt.Errorf("invalid blob %d: %v", i, err)
+ }
+ }
+ }
+ return nil
+}
+
+// ValidationOptionsWithState define certain differences between stateful transaction
+// validation across the different pools without having to duplicate those checks.
+type ValidationOptionsWithState struct {
+ State *state.StateDB // State database to check nonces and balances against
+
+ // FirstNonceGap is an optional callback to retrieve the first nonce gap in
+ // the list of pooled transactions of a specific account. If this method is
+ // set, nonce gaps will be checked and forbidden. If this method is not set,
+ // nonce gaps will be ignored and permitted.
+ FirstNonceGap func(addr common.Address) uint64
+
+ // UsedAndLeftSlots is a mandatory callback to retrieve the number of tx slots
+ // used and the number still permitted for an account. New transactions will
+ // be rejected once the number of remaining slots reaches zero.
+ UsedAndLeftSlots func(addr common.Address) (int, int)
+
+ // ExistingExpenditure is a mandatory callback to retrieve the cumulative
+ // cost of the already pooled transactions to check for overdrafts.
+ ExistingExpenditure func(addr common.Address) *big.Int
+
+ // ExistingCost is a mandatory callback to retrieve an already pooled
+ // transaction's cost with the given nonce to check for overdrafts.
+ ExistingCost func(addr common.Address, nonce uint64) *big.Int
+
+ Rules params.Rules
+ MinimumFee *big.Int
+}
+
+// ValidateTransactionWithState is a helper method to check whether a transaction
+// is valid according to the pool's internal state checks (balance, nonce, gaps).
+//
+// This check is public to allow different transaction pools to check the stateful
+// rules without duplicating code and running the risk of missed updates.
+func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, opts *ValidationOptionsWithState) error {
+ // Ensure the transaction adheres to nonce ordering
+ from, err := signer.Sender(tx) // already validated (and cached), but cleaner to check
+ if err != nil {
+ log.Error("Transaction sender recovery failed", "err", err)
+ return err
+ }
+
+ // Drop the transaction if the gas fee cap is below the pool's minimum fee
+ if opts.MinimumFee != nil && tx.GasFeeCapIntCmp(opts.MinimumFee) < 0 {
+ return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), opts.MinimumFee)
+ }
+
+ next := opts.State.GetNonce(from)
+ if next > tx.Nonce() {
+ return fmt.Errorf("%w: next nonce %v, tx nonce %v", core.ErrNonceTooLow, next, tx.Nonce())
+ }
+ // Ensure the transaction doesn't produce a nonce gap in pools that do not
+ // support arbitrary orderings
+ if opts.FirstNonceGap != nil {
+ if gap := opts.FirstNonceGap(from); gap < tx.Nonce() {
+ return fmt.Errorf("%w: tx nonce %v, gapped nonce %v", core.ErrNonceTooHigh, tx.Nonce(), gap)
+ }
+ }
+ // Ensure the transactor has enough funds to cover the transaction costs
+ var (
+ balance = opts.State.GetBalance(from)
+ cost = tx.Cost()
+ )
+ if balance.Cmp(cost) < 0 {
+ return fmt.Errorf("%w: balance %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, cost, new(big.Int).Sub(cost, balance))
+ }
+ // Ensure the transactor has enough funds to cover for replacements or nonce
+ // expansions without overdrafts
+ spent := opts.ExistingExpenditure(from)
+ if prev := opts.ExistingCost(from, tx.Nonce()); prev != nil {
+ bump := new(big.Int).Sub(cost, prev)
+ need := new(big.Int).Add(spent, bump)
+ if balance.Cmp(need) < 0 {
+ return fmt.Errorf("%w: balance %v, queued cost %v, tx bumped %v, overshot %v", core.ErrInsufficientFunds, balance, spent, bump, new(big.Int).Sub(need, balance))
+ }
+ } else {
+ need := new(big.Int).Add(spent, cost)
+ if balance.Cmp(need) < 0 {
+ return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance))
+ }
+ // Transaction takes a new nonce value out of the pool. Ensure it doesn't
+ // overflow the number of permitted transactions from a single account
+ // (i.e. max cancellable via out-of-bound transaction).
+ if used, left := opts.UsedAndLeftSlots(from); left <= 0 {
+ return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used)
+ }
+ }
+
+ // If the tx allow list is enabled, return an error if the from address is not allow listed.
+ if opts.Rules.IsPrecompileEnabled(txallowlist.ContractAddress) {
+ txAllowListRole := txallowlist.GetTxAllowListStatus(opts.State, from)
+ if !txAllowListRole.IsEnabled() {
+ return fmt.Errorf("%w: %s", vmerrs.ErrSenderAddressNotAllowListed, from)
+ }
+ }
+
+ return nil
+}
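
As a usage illustration for the new helpers: the Accept field is a bitmap indexed by transaction type, so a pool opts in to each type with 1<<types.XTxType. The snippet below is a hypothetical call site, not code from this change; only the ValidationOptions fields and the ValidateTransaction signature come from the file above.

    package example

    import (
        "math/big"

        "github.com/ava-labs/subnet-evm/core/txpool"
        "github.com/ava-labs/subnet-evm/core/types"
        "github.com/ava-labs/subnet-evm/params"
    )

    // checkTx runs the shared stateless checks for a non-blob transaction.
    func checkTx(cfg *params.ChainConfig, head *types.Header, signer types.Signer, tx *types.Transaction) error {
        opts := &txpool.ValidationOptions{
            Config:  cfg,
            Accept:  1<<types.LegacyTxType | 1<<types.AccessListTxType | 1<<types.DynamicFeeTxType,
            MaxSize: 4 * 32 * 1024, // illustrative: four 32 KiB slots
            MinTip:  big.NewInt(1), // minimum tip the pool is willing to queue
        }
        // nil blob sidecar arguments: blob transactions are not accepted here.
        return txpool.ValidateTransaction(tx, nil, nil, nil, head, signer, opts)
    }
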
diff --git a/core/types/block.go b/core/types/block.go
index 06e7d6c1c0..a17d4c7422 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -94,21 +94,26 @@ type Header struct {
// headers.
BlockGasCost *big.Int `json:"blockGasCost" rlp:"optional"`
- // ExcessDataGas was added by EIP-4844 and is ignored in legacy headers.
- ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
+ // BlobGasUsed was added by EIP-4844 and is ignored in legacy headers.
+ BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
+
+ // ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers.
+ ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
}
// field type overrides for gencodec
type headerMarshaling struct {
- Difficulty *hexutil.Big
- Number *hexutil.Big
- GasLimit hexutil.Uint64
- GasUsed hexutil.Uint64
- Time hexutil.Uint64
- Extra hexutil.Bytes
- BaseFee *hexutil.Big
- BlockGasCost *hexutil.Big
- Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
+ Difficulty *hexutil.Big
+ Number *hexutil.Big
+ GasLimit hexutil.Uint64
+ GasUsed hexutil.Uint64
+ Time hexutil.Uint64
+ Extra hexutil.Bytes
+ BaseFee *hexutil.Big
+ BlockGasCost *hexutil.Big
+ Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
+ BlobGasUsed *hexutil.Uint64
+ ExcessBlobGas *hexutil.Uint64
}
// Hash returns the block hash of the header, which is simply the keccak256 hash of its
@@ -147,7 +152,23 @@ type Body struct {
Uncles []*Header
}
-// Block represents an entire block in the Ethereum blockchain.
+// Block represents an Ethereum block.
+//
+// Note the Block type tries to be 'immutable', and contains certain caches that rely
+// on that. The rules around block immutability are as follows:
+//
+// - We copy all data when the block is constructed. This makes references held inside
+// the block independent of whatever value was passed in.
+//
+// - We copy all header data on access. This is because any change to the header would mess
+// up the cached hash and size values in the block. Calling code is expected to take
+// advantage of this to avoid over-allocating!
+//
+// - When new body data is attached to the block, a shallow copy of the block is returned.
+// This ensures block modifications are race-free.
+//
+// - We do not copy body data on access because it does not affect the caches, and also
+// because it would be too expensive.
type Block struct {
header *Header
uncles []*Header
@@ -165,9 +186,8 @@ type extblock struct {
Uncles []*Header
}
-// NewBlock creates a new block. The input data is copied,
-// changes to header and to the field values will not affect the
-// block.
+// NewBlock creates a new block. The input data is copied, changes to header and to the
+// field values will not affect the block.
//
// The values of TxHash, UncleHash, ReceiptHash and Bloom in header
// are ignored and set to values derived from the given txs, uncles
@@ -206,15 +226,7 @@ func NewBlock(
return b
}
-// NewBlockWithHeader creates a block with the given header data. The
-// header data is copied, changes to header and to the field values
-// will not affect the block.
-func NewBlockWithHeader(header *Header) *Block {
- return &Block{header: CopyHeader(header)}
-}
-
-// CopyHeader creates a deep copy of a block header to prevent side effects from
-// modifying a header variable.
+// CopyHeader creates a deep copy of a block header.
func CopyHeader(h *Header) *Header {
cpy := *h
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
@@ -233,10 +245,18 @@ func CopyHeader(h *Header) *Header {
cpy.Extra = make([]byte, len(h.Extra))
copy(cpy.Extra, h.Extra)
}
+ if h.ExcessBlobGas != nil {
+ cpy.ExcessBlobGas = new(uint64)
+ *cpy.ExcessBlobGas = *h.ExcessBlobGas
+ }
+ if h.BlobGasUsed != nil {
+ cpy.BlobGasUsed = new(uint64)
+ *cpy.BlobGasUsed = *h.BlobGasUsed
+ }
return &cpy
}
-// DecodeRLP decodes the Ethereum
+// DecodeRLP decodes a block from RLP.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
_, size, _ := s.Kind()
@@ -248,16 +268,23 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
return nil
}
-// EncodeRLP serializes b into the Ethereum RLP block format.
+// EncodeRLP serializes a block as RLP.
func (b *Block) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, extblock{
+ return rlp.Encode(w, &extblock{
Header: b.header,
Txs: b.transactions,
Uncles: b.uncles,
})
}
-// TODO: copies
+// Body returns the non-header content of the block.
+// Note the returned data is not an independent copy.
+func (b *Block) Body() *Body {
+ return &Body{b.transactions, b.uncles}
+}
+
+// Accessors for body data. These do not return a copy because the content
+// of the body slices does not affect the cached hash/size in block.
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
@@ -271,6 +298,13 @@ func (b *Block) Transaction(hash common.Hash) *Transaction {
return nil
}
+// Header returns the block header (as a copy).
+func (b *Block) Header() *Header {
+ return CopyHeader(b.header)
+}
+
+// Header value accessors. These do copy!
+
func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) }
func (b *Block) GasLimit() uint64 { return b.header.GasLimit }
func (b *Block) GasUsed() uint64 { return b.header.GasUsed }
@@ -304,10 +338,23 @@ func (b *Block) BlockGasCost() *big.Int {
return new(big.Int).Set(b.header.BlockGasCost)
}
-func (b *Block) Header() *Header { return CopyHeader(b.header) }
+func (b *Block) ExcessBlobGas() *uint64 {
+ var excessBlobGas *uint64
+ if b.header.ExcessBlobGas != nil {
+ excessBlobGas = new(uint64)
+ *excessBlobGas = *b.header.ExcessBlobGas
+ }
+ return excessBlobGas
+}
-// Body returns the non-header content of the block.
-func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} }
+func (b *Block) BlobGasUsed() *uint64 {
+ var blobGasUsed *uint64
+ if b.header.BlobGasUsed != nil {
+ blobGasUsed = new(uint64)
+ *blobGasUsed = *b.header.BlobGasUsed
+ }
+ return blobGasUsed
+}
// Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previously cached value.
@@ -335,22 +382,27 @@ func CalcUncleHash(uncles []*Header) common.Hash {
return rlpHash(uncles)
}
+// NewBlockWithHeader creates a block with the given header data. The
+// header data is copied, changes to header and to the field values
+// will not affect the block.
+func NewBlockWithHeader(header *Header) *Block {
+ return &Block{header: CopyHeader(header)}
+}
+
// WithSeal returns a new block with the data from b but the header replaced with
// the sealed one.
func (b *Block) WithSeal(header *Header) *Block {
- cpy := *header
-
return &Block{
- header: &cpy,
+ header: CopyHeader(header),
transactions: b.transactions,
uncles: b.uncles,
}
}
-// WithBody returns a new block with the given transaction and uncle contents.
+// WithBody returns a copy of the block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{
- header: CopyHeader(b.header),
+ header: b.header,
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
}
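
A brief, hypothetical illustration of the immutability rules documented above: header data is copied on access, so callers may freely mutate the returned header, including the new blob fields, without disturbing the block's cached hash.

    package example

    import "github.com/ava-labs/subnet-evm/core/types"

    // tweakHeaderCopy shows that Header() hands back a deep copy.
    func tweakHeaderCopy(b *types.Block) *types.Block {
        h := b.Header() // deep copy, BlobGasUsed/ExcessBlobGas included
        h.GasLimit = 0  // only the copy changes
        _ = b.Hash()    // b's cached hash is unaffected by the edit above
        return types.NewBlockWithHeader(h) // fresh block built from the modified copy
    }
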
diff --git a/core/types/block_test.go b/core/types/block_test.go
index fb4ad2194a..c484dd268d 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -28,17 +28,16 @@ package types
import (
"bytes"
- "hash"
"math/big"
"reflect"
"testing"
+ "github.com/ava-labs/subnet-evm/internal/blocktest"
"github.com/ava-labs/subnet-evm/params"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
- "golang.org/x/crypto/sha3"
)
func TestBlockEncoding(t *testing.T) {
@@ -238,31 +237,6 @@ func BenchmarkEncodeBlock(b *testing.B) {
}
}
-// testHasher is the helper tool for transaction/receipt list hashing.
-// The original hasher is trie, in order to get rid of import cycle,
-// use the testing hasher instead.
-type testHasher struct {
- hasher hash.Hash
-}
-
-func newHasher() *testHasher {
- return &testHasher{hasher: sha3.NewLegacyKeccak256()}
-}
-
-func (h *testHasher) Reset() {
- h.hasher.Reset()
-}
-
-func (h *testHasher) Update(key, val []byte) error {
- h.hasher.Write(key)
- h.hasher.Write(val)
- return nil
-}
-
-func (h *testHasher) Hash() common.Hash {
- return common.BytesToHash(h.hasher.Sum(nil))
-}
-
func makeBenchBlock() *Block {
var (
key, _ = crypto.GenerateKey()
@@ -301,7 +275,7 @@ func makeBenchBlock() *Block {
Extra: []byte("benchmark uncle"),
}
}
- return NewBlock(header, txs, uncles, receipts, newHasher())
+ return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher())
}
func TestSubnetEVMBlockEncoding(t *testing.T) {
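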
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index b0aa11fdae..224c2c3759 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -16,25 +16,26 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
- Coinbase common.Address `json:"miner" gencodec:"required"`
- Root common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
- ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
- Bloom Bloom `json:"logsBloom" gencodec:"required"`
- Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
- Number *hexutil.Big `json:"number" gencodec:"required"`
- GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash"`
- Nonce BlockNonce `json:"nonce"`
- BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
- BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"`
- ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
- Hash common.Hash `json:"hash"`
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
+ Coinbase common.Address `json:"miner" gencodec:"required"`
+ Root common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
+ ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
+ Bloom Bloom `json:"logsBloom" gencodec:"required"`
+ Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
+ Number *hexutil.Big `json:"number" gencodec:"required"`
+ GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
+ BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
+ BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
+ Hash common.Hash `json:"hash"`
}
var enc Header
enc.ParentHash = h.ParentHash
@@ -54,7 +55,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.Nonce = h.Nonce
enc.BaseFee = (*hexutil.Big)(h.BaseFee)
enc.BlockGasCost = (*hexutil.Big)(h.BlockGasCost)
- enc.ExcessDataGas = h.ExcessDataGas
+ enc.BlobGasUsed = (*hexutil.Uint64)(h.BlobGasUsed)
+ enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.Hash = h.Hash()
return json.Marshal(&enc)
}
@@ -79,7 +81,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
Nonce *BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"`
- ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -149,8 +152,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.BlockGasCost != nil {
h.BlockGasCost = (*big.Int)(dec.BlockGasCost)
}
- if dec.ExcessDataGas != nil {
- h.ExcessDataGas = dec.ExcessDataGas
+ if dec.BlobGasUsed != nil {
+ h.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
+ }
+ if dec.ExcessBlobGas != nil {
+ h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
return nil
}
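
The regenerated JSON codec above switches the two EIP-4844 header fields to *hexutil.Uint64, so they serialize as hex quantities and simply stay unset on decode when absent. A minimal standalone sketch of that behaviour (toy struct with omitempty for brevity, not the generated code; only the go-ethereum hexutil package is assumed):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

// headerBlobFields is a toy stand-in for the two new optional header fields.
type headerBlobFields struct {
    BlobGasUsed   *hexutil.Uint64 `json:"blobGasUsed,omitempty"`
    ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas,omitempty"`
}

func main() {
    used, excess := hexutil.Uint64(131072), hexutil.Uint64(0)
    b, _ := json.Marshal(headerBlobFields{BlobGasUsed: &used, ExcessBlobGas: &excess})
    fmt.Println(string(b)) // {"blobGasUsed":"0x20000","excessBlobGas":"0x0"}

    b, _ = json.Marshal(headerBlobFields{}) // pre-Cancun headers: both fields absent
    fmt.Println(string(b))                  // {}
}
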
diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go
index d869348c83..1735881553 100644
--- a/core/types/gen_header_rlp.go
+++ b/core/types/gen_header_rlp.go
@@ -42,8 +42,9 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBytes(obj.Nonce[:])
_tmp1 := obj.BaseFee != nil
_tmp2 := obj.BlockGasCost != nil
- _tmp3 := obj.ExcessDataGas != nil
- if _tmp1 || _tmp2 || _tmp3 {
+ _tmp3 := obj.BlobGasUsed != nil
+ _tmp4 := obj.ExcessBlobGas != nil
+ if _tmp1 || _tmp2 || _tmp3 || _tmp4 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
@@ -53,7 +54,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee)
}
}
- if _tmp2 || _tmp3 {
+ if _tmp2 || _tmp3 || _tmp4 {
if obj.BlockGasCost == nil {
w.Write(rlp.EmptyString)
} else {
@@ -63,14 +64,18 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BlockGasCost)
}
}
- if _tmp3 {
- if obj.ExcessDataGas == nil {
- w.Write(rlp.EmptyString)
+ if _tmp3 || _tmp4 {
+ if obj.BlobGasUsed == nil {
+ w.Write([]byte{0x80})
} else {
- if obj.ExcessDataGas.Sign() == -1 {
- return rlp.ErrNegativeBigInt
- }
- w.WriteBigInt(obj.ExcessDataGas)
+ w.WriteUint64((*obj.BlobGasUsed))
+ }
+ }
+ if _tmp4 {
+ if obj.ExcessBlobGas == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64((*obj.ExcessBlobGas))
}
}
w.ListEnd(_tmp0)
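
The regenerated RLP encoder treats the new fields as part of the optional tail: a field is only written if it or any later optional field is non-nil, and earlier nil slots are backfilled with empty items (0x80), which is exactly what the _tmp3/_tmp4 cascade above does. A standalone sketch of the trailing-optional idea using the rlp package's `optional` tag (toy struct, not the Header type):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/rlp"
)

// tail mimics the shape of the header's optional suffix.
type tail struct {
    Number        uint64
    BaseFee       *uint64 `rlp:"optional"`
    BlobGasUsed   *uint64 `rlp:"optional"`
    ExcessBlobGas *uint64 `rlp:"optional"`
}

func main() {
    one := uint64(1)
    short, _ := rlp.EncodeToBytes(&tail{Number: 1}) // trailing nil optionals are dropped
    long, _ := rlp.EncodeToBytes(&tail{Number: 1, BaseFee: &one, BlobGasUsed: &one, ExcessBlobGas: &one})
    fmt.Printf("%x\n%x\n", short, long) // first list has 1 item, second has 4
}
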
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index d83be14477..4c641a9727 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -26,6 +26,8 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
ContractAddress common.Address `json:"contractAddress"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"`
+ BlobGasUsed hexutil.Uint64 `json:"blobGasUsed,omitempty"`
+ BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"`
BlockHash common.Hash `json:"blockHash,omitempty"`
BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
TransactionIndex hexutil.Uint `json:"transactionIndex"`
@@ -41,6 +43,8 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
enc.ContractAddress = r.ContractAddress
enc.GasUsed = hexutil.Uint64(r.GasUsed)
enc.EffectiveGasPrice = (*hexutil.Big)(r.EffectiveGasPrice)
+ enc.BlobGasUsed = hexutil.Uint64(r.BlobGasUsed)
+ enc.BlobGasPrice = (*hexutil.Big)(r.BlobGasPrice)
enc.BlockHash = r.BlockHash
enc.BlockNumber = (*hexutil.Big)(r.BlockNumber)
enc.TransactionIndex = hexutil.Uint(r.TransactionIndex)
@@ -60,6 +64,8 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
ContractAddress *common.Address `json:"contractAddress"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed,omitempty"`
+ BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"`
BlockHash *common.Hash `json:"blockHash,omitempty"`
BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
TransactionIndex *hexutil.Uint `json:"transactionIndex"`
@@ -103,6 +109,12 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
if dec.EffectiveGasPrice != nil {
r.EffectiveGasPrice = (*big.Int)(dec.EffectiveGasPrice)
}
+ if dec.BlobGasUsed != nil {
+ r.BlobGasUsed = uint64(*dec.BlobGasUsed)
+ }
+ if dec.BlobGasPrice != nil {
+ r.BlobGasPrice = (*big.Int)(dec.BlobGasPrice)
+ }
if dec.BlockHash != nil {
r.BlockHash = *dec.BlockHash
}
diff --git a/core/types/hashes.go b/core/types/hashes.go
index 69a034b6d8..2c29ce2b71 100644
--- a/core/types/hashes.go
+++ b/core/types/hashes.go
@@ -29,6 +29,7 @@ package types
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
)
var (
@@ -47,3 +48,13 @@ var (
// EmptyReceiptsHash is the known hash of the empty receipt set.
EmptyReceiptsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
)
+
+// TrieRootHash returns the hash itself if it's non-empty, or the predefined
+// EmptyRootHash otherwise.
+func TrieRootHash(hash common.Hash) common.Hash {
+ if hash == (common.Hash{}) {
+ log.Error("Zero trie root hash!")
+ return EmptyRootHash
+ }
+ return hash
+}
diff --git a/core/types/receipt.go b/core/types/receipt.go
index 6ed57778d5..2518d5e49a 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -73,6 +73,8 @@ type Receipt struct {
ContractAddress common.Address `json:"contractAddress"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility
+ BlobGasUsed uint64 `json:"blobGasUsed,omitempty"`
+ BlobGasPrice *big.Int `json:"blobGasPrice,omitempty"`
// Inclusion information: These fields provide information about the inclusion of the
// transaction corresponding to this receipt.
@@ -88,6 +90,8 @@ type receiptMarshaling struct {
CumulativeGasUsed hexutil.Uint64
GasUsed hexutil.Uint64
EffectiveGasPrice *hexutil.Big
+ BlobGasUsed hexutil.Uint64
+ BlobGasPrice *hexutil.Big
BlockNumber *hexutil.Big
TransactionIndex hexutil.Uint
}
@@ -204,7 +208,7 @@ func (r *Receipt) decodeTyped(b []byte) error {
return errShortTypedReceipt
}
switch b[0] {
- case DynamicFeeTxType, AccessListTxType:
+ case DynamicFeeTxType, AccessListTxType, BlobTxType:
var data receiptRLP
err := rlp.DecodeBytes(b[1:], &data)
if err != nil {
@@ -306,14 +310,13 @@ func (rs Receipts) Len() int { return len(rs) }
func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) {
r := rs[i]
data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs}
- switch r.Type {
- case LegacyTxType:
- rlp.Encode(w, data)
- case AccessListTxType:
- w.WriteByte(AccessListTxType)
+ if r.Type == LegacyTxType {
rlp.Encode(w, data)
- case DynamicFeeTxType:
- w.WriteByte(DynamicFeeTxType)
+ return
+ }
+ w.WriteByte(r.Type)
+ switch r.Type {
+ case AccessListTxType, DynamicFeeTxType, BlobTxType:
rlp.Encode(w, data)
default:
// For unsupported types, write nothing. Since this is for
@@ -324,7 +327,7 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) {
// DeriveFields fills the receipts with their computed fields based on consensus
// data and contextual infos like containing block and transactions.
-func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, txs []*Transaction) error {
+func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs []*Transaction) error {
signer := MakeSigner(config, new(big.Int).SetUint64(number), time)
logIndex := uint(0)
@@ -335,9 +338,14 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu
// The transaction type and hash can be retrieved from the transaction itself
rs[i].Type = txs[i].Type()
rs[i].TxHash = txs[i].Hash()
-
rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee)
+ // EIP-4844 blob transaction fields
+ if txs[i].Type() == BlobTxType {
+ rs[i].BlobGasUsed = txs[i].BlobGas()
+ rs[i].BlobGasPrice = blobGasPrice
+ }
+
// block location fields
rs[i].BlockHash = hash
rs[i].BlockNumber = new(big.Int).SetUint64(number)
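
DeriveFields now takes the block's blob gas price and stamps it, together with the per-transaction blob gas, onto receipts of blob transactions only. The blob gas itself is a per-blob constant times the number of versioned hashes (see BlobGas in tx_blob.go further down); a small arithmetic sketch, assuming the EIP-4844 value of 2^17 blob gas per blob:

package sketch

// blobGasPerBlob mirrors params.BlobTxBlobGasPerBlob (assumed to be 1 << 17).
const blobGasPerBlob = uint64(1 << 17)

// receiptBlobGasUsed reproduces the per-receipt derivation above.
func receiptBlobGasUsed(numBlobHashes int) uint64 {
    return blobGasPerBlob * uint64(numBlobHashes) // e.g. 3 blobs -> 393216
}
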
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index f310dc061f..c0661fd20c 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -140,22 +140,24 @@ var (
}),
// EIP-4844 transactions.
NewTx(&BlobTx{
- To: &to6,
+ To: to6,
Nonce: 6,
Value: uint256.NewInt(6),
Gas: 6,
GasTipCap: uint256.NewInt(66),
GasFeeCap: uint256.NewInt(1066),
BlobFeeCap: uint256.NewInt(100066),
+ BlobHashes: []common.Hash{{}},
}),
NewTx(&BlobTx{
- To: &to7,
+ To: to7,
Nonce: 7,
Value: uint256.NewInt(7),
Gas: 7,
GasTipCap: uint256.NewInt(77),
GasFeeCap: uint256.NewInt(1077),
BlobFeeCap: uint256.NewInt(100077),
+ BlobHashes: []common.Hash{{}, {}, {}},
}),
}
@@ -280,6 +282,8 @@ var (
TxHash: txs[5].Hash(),
GasUsed: 6,
EffectiveGasPrice: big.NewInt(1066),
+ BlobGasUsed: params.BlobTxBlobGasPerBlob,
+ BlobGasPrice: big.NewInt(920),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 5,
@@ -293,6 +297,8 @@ var (
TxHash: txs[6].Hash(),
GasUsed: 7,
EffectiveGasPrice: big.NewInt(1077),
+ BlobGasUsed: 3 * params.BlobTxBlobGasPerBlob,
+ BlobGasPrice: big.NewInt(920),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 6,
@@ -313,8 +319,9 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) {
func TestDeriveFields(t *testing.T) {
// Re-derive receipts.
basefee := big.NewInt(1000)
+ blobGasPrice := big.NewInt(920)
derivedReceipts := clearComputedFieldsOnReceipts(receipts)
- err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, txs)
+ err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, blobGasPrice, txs)
if err != nil {
t.Fatalf("DeriveFields(...) = %v, want ", err)
}
@@ -511,6 +518,9 @@ func clearComputedFieldsOnReceipt(receipt *Receipt) *Receipt {
cpy.ContractAddress = common.Address{0xff, 0xff, 0x33}
cpy.GasUsed = 0xffffffff
cpy.Logs = clearComputedFieldsOnLogs(receipt.Logs)
+ cpy.EffectiveGasPrice = big.NewInt(0)
+ cpy.BlobGasUsed = 0
+ cpy.BlobGasPrice = nil
return &cpy
}
diff --git a/core/types/state_account.go b/core/types/state_account.go
index 72066a12d1..1c1ef026f4 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -27,9 +27,11 @@
package types
import (
+ "bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
)
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -type StateAccount -out gen_account_rlp.go
@@ -42,3 +44,88 @@ type StateAccount struct {
Root common.Hash // merkle root of the storage trie
CodeHash []byte
}
+
+// NewEmptyStateAccount constructs an empty state account.
+func NewEmptyStateAccount() *StateAccount {
+ return &StateAccount{
+ Balance: new(big.Int),
+ Root: EmptyRootHash,
+ CodeHash: EmptyCodeHash.Bytes(),
+ }
+}
+
+// Copy returns a deep-copied state account object.
+func (acct *StateAccount) Copy() *StateAccount {
+ var balance *big.Int
+ if acct.Balance != nil {
+ balance = new(big.Int).Set(acct.Balance)
+ }
+ return &StateAccount{
+ Nonce: acct.Nonce,
+ Balance: balance,
+ Root: acct.Root,
+ CodeHash: common.CopyBytes(acct.CodeHash),
+ }
+}
+
+// SlimAccount is a modified version of an Account, where the root is replaced
+// with a byte slice. This format can be used to represent the full consensus
+// format or the slim format, which replaces an empty root or code hash with a
+// nil byte slice.
+type SlimAccount struct {
+ Nonce uint64
+ Balance *big.Int
+ Root []byte // Nil if root equals types.EmptyRootHash
+ CodeHash []byte // Nil if hash equals types.EmptyCodeHash
+}
+
+// SlimAccountRLP encodes the state account in 'slim RLP' format.
+func SlimAccountRLP(account StateAccount) []byte {
+ slim := SlimAccount{
+ Nonce: account.Nonce,
+ Balance: account.Balance,
+ }
+ if account.Root != EmptyRootHash {
+ slim.Root = account.Root[:]
+ }
+ if !bytes.Equal(account.CodeHash, EmptyCodeHash[:]) {
+ slim.CodeHash = account.CodeHash
+ }
+ data, err := rlp.EncodeToBytes(slim)
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+// FullAccount decodes data in the 'slim RLP' format and returns
+// the consensus format account.
+func FullAccount(data []byte) (*StateAccount, error) {
+ var slim SlimAccount
+ if err := rlp.DecodeBytes(data, &slim); err != nil {
+ return nil, err
+ }
+ var account StateAccount
+ account.Nonce, account.Balance = slim.Nonce, slim.Balance
+
+ // Interpret the storage root and code hash in slim format.
+ if len(slim.Root) == 0 {
+ account.Root = EmptyRootHash
+ } else {
+ account.Root = common.BytesToHash(slim.Root)
+ }
+ if len(slim.CodeHash) == 0 {
+ account.CodeHash = EmptyCodeHash[:]
+ } else {
+ account.CodeHash = slim.CodeHash
+ }
+ return &account, nil
+}
+
+// FullAccountRLP converts data in the 'slim RLP' format into the full RLP format.
+func FullAccountRLP(data []byte) ([]byte, error) {
+ account, err := FullAccount(data)
+ if err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(account)
+}
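
A round-trip sketch of the new helpers, assuming they live in the core/types package exactly as added above: an empty account collapses both the root and the code hash to nil in slim form, and FullAccount restores the consensus values.

package main

import (
    "fmt"

    "github.com/ava-labs/subnet-evm/core/types"
)

func main() {
    acct := types.NewEmptyStateAccount()
    slim := types.SlimAccountRLP(*acct) // empty root/code hash become nil byte slices
    full, err := types.FullAccount(slim)
    if err != nil {
        panic(err)
    }
    fmt.Println(full.Root == types.EmptyRootHash) // true: consensus form restored
}
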
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 34c185a7b7..0dcbf20c00 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -28,7 +28,6 @@ package types
import (
"bytes"
- "container/heap"
"errors"
"io"
"math/big"
@@ -299,10 +298,10 @@ func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.g
// GasFeeCap returns the fee cap per gas of the transaction.
func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) }
-// BlobGas returns the data gas limit of the transaction for blob transactions, 0 otherwise.
+// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise.
func (tx *Transaction) BlobGas() uint64 { return tx.inner.blobGas() }
-// BlobGasFeeCap returns the data gas fee cap per data gas of the transaction for blob transactions, nil otherwise.
+// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise.
func (tx *Transaction) BlobGasFeeCap() *big.Int { return tx.inner.blobGasFeeCap() }
// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
@@ -404,6 +403,19 @@ func (tx *Transaction) BlobGasFeeCapIntCmp(other *big.Int) int {
return tx.inner.blobGasFeeCap().Cmp(other)
}
+// SetTime sets the decoding time of a transaction. This is used by tests to set
+// arbitrary times and by persistent transaction pools when loading old txs from
+// disk.
+func (tx *Transaction) SetTime(t time.Time) {
+ tx.time = t
+}
+
+// Time returns the time when the transaction was first seen on the network. It
+// is a heuristic to prefer mining older txs vs newer ones, all other things being equal.
+func (tx *Transaction) Time() time.Time {
+ return tx.time
+}
+
// Hash returns the transaction hash.
func (tx *Transaction) Hash() common.Hash {
if hash := tx.hash.Load(); hash != nil {
@@ -449,16 +461,6 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e
return &Transaction{inner: cpy, time: tx.time}, nil
}
-// FirstSeen is the time a transaction is first seen.
-func (tx *Transaction) FirstSeen() time.Time {
- return tx.time
-}
-
-// SetFirstSeen sets overwrites the time a transaction is first seen.
-func (tx *Transaction) SetFirstSeen(t time.Time) {
- tx.time = t
-}
-
// Transactions implements DerivableList for transactions.
type Transactions []*Transaction
@@ -522,123 +524,6 @@ func (s TxByNonce) Len() int { return len(s) }
func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() }
func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap
-type TxWithMinerFee struct {
- Tx *Transaction
- minerFee *big.Int
-}
-
-// NewTxWithMinerFee creates a wrapped transaction, calculating the effective
-// miner gasTipCap if a base fee is provided.
-// Returns error in case of a negative effective miner gasTipCap.
-func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) {
- minerFee, err := tx.EffectiveGasTip(baseFee)
- if err != nil {
- return nil, err
- }
- return &TxWithMinerFee{
- Tx: tx,
- minerFee: minerFee,
- }, nil
-}
-
-// TxByPriceAndTime implements both the sort and the heap interface, making it useful
-// for all at once sorting as well as individually adding and removing elements.
-type TxByPriceAndTime []*TxWithMinerFee
-
-func (s TxByPriceAndTime) Len() int { return len(s) }
-func (s TxByPriceAndTime) Less(i, j int) bool {
- // If the prices are equal, use the time the transaction was first seen for
- // deterministic sorting
- cmp := s[i].minerFee.Cmp(s[j].minerFee)
- if cmp == 0 {
- return s[i].Tx.time.Before(s[j].Tx.time)
- }
- return cmp > 0
-}
-func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s *TxByPriceAndTime) Push(x interface{}) {
- *s = append(*s, x.(*TxWithMinerFee))
-}
-
-func (s *TxByPriceAndTime) Pop() interface{} {
- old := *s
- n := len(old)
- x := old[n-1]
- old[n-1] = nil
- *s = old[0 : n-1]
- return x
-}
-
-// TransactionsByPriceAndNonce represents a set of transactions that can return
-// transactions in a profit-maximizing sorted order, while supporting removing
-// entire batches of transactions for non-executable accounts.
-type TransactionsByPriceAndNonce struct {
- txs map[common.Address]Transactions // Per account nonce-sorted list of transactions
- heads TxByPriceAndTime // Next transaction for each unique account (price heap)
- signer Signer // Signer for the set of transactions
- baseFee *big.Int // Current base fee
-}
-
-// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve
-// price sorted transactions in a nonce-honouring way.
-//
-// Note, the input map is reowned so the caller should not interact any more with
-// if after providing it to the constructor.
-func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce {
- // Initialize a price and received time based heap with the head transactions
- heads := make(TxByPriceAndTime, 0, len(txs))
- for from, accTxs := range txs {
- acc, _ := Sender(signer, accTxs[0])
- wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee)
- // Remove transaction if sender doesn't match from, or if wrapping fails.
- if acc != from || err != nil {
- delete(txs, from)
- continue
- }
- heads = append(heads, wrapped)
- txs[from] = accTxs[1:]
- }
- heap.Init(&heads)
-
- // Assemble and return the transaction set
- return &TransactionsByPriceAndNonce{
- txs: txs,
- heads: heads,
- signer: signer,
- baseFee: baseFee,
- }
-}
-
-// Peek returns the next transaction by price.
-func (t *TransactionsByPriceAndNonce) Peek() *Transaction {
- if len(t.heads) == 0 {
- return nil
- }
- return t.heads[0].Tx
-}
-
-// Shift replaces the current best head with the next one from the same account.
-func (t *TransactionsByPriceAndNonce) Shift() {
- acc, _ := Sender(t.signer, t.heads[0].Tx)
- if txs, ok := t.txs[acc]; ok && len(txs) > 0 {
- if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil {
- t.heads[0], t.txs[acc] = wrapped, txs[1:]
- heap.Fix(&t.heads, 0)
- return
- }
- }
- heap.Pop(&t.heads)
-}
-
-// Pop removes the best transaction, *not* replacing it with the next one from
-// the same account. This should be used when a transaction cannot be executed
-// and hence all subsequent ones should be discarded from the same account.
-func (t *TransactionsByPriceAndNonce) Pop() {
- heap.Pop(&t.heads)
-}
-
// copyAddressPtr copies an address.
func copyAddressPtr(a *common.Address) *common.Address {
if a == nil {
diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go
index 4d0dd2331f..2437a5b2f3 100644
--- a/core/types/transaction_marshalling.go
+++ b/core/types/transaction_marshalling.go
@@ -47,7 +47,7 @@ type txJSON struct {
GasPrice *hexutil.Big `json:"gasPrice"`
MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"`
MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"`
- MaxFeePerDataGas *hexutil.Big `json:"maxFeePerDataGas,omitempty"`
+ MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"`
Value *hexutil.Big `json:"value"`
Input *hexutil.Bytes `json:"input"`
AccessList *AccessList `json:"accessList,omitempty"`
@@ -55,11 +55,32 @@ type txJSON struct {
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
+ YParity *hexutil.Uint64 `json:"yParity,omitempty"`
// Only used for encoding:
Hash common.Hash `json:"hash"`
}
+// yParityValue returns the YParity value from JSON. For backwards-compatibility reasons,
+// this can be given in the 'v' field or the 'yParity' field. If both exist, they must match.
+func (tx *txJSON) yParityValue() (*big.Int, error) {
+ if tx.YParity != nil {
+ val := uint64(*tx.YParity)
+ if val != 0 && val != 1 {
+ return nil, errors.New("'yParity' field must be 0 or 1")
+ }
+ bigval := new(big.Int).SetUint64(val)
+ if tx.V != nil && tx.V.ToInt().Cmp(bigval) != 0 {
+ return nil, errors.New("'v' and 'yParity' fields do not match")
+ }
+ return bigval, nil
+ }
+ if tx.V != nil {
+ return tx.V.ToInt(), nil
+ }
+ return nil, errors.New("missing 'yParity' or 'v' field in transaction")
+}
+
// MarshalJSON marshals as JSON with a hash.
func (tx *Transaction) MarshalJSON() ([]byte, error) {
var enc txJSON
@@ -79,6 +100,9 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+ if tx.Protected() {
+ enc.ChainID = (*hexutil.Big)(tx.ChainId())
+ }
case *AccessListTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID)
@@ -92,6 +116,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+ yparity := itx.V.Uint64()
+ enc.YParity = (*hexutil.Uint64)(&yparity)
case *DynamicFeeTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID)
@@ -106,6 +132,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+ yparity := itx.V.Uint64()
+ enc.YParity = (*hexutil.Uint64)(&yparity)
case *BlobTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig())
@@ -113,7 +141,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.Gas = (*hexutil.Uint64)(&itx.Gas)
enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap.ToBig())
enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap.ToBig())
- enc.MaxFeePerDataGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig())
+ enc.MaxFeePerBlobGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig())
enc.Value = (*hexutil.Big)(itx.Value.ToBig())
enc.Input = (*hexutil.Bytes)(&itx.Data)
enc.AccessList = &itx.AccessList
@@ -122,6 +150,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V.ToBig())
enc.R = (*hexutil.Big)(itx.R.ToBig())
enc.S = (*hexutil.Big)(itx.S.ToBig())
+ yparity := itx.V.Uint64()
+ enc.YParity = (*hexutil.Uint64)(&yparity)
}
return json.Marshal(&enc)
}
@@ -129,7 +159,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (tx *Transaction) UnmarshalJSON(input []byte) error {
var dec txJSON
- if err := json.Unmarshal(input, &dec); err != nil {
+ err := json.Unmarshal(input, &dec)
+ if err != nil {
return err
}
@@ -162,20 +193,23 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'input' in transaction")
}
itx.Data = *dec.Input
- if dec.V == nil {
- return errors.New("missing required field 'v' in transaction")
- }
- itx.V = (*big.Int)(dec.V)
+
+ // signature R
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
itx.R = (*big.Int)(dec.R)
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
itx.S = (*big.Int)(dec.S)
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
+ // signature V
+ if dec.V == nil {
+ return errors.New("missing required field 'v' in transaction")
+ }
+ itx.V = (*big.Int)(dec.V)
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
if err := sanityCheckSignature(itx.V, itx.R, itx.S, true); err != nil {
return err
}
@@ -211,23 +245,26 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'input' in transaction")
}
itx.Data = *dec.Input
- if dec.V == nil {
- return errors.New("missing required field 'v' in transaction")
- }
if dec.AccessList != nil {
itx.AccessList = *dec.AccessList
}
- itx.V = (*big.Int)(dec.V)
+
+ // signature R
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
itx.R = (*big.Int)(dec.R)
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
itx.S = (*big.Int)(dec.S)
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
+ // signature V
+ itx.V, err = dec.yParityValue()
+ if err != nil {
+ return err
+ }
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil {
return err
}
@@ -273,17 +310,23 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
if dec.AccessList != nil {
itx.AccessList = *dec.AccessList
}
- itx.V = (*big.Int)(dec.V)
+
+ // signature R
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
itx.R = (*big.Int)(dec.R)
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
itx.S = (*big.Int)(dec.S)
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
+ // signature V
+ itx.V, err = dec.yParityValue()
+ if err != nil {
+ return err
+ }
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil {
return err
}
@@ -300,9 +343,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'nonce' in transaction")
}
itx.Nonce = uint64(*dec.Nonce)
- if dec.To != nil {
- itx.To = dec.To
+ if dec.To == nil {
+ return errors.New("missing required field 'to' in transaction")
}
+ itx.To = *dec.To
if dec.Gas == nil {
return errors.New("missing required field 'gas' for txdata")
}
@@ -315,10 +359,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'maxFeePerGas' for txdata")
}
itx.GasFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerGas))
- if dec.MaxFeePerDataGas == nil {
- return errors.New("missing required field 'maxFeePerDataGas' for txdata")
+ if dec.MaxFeePerBlobGas == nil {
+ return errors.New("missing required field 'maxFeePerBlobGas' for txdata")
}
- itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerDataGas))
+ itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerBlobGas))
if dec.Value == nil {
return errors.New("missing required field 'value' in transaction")
}
@@ -337,18 +381,35 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'blobVersionedHashes' in transaction")
}
itx.BlobHashes = dec.BlobVersionedHashes
- itx.V = uint256.MustFromBig((*big.Int)(dec.V))
+
+ // signature R
+ var ok bool
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
- itx.R = uint256.MustFromBig((*big.Int)(dec.R))
+ itx.R, ok = uint256.FromBig((*big.Int)(dec.R))
+ if !ok {
+ return errors.New("'r' value overflows uint256")
+ }
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
- itx.S = uint256.MustFromBig((*big.Int)(dec.S))
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
- if err := sanityCheckSignature(itx.V.ToBig(), itx.R.ToBig(), itx.S.ToBig(), false); err != nil {
+ itx.S, ok = uint256.FromBig((*big.Int)(dec.S))
+ if !ok {
+ return errors.New("'s' value overflows uint256")
+ }
+ // signature V
+ vbig, err := dec.yParityValue()
+ if err != nil {
+ return err
+ }
+ itx.V, ok = uint256.FromBig(vbig)
+ if !ok {
+ return errors.New("'v' value overflows uint256")
+ }
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
+ if err := sanityCheckSignature(vbig, itx.R.ToBig(), itx.S.ToBig(), false); err != nil {
return err
}
}
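
All three typed-transaction branches now share one rule through yParityValue: yParity, when present, must be 0 or 1 and must agree with v; otherwise v alone is accepted. A standalone restatement of that rule (hypothetical helper, same error messages as the code above):

package sketch

import (
    "errors"
    "math/big"
)

// reconcileYParity mirrors txJSON.yParityValue above.
func reconcileYParity(v *big.Int, yParity *uint64) (*big.Int, error) {
    if yParity != nil {
        if *yParity > 1 {
            return nil, errors.New("'yParity' field must be 0 or 1")
        }
        val := new(big.Int).SetUint64(*yParity)
        if v != nil && v.Cmp(val) != 0 {
            return nil, errors.New("'v' and 'yParity' fields do not match")
        }
        return val, nil
    }
    if v != nil {
        return v, nil
    }
    return nil, errors.New("missing 'yParity' or 'v' field in transaction")
}
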
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index 1a69404f06..25d727a186 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -49,7 +49,7 @@ type sigCache struct {
// MakeSigner returns a Signer based on the given chain config and block number or time.
func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64) Signer {
switch {
- case config.IsCancun(blockTime):
+ case config.IsCancun(blockNumber, blockTime):
return NewCancunSigner(config.ChainID)
case config.IsSubnetEVM(blockTime):
return NewLondonSigner(config.ChainID)
@@ -63,9 +63,9 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint
}
// LatestSigner returns the 'most permissive' Signer available for the given chain
-// configuration. Specifically, this enables support of EIP-155 replay protection and
-// EIP-2930 access list transactions when their respective forks are scheduled to occur at
-// any block number in the chain config.
+// configuration. Specifically, this enables support of all types of transactions
+// when their respective forks are scheduled to occur at any block number (or time)
+// in the chain config.
//
// Use this in transaction-handling code where the current block number is unknown. If you
// have the current block number available, use MakeSigner instead.
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index 995afbb9a0..1e583b90f4 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -33,10 +33,8 @@ import (
"errors"
"fmt"
"math/big"
- "math/rand"
"reflect"
"testing"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -269,152 +267,6 @@ func TestRecipientNormal(t *testing.T) {
}
}
-func TestTransactionPriceNonceSortLegacy(t *testing.T) {
- testTransactionPriceNonceSort(t, nil)
-}
-
-func TestTransactionPriceNonceSort1559(t *testing.T) {
- testTransactionPriceNonceSort(t, big.NewInt(0))
- testTransactionPriceNonceSort(t, big.NewInt(5))
- testTransactionPriceNonceSort(t, big.NewInt(50))
-}
-
-// Tests that transactions can be correctly sorted according to their price in
-// decreasing order, but at the same time with increasing nonces when issued by
-// the same account.
-func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
- // Generate a batch of accounts to start with
- keys := make([]*ecdsa.PrivateKey, 25)
- for i := 0; i < len(keys); i++ {
- keys[i], _ = crypto.GenerateKey()
- }
- signer := LatestSignerForChainID(common.Big1)
-
- // Generate a batch of transactions with overlapping values, but shifted nonces
- groups := map[common.Address]Transactions{}
- expectedCount := 0
- for start, key := range keys {
- addr := crypto.PubkeyToAddress(key.PublicKey)
- count := 25
- for i := 0; i < 25; i++ {
- var tx *Transaction
- gasFeeCap := rand.Intn(50)
- if baseFee == nil {
- tx = NewTx(&LegacyTx{
- Nonce: uint64(start + i),
- To: &common.Address{},
- Value: big.NewInt(100),
- Gas: 100,
- GasPrice: big.NewInt(int64(gasFeeCap)),
- Data: nil,
- })
- } else {
- tx = NewTx(&DynamicFeeTx{
- Nonce: uint64(start + i),
- To: &common.Address{},
- Value: big.NewInt(100),
- Gas: 100,
- GasFeeCap: big.NewInt(int64(gasFeeCap)),
- GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))),
- Data: nil,
- })
- if count == 25 && int64(gasFeeCap) < baseFee.Int64() {
- count = i
- }
- }
- tx, err := SignTx(tx, signer, key)
- if err != nil {
- t.Fatalf("failed to sign tx: %s", err)
- }
- groups[addr] = append(groups[addr], tx)
- }
- expectedCount += count
- }
- // Sort the transactions and cross check the nonce ordering
- txset := NewTransactionsByPriceAndNonce(signer, groups, baseFee)
-
- txs := Transactions{}
- for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
- txs = append(txs, tx)
- txset.Shift()
- }
- if len(txs) != expectedCount {
- t.Errorf("expected %d transactions, found %d", expectedCount, len(txs))
- }
- for i, txi := range txs {
- fromi, _ := Sender(signer, txi)
-
- // Make sure the nonce order is valid
- for j, txj := range txs[i+1:] {
- fromj, _ := Sender(signer, txj)
- if fromi == fromj && txi.Nonce() > txj.Nonce() {
- t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
- }
- }
- // If the next tx has different from account, the price must be lower than the current one
- if i+1 < len(txs) {
- next := txs[i+1]
- fromNext, _ := Sender(signer, next)
- tip, err := txi.EffectiveGasTip(baseFee)
- nextTip, nextErr := next.EffectiveGasTip(baseFee)
- if err != nil || nextErr != nil {
- t.Errorf("error calculating effective tip")
- }
- if fromi != fromNext && tip.Cmp(nextTip) < 0 {
- t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
- }
- }
- }
-}
-
-// Tests that if multiple transactions have the same price, the ones seen earlier
-// are prioritized to avoid network spam attacks aiming for a specific ordering.
-func TestTransactionTimeSort(t *testing.T) {
- // Generate a batch of accounts to start with
- keys := make([]*ecdsa.PrivateKey, 5)
- for i := 0; i < len(keys); i++ {
- keys[i], _ = crypto.GenerateKey()
- }
- signer := HomesteadSigner{}
-
- // Generate a batch of transactions with overlapping prices, but different creation times
- groups := map[common.Address]Transactions{}
- for start, key := range keys {
- addr := crypto.PubkeyToAddress(key.PublicKey)
-
- tx, _ := SignTx(NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key)
- tx.time = time.Unix(0, int64(len(keys)-start))
-
- groups[addr] = append(groups[addr], tx)
- }
- // Sort the transactions and cross check the nonce ordering
- txset := NewTransactionsByPriceAndNonce(signer, groups, nil)
-
- txs := Transactions{}
- for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
- txs = append(txs, tx)
- txset.Shift()
- }
- if len(txs) != len(keys) {
- t.Errorf("expected %d transactions, found %d", len(keys), len(txs))
- }
- for i, txi := range txs {
- fromi, _ := Sender(signer, txi)
- if i+1 < len(txs) {
- next := txs[i+1]
- fromNext, _ := Sender(signer, next)
-
- if txi.GasPrice().Cmp(next.GasPrice()) < 0 {
- t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
- }
- // Make sure time order is ascending if the txs have the same gas price
- if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.time.After(next.time) {
- t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.time, i+1, fromNext[:4], next.time)
- }
- }
- }
-}
-
// TestTransactionCoding tests serializing/de-serializing to/from rlp and JSON.
func TestTransactionCoding(t *testing.T) {
key, err := crypto.GenerateKey()
@@ -536,7 +388,7 @@ func assertEqual(orig *Transaction, cpy *Transaction) error {
}
if orig.AccessList() != nil {
if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) {
- return fmt.Errorf("access list wrong!")
+ return errors.New("access list wrong!")
}
}
return nil
diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go
index 7ae956355f..f97b1bf3c5 100644
--- a/core/types/tx_blob.go
+++ b/core/types/tx_blob.go
@@ -31,11 +31,11 @@ type BlobTx struct {
GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas
GasFeeCap *uint256.Int // a.k.a. maxFeePerGas
Gas uint64
- To *common.Address `rlp:"nil"` // nil means contract creation
+ To common.Address
Value *uint256.Int
Data []byte
AccessList AccessList
- BlobFeeCap *uint256.Int // a.k.a. maxFeePerDataGas
+ BlobFeeCap *uint256.Int // a.k.a. maxFeePerBlobGas
BlobHashes []common.Hash
// Signature values
@@ -48,7 +48,7 @@ type BlobTx struct {
func (tx *BlobTx) copy() TxData {
cpy := &BlobTx{
Nonce: tx.Nonce,
- To: copyAddressPtr(tx.To),
+ To: tx.To,
Data: common.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are copied below.
@@ -104,8 +104,8 @@ func (tx *BlobTx) gasTipCap() *big.Int { return tx.GasTipCap.ToBig() }
func (tx *BlobTx) gasPrice() *big.Int { return tx.GasFeeCap.ToBig() }
func (tx *BlobTx) value() *big.Int { return tx.Value.ToBig() }
func (tx *BlobTx) nonce() uint64 { return tx.Nonce }
-func (tx *BlobTx) to() *common.Address { return tx.To }
-func (tx *BlobTx) blobGas() uint64 { return params.BlobTxDataGasPerBlob * uint64(len(tx.BlobHashes)) }
+func (tx *BlobTx) to() *common.Address { tmp := tx.To; return &tmp }
+func (tx *BlobTx) blobGas() uint64 { return params.BlobTxBlobGasPerBlob * uint64(len(tx.BlobHashes)) }
func (tx *BlobTx) blobGasFeeCap() *big.Int { return tx.BlobFeeCap.ToBig() }
func (tx *BlobTx) blobHashes() []common.Hash { return tx.BlobHashes }
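
With To now a plain common.Address, a blob transaction can no longer encode contract creation, and its blob gas is derived purely from the number of versioned hashes. A construction sketch along the lines of the updated test fixtures (values are arbitrary; assumes params.BlobTxBlobGasPerBlob is 131072):

package main

import (
    "fmt"

    "github.com/ava-labs/subnet-evm/core/types"
    "github.com/ethereum/go-ethereum/common"
    "github.com/holiman/uint256"
)

func main() {
    tx := types.NewTx(&types.BlobTx{
        To:         common.Address{0x01}, // value, not pointer: no contract creation
        Nonce:      0,
        Value:      uint256.NewInt(0),
        Gas:        21000,
        GasTipCap:  uint256.NewInt(1),
        GasFeeCap:  uint256.NewInt(10),
        BlobFeeCap: uint256.NewInt(100),
        BlobHashes: []common.Hash{{}, {}}, // two blobs
    })
    fmt.Println(tx.BlobGas()) // 262144 == 2 * BlobTxBlobGasPerBlob
}
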
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index aac80bf47e..879137a10a 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -43,6 +43,7 @@ import (
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bls12381"
"github.com/ethereum/go-ethereum/crypto/bn256"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
"golang.org/x/crypto/ripemd160"
)
@@ -104,6 +105,21 @@ var PrecompiledContractsBerlin = map[common.Address]contract.StatefulPrecompiled
common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}),
}
+// PrecompiledContractsCancun contains the default set of pre-compiled Ethereum
+// contracts used in the Cancun release.
+var PrecompiledContractsCancun = map[common.Address]contract.StatefulPrecompiledContract{
+ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}),
+ common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}),
+ common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}),
+ common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}),
+ common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}),
+ common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}),
+ common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}),
+ common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}),
+ common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}),
+ common.BytesToAddress([]byte{0x0a}): newWrappedPrecompiledContract(&kzgPointEvaluation{}),
+}
+
// PrecompiledContractsBLS contains the set of pre-compiled Ethereum
// contracts specified in EIP-2537. These are exported for testing purposes.
var PrecompiledContractsBLS = map[common.Address]contract.StatefulPrecompiledContract{
@@ -119,6 +135,7 @@ var PrecompiledContractsBLS = map[common.Address]contract.StatefulPrecompiledCon
}
var (
+ PrecompiledAddressesCancun []common.Address
PrecompiledAddressesBerlin []common.Address
PrecompiledAddressesIstanbul []common.Address
PrecompiledAddressesByzantium []common.Address
@@ -140,6 +157,9 @@ func init() {
for k := range PrecompiledContractsBerlin {
PrecompiledAddressesBerlin = append(PrecompiledAddressesBerlin, k)
}
+ for k := range PrecompiledContractsCancun {
+ PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k)
+ }
for k := range PrecompiledContractsBLS {
PrecompiledAddressesBLS = append(PrecompiledAddressesBLS, k)
}
@@ -150,6 +170,7 @@ func init() {
addrsList := append(PrecompiledAddressesHomestead, PrecompiledAddressesByzantium...)
addrsList = append(addrsList, PrecompiledAddressesIstanbul...)
addrsList = append(addrsList, PrecompiledAddressesBerlin...)
+ addrsList = append(addrsList, PrecompiledAddressesCancun...)
addrsList = append(addrsList, PrecompiledAddressesBLS...)
for _, k := range addrsList {
PrecompileAllNativeAddresses[k] = struct{}{}
@@ -168,6 +189,8 @@ func init() {
// ActivePrecompiles returns the precompiles enabled with the current configuration.
func ActivePrecompiles(rules params.Rules) []common.Address {
switch {
+ case rules.IsCancun:
+ return PrecompiledAddressesCancun
case rules.IsSubnetEVM:
return PrecompiledAddressesBerlin
case rules.IsIstanbul:
@@ -1087,3 +1110,67 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) {
// Encode the G2 point to 256 bytes
return g.EncodePoint(r), nil
}
+
+// kzgPointEvaluation implements the EIP-4844 point evaluation precompile.
+type kzgPointEvaluation struct{}
+
+// RequiredGas estimates the gas required for running the point evaluation precompile.
+func (b *kzgPointEvaluation) RequiredGas(input []byte) uint64 {
+ return params.BlobTxPointEvaluationPrecompileGas
+}
+
+const (
+ blobVerifyInputLength = 192 // Required input length for the point evaluation precompile.
+ blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile.
+ blobPrecompileReturnValue = "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"
+)
+
+var (
+ errBlobVerifyInvalidInputLength = errors.New("invalid input length")
+ errBlobVerifyMismatchedVersion = errors.New("mismatched versioned hash")
+ errBlobVerifyKZGProof = errors.New("error verifying kzg proof")
+)
+
+// Run executes the point evaluation precompile.
+func (b *kzgPointEvaluation) Run(input []byte) ([]byte, error) {
+ if len(input) != blobVerifyInputLength {
+ return nil, errBlobVerifyInvalidInputLength
+ }
+ // versioned hash: first 32 bytes
+ var versionedHash common.Hash
+ copy(versionedHash[:], input[:])
+
+ var (
+ point kzg4844.Point
+ claim kzg4844.Claim
+ )
+ // Evaluation point: next 32 bytes
+ copy(point[:], input[32:])
+ // Expected output: next 32 bytes
+ copy(claim[:], input[64:])
+
+ // input kzg point: next 48 bytes
+ var commitment kzg4844.Commitment
+ copy(commitment[:], input[96:])
+ if kZGToVersionedHash(commitment) != versionedHash {
+ return nil, errBlobVerifyMismatchedVersion
+ }
+
+ // Proof: next 48 bytes
+ var proof kzg4844.Proof
+ copy(proof[:], input[144:])
+
+ if err := kzg4844.VerifyProof(commitment, point, claim, proof); err != nil {
+ return nil, fmt.Errorf("%w: %v", errBlobVerifyKZGProof, err)
+ }
+
+ return common.Hex2Bytes(blobPrecompileReturnValue), nil
+}
+
+// kZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844
+func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
+ h := sha256.Sum256(kzg[:])
+ h[0] = blobCommitmentVersionKZG
+
+ return h
+}
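
Run slices its 192-byte input at fixed offsets; a small helper sketch that assembles such an input in the same order (hypothetical function, offsets taken from the copy() calls above):

package sketch

// pointEvalInput packs versioned hash | point | claim | commitment | proof,
// matching the offsets consumed by kzgPointEvaluation.Run.
func pointEvalInput(versionedHash, point, claim [32]byte, commitment, proof [48]byte) []byte {
    input := make([]byte, 192)
    copy(input[0:32], versionedHash[:])
    copy(input[32:64], point[:])
    copy(input[64:96], claim[:])
    copy(input[96:144], commitment[:])
    copy(input[144:192], proof[:])
    return input
}
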
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index e4114a046e..8608fdf36d 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -66,15 +66,17 @@ var allPrecompiles = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
common.BytesToAddress([]byte{9}): &blake2F{},
- common.BytesToAddress([]byte{10}): &bls12381G1Add{},
- common.BytesToAddress([]byte{11}): &bls12381G1Mul{},
- common.BytesToAddress([]byte{12}): &bls12381G1MultiExp{},
- common.BytesToAddress([]byte{13}): &bls12381G2Add{},
- common.BytesToAddress([]byte{14}): &bls12381G2Mul{},
- common.BytesToAddress([]byte{15}): &bls12381G2MultiExp{},
- common.BytesToAddress([]byte{16}): &bls12381Pairing{},
- common.BytesToAddress([]byte{17}): &bls12381MapG1{},
- common.BytesToAddress([]byte{18}): &bls12381MapG2{},
+ common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
+
+ common.BytesToAddress([]byte{0x0f, 0x0a}): &bls12381G1Add{},
+ common.BytesToAddress([]byte{0x0f, 0x0b}): &bls12381G1Mul{},
+ common.BytesToAddress([]byte{0x0f, 0x0c}): &bls12381G1MultiExp{},
+ common.BytesToAddress([]byte{0x0f, 0x0d}): &bls12381G2Add{},
+ common.BytesToAddress([]byte{0x0f, 0x0e}): &bls12381G2Mul{},
+ common.BytesToAddress([]byte{0x0f, 0x0f}): &bls12381G2MultiExp{},
+ common.BytesToAddress([]byte{0x0f, 0x10}): &bls12381Pairing{},
+ common.BytesToAddress([]byte{0x0f, 0x11}): &bls12381MapG1{},
+ common.BytesToAddress([]byte{0x0f, 0x12}): &bls12381MapG2{},
}
// EIP-152 test vectors
@@ -312,36 +314,38 @@ func benchJson(name, addr string, b *testing.B) {
}
}
-func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "0a", t) }
-func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "0b", t) }
-func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "0c", t) }
-func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "0d", t) }
-func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "0e", t) }
-func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "0f", t) }
-func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "10", t) }
-func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "11", t) }
-func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "12", t) }
-
-func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "0a", b) }
-func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "0b", b) }
-func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "0c", b) }
-func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "0d", b) }
-func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "0e", b) }
-func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "0f", b) }
-func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "10", b) }
-func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "11", b) }
-func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "12", b) }
+func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "f0a", t) }
+func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "f0b", t) }
+func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "f0c", t) }
+func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "f0d", t) }
+func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "f0e", t) }
+func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "f0f", t) }
+func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "f10", t) }
+func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "f11", t) }
+func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "f12", t) }
+
+func TestPrecompiledPointEvaluation(t *testing.T) { testJson("pointEvaluation", "0a", t) }
+
+func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "f0a", b) }
+func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "f0b", b) }
+func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "f0c", b) }
+func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "f0d", b) }
+func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "f0e", b) }
+func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "f0f", b) }
+func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "f10", b) }
+func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "f11", b) }
+func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "f12", b) }
// Failure tests
-func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "0a", t) }
-func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "0b", t) }
-func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "0c", t) }
-func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "0d", t) }
-func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "0e", t) }
-func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "0f", t) }
-func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "10", t) }
-func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "11", t) }
-func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "12", t) }
+func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "f0a", t) }
+func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "f0b", t) }
+func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "f0c", t) }
+func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "f0d", t) }
+func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "f0e", t) }
+func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "f0f", t) }
+func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "f10", t) }
+func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "f11", t) }
+func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "f12", t) }
func loadJson(name string) ([]precompiledTest, error) {
data, err := os.ReadFile(fmt.Sprintf("testdata/precompiles/%v.json", name))
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 254062d09a..3a96d275fb 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -37,6 +37,8 @@ import (
)
var activators = map[int]func(*JumpTable){
+ 5656: enable5656,
+ 6780: enable6780,
3855: enable3855,
3860: enable3860,
3198: enable3198,
@@ -236,9 +238,69 @@ func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
return nil, nil
}
-// ebnable3860 enables "EIP-3860: Limit and meter initcode"
+// enable3860 enables "EIP-3860: Limit and meter initcode"
// https://eips.ethereum.org/EIPS/eip-3860
func enable3860(jt *JumpTable) {
jt[CREATE].dynamicGas = gasCreateEip3860
jt[CREATE2].dynamicGas = gasCreate2Eip3860
}
+
+// enable5656 enables EIP-5656 (MCOPY opcode)
+// https://eips.ethereum.org/EIPS/eip-5656
+func enable5656(jt *JumpTable) {
+ jt[MCOPY] = &operation{
+ execute: opMcopy,
+ constantGas: GasFastestStep,
+ dynamicGas: gasMcopy,
+ minStack: minStack(3, 0),
+ maxStack: maxStack(3, 0),
+ memorySize: memoryMcopy,
+ }
+}
+
+// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656)
+func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ var (
+ dst = scope.Stack.pop()
+ src = scope.Stack.pop()
+ length = scope.Stack.pop()
+ )
+ // These values are checked for overflow during memory expansion calculation
+ // (the memorySize function on the opcode).
+ scope.Memory.Copy(dst.Uint64(), src.Uint64(), length.Uint64())
+ return nil, nil
+}
+
+// opBlobHash implements the BLOBHASH opcode
+func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ index := scope.Stack.peek()
+ if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) {
+ blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()]
+ index.SetBytes32(blobHash[:])
+ } else {
+ index.Clear()
+ }
+ return nil, nil
+}
+
+// enable4844 applies EIP-4844 (BLOBHASH opcode)
+func enable4844(jt *JumpTable) {
+ // New opcode
+ jt[BLOBHASH] = &operation{
+ execute: opBlobHash,
+ constantGas: GasFastestStep,
+ minStack: minStack(1, 1),
+ maxStack: maxStack(1, 1),
+ }
+}
+
+// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT)
+func enable6780(jt *JumpTable) {
+ jt[SELFDESTRUCT] = &operation{
+ execute: opSelfdestruct6780,
+ dynamicGas: gasSelfdestructEIP3529,
+ constantGas: params.SelfdestructGasEIP150,
+ minStack: minStack(1, 0),
+ maxStack: maxStack(1, 0),
+ }
+}
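
opBlobHash reads the index from the stack top and replaces it in place; out-of-range indices (including an empty hash list) yield the zero word. A pure-Go model of that rule (hypothetical helper, not the interpreter code):

package sketch

import "github.com/ethereum/go-ethereum/common"

// blobHashAt models BLOBHASH: hashes[index] when in range, zero hash otherwise.
func blobHashAt(hashes []common.Hash, index uint64) common.Hash {
    if index < uint64(len(hashes)) {
        return hashes[index]
    }
    return common.Hash{}
}
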
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 45394be102..6a44887f5e 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -33,6 +33,7 @@ import (
"github.com/ava-labs/avalanchego/snow"
"github.com/ava-labs/subnet-evm/constants"
+ "github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/params"
"github.com/ava-labs/subnet-evm/precompile/contract"
"github.com/ava-labs/subnet-evm/precompile/contracts/deployerallowlist"
@@ -60,10 +61,6 @@ func IsProhibited(addr common.Address) bool {
return modules.ReservedAddress(addr)
}
-// emptyCodeHash is used by create to ensure deployment is disallowed to already
-// deployed contract addresses (relevant after the account abstraction).
-var emptyCodeHash = crypto.Keccak256Hash(nil)
-
type (
// CanTransferFunc is the signature of a transfer guard function
CanTransferFunc func(StateDB, common.Address, *big.Int) bool
@@ -77,6 +74,8 @@ type (
func (evm *EVM) precompile(addr common.Address) (contract.StatefulPrecompiledContract, bool) {
var precompiles map[common.Address]contract.StatefulPrecompiledContract
switch {
+ case evm.chainRules.IsCancun:
+ precompiles = PrecompiledContractsCancun
case evm.chainRules.IsSubnetEVM:
precompiles = PrecompiledContractsBerlin
case evm.chainRules.IsIstanbul:
@@ -117,12 +116,13 @@ type BlockContext struct {
PredicateResults *predicate.Results
// Block information
- Coinbase common.Address // Provides information for COINBASE
- GasLimit uint64 // Provides information for GASLIMIT
- BlockNumber *big.Int // Provides information for NUMBER
- Time uint64 // Provides information for TIME
- Difficulty *big.Int // Provides information for DIFFICULTY
- BaseFee *big.Int // Provides information for BASEFEE
+ Coinbase common.Address // Provides information for COINBASE
+ GasLimit uint64 // Provides information for GASLIMIT
+ BlockNumber *big.Int // Provides information for NUMBER
+ Time uint64 // Provides information for TIME
+ Difficulty *big.Int // Provides information for DIFFICULTY
+ BaseFee *big.Int // Provides information for BASEFEE
+ ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the blob gas price
}
func (b *BlockContext) Number() *big.Int {
@@ -144,8 +144,9 @@ func (b *BlockContext) GetPredicateResults(txHash common.Hash, address common.Ad
// All fields can change between transactions.
type TxContext struct {
// Message information
- Origin common.Address // Provides information for ORIGIN
- GasPrice *big.Int // Provides information for GASPRICE
+ Origin common.Address // Provides information for ORIGIN
+ GasPrice *big.Int // Provides information for GASPRICE
+ BlobHashes []common.Hash // Provides information for BLOBHASH
}
// EVM is the Ethereum Virtual Machine base object and provides
@@ -193,7 +194,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
StateDB: statedb,
Config: config,
chainConfig: chainConfig,
- chainRules: chainConfig.AvalancheRules(blockCtx.BlockNumber, blockCtx.Time),
+ chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Time),
}
evm.interpreter = NewEVMInterpreter(evm)
return evm
@@ -241,7 +242,8 @@ func (evm *EVM) Interpreter() *EVMInterpreter {
func (evm *EVM) SetBlockContext(blockCtx BlockContext) {
evm.Context = blockCtx
num := blockCtx.BlockNumber
- evm.chainRules = evm.chainConfig.AvalancheRules(num, blockCtx.Time)
+ timestamp := blockCtx.Time
+ evm.chainRules = evm.chainConfig.Rules(num, timestamp)
}
// Call executes the contract associated with the addr with the given input as
@@ -526,7 +528,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
}
// Ensure there's no existing contract already at the designated address
contractHash := evm.StateDB.GetCodeHash(address)
- if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) {
+ if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) {
return nil, common.Address{}, 0, vmerrs.ErrContractAddressCollision
}
// If the allow list is enabled, check that [evm.TxContext.Origin] has permission to deploy a contract.
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 36f0251301..a8bf40a326 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -71,6 +71,7 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) {
// as argument:
// CALLDATACOPY (stack position 2)
// CODECOPY (stack position 2)
+// MCOPY (stack position 2)
// EXTCODECOPY (stack position 3)
// RETURNDATACOPY (stack position 2)
func memoryCopierGas(stackpos int) gasFunc {
@@ -100,6 +101,7 @@ func memoryCopierGas(stackpos int) gasFunc {
var (
gasCallDataCopy = memoryCopierGas(2)
gasCodeCopy = memoryCopierGas(2)
+ gasMcopy = memoryCopierGas(2)
gasExtCodeCopy = memoryCopierGas(3)
gasReturnDataCopy = memoryCopierGas(2)
)
@@ -481,7 +483,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
}
}
- if !evm.StateDB.HasSuicided(contract.Address()) {
+ if !evm.StateDB.HasSelfDestructed(contract.Address()) {
evm.StateDB.AddRefund(params.SelfdestructRefundGas)
}
return gas, nil
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index e02712b887..d36de0f053 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -418,7 +418,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
// emptyCodeHash. If the precompile account is not transferred any amount on a private or
// customized chain, the return value will be zero.
//
-// 5. Caller tries to get the code hash for an account which is marked as suicided
+// 5. Caller tries to get the code hash for an account which is marked as self-destructed
// in the current transaction, the code hash of this account should be returned.
//
// 6. Caller tries to get the code hash for an account which is marked as deleted, this
@@ -824,7 +824,23 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
beneficiary := scope.Stack.pop()
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
- interpreter.evm.StateDB.Suicide(scope.Contract.Address())
+ interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address())
+ if tracer := interpreter.evm.Config.Tracer; tracer != nil {
+ tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
+ tracer.CaptureExit([]byte{}, 0, nil)
+ }
+ return nil, errStopToken
+}
+
+func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ if interpreter.readOnly {
+ return nil, vmerrs.ErrWriteProtection
+ }
+ beneficiary := scope.Stack.pop()
+ balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
+ interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance)
+ interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
+ interpreter.evm.StateDB.Selfdestruct6780(scope.Contract.Address())
if tracer := interpreter.evm.Config.Tracer; tracer != nil {
tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
tracer.CaptureExit([]byte{}, 0, nil)
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index ca8f46e146..ce36b18bc8 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -32,13 +32,16 @@ import (
"fmt"
"math/big"
"os"
+ "strings"
"testing"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/state"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/params"
+ "github.com/ava-labs/subnet-evm/vmerrs"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/holiman/uint256"
)
@@ -736,7 +739,7 @@ func TestRandom(t *testing.T) {
for _, tt := range []testcase{
{name: "empty hash", random: common.Hash{}},
{name: "1", random: common.Hash{0}},
- {name: "emptyCodeHash", random: emptyCodeHash},
+ {name: "emptyCodeHash", random: types.EmptyCodeHash},
{name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})},
} {
var (
@@ -760,3 +763,183 @@ func TestRandom(t *testing.T) {
}
}
}
+
+func TestBlobHash(t *testing.T) {
+ type testcase struct {
+ name string
+ idx uint64
+ expect common.Hash
+ hashes []common.Hash
+ }
+ var (
+ zero = common.Hash{0}
+ one = common.Hash{1}
+ two = common.Hash{2}
+ three = common.Hash{3}
+ )
+ for _, tt := range []testcase{
+ {name: "[{1}]", idx: 0, expect: one, hashes: []common.Hash{one}},
+ {name: "[1,{2},3]", idx: 2, expect: three, hashes: []common.Hash{one, two, three}},
+ {name: "out-of-bounds (empty)", idx: 10, expect: zero, hashes: []common.Hash{}},
+ {name: "out-of-bounds", idx: 25, expect: zero, hashes: []common.Hash{one, two, three}},
+ {name: "out-of-bounds (nil)", idx: 25, expect: zero, hashes: nil},
+ } {
+ var (
+ env = NewEVM(BlockContext{}, TxContext{BlobHashes: tt.hashes}, nil, params.TestChainConfig, Config{})
+ stack = newstack()
+ pc = uint64(0)
+ evmInterpreter = env.interpreter
+ )
+ stack.push(uint256.NewInt(tt.idx))
+ opBlobHash(&pc, evmInterpreter, &ScopeContext{nil, stack, nil})
+ if len(stack.data) != 1 {
+ t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
+ }
+ actual := stack.pop()
+ expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.expect.Bytes()))
+ if overflow {
+ t.Errorf("Testcase %v: invalid overflow", tt.name)
+ }
+ if actual.Cmp(expected) != 0 {
+ t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual)
+ }
+ }
+}
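The table above pins down the EIP-4844 BLOBHASH semantics: an index inside the transaction's blob-hash list pushes the corresponding versioned hash, and any out-of-range index (including an empty or nil list) pushes zero. The opcode body itself is not part of this hunk; a sketch consistent with these expectations, written as it would appear inside package vm, is:

// Sketch only (package vm): the real opBlobHash lives elsewhere in
// instructions.go; this is the behaviour the test table above implies.
func opBlobHashSketch(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
	index := scope.Stack.peek()
	if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) {
		blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()]
		index.SetBytes32(blobHash[:]) // replace the index in place with the versioned hash
	} else {
		index.Clear() // out of range: leave a zero on the stack
	}
	return nil, nil
}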
+
+func TestOpMCopy(t *testing.T) {
+ // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases
+ for i, tc := range []struct {
+ dst, src, len string
+ pre string
+ want string
+ wantGas uint64
+ }{
+ { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0.
+ dst: "0x0", src: "0x20", len: "0x20",
+ pre: "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ want: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ wantGas: 6,
+ },
+
+ { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0.
+ dst: "0x0", src: "0x0", len: "0x20",
+ pre: "0101010101010101010101010101010101010101010101010101010101010101",
+ want: "0101010101010101010101010101010101010101010101010101010101010101",
+ wantGas: 6,
+ },
+ { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping).
+ dst: "0x0", src: "0x1", len: "0x8",
+ pre: "000102030405060708 000000000000000000000000000000000000000000000000",
+ want: "010203040506070808 000000000000000000000000000000000000000000000000",
+ wantGas: 6,
+ },
+ { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping).
+ dst: "0x1", src: "0x0", len: "0x8",
+ pre: "000102030405060708 000000000000000000000000000000000000000000000000",
+ want: "000001020304050607 000000000000000000000000000000000000000000000000",
+ wantGas: 6,
+ },
+ // Tests below are not in the EIP, but maybe should be added
+ { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping).
+ dst: "0xFFFFFFFFFFFF", src: "0xFFFFFFFFFFFF", len: "0x0",
+ pre: "11",
+ want: "11",
+ wantGas: 3,
+ },
+ { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds.
+ dst: "0xFFFFFFFFFFFF", src: "0x0", len: "0x0",
+ pre: "11",
+ want: "11",
+ wantGas: 3,
+ },
+ { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem
+ dst: "0x0", src: "0xFFFFFFFFFFFF", len: "0x0",
+ pre: "11",
+ want: "11",
+ wantGas: 3,
+ },
+ { // MCOPY - copy 1 from space outside of uint64 space
+ dst: "0x0", src: "0x10000000000000000", len: "0x1",
+ pre: "0",
+ },
+ { // MCOPY - copy 1 from 0 to space outside of uint64
+ dst: "0x10000000000000000", src: "0x0", len: "0x1",
+ pre: "0",
+ },
+ { // MCOPY - copy nothing from 0 to space outside of uint64
+ dst: "0x10000000000000000", src: "0x0", len: "0x0",
+ pre: "",
+ want: "",
+ wantGas: 3,
+ },
+ { // MCOPY - copy 1 from 0x20 to 0x10, with no prior allocated mem
+ dst: "0x10", src: "0x20", len: "0x1",
+ pre: "",
+ // 64 bytes
+ want: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ wantGas: 12,
+ },
+ { // MCOPY - copy 1 from 0x19 to 0x10, with no prior allocated mem
+ dst: "0x10", src: "0x19", len: "0x1",
+ pre: "",
+ // 32 bytes
+ want: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ wantGas: 9,
+ },
+ } {
+ var (
+ env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
+ stack = newstack()
+ pc = uint64(0)
+ evmInterpreter = env.interpreter
+ )
+ data := common.FromHex(strings.ReplaceAll(tc.pre, " ", ""))
+ // Set pre
+ mem := NewMemory()
+ mem.Resize(uint64(len(data)))
+ mem.Set(0, uint64(len(data)), data)
+ // Push stack args
+ len, _ := uint256.FromHex(tc.len)
+ src, _ := uint256.FromHex(tc.src)
+ dst, _ := uint256.FromHex(tc.dst)
+
+ stack.push(len)
+ stack.push(src)
+ stack.push(dst)
+ wantErr := (tc.wantGas == 0)
+ // Calc mem expansion
+ var memorySize uint64
+ if memSize, overflow := memoryMcopy(stack); overflow {
+ if wantErr {
+ continue
+ }
+ t.Errorf("overflow")
+ } else {
+ var overflow bool
+ if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
+ t.Error(vmerrs.ErrGasUintOverflow)
+ }
+ }
+ // and the dynamic cost
+ var haveGas uint64
+ if dynamicCost, err := gasMcopy(env, nil, stack, mem, memorySize); err != nil {
+ t.Error(err)
+ } else {
+ haveGas = GasFastestStep + dynamicCost
+ }
+ // Expand mem
+ if memorySize > 0 {
+ mem.Resize(memorySize)
+ }
+ // Do the copy
+ opMcopy(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
+ want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
+ if have := mem.store; !bytes.Equal(want, have) {
+ t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have)
+ }
+ wantGas := tc.wantGas
+ if haveGas != wantGas {
+ t.Errorf("case %d: gas wrong, want %d have %d\n", i, wantGas, haveGas)
+ }
+ }
+}
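The wantGas values above follow from the standard EVM pricing that gasMcopy composes: memory-expansion cost (3 per word plus words²/512, charged only for growth beyond the current size) plus 3 gas per copied word, on top of MCOPY's constant GasFastestStep of 3. The 6-gas cases expand nothing because the test pre-sizes memory; the 12- and 9-gas cases pay for 2 and 1 newly allocated words respectively. A small check of that arithmetic:

package main

import "fmt"

func main() {
	words := func(n uint64) uint64 { return (n + 31) / 32 }
	memCost := func(w uint64) uint64 { return 3*w + w*w/512 } // total cost of a memory of w words

	// dst=0x10, src=0x20, len=1, no memory allocated beforehand:
	// required size = max(dst, src) + len = 0x21 = 33 bytes -> 2 words.
	expansion := memCost(words(0x21))     // 6 (memory grows from 0 to 2 words)
	copyCost := 3 * words(1)              // 3 gas per copied word
	fmt.Println(3 + expansion + copyCost) // GasFastestStep(3) + 6 + 3 = 12

	// dst=0x10, src=0x19, len=1: required size = 0x1a = 26 bytes -> 1 word.
	fmt.Println(3 + memCost(words(0x1a)) + 3*words(1)) // 3 + 3 + 3 = 9
}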
diff --git a/core/vm/interface.go b/core/vm/interface.go
index 5b91dfe379..34b3e714da 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -61,11 +61,13 @@ type StateDB interface {
GetTransientState(addr common.Address, key common.Hash) common.Hash
SetTransientState(addr common.Address, key, value common.Hash)
- Suicide(common.Address) bool
- HasSuicided(common.Address) bool
+ SelfDestruct(common.Address)
+ HasSelfDestructed(common.Address) bool
+
+ Selfdestruct6780(common.Address)
// Exist reports whether the given account exists in state.
- // Notably this should also return true for suicided accounts.
+ // Notably this should also return true for self-destructed accounts.
Exist(common.Address) bool
// Empty returns whether the given account is empty. Empty
// is defined according to EIP161 (balance = nonce = code = 0).
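The SelfDestruct/Selfdestruct6780 split is the StateDB half of EIP-6780: post-Cancun, SELFDESTRUCT deletes the account only if it was created in the same transaction; otherwise it just moves the balance, which opSelfdestruct6780 above already does before calling into the StateDB. The concrete implementation lives in core/state and is not in this diff; a rough sketch of the expected behaviour, assuming the state object tracks accounts created in the current transaction:

// exampleStateDB is a stand-in used only for this sketch; the real StateDB is
// in core/state and already implements SelfDestruct.
type exampleStateDB struct {
	createdInTx map[common.Address]bool // assumed bookkeeping of accounts created in the current tx
}

func (s *exampleStateDB) SelfDestruct(addr common.Address) {
	// The real implementation marks the account as self-destructed so it is
	// removed at the end of the transaction.
}

// Selfdestruct6780 destroys the account only when it was created in the same
// transaction (EIP-6780). For older accounts it does nothing here, because the
// balance transfer was already performed by opSelfdestruct6780.
func (s *exampleStateDB) Selfdestruct6780(addr common.Address) {
	if s.createdInTx[addr] {
		s.SelfDestruct(addr)
	}
}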
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index ef119ba26a..ecbe6bee73 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -67,6 +67,8 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter {
// If jump table was not initialised we set the default one.
var table *JumpTable
switch {
+ case evm.chainRules.IsCancun:
+ table = &cancunInstructionSet
case evm.chainRules.IsDurango:
table = &durangoInstructionSet
case evm.chainRules.IsSubnetEVM:
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index a8c2089c4b..458654145f 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -64,6 +64,7 @@ var (
istanbulInstructionSet = newIstanbulInstructionSet()
subnetEVMInstructionSet = newSubnetEVMInstructionSet()
durangoInstructionSet = newDurangoInstructionSet()
+ cancunInstructionSet = newCancunInstructionSet()
)
// JumpTable contains the EVM opcodes supported at a given fork.
@@ -87,12 +88,22 @@ func validate(jt JumpTable) JumpTable {
return jt
}
+func newCancunInstructionSet() JumpTable {
+ instructionSet := newDurangoInstructionSet()
+	enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
+ enable1153(&instructionSet) // EIP-1153 "Transient Storage"
+ enable5656(&instructionSet) // EIP-5656 (MCOPY opcode)
+ enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction
+ return validate(instructionSet)
+}
+
// newDurangoInstructionSet returns the frontier, homestead, byzantium,
// constantinople, istanbul, petersburg, subnet-evm, durango instructions.
func newDurangoInstructionSet() JumpTable {
instructionSet := newSubnetEVMInstructionSet()
enable3855(&instructionSet) // PUSH0 instruction
enable3860(&instructionSet) // Limit and meter initcode
+
return validate(instructionSet)
}
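newCancunInstructionSet only composes existing enable* helpers; enable5656 itself is not shown in this diff, but from the pieces added here (the MCOPY opcode, gasMcopy, memoryMcopy and opMcopy) its registration presumably looks like the following sketch (the actual function lives in core/vm/eips.go):

// Sketch (package vm) of what enable5656 presumably registers for MCOPY,
// based on the helpers added in this diff.
func enable5656Sketch(jt *JumpTable) {
	jt[MCOPY] = &operation{
		execute:     opMcopy,
		constantGas: GasFastestStep, // 3 gas static cost
		dynamicGas:  gasMcopy,       // copy cost + memory expansion
		minStack:    minStack(3, 0), // pops dst, src, length
		maxStack:    maxStack(3, 0),
		memorySize:  memoryMcopy, // max(dst, src) + length
	}
}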
diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go
index da095ce605..7fdef835d2 100644
--- a/core/vm/jump_table_export.go
+++ b/core/vm/jump_table_export.go
@@ -24,6 +24,8 @@ import (
// the rules.
func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
switch {
+ case rules.IsCancun:
+ return newCancunInstructionSet(), nil
case rules.IsDurango:
return newDurangoInstructionSet(), nil
case rules.IsSubnetEVM:
diff --git a/core/vm/memory.go b/core/vm/memory.go
index eb6bc89078..259b7bf463 100644
--- a/core/vm/memory.go
+++ b/core/vm/memory.go
@@ -113,3 +113,14 @@ func (m *Memory) Len() int {
func (m *Memory) Data() []byte {
return m.store
}
+
+// Copy copies data from the src position slice into the dst position.
+// The source and destination may overlap.
+// OBS: This operation assumes that any necessary memory expansion has already been performed,
+// and this method may panic otherwise.
+func (m *Memory) Copy(dst, src, len uint64) {
+ if len == 0 {
+ return
+ }
+ copy(m.store[dst:], m.store[src:src+len])
+}
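Memory.Copy leans on Go's built-in copy, which the language spec allows to be used with overlapping source and destination ranges of the same slice, so no scratch buffer is needed for the overlapping MCOPY cases exercised in memory_test.go below. A standalone illustration of that guarantee, matching the 8-byte overlapping case from the EIP:

package main

import "fmt"

func main() {
	// Go's built-in copy is defined for overlapping ranges of the same slice,
	// which is exactly what Memory.Copy relies on.
	store := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	copy(store[0:], store[1:1+8]) // copy 8 bytes from offset 1 to offset 0
	fmt.Printf("%x\n", store)     // 010203040506070808, as in the EIP-5656 test case
}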
diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go
index 2e30f7c5d0..0a2fbe1f9e 100644
--- a/core/vm/memory_table.go
+++ b/core/vm/memory_table.go
@@ -58,6 +58,14 @@ func memoryMStore(stack *Stack) (uint64, bool) {
return calcMemSize64WithUint(stack.Back(0), 32)
}
+func memoryMcopy(stack *Stack) (uint64, bool) {
+ mStart := stack.Back(0) // stack[0]: dest
+ if stack.Back(1).Gt(mStart) {
+ mStart = stack.Back(1) // stack[1]: source
+ }
+ return calcMemSize64(mStart, stack.Back(2)) // stack[2]: length
+}
+
func memoryCreate(stack *Stack) (uint64, bool) {
return calcMemSize64(stack.Back(1), stack.Back(2))
}
@@ -80,7 +88,6 @@ func memoryCall(stack *Stack) (uint64, bool) {
}
return y, false
}
-
func memoryDelegateCall(stack *Stack) (uint64, bool) {
x, overflow := calcMemSize64(stack.Back(4), stack.Back(5))
if overflow {
diff --git a/core/vm/memory_test.go b/core/vm/memory_test.go
new file mode 100644
index 0000000000..ba36f8023c
--- /dev/null
+++ b/core/vm/memory_test.go
@@ -0,0 +1,69 @@
+package vm
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func TestMemoryCopy(t *testing.T) {
+ // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases
+ for i, tc := range []struct {
+ dst, src, len uint64
+ pre string
+ want string
+ }{
+ { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0.
+ 0, 32, 32,
+ "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ },
+
+ { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0.
+ 0, 0, 32,
+ "0101010101010101010101010101010101010101010101010101010101010101",
+ "0101010101010101010101010101010101010101010101010101010101010101",
+ },
+ { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping).
+ 0, 1, 8,
+ "000102030405060708 000000000000000000000000000000000000000000000000",
+ "010203040506070808 000000000000000000000000000000000000000000000000",
+ },
+ { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping).
+ 1, 0, 8,
+ "000102030405060708 000000000000000000000000000000000000000000000000",
+ "000001020304050607 000000000000000000000000000000000000000000000000",
+ },
+ // Tests below are not in the EIP, but maybe should be added
+ { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping).
+ 0xFFFFFFFFFFFF, 0xFFFFFFFFFFFF, 0,
+ "11",
+ "11",
+ },
+ { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds.
+ 0xFFFFFFFFFFFF, 0, 0,
+ "11",
+ "11",
+ },
+ { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem
+ 0, 0xFFFFFFFFFFFF, 0,
+ "11",
+ "11",
+ },
+ } {
+ m := NewMemory()
+ // Clean spaces
+ data := common.FromHex(strings.ReplaceAll(tc.pre, " ", ""))
+ // Set pre
+ m.Resize(uint64(len(data)))
+ m.Set(0, uint64(len(data)), data)
+ // Do the copy
+ m.Copy(tc.dst, tc.src, tc.len)
+ want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
+ if have := m.store; !bytes.Equal(want, have) {
+ t.Errorf("case %d: want: %#x\nhave: %#x\n", i, want, have)
+ }
+ }
+}
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index 8cf30b9abf..bc638a8570 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -108,6 +108,7 @@ const (
CHAINID OpCode = 0x46
SELFBALANCE OpCode = 0x47
BASEFEE OpCode = 0x48
+ BLOBHASH OpCode = 0x49
)
// 0x50 range - 'storage' and execution.
@@ -124,6 +125,9 @@ const (
MSIZE OpCode = 0x59
GAS OpCode = 0x5a
JUMPDEST OpCode = 0x5b
+ TLOAD OpCode = 0x5c
+ TSTORE OpCode = 0x5d
+ MCOPY OpCode = 0x5e
PUSH0 OpCode = 0x5f
)
@@ -212,12 +216,6 @@ const (
LOG4
)
-// 0xb0 range.
-const (
- TLOAD OpCode = 0xb3
- TSTORE OpCode = 0xb4
-)
-
// 0xf0 range - closures.
const (
CREATE OpCode = 0xf0
@@ -296,6 +294,7 @@ var opCodeToString = map[OpCode]string{
CHAINID: "CHAINID",
SELFBALANCE: "SELFBALANCE",
BASEFEE: "BASEFEE",
+ BLOBHASH: "BLOBHASH",
// 0x50 range - 'storage' and execution.
POP: "POP",
@@ -310,6 +309,9 @@ var opCodeToString = map[OpCode]string{
MSIZE: "MSIZE",
GAS: "GAS",
JUMPDEST: "JUMPDEST",
+ TLOAD: "TLOAD",
+ TSTORE: "TSTORE",
+ MCOPY: "MCOPY",
PUSH0: "PUSH0",
// 0x60 range - pushes.
@@ -389,10 +391,6 @@ var opCodeToString = map[OpCode]string{
LOG3: "LOG3",
LOG4: "LOG4",
- // 0xb0 range.
- TLOAD: "TLOAD",
- TSTORE: "TSTORE",
-
// 0xf0 range - closures.
CREATE: "CREATE",
CALL: "CALL",
@@ -453,6 +451,7 @@ var stringToOp = map[string]OpCode{
"CALLDATACOPY": CALLDATACOPY,
"CHAINID": CHAINID,
"BASEFEE": BASEFEE,
+ "BLOBHASH": BLOBHASH,
"DELEGATECALL": DELEGATECALL,
"STATICCALL": STATICCALL,
"CODESIZE": CODESIZE,
@@ -482,6 +481,9 @@ var stringToOp = map[string]OpCode{
"MSIZE": MSIZE,
"GAS": GAS,
"JUMPDEST": JUMPDEST,
+ "TLOAD": TLOAD,
+ "TSTORE": TSTORE,
+ "MCOPY": MCOPY,
"PUSH0": PUSH0,
"PUSH1": PUSH1,
"PUSH2": PUSH2,
@@ -552,8 +554,6 @@ var stringToOp = map[string]OpCode{
"LOG2": LOG2,
"LOG3": LOG3,
"LOG4": LOG4,
- "TLOAD": TLOAD,
- "TSTORE": TSTORE,
"CREATE": CREATE,
"CREATE2": CREATE2,
"CALL": CALL,
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index cd68df29a8..7d8aec3741 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -35,62 +35,52 @@ import (
"github.com/ethereum/go-ethereum/common/math"
)
-// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929
-//
-// When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys.
-// If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys.
-// Additionally, modify the parameters defined in EIP 2200 as follows:
-//
-// Parameter Old value New value
-// SLOAD_GAS 800 = WARM_STORAGE_READ_COST
-// SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST
-//
-// The other parameters defined in EIP 2200 are unchanged.
-// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
-func gasSStoreEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
- // If we fail the minimum gas availability invariant, fail (0)
- if contract.Gas <= params.SstoreSentryGasEIP2200 {
- return 0, errors.New("not enough gas for reentrancy sentry")
- }
- // Gas sentry honoured, do the actual gas calculation based on the stored value
- var (
- y, x = stack.Back(1), stack.peek()
- slot = common.Hash(x.Bytes32())
- current = evm.StateDB.GetState(contract.Address(), slot)
- cost = uint64(0)
- )
- // Check slot presence in the access list
- if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent {
- cost = params.ColdSloadCostEIP2929
- // If the caller cannot afford the cost, this change will be rolled back
- evm.StateDB.AddSlotToAccessList(contract.Address(), slot)
- if !addrPresent {
- // Once we're done with YOLOv2 and schedule this for mainnet, might
- // be good to remove this panic here, which is just really a
- // canary to have during testing
- panic("impossible case: address was not present in access list during sstore op")
+func makeGasSStoreFunc() gasFunc {
+ return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ // If we fail the minimum gas availability invariant, fail (0)
+ if contract.Gas <= params.SstoreSentryGasEIP2200 {
+ return 0, errors.New("not enough gas for reentrancy sentry")
}
- }
- value := common.Hash(y.Bytes32())
+ // Gas sentry honoured, do the actual gas calculation based on the stored value
+ var (
+ y, x = stack.Back(1), stack.peek()
+ slot = common.Hash(x.Bytes32())
+ current = evm.StateDB.GetState(contract.Address(), slot)
+ cost = uint64(0)
+ )
+ // Check slot presence in the access list
+ if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent {
+ cost = params.ColdSloadCostEIP2929
+ // If the caller cannot afford the cost, this change will be rolled back
+ evm.StateDB.AddSlotToAccessList(contract.Address(), slot)
+ if !addrPresent {
+ // Once we're done with YOLOv2 and schedule this for mainnet, might
+ // be good to remove this panic here, which is just really a
+ // canary to have during testing
+ panic("impossible case: address was not present in access list during sstore op")
+ }
+ }
+ value := common.Hash(y.Bytes32())
- if current == value { // noop (1)
- // EIP 2200 original clause:
- // return params.SloadGasEIP2200, nil
- return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS
- }
- original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32())
- if original == current {
- if original == (common.Hash{}) { // create slot (2.1.1)
- return cost + params.SstoreSetGasEIP2200, nil
+ if current == value { // noop (1)
+ // EIP 2200 original clause:
+ // return params.SloadGasEIP2200, nil
+ return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS
}
+ original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32())
+ if original == current {
+ if original == (common.Hash{}) { // create slot (2.1.1)
+ return cost + params.SstoreSetGasEIP2200, nil
+ }
+ // EIP-2200 original clause:
+ // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2)
+ return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2)
+ }
+
// EIP-2200 original clause:
- // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2)
- return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2)
+ //return params.SloadGasEIP2200, nil // dirty update (2.2)
+ return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2)
}
-
- // EIP-2200 original clause:
- //return params.SloadGasEIP2200, nil // dirty update (2.2)
- return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2)
}
// gasSLoadEIP2929 calculates dynamic gas for SLOAD according to EIP-2929
@@ -194,22 +184,44 @@ var (
gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall)
gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall)
gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode)
+ gasSelfdestructEIP2929 = makeSelfdestructGasFn(false) // Note: refunds were never enabled on Avalanche
+	// gasSelfdestructEIP3529 implements the changes in EIP-3529 (no refunds)
+ gasSelfdestructEIP3529 = makeSelfdestructGasFn(false)
+ // gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929
+ //
+ // When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys.
+ // If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys.
+ // Additionally, modify the parameters defined in EIP 2200 as follows:
+ //
+ // Parameter Old value New value
+ // SLOAD_GAS 800 = WARM_STORAGE_READ_COST
+ // SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST
+ //
+	// The other parameters defined in EIP 2200 are unchanged.
+ // see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
+ gasSStoreEIP2929 = makeGasSStoreFunc()
)
-func gasSelfdestructEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
- var (
- gas uint64
- address = common.Address(stack.peek().Bytes20())
- )
- if !evm.StateDB.AddressInAccessList(address) {
- // If the caller cannot afford the cost, this change will be rolled back
- evm.StateDB.AddAddressToAccessList(address)
- gas = params.ColdAccountAccessCostEIP2929
- }
- // if empty and transfers value
- if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
- gas += params.CreateBySelfdestructGas
+// makeSelfdestructGasFn can create the selfdestruct dynamic gas function for EIP-2929 and EIP-3529
+func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
+ gasFunc := func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ var (
+ gas uint64
+ address = common.Address(stack.peek().Bytes20())
+ )
+ if !evm.StateDB.AddressInAccessList(address) {
+ // If the caller cannot afford the cost, this change will be rolled back
+ evm.StateDB.AddAddressToAccessList(address)
+ gas = params.ColdAccountAccessCostEIP2929
+ }
+ // if empty and transfers value
+ if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
+ gas += params.CreateBySelfdestructGas
+ }
+ if refundsEnabled && !evm.StateDB.HasSelfDestructed(contract.Address()) {
+ evm.StateDB.AddRefund(params.SelfdestructRefundGas)
+ }
+ return gas, nil
}
-
- return gas, nil
+ return gasFunc
}
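The factory refactor keeps the EIP-2929 SSTORE charging unchanged: a slot not yet in the access list always adds COLD_SLOAD_COST, and the EIP-2200 cases are then priced with WARM_STORAGE_READ_COST in place of SLOAD_GAS. Worked numbers under the standard parameter values (2100 cold, 100 warm):

package main

import "fmt"

func main() {
	// Standard EIP-2929 values (params.ColdSloadCostEIP2929 and
	// params.WarmStorageReadCostEIP2929 in this codebase).
	const (
		coldSload = 2100
		warmRead  = 100
	)
	// No-op SSTORE (new value == current value) on a slot not yet in the
	// access list: cold surcharge plus the warm read that replaces SLOAD_GAS.
	fmt.Println(coldSload + warmRead) // 2200
	// The same no-op write once the slot is warm: only the warm read.
	fmt.Println(warmRead) // 100
}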
diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go
index 07a1dd7923..a3cd09570c 100644
--- a/core/vm/runtime/env.go
+++ b/core/vm/runtime/env.go
@@ -33,8 +33,9 @@ import (
func NewEnv(cfg *Config) *vm.EVM {
txContext := vm.TxContext{
- Origin: cfg.Origin,
- GasPrice: cfg.GasPrice,
+ Origin: cfg.Origin,
+ GasPrice: cfg.GasPrice,
+ BlobHashes: cfg.BlobHashes,
}
blockContext := vm.BlockContext{
CanTransfer: core.CanTransfer,
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index b1ed7667b7..fbe5b0c783 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -54,6 +54,7 @@ type Config struct {
Debug bool
EVMConfig vm.Config
BaseFee *big.Int
+ BlobHashes []common.Hash
State *state.StateDB
GetHashFn func(n uint64) common.Hash
@@ -122,7 +123,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
address = common.BytesToAddress([]byte("contract"))
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
- rules = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time)
+ rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time)
)
// Execute the preparatory steps for state transition which includes:
// - prepare accessList(post-berlin)
@@ -156,7 +157,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
var (
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
- rules = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time)
+ rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time)
)
// Execute the preparatory steps for state transition which includes:
// - prepare accessList(post-berlin)
@@ -185,7 +186,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
vmenv = NewEnv(cfg)
sender = cfg.State.GetOrNewStateObject(cfg.Origin)
statedb = cfg.State
- rules = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time)
+ rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time)
)
// Execute the preparatory steps for state transition which includes:
// - prepare accessList(post-berlin)
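With BlobHashes plumbed through runtime.Config and NewEnv, the BLOBHASH opcode can be exercised directly from the runtime helpers. A minimal sketch, assuming the chain config in effect (defaulted or explicitly set) activates the Cancun rule set; otherwise BLOBHASH and PUSH0 are rejected as invalid opcodes:

package main

import (
	"fmt"

	"github.com/ava-labs/subnet-evm/core/vm/runtime"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// PUSH0; BLOBHASH; PUSH0; MSTORE; PUSH1 32; PUSH0; RETURN
	// i.e. return the versioned hash at blob index 0.
	code := []byte{0x5f, 0x49, 0x5f, 0x52, 0x60, 0x20, 0x5f, 0xf3}
	cfg := &runtime.Config{
		BlobHashes: []common.Hash{{0xab}},
		// ChainConfig must select rules with Cancun active for BLOBHASH and
		// PUSH0 to be valid; the zero-value config may not do so on its own.
	}
	ret, _, err := runtime.Execute(code, nil, cfg)
	fmt.Printf("ret=%x err=%v\n", ret, err)
}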
diff --git a/core/vm/testdata/precompiles/pointEvaluation.json b/core/vm/testdata/precompiles/pointEvaluation.json
new file mode 100644
index 0000000000..93fc66d836
--- /dev/null
+++ b/core/vm/testdata/precompiles/pointEvaluation.json
@@ -0,0 +1,9 @@
+[
+ {
+ "Input": "01d18459b334ffe8e2226eef1db874fda6db2bdd9357268b39220af2d59464fb564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d3630624d25032e67a7e6a4910df5834b8fe70e6bcfeeac0352434196bdf4b2485d5a1978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806942307f266e636553e94006d11423f2688945ff3bdf515859eba1005c1a7708d620a94d91a1c0c285f9584e75ec2f82a",
+ "Expected": "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
+ "Name": "pointEvaluation1",
+ "Gas": 50000,
+ "NoBenchmark": false
+ }
+]
diff --git a/eth/api.go b/eth/api.go
index 993f8faf4e..5842cb5625 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -27,26 +27,7 @@
package eth
import (
- "compress/gzip"
- "context"
- "errors"
- "fmt"
- "io"
- "os"
- "strings"
- "time"
-
- "github.com/ava-labs/subnet-evm/core"
- "github.com/ava-labs/subnet-evm/core/rawdb"
- "github.com/ava-labs/subnet-evm/core/state"
- "github.com/ava-labs/subnet-evm/core/types"
- "github.com/ava-labs/subnet-evm/internal/ethapi"
- "github.com/ava-labs/subnet-evm/rpc"
- "github.com/ava-labs/subnet-evm/trie"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
)
// EthereumAPI provides an API to access Ethereum full node-related information.
@@ -59,427 +40,12 @@ func NewEthereumAPI(e *Ethereum) *EthereumAPI {
return &EthereumAPI{e}
}
-// Etherbase is the address that mining rewards will be send to.
+// Etherbase is the address that mining rewards will be sent to.
func (api *EthereumAPI) Etherbase() (common.Address, error) {
return api.e.Etherbase()
}
-// Coinbase is the address that mining rewards will be send to (alias for Etherbase).
+// Coinbase is the address that mining rewards will be sent to (alias for Etherbase).
func (api *EthereumAPI) Coinbase() (common.Address, error) {
return api.Etherbase()
}
-
-// AdminAPI is the collection of Ethereum full node related APIs for node
-// administration.
-type AdminAPI struct {
- eth *Ethereum
-}
-
-// NewAdminAPI creates a new instance of AdminAPI.
-func NewAdminAPI(eth *Ethereum) *AdminAPI {
- return &AdminAPI{eth: eth}
-}
-
-// ExportChain exports the current blockchain into a local file,
-// or a range of blocks if first and last are non-nil.
-func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
- if first == nil && last != nil {
- return false, errors.New("last cannot be specified without first")
- }
- if first != nil && last == nil {
- head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
- last = &head
- }
- if _, err := os.Stat(file); err == nil {
- // File already exists. Allowing overwrite could be a DoS vector,
- // since the 'file' may point to arbitrary paths on the drive.
- return false, errors.New("location would overwrite an existing file")
- }
- // Make sure we can create the file to export into
- out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return false, err
- }
- defer out.Close()
-
- var writer io.Writer = out
- if strings.HasSuffix(file, ".gz") {
- writer = gzip.NewWriter(writer)
- defer writer.(*gzip.Writer).Close()
- }
-
- // Export the blockchain
- if first != nil {
- if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
- return false, err
- }
- } else if err := api.eth.BlockChain().Export(writer); err != nil {
- return false, err
- }
- return true, nil
-}
-
-func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
- for _, b := range bs {
- if !chain.HasBlock(b.Hash(), b.NumberU64()) {
- return false
- }
- }
-
- return true
-}
-
-// ImportChain imports a blockchain from a local file.
-func (api *AdminAPI) ImportChain(file string) (bool, error) {
- // Make sure the can access the file to import
- in, err := os.Open(file)
- if err != nil {
- return false, err
- }
- defer in.Close()
-
- var reader io.Reader = in
- if strings.HasSuffix(file, ".gz") {
- if reader, err = gzip.NewReader(reader); err != nil {
- return false, err
- }
- }
-
- // Run actual the import in pre-configured batches
- stream := rlp.NewStream(reader, 0)
-
- blocks, index := make([]*types.Block, 0, 2500), 0
- for batch := 0; ; batch++ {
- // Load a batch of blocks from the input file
- for len(blocks) < cap(blocks) {
- block := new(types.Block)
- if err := stream.Decode(block); err == io.EOF {
- break
- } else if err != nil {
- return false, fmt.Errorf("block %d: failed to parse: %v", index, err)
- }
- blocks = append(blocks, block)
- index++
- }
- if len(blocks) == 0 {
- break
- }
-
- if hasAllBlocks(api.eth.BlockChain(), blocks) {
- blocks = blocks[:0]
- continue
- }
- // Import the batch and reset the buffer
- if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil {
- return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err)
- }
- blocks = blocks[:0]
- }
- return true, nil
-}
-
-// DebugAPI is the collection of Ethereum full node APIs for debugging the
-// protocol.
-type DebugAPI struct {
- eth *Ethereum
-}
-
-// NewDebugAPI creates a new DebugAPI instance.
-func NewDebugAPI(eth *Ethereum) *DebugAPI {
- return &DebugAPI{eth: eth}
-}
-
-// DumpBlock retrieves the entire state of the database at a given block.
-func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
- opts := &state.DumpConfig{
- OnlyWithAddresses: true,
- Max: AccountRangeMaxResults, // Sanity limit over RPC
- }
- var header *types.Header
- if blockNr.IsAccepted() {
- if api.eth.APIBackend.isLatestAndAllowed(blockNr) {
- header = api.eth.blockchain.CurrentHeader()
- } else {
- header = api.eth.LastAcceptedBlock().Header()
- }
- } else {
- block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
- if block == nil {
- return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
- }
- header = block.Header()
- }
- if header == nil {
- return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
- }
- stateDb, err := api.eth.BlockChain().StateAt(header.Root)
- if err != nil {
- return state.Dump{}, err
- }
- return stateDb.RawDump(opts), nil
-}
-
-// Preimage is a debug API function that returns the preimage for a sha3 hash, if known.
-func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
- if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil {
- return preimage, nil
- }
- return nil, errors.New("unknown preimage")
-}
-
-// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
-// and returns them as a JSON list of block hashes.
-func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*ethapi.BadBlockArgs, error) {
- internalAPI := ethapi.NewBlockChainAPI(api.eth.APIBackend)
- return internalAPI.GetBadBlocks(ctx)
-}
-
-// AccountRangeMaxResults is the maximum number of results to be returned per call
-const AccountRangeMaxResults = 256
-
-// AccountRange enumerates all accounts in the given block and start point in paging request
-func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) {
- var stateDb *state.StateDB
- var err error
-
- if number, ok := blockNrOrHash.Number(); ok {
- var header *types.Header
- if number.IsAccepted() {
- if api.eth.APIBackend.isLatestAndAllowed(number) {
- header = api.eth.blockchain.CurrentHeader()
- } else {
- header = api.eth.LastAcceptedBlock().Header()
- }
- } else {
- block := api.eth.blockchain.GetBlockByNumber(uint64(number))
- if block == nil {
- return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
- }
- header = block.Header()
- }
- if header == nil {
- return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
- }
- stateDb, err = api.eth.BlockChain().StateAt(header.Root)
- if err != nil {
- return state.IteratorDump{}, err
- }
- } else if hash, ok := blockNrOrHash.Hash(); ok {
- block := api.eth.blockchain.GetBlockByHash(hash)
- if block == nil {
- return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex())
- }
- stateDb, err = api.eth.BlockChain().StateAt(block.Root())
- if err != nil {
- return state.IteratorDump{}, err
- }
- } else {
- return state.IteratorDump{}, errors.New("either block number or block hash must be specified")
- }
-
- opts := &state.DumpConfig{
- SkipCode: nocode,
- SkipStorage: nostorage,
- OnlyWithAddresses: !incompletes,
- Start: start,
- Max: uint64(maxResults),
- }
- if maxResults > AccountRangeMaxResults || maxResults <= 0 {
- opts.Max = AccountRangeMaxResults
- }
- return stateDb.IteratorDump(opts), nil
-}
-
-// StorageRangeResult is the result of a debug_storageRangeAt API call.
-type StorageRangeResult struct {
- Storage storageMap `json:"storage"`
- NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie.
-}
-
-type storageMap map[common.Hash]storageEntry
-
-type storageEntry struct {
- Key *common.Hash `json:"key"`
- Value common.Hash `json:"value"`
-}
-
-// StorageRangeAt returns the storage at the given block height and transaction index.
-func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) {
- // Retrieve the block
- block := api.eth.blockchain.GetBlockByHash(blockHash)
- if block == nil {
- return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash)
- }
- _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0)
- if err != nil {
- return StorageRangeResult{}, err
- }
- defer release()
-
- st, err := statedb.StorageTrie(contractAddress)
- if err != nil {
- return StorageRangeResult{}, err
- }
- if st == nil {
- return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress)
- }
- return storageRangeAt(st, keyStart, maxResult)
-}
-
-func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {
- it := trie.NewIterator(st.NodeIterator(start))
- result := StorageRangeResult{Storage: storageMap{}}
- for i := 0; i < maxResult && it.Next(); i++ {
- _, content, _, err := rlp.Split(it.Value)
- if err != nil {
- return StorageRangeResult{}, err
- }
- e := storageEntry{Value: common.BytesToHash(content)}
- if preimage := st.GetKey(it.Key); preimage != nil {
- preimage := common.BytesToHash(preimage)
- e.Key = &preimage
- }
- result.Storage[common.BytesToHash(it.Key)] = e
- }
- // Add the 'next key' so clients can continue downloading.
- if it.Next() {
- next := common.BytesToHash(it.Key)
- result.NextKey = &next
- }
- return result, nil
-}
-
-// GetModifiedAccountsByNumber returns all accounts that have changed between the
-// two blocks specified. A change is defined as a difference in nonce, balance,
-// code hash, or storage hash.
-//
-// With one parameter, returns the list of accounts modified in the specified block.
-func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {
- var startBlock, endBlock *types.Block
-
- startBlock = api.eth.blockchain.GetBlockByNumber(startNum)
- if startBlock == nil {
- return nil, fmt.Errorf("start block %x not found", startNum)
- }
-
- if endNum == nil {
- endBlock = startBlock
- startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
- if startBlock == nil {
- return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
- }
- } else {
- endBlock = api.eth.blockchain.GetBlockByNumber(*endNum)
- if endBlock == nil {
- return nil, fmt.Errorf("end block %d not found", *endNum)
- }
- }
- return api.getModifiedAccounts(startBlock, endBlock)
-}
-
-// GetModifiedAccountsByHash returns all accounts that have changed between the
-// two blocks specified. A change is defined as a difference in nonce, balance,
-// code hash, or storage hash.
-//
-// With one parameter, returns the list of accounts modified in the specified block.
-func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {
- var startBlock, endBlock *types.Block
- startBlock = api.eth.blockchain.GetBlockByHash(startHash)
- if startBlock == nil {
- return nil, fmt.Errorf("start block %x not found", startHash)
- }
-
- if endHash == nil {
- endBlock = startBlock
- startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
- if startBlock == nil {
- return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
- }
- } else {
- endBlock = api.eth.blockchain.GetBlockByHash(*endHash)
- if endBlock == nil {
- return nil, fmt.Errorf("end block %x not found", *endHash)
- }
- }
- return api.getModifiedAccounts(startBlock, endBlock)
-}
-
-func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {
- if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
- return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
- }
- triedb := api.eth.BlockChain().StateCache().TrieDB()
-
- oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb)
- if err != nil {
- return nil, err
- }
- newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb)
- if err != nil {
- return nil, err
- }
- diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
- iter := trie.NewIterator(diff)
-
- var dirty []common.Address
- for iter.Next() {
- key := newTrie.GetKey(iter.Key)
- if key == nil {
- return nil, fmt.Errorf("no preimage found for hash %x", iter.Key)
- }
- dirty = append(dirty, common.BytesToAddress(key))
- }
- return dirty, nil
-}
-
-// GetAccessibleState returns the first number where the node has accessible
-// state on disk. Note this being the post-state of that block and the pre-state
-// of the next block.
-// The (from, to) parameters are the sequence of blocks to search, which can go
-// either forwards or backwards
-func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) {
- var resolveNum = func(num rpc.BlockNumber) (uint64, error) {
- // We don't have state for pending (-2), so treat it as latest
- if num.Int64() < 0 {
- block := api.eth.blockchain.CurrentBlock()
- if block == nil {
- return 0, errors.New("current block missing")
- }
- return block.Number.Uint64(), nil
- }
- return uint64(num.Int64()), nil
- }
- var (
- start uint64
- end uint64
- delta = int64(1)
- lastLog time.Time
- err error
- )
- if start, err = resolveNum(from); err != nil {
- return 0, err
- }
- if end, err = resolveNum(to); err != nil {
- return 0, err
- }
- if start == end {
- return 0, errors.New("from and to needs to be different")
- }
- if start > end {
- delta = -1
- }
- for i := int64(start); i != int64(end); i += delta {
- if time.Since(lastLog) > 8*time.Second {
- log.Info("Finding roots", "from", start, "to", end, "at", i)
- lastLog = time.Now()
- }
- h := api.eth.BlockChain().GetHeaderByNumber(uint64(i))
- if h == nil {
- return 0, fmt.Errorf("missing header %d", i)
- }
- if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok {
- return uint64(i), nil
- }
- }
- return 0, errors.New("no state found")
-}
diff --git a/eth/api_admin.go b/eth/api_admin.go
new file mode 100644
index 0000000000..e1fe683c01
--- /dev/null
+++ b/eth/api_admin.go
@@ -0,0 +1,149 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/ava-labs/subnet-evm/core"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// AdminAPI is the collection of Ethereum full node related APIs for node
+// administration.
+type AdminAPI struct {
+ eth *Ethereum
+}
+
+// NewAdminAPI creates a new instance of AdminAPI.
+func NewAdminAPI(eth *Ethereum) *AdminAPI {
+ return &AdminAPI{eth: eth}
+}
+
+// ExportChain exports the current blockchain into a local file,
+// or a range of blocks if first and last are non-nil.
+func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
+ if first == nil && last != nil {
+ return false, errors.New("last cannot be specified without first")
+ }
+ if first != nil && last == nil {
+ head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
+ last = &head
+ }
+ if _, err := os.Stat(file); err == nil {
+ // File already exists. Allowing overwrite could be a DoS vector,
+ // since the 'file' may point to arbitrary paths on the drive.
+ return false, errors.New("location would overwrite an existing file")
+ }
+ // Make sure we can create the file to export into
+ out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+ if err != nil {
+ return false, err
+ }
+ defer out.Close()
+
+ var writer io.Writer = out
+ if strings.HasSuffix(file, ".gz") {
+ writer = gzip.NewWriter(writer)
+ defer writer.(*gzip.Writer).Close()
+ }
+
+ // Export the blockchain
+ if first != nil {
+ if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
+ return false, err
+ }
+ } else if err := api.eth.BlockChain().Export(writer); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
+ for _, b := range bs {
+ if !chain.HasBlock(b.Hash(), b.NumberU64()) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// ImportChain imports a blockchain from a local file.
+func (api *AdminAPI) ImportChain(file string) (bool, error) {
+	// Make sure we can access the file to import
+ in, err := os.Open(file)
+ if err != nil {
+ return false, err
+ }
+ defer in.Close()
+
+ var reader io.Reader = in
+ if strings.HasSuffix(file, ".gz") {
+ if reader, err = gzip.NewReader(reader); err != nil {
+ return false, err
+ }
+ }
+
+	// Run the actual import in pre-configured batches
+ stream := rlp.NewStream(reader, 0)
+
+ blocks, index := make([]*types.Block, 0, 2500), 0
+ for batch := 0; ; batch++ {
+ // Load a batch of blocks from the input file
+ for len(blocks) < cap(blocks) {
+ block := new(types.Block)
+ if err := stream.Decode(block); err == io.EOF {
+ break
+ } else if err != nil {
+ return false, fmt.Errorf("block %d: failed to parse: %v", index, err)
+ }
+ blocks = append(blocks, block)
+ index++
+ }
+ if len(blocks) == 0 {
+ break
+ }
+
+ if hasAllBlocks(api.eth.BlockChain(), blocks) {
+ blocks = blocks[:0]
+ continue
+ }
+ // Import the batch and reset the buffer
+ if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil {
+ return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err)
+ }
+ blocks = blocks[:0]
+ }
+ return true, nil
+}
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 5e660bebad..edf78810fd 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -40,6 +40,7 @@ import (
"github.com/ava-labs/subnet-evm/core/bloombits"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/txpool"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/core/vm"
"github.com/ava-labs/subnet-evm/eth/gasprice"
@@ -53,7 +54,7 @@ import (
var ErrUnfinalizedData = errors.New("cannot query unfinalized data")
-// EthAPIBackend implements ethapi.Backend for full nodes
+// EthAPIBackend implements ethapi.Backend and tracers.Backend for full nodes
type EthAPIBackend struct {
extRPCEnabled bool
allowUnprotectedTxs bool
@@ -336,7 +337,7 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction)
if err := ctx.Err(); err != nil {
return err
}
- if err := b.eth.txPool.AddLocal(signedTx); err != nil {
+ if err := b.eth.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, false)[0]; err != nil {
return err
}
@@ -350,13 +351,20 @@ func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
pending := b.eth.txPool.Pending(false)
var txs types.Transactions
for _, batch := range pending {
- txs = append(txs, batch...)
+ for _, lazy := range batch {
+ if tx := lazy.Resolve(); tx != nil {
+ txs = append(txs, tx.Tx)
+ }
+ }
}
return txs, nil
}
func (b *EthAPIBackend) GetPoolTransaction(hash common.Hash) *types.Transaction {
- return b.eth.txPool.Get(hash)
+ if tx := b.eth.txPool.Get(hash); tx != nil {
+ return tx.Tx
+ }
+ return nil
}
func (b *EthAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
@@ -382,15 +390,15 @@ func (b *EthAPIBackend) GetPoolNonce(ctx context.Context, addr common.Address) (
return b.eth.txPool.Nonce(addr), nil
}
-func (b *EthAPIBackend) Stats() (pending int, queued int) {
+func (b *EthAPIBackend) Stats() (runnable int, blocked int) {
return b.eth.txPool.Stats()
}
-func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
+func (b *EthAPIBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
return b.eth.txPool.Content()
}
-func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
+func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
return b.eth.txPool.ContentFrom(addr)
}
diff --git a/eth/api_debug.go b/eth/api_debug.go
new file mode 100644
index 0000000000..50d7b140ec
--- /dev/null
+++ b/eth/api_debug.go
@@ -0,0 +1,363 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/internal/ethapi"
+ "github.com/ava-labs/subnet-evm/rpc"
+ "github.com/ava-labs/subnet-evm/trie"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// DebugAPI is the collection of Ethereum full node APIs for debugging the
+// protocol.
+type DebugAPI struct {
+ eth *Ethereum
+}
+
+// NewDebugAPI creates a new DebugAPI instance.
+func NewDebugAPI(eth *Ethereum) *DebugAPI {
+ return &DebugAPI{eth: eth}
+}
+
+// DumpBlock retrieves the entire state of the database at a given block.
+func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
+ opts := &state.DumpConfig{
+ OnlyWithAddresses: true,
+ Max: AccountRangeMaxResults, // Sanity limit over RPC
+ }
+ var header *types.Header
+ if blockNr.IsAccepted() {
+ if api.eth.APIBackend.isLatestAndAllowed(blockNr) {
+ header = api.eth.blockchain.CurrentHeader()
+ } else {
+ header = api.eth.LastAcceptedBlock().Header()
+ }
+ } else {
+ block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
+ if block == nil {
+ return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
+ }
+ header = block.Header()
+ }
+ if header == nil {
+ return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
+ }
+ stateDb, err := api.eth.BlockChain().StateAt(header.Root)
+ if err != nil {
+ return state.Dump{}, err
+ }
+ return stateDb.RawDump(opts), nil
+}
+
+// Preimage is a debug API function that returns the preimage for a sha3 hash, if known.
+func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
+ if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil {
+ return preimage, nil
+ }
+ return nil, errors.New("unknown preimage")
+}
+
+// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
+// and returns them as a JSON list of block hashes.
+func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*ethapi.BadBlockArgs, error) {
+ internalAPI := ethapi.NewBlockChainAPI(api.eth.APIBackend)
+ return internalAPI.GetBadBlocks(ctx)
+}
+
+// AccountRangeMaxResults is the maximum number of results to be returned per call
+const AccountRangeMaxResults = 256
+
+// AccountRange enumerates all accounts in the given block and start point in paging request
+func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) {
+ var stateDb *state.StateDB
+ var err error
+
+ if number, ok := blockNrOrHash.Number(); ok {
+ var header *types.Header
+ if number.IsAccepted() {
+ if api.eth.APIBackend.isLatestAndAllowed(number) {
+ header = api.eth.blockchain.CurrentHeader()
+ } else {
+ header = api.eth.LastAcceptedBlock().Header()
+ }
+ } else {
+ block := api.eth.blockchain.GetBlockByNumber(uint64(number))
+ if block == nil {
+ return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
+ }
+ header = block.Header()
+ }
+ if header == nil {
+ return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
+ }
+ stateDb, err = api.eth.BlockChain().StateAt(header.Root)
+ if err != nil {
+ return state.IteratorDump{}, err
+ }
+ } else if hash, ok := blockNrOrHash.Hash(); ok {
+ block := api.eth.blockchain.GetBlockByHash(hash)
+ if block == nil {
+ return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex())
+ }
+ stateDb, err = api.eth.BlockChain().StateAt(block.Root())
+ if err != nil {
+ return state.IteratorDump{}, err
+ }
+ } else {
+ return state.IteratorDump{}, errors.New("either block number or block hash must be specified")
+ }
+
+ opts := &state.DumpConfig{
+ SkipCode: nocode,
+ SkipStorage: nostorage,
+ OnlyWithAddresses: !incompletes,
+ Start: start,
+ Max: uint64(maxResults),
+ }
+ if maxResults > AccountRangeMaxResults || maxResults <= 0 {
+ opts.Max = AccountRangeMaxResults
+ }
+ return stateDb.IteratorDump(opts), nil
+}
+
+// StorageRangeResult is the result of a debug_storageRangeAt API call.
+type StorageRangeResult struct {
+ Storage storageMap `json:"storage"`
+ NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie.
+}
+
+type storageMap map[common.Hash]storageEntry
+
+type storageEntry struct {
+ Key *common.Hash `json:"key"`
+ Value common.Hash `json:"value"`
+}
+
+// StorageRangeAt returns the storage at the given block height and transaction index.
+func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) {
+ // Retrieve the block
+ block := api.eth.blockchain.GetBlockByHash(blockHash)
+ if block == nil {
+ return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash)
+ }
+ _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ defer release()
+
+ st, err := statedb.StorageTrie(contractAddress)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ if st == nil {
+ return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress)
+ }
+ return storageRangeAt(st, keyStart, maxResult)
+}
+
+func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {
+ trieIt, err := st.NodeIterator(start)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ it := trie.NewIterator(trieIt)
+ result := StorageRangeResult{Storage: storageMap{}}
+ for i := 0; i < maxResult && it.Next(); i++ {
+ _, content, _, err := rlp.Split(it.Value)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ e := storageEntry{Value: common.BytesToHash(content)}
+ if preimage := st.GetKey(it.Key); preimage != nil {
+ preimage := common.BytesToHash(preimage)
+ e.Key = &preimage
+ }
+ result.Storage[common.BytesToHash(it.Key)] = e
+ }
+ // Add the 'next key' so clients can continue downloading.
+ if it.Next() {
+ next := common.BytesToHash(it.Key)
+ result.NextKey = &next
+ }
+ return result, nil
+}
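
Because NextKey is handed back to the caller, a client can walk a contract's entire storage by feeding it into the next request's keyStart. Below is a small paging sketch over JSON-RPC; the endpoint, block hash, and contract address are placeholders to fill in, and the debug namespace is assumed to be enabled.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ava-labs/subnet-evm/rpc"
	"github.com/ethereum/go-ethereum/common"
)

type storageRangeResult struct {
	Storage map[common.Hash]struct {
		Key   *common.Hash `json:"key"`
		Value common.Hash  `json:"value"`
	} `json:"storage"`
	NextKey *common.Hash `json:"nextKey"` // nil once the last key has been returned
}

func main() {
	client, err := rpc.Dial("http://127.0.0.1:9650/ext/bc/mychain/rpc") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var (
		blockHash common.Hash    // fill in a real block hash
		contract  common.Address // fill in a real contract address
		keyStart  = "0x"         // empty start key: begin at the first slot
		total     int
	)
	for {
		var page storageRangeResult
		// Args mirror StorageRangeAt: block hash, tx index, contract, keyStart, maxResult.
		if err := client.CallContext(context.Background(), &page, "debug_storageRangeAt",
			blockHash, 0, contract, keyStart, 100); err != nil {
			log.Fatal(err)
		}
		total += len(page.Storage)
		if page.NextKey == nil {
			break
		}
		keyStart = page.NextKey.Hex()
	}
	fmt.Println("fetched", total, "storage slots")
}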
+
+// GetModifiedAccountsByNumber returns all accounts that have changed between the
+// two blocks specified. A change is defined as a difference in nonce, balance,
+// code hash, or storage hash.
+//
+// With one parameter, returns the list of accounts modified in the specified block.
+func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {
+ var startBlock, endBlock *types.Block
+
+ startBlock = api.eth.blockchain.GetBlockByNumber(startNum)
+ if startBlock == nil {
+ return nil, fmt.Errorf("start block %x not found", startNum)
+ }
+
+ if endNum == nil {
+ endBlock = startBlock
+ startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
+ if startBlock == nil {
+ return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
+ }
+ } else {
+ endBlock = api.eth.blockchain.GetBlockByNumber(*endNum)
+ if endBlock == nil {
+ return nil, fmt.Errorf("end block %d not found", *endNum)
+ }
+ }
+ return api.getModifiedAccounts(startBlock, endBlock)
+}
+
+// GetModifiedAccountsByHash returns all accounts that have changed between the
+// two blocks specified. A change is defined as a difference in nonce, balance,
+// code hash, or storage hash.
+//
+// With one parameter, returns the list of accounts modified in the specified block.
+func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {
+ var startBlock, endBlock *types.Block
+ startBlock = api.eth.blockchain.GetBlockByHash(startHash)
+ if startBlock == nil {
+ return nil, fmt.Errorf("start block %x not found", startHash)
+ }
+
+ if endHash == nil {
+ endBlock = startBlock
+ startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
+ if startBlock == nil {
+ return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
+ }
+ } else {
+ endBlock = api.eth.blockchain.GetBlockByHash(*endHash)
+ if endBlock == nil {
+ return nil, fmt.Errorf("end block %x not found", *endHash)
+ }
+ }
+ return api.getModifiedAccounts(startBlock, endBlock)
+}
+
+func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {
+ if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
+ return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
+ }
+ triedb := api.eth.BlockChain().StateCache().TrieDB()
+
+ oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb)
+ if err != nil {
+ return nil, err
+ }
+ newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb)
+ if err != nil {
+ return nil, err
+ }
+ oldIt, err := oldTrie.NodeIterator([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ newIt, err := newTrie.NodeIterator([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ diff, _ := trie.NewDifferenceIterator(oldIt, newIt)
+ iter := trie.NewIterator(diff)
+
+ var dirty []common.Address
+ for iter.Next() {
+ key := newTrie.GetKey(iter.Key)
+ if key == nil {
+ return nil, fmt.Errorf("no preimage found for hash %x", iter.Key)
+ }
+ dirty = append(dirty, common.BytesToAddress(key))
+ }
+ return dirty, nil
+}
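
The core of getModifiedAccounts is the difference iterator: it only descends into trie nodes that exist in the new trie but not in the old one, so unchanged subtrees are skipped wholesale. Here is a self-contained sketch of that idea using plain in-memory tries rather than the chain's state tries; constructor and method names follow the trie package as it is used in this diff, so treat it as a sketch, not a definitive snippet.

package main

import (
	"fmt"

	"github.com/ava-labs/subnet-evm/core/rawdb"
	"github.com/ava-labs/subnet-evm/trie"
)

func main() {
	triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())

	oldTrie := trie.NewEmpty(triedb)
	oldTrie.MustUpdate([]byte("alice"), []byte("1"))
	oldTrie.MustUpdate([]byte("bob"), []byte("1"))

	newTrie := trie.NewEmpty(triedb)
	newTrie.MustUpdate([]byte("alice"), []byte("1")) // unchanged: identical node hash, skipped
	newTrie.MustUpdate([]byte("bob"), []byte("2"))   // modified: visited
	newTrie.MustUpdate([]byte("carol"), []byte("1")) // added: visited

	oldIt, _ := oldTrie.NodeIterator(nil)
	newIt, _ := newTrie.NodeIterator(nil)
	diff, _ := trie.NewDifferenceIterator(oldIt, newIt)

	it := trie.NewIterator(diff)
	for it.Next() {
		fmt.Printf("changed key %q -> value %q\n", it.Key, it.Value)
	}
	// Expected: bob and carol are reported, alice is not.
}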
+
+// GetAccessibleState returns the first block number for which the node has
+// accessible state on disk. Note that this is the post-state of that block and
+// the pre-state of the next block.
+// The (from, to) parameters define the sequence of blocks to search, which can
+// go either forwards or backwards.
+func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) {
+ var resolveNum = func(num rpc.BlockNumber) (uint64, error) {
+ // We don't have state for pending (-2), so treat it as latest
+ if num.Int64() < 0 {
+ block := api.eth.blockchain.CurrentBlock()
+ if block == nil {
+ return 0, errors.New("current block missing")
+ }
+ return block.Number.Uint64(), nil
+ }
+ return uint64(num.Int64()), nil
+ }
+ var (
+ start uint64
+ end uint64
+ delta = int64(1)
+ lastLog time.Time
+ err error
+ )
+ if start, err = resolveNum(from); err != nil {
+ return 0, err
+ }
+ if end, err = resolveNum(to); err != nil {
+ return 0, err
+ }
+ if start == end {
+ return 0, errors.New("from and to needs to be different")
+ }
+ if start > end {
+ delta = -1
+ }
+ for i := int64(start); i != int64(end); i += delta {
+ if time.Since(lastLog) > 8*time.Second {
+ log.Info("Finding roots", "from", start, "to", end, "at", i)
+ lastLog = time.Now()
+ }
+ h := api.eth.BlockChain().GetHeaderByNumber(uint64(i))
+ if h == nil {
+ return 0, fmt.Errorf("missing header %d", i)
+ }
+ if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok {
+ return uint64(i), nil
+ }
+ }
+ return 0, errors.New("no state found")
+}
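
A quick way to exercise this from a client is to scan forward from genesis, which yields the oldest block whose post-state is still on disk. A short sketch over JSON-RPC follows; the endpoint is a placeholder, the debug namespace is assumed to be enabled, and the method name follows the usual debug_ naming of DebugAPI methods.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ava-labs/subnet-evm/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:9650/ext/bc/mychain/rpc") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Scan from block 0 towards the latest block; the first block whose state
	// root is present on disk is returned.
	var oldest uint64
	if err := client.CallContext(context.Background(), &oldest, "debug_getAccessibleState",
		"0x0", "latest"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("oldest block with accessible state:", oldest)
}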
diff --git a/eth/backend.go b/eth/backend.go
index a8de24fad9..9871559a09 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -30,6 +30,7 @@ package eth
import (
"errors"
"fmt"
+ "math/big"
"sync"
"time"
@@ -42,6 +43,8 @@ import (
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/state/pruner"
"github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/txpool/blobpool"
+ "github.com/ava-labs/subnet-evm/core/txpool/legacypool"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/core/vm"
"github.com/ava-labs/subnet-evm/eth/ethconfig"
@@ -81,7 +84,8 @@ type Ethereum struct {
config *Config
// Handlers
- txPool *txpool.TxPool
+ txPool *txpool.TxPool
+
blockchain *core.BlockChain
gossiper PushGossiper
@@ -152,7 +156,7 @@ func New(
// Since RecoverPruning will only continue a pruning run that already began, we do not need to ensure that
// reprocessState has already been called and completed successfully. To ensure this, we must maintain
// that Prune is only run after reprocessState has finished successfully.
- if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb, config.TrieCleanJournal); err != nil {
+ if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb); err != nil {
log.Error("Failed to recover state", "error", err)
}
@@ -193,8 +197,6 @@ func New(
}
cacheConfig = &core.CacheConfig{
TrieCleanLimit: config.TrieCleanCache,
- TrieCleanJournal: config.TrieCleanJournal,
- TrieCleanRejournal: config.TrieCleanRejournal,
TrieDirtyLimit: config.TrieDirtyCache,
TrieDirtyCommitTarget: config.TrieDirtyCommitTarget,
TriePrefetcherParallelism: config.TriePrefetcherParallelism,
@@ -219,7 +221,6 @@ func New(
if err := eth.precheckPopulateMissingTries(); err != nil {
return nil, err
}
-
var err error
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, eth.engine, vmConfig, lastAcceptedHash, config.SkipUpgradeCheck)
if err != nil {
@@ -237,7 +238,15 @@ func New(
eth.bloomIndexer.Start(eth.blockchain)
- eth.txPool = txpool.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain)
+ config.BlobPool.Datadir = ""
+ blobPool := blobpool.New(config.BlobPool, &chainWithFinalBlock{eth.blockchain})
+
+ legacyPool := legacypool.New(config.TxPool, eth.blockchain)
+
+ eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool, blobPool})
+ if err != nil {
+ return nil, err
+ }
eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, clock)
@@ -364,7 +373,7 @@ func (s *Ethereum) Start() {
func (s *Ethereum) Stop() error {
s.bloomIndexer.Close()
close(s.closeBloomHandler)
- s.txPool.Stop()
+ s.txPool.Close()
s.blockchain.Stop()
s.engine.Close()
@@ -447,7 +456,6 @@ func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, gspec *co
log.Info("Starting offline pruning", "dataDir", s.config.OfflinePruningDataDirectory, "bloomFilterSize", s.config.OfflinePruningBloomFilterSize)
prunerConfig := pruner.Config{
BloomSize: s.config.OfflinePruningBloomFilterSize,
- Cachedir: s.config.TrieCleanJournal,
Datadir: s.config.OfflinePruningDataDirectory,
}
diff --git a/eth/chain_with_final_block.go b/eth/chain_with_final_block.go
new file mode 100644
index 0000000000..df4ccf70dd
--- /dev/null
+++ b/eth/chain_with_final_block.go
@@ -0,0 +1,23 @@
+package eth
+
+import (
+ "github.com/ava-labs/subnet-evm/core"
+ "github.com/ava-labs/subnet-evm/core/types"
+)
+
+const blocksToKeep = 604_800 // Approx. 2 weeks' worth of blocks, assuming a 2s block time
+
+type chainWithFinalBlock struct {
+ *core.BlockChain
+}
+
+// CurrentFinalBlock returns the current block below which blobs should not
+// be maintained anymore for reorg purposes.
+func (c *chainWithFinalBlock) CurrentFinalBlock() *types.Header {
+ lastAccepted := c.LastAcceptedBlock().Header().Number.Uint64()
+ if lastAccepted <= blocksToKeep {
+ return nil
+ }
+
+ return c.GetHeaderByNumber(lastAccepted - blocksToKeep)
+}
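
As a sanity check on the constant above, the retention window it implies can be computed directly; this is plain arithmetic assuming the stated 2-second block time.

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		blocksToKeep = 604_800
		blockTime    = 2 * time.Second
	)
	window := blocksToKeep * blockTime
	fmt.Printf("blobs kept for roughly %.0f days\n", window.Hours()/24) // 14 days
}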
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 8a64e67683..8115198e4d 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -30,7 +30,8 @@ import (
"time"
"github.com/ava-labs/subnet-evm/core"
- "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/txpool/blobpool"
+ "github.com/ava-labs/subnet-evm/core/txpool/legacypool"
"github.com/ava-labs/subnet-evm/eth/gasprice"
"github.com/ava-labs/subnet-evm/miner"
"github.com/ethereum/go-ethereum/common"
@@ -61,7 +62,8 @@ func NewDefaultConfig() Config {
SnapshotCache: 256,
AcceptedCacheSize: 32,
Miner: miner.Config{},
- TxPool: txpool.DefaultConfig,
+ TxPool: legacypool.DefaultConfig,
+ BlobPool: blobpool.DefaultConfig,
RPCGasCap: 25000000,
RPCEVMTimeout: 5 * time.Second,
GPO: DefaultFullGPOConfig,
@@ -96,8 +98,6 @@ type Config struct {
// TrieDB and snapshot options
TrieCleanCache int
- TrieCleanJournal string
- TrieCleanRejournal time.Duration
TrieDirtyCache int
TrieDirtyCommitTarget int
TriePrefetcherParallelism int
@@ -112,7 +112,8 @@ type Config struct {
Miner miner.Config
// Transaction pool options
- TxPool txpool.Config
+ TxPool legacypool.Config
+ BlobPool blobpool.Config
// Gas Price Oracle options
GPO gasprice.Config
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 5bdd4e9344..4adea84f7a 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -386,12 +386,12 @@ func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
if api.sys.backend.IsAllowUnfinalizedQueries() {
logsSub, err = api.events.SubscribeLogs(interfaces.FilterQuery(crit), logs)
if err != nil {
- return rpc.ID(""), err
+ return "", err
}
} else {
logsSub, err = api.events.SubscribeAcceptedLogs(interfaces.FilterQuery(crit), logs)
if err != nil {
- return rpc.ID(""), err
+ return "", err
}
}
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index e4bb6bf087..15a8bc803b 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -132,37 +132,32 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
}
}
- // Short-cut if all we care about is pending logs
- if f.begin == rpc.PendingBlockNumber.Int64() {
- if f.end != rpc.PendingBlockNumber.Int64() {
- return nil, errors.New("invalid block range")
- }
- // There is no pending block, if the request specifies only the pending block, then return nil.
- return nil, nil
- }
- // Figure out the limits of the filter range
- // LatestBlockNumber is transformed into the last accepted block in HeaderByNumber
- // so it is left in place here.
- header, err := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
- if err != nil {
- return nil, err
+ var (
+ beginPending = f.begin == rpc.PendingBlockNumber.Int64()
+ endPending = f.end == rpc.PendingBlockNumber.Int64()
+ endSet = f.end >= 0
+ )
+
+ // special case for pending logs
+ if beginPending && !endPending {
+ return nil, errors.New("invalid block range")
}
- if header == nil {
+
+ // Short-cut if all we care about is pending logs
+ if beginPending && endPending {
return nil, nil
}
- var (
- head = header.Number.Int64()
- )
resolveSpecial := func(number int64) (int64, error) {
var hdr *types.Header
switch number {
- case rpc.LatestBlockNumber.Int64():
- return head, nil
- case rpc.PendingBlockNumber.Int64():
+ case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64():
// we should return head here since we've already captured
// that we need to get the pending logs in the pending boolean above
- return head, nil
+ hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
+ if hdr == nil {
+ return 0, errors.New("latest header not found")
+ }
case rpc.FinalizedBlockNumber.Int64():
hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
if hdr == nil {
@@ -178,6 +173,9 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
}
return hdr.Number.Int64(), nil
}
+
+ var err error
+ // range queries need to resolve the special begin/end block numbers
if f.begin, err = resolveSpecial(f.begin); err != nil {
return nil, err
}
@@ -191,7 +189,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
// We error in this case to prevent a bad UX where the caller thinks there
// are no logs from the specified beginning to end (when in reality there may
// be some).
- if f.end < f.begin {
+ if endSet && f.end < f.begin {
return nil, fmt.Errorf("begin block %d is greater than end block %d", f.begin, f.end)
}
@@ -201,43 +199,77 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
return nil, fmt.Errorf("requested too many blocks from %d to %d, maximum is set to %d", f.begin, f.end, maxBlocks)
}
// Gather all indexed logs, and finish with non indexed ones
+ logChan, errChan := f.rangeLogsAsync(ctx)
+ var logs []*types.Log
+ for {
+ select {
+ case log := <-logChan:
+ logs = append(logs, log)
+ case err := <-errChan:
+ if err != nil {
+ // if an error occurs during extraction, we do return the extracted data
+ return logs, err
+ }
+ return logs, nil
+ }
+ }
+}
+
+// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously.
+// It creates and returns two channels: one for delivering log data, and one for reporting errors.
+func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) {
var (
- logs []*types.Log
- end = uint64(f.end)
- size, sections = f.sys.backend.BloomStatus()
+ logChan = make(chan *types.Log)
+ errChan = make(chan error)
)
- if indexed := sections * size; indexed > uint64(f.begin) {
- if indexed > end {
- logs, err = f.indexedLogs(ctx, end)
- } else {
- logs, err = f.indexedLogs(ctx, indexed-1)
+
+ go func() {
+ defer func() {
+ close(errChan)
+ close(logChan)
+ }()
+
+ // Gather all indexed logs, and finish with non indexed ones
+ var (
+ end = uint64(f.end)
+ size, sections = f.sys.backend.BloomStatus()
+ err error
+ )
+ if indexed := sections * size; indexed > uint64(f.begin) {
+ if indexed > end {
+ indexed = end + 1
+ }
+ if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil {
+ errChan <- err
+ return
+ }
}
- if err != nil {
- return logs, err
+
+ if err := f.unindexedLogs(ctx, end, logChan); err != nil {
+ errChan <- err
+ return
}
- }
- rest, err := f.unindexedLogs(ctx, end)
- logs = append(logs, rest...)
- return logs, err
+
+ errChan <- nil
+ }()
+
+ return logChan, errChan
}
// indexedLogs returns the logs matching the filter criteria based on the bloom
// bits indexed available locally or via the network.
-func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
+func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
// Create a matcher session and request servicing from the backend
matches := make(chan uint64, 64)
session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
if err != nil {
- return nil, err
+ return err
}
defer session.Close()
f.sys.backend.ServiceFilter(ctx, session)
- // Iterate over the matches until exhausted or context closed
- var logs []*types.Log
-
for {
select {
case number, ok := <-matches:
@@ -247,47 +279,50 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
if err == nil {
f.begin = int64(end) + 1
}
- return logs, err
+ return err
}
f.begin = int64(number) + 1
// Retrieve the suggested block and pull any truly matching logs
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
if header == nil || err != nil {
- return logs, err
+ return err
}
found, err := f.checkMatches(ctx, header)
if err != nil {
- return logs, err
+ return err
+ }
+ for _, log := range found {
+ logChan <- log
}
- logs = append(logs, found...)
case <-ctx.Done():
- return logs, ctx.Err()
+ return ctx.Err()
}
}
}
// unindexedLogs returns the logs matching the filter criteria based on raw block
// iteration and bloom matching.
-func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
- var logs []*types.Log
-
+func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
for ; f.begin <= int64(end); f.begin++ {
- if f.begin%10 == 0 && ctx.Err() != nil {
- return logs, ctx.Err()
- }
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
if header == nil || err != nil {
- return logs, err
+ return err
}
found, err := f.blockLogs(ctx, header)
if err != nil {
- return logs, err
+ return err
+ }
+ for _, log := range found {
+ select {
+ case logChan <- log:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
}
- logs = append(logs, found...)
}
- return logs, nil
+ return nil
}
// blockLogs returns the logs matching the filter criteria within a single block.
@@ -308,22 +343,25 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*typ
unfiltered := types.FlattenLogs(logsList)
logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
- if len(logs) > 0 {
- // We have matching logs, check if we need to resolve full logs via the light client
- if logs[0].TxHash == (common.Hash{}) {
- receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash())
- if err != nil {
- return nil, err
- }
- unfiltered = unfiltered[:0]
- for _, receipt := range receipts {
- unfiltered = append(unfiltered, receipt.Logs...)
- }
- logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
- }
+ if len(logs) == 0 {
+ return nil, nil
+ }
+ // Most backends will deliver un-derived logs, but check nevertheless.
+ if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
return logs, nil
}
- return nil, nil
+ // We have matching logs, check if we need to resolve full logs via the light client
+ receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash())
+ if err != nil {
+ return nil, err
+ }
+ unfiltered = unfiltered[:0]
+ for _, receipt := range receipts {
+ unfiltered = append(unfiltered, receipt.Logs...)
+ }
+ logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
+
+ return logs, nil
}
func includes(addresses []common.Address, a common.Address) bool {
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 4ebd672d47..6b4a2d582d 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -518,15 +518,6 @@ func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {
}
}
-func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
- for _, f := range filters[LogsSubscription] {
- matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
- if len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
- }
-}
-
func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent, accepted bool) {
for _, f := range filters[PendingTransactionsSubscription] {
f.txs <- ev.Txs
@@ -578,7 +569,7 @@ func (es *EventSystem) eventLoop() {
case ev := <-es.logsAcceptedCh:
es.handleAcceptedLogs(index, ev)
case ev := <-es.rmLogsCh:
- es.handleRemovedLogs(index, ev)
+ es.handleLogs(index, ev.Logs)
case ev := <-es.pendingLogsCh:
es.handlePendingLogs(index, ev)
case ev := <-es.chainCh:
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index d56b8c7540..c44cc6559b 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -28,16 +28,21 @@ package filters
import (
"context"
+ "encoding/json"
"math/big"
- "reflect"
+ "strings"
"testing"
+ "time"
+ "github.com/ava-labs/subnet-evm/accounts/abi"
"github.com/ava-labs/subnet-evm/consensus/dummy"
"github.com/ava-labs/subnet-evm/core"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/core/vm"
"github.com/ava-labs/subnet-evm/params"
"github.com/ava-labs/subnet-evm/rpc"
+ "github.com/ava-labs/subnet-evm/trie"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
@@ -116,10 +121,53 @@ func BenchmarkFilters(b *testing.B) {
func TestFilters(t *testing.T) {
var (
- db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false)
- _, sys = newTestFilterSystem(t, db, Config{})
+ db = rawdb.NewMemoryDatabase()
+ _, sys = newTestFilterSystem(t, db, Config{})
+ // Sender account
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
+ signer = types.NewLondonSigner(big.NewInt(1))
+ // Logging contract
+ contract = common.Address{0xfe}
+ contract2 = common.Address{0xff}
+ abiStr = `[{"inputs":[],"name":"log0","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"}],"name":"log1","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"}],"name":"log2","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"}],"name":"log3","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"},{"internalType":"uint256","name":"t4","type":"uint256"}],"name":"log4","outputs":[],"stateMutability":"nonpayable","type":"function"}]`
+ /*
+ // SPDX-License-Identifier: GPL-3.0
+ pragma solidity >=0.7.0 <0.9.0;
+
+ contract Logger {
+ function log0() external {
+ assembly {
+ log0(0, 0)
+ }
+ }
+
+ function log1(uint t1) external {
+ assembly {
+ log1(0, 0, t1)
+ }
+ }
+
+ function log2(uint t1, uint t2) external {
+ assembly {
+ log2(0, 0, t1, t2)
+ }
+ }
+
+ function log3(uint t1, uint t2, uint t3) external {
+ assembly {
+ log3(0, 0, t1, t2, t3)
+ }
+ }
+
+ function log4(uint t1, uint t2, uint t3, uint t4) external {
+ assembly {
+ log4(0, 0, t1, t2, t3, t4)
+ }
+ }
+ }
+ */
+ bytecode = common.FromHex("608060405234801561001057600080fd5b50600436106100575760003560e01c80630aa731851461005c5780632a4c08961461006657806378b9a1f314610082578063c670f8641461009e578063c683d6a3146100ba575b600080fd5b6100646100d6565b005b610080600480360381019061007b9190610143565b6100dc565b005b61009c60048036038101906100979190610196565b6100e8565b005b6100b860048036038101906100b391906101d6565b6100f2565b005b6100d460048036038101906100cf9190610203565b6100fa565b005b600080a0565b808284600080a3505050565b8082600080a25050565b80600080a150565b80828486600080a450505050565b600080fd5b6000819050919050565b6101208161010d565b811461012b57600080fd5b50565b60008135905061013d81610117565b92915050565b60008060006060848603121561015c5761015b610108565b5b600061016a8682870161012e565b935050602061017b8682870161012e565b925050604061018c8682870161012e565b9150509250925092565b600080604083850312156101ad576101ac610108565b5b60006101bb8582860161012e565b92505060206101cc8582860161012e565b9150509250929050565b6000602082840312156101ec576101eb610108565b5b60006101fa8482850161012e565b91505092915050565b6000806000806080858703121561021d5761021c610108565b5b600061022b8782880161012e565b945050602061023c8782880161012e565b935050604061024d8782880161012e565b925050606061025e8782880161012e565b9150509295919450925056fea264697066735822122073a4b156f487e59970dc1ef449cc0d51467268f676033a17188edafcee861f9864736f6c63430008110033")
hash1 = common.BytesToHash([]byte("topic1"))
hash2 = common.BytesToHash([]byte("topic2"))
@@ -127,134 +175,213 @@ func TestFilters(t *testing.T) {
hash4 = common.BytesToHash([]byte("topic4"))
gspec = &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
+ Config: params.TestChainConfig,
+ Alloc: core.GenesisAlloc{
+ addr: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))},
+ contract: {Balance: big.NewInt(0), Code: bytecode},
+ contract2: {Balance: big.NewInt(0), Code: bytecode},
+ },
BaseFee: big.NewInt(1),
}
)
- defer db.Close()
- _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 1000, 10, func(i int, gen *core.BlockGen) {
+ contractABI, err := abi.JSON(strings.NewReader(abiStr))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Hack: GenerateChainWithGenesis creates a new db.
+ // Commit the genesis manually and use GenerateChain.
+ _, err = gspec.Commit(db, trie.NewDatabase(db))
+ if err != nil {
+ t.Fatal(err)
+ }
+ //_, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 1000, 10, func(i int, gen *core.BlockGen) {
+ chain, _, err := core.GenerateChain(gspec.Config, gspec.ToBlock(), dummy.NewFaker(), db, 1000, 10, func(i int, gen *core.BlockGen) {
switch i {
case 1:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash1},
- },
+ data, err := contractABI.Pack("log1", hash1.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, gen.BaseFee(), nil))
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 0,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
+ tx2, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 1,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract2,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx2)
case 2:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash2},
- },
+ data, err := contractABI.Pack("log2", hash2.Big(), hash1.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, gen.BaseFee(), nil))
-
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 2,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
case 998:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash3},
- },
+ data, err := contractABI.Pack("log1", hash3.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress("0x998"), big.NewInt(998), 998, gen.BaseFee(), nil))
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 3,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract2,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
case 999:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash4},
- },
+ data, err := contractABI.Pack("log1", hash4.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 4,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
}
})
require.NoError(t, err)
- // The test txs are not properly signed, can't simply create a chain
- // and then import blocks. TODO(rjl493456442) try to get rid of the
- // manual database writes.
- gspec.MustCommit(db)
- for i, block := range chain {
- rawdb.WriteBlock(db, block)
- rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
- rawdb.WriteHeadBlockHash(db, block.Hash())
- rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
+ bc, err := core.NewBlockChain(db, core.DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, gspec.ToBlock().Hash(), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = bc.InsertChain(chain)
+ if err != nil {
+ t.Fatal(err)
}
// Set block 998 as Finalized (-3)
- // rawdb.WriteFinalizedBlockHash(db, chain[998].Hash())
+ // bc.SetFinalized(chain[998].Header())
err = rawdb.WriteAcceptorTip(db, chain[998].Hash())
require.NoError(t, err)
- filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
- logs, _ := filter.Logs(context.Background())
- if len(logs) != 4 {
- t.Error("expected 4 log, got", len(logs))
- }
-
for i, tc := range []struct {
- f *Filter
- wantHashes []common.Hash
+ f *Filter
+ want string
+ err string
}{
{
- sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}),
- []common.Hash{hash3},
+ f: sys.NewBlockFilter(chain[2].Hash(), []common.Address{contract}, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash3}}),
- []common.Hash{hash3},
+ f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}),
- []common.Hash{hash1, hash2},
+ f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}),
}, {
- sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}),
- nil,
+ f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}),
+ want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil),
- nil,
+ f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}),
- nil,
+ f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xdba3e2ea9a7d690b722d70ee605fd67ba4c00d1d3aecd5cf187a7b92ad8eb3df","transactionIndex":"0x1","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4},
+ f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}),
}, {
- sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash3, hash4},
+ f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil),
}, {
- sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), []common.Hash{hash3},
+ f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}),
}, {
- sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), nil,
+ f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), nil,
+ f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), nil,
+ f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil),
+ want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`,
}, {
- sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), nil,
+ f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil),
}, {
- sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), nil,
+ f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ err: "safe header not found",
+ }, {
+ f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil),
+ err: "safe header not found",
+ }, {
+ f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil),
+ err: "safe header not found",
+ }, {
+ f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil),
+ }, {
+ f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`,
+ }, {
+ f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ err: "invalid block range",
},
} {
- logs, _ := tc.f.Logs(context.Background())
- var haveHashes []common.Hash
- for _, l := range logs {
- haveHashes = append(haveHashes, l.Topics[0])
- }
- if have, want := len(haveHashes), len(tc.wantHashes); have != want {
- t.Fatalf("test %d, have %d logs, want %d", i, have, want)
+ logs, err := tc.f.Logs(context.Background())
+ if err == nil && tc.err != "" {
+ t.Fatalf("test %d, expected error %q, got nil", i, tc.err)
+ } else if err != nil && err.Error() != tc.err {
+ t.Fatalf("test %d, expected error %q, got %q", i, tc.err, err.Error())
}
- if len(haveHashes) == 0 {
+ if tc.want == "" && len(logs) == 0 {
continue
}
- if !reflect.DeepEqual(tc.wantHashes, haveHashes) {
- t.Fatalf("test %d, have %v want %v", i, haveHashes, tc.wantHashes)
+ tc.want = patchWant(t, tc.want, chain)
+ have, err := json.Marshal(logs)
+ if err != nil {
+ t.Fatal(err)
}
+ if string(have) != tc.want {
+ t.Fatalf("test %d, have:\n%s\nwant:\n%s", i, have, tc.want)
+ }
+ }
+
+ t.Run("timeout", func(t *testing.T) {
+ f := sys.NewRangeFilter(0, -1, nil, nil)
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Hour))
+ defer cancel()
+ _, err := f.Logs(ctx)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if err != context.DeadlineExceeded {
+ t.Fatalf("expected context.DeadlineExceeded, got %v", err)
+ }
+ })
+}
+
+func patchWant(t *testing.T, want string, blocks []*types.Block) string {
+ var logs []*types.Log
+ err := json.Unmarshal([]byte(want), &logs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, log := range logs {
+ blockIndex := log.BlockNumber - 1
+ log.BlockHash = blocks[blockIndex].Hash()
+ log.TxHash = blocks[blockIndex].Transactions()[log.TxIndex].Hash()
+ }
+ result, err := json.Marshal(logs)
+ if err != nil {
+ t.Fatal(err)
}
+ return string(result)
}
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go
index 81fb379c7a..143adac2d4 100644
--- a/eth/gasprice/feehistory.go
+++ b/eth/gasprice/feehistory.go
@@ -31,7 +31,7 @@ import (
"errors"
"fmt"
"math/big"
- "sort"
+ "slices"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/rpc"
@@ -46,26 +46,16 @@ var (
)
// txGasAndReward is sorted in ascending order based on reward
-type (
- txGasAndReward struct {
- gasUsed uint64
- reward *big.Int
- }
- sortGasAndReward []txGasAndReward
- slimBlock struct {
- GasUsed uint64
- GasLimit uint64
- BaseFee *big.Int
- Txs []txGasAndReward
- }
-)
-
-func (s sortGasAndReward) Len() int { return len(s) }
-func (s sortGasAndReward) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
+type txGasAndReward struct {
+ gasUsed uint64
+ reward *big.Int
}
-func (s sortGasAndReward) Less(i, j int) bool {
- return s[i].reward.Cmp(s[j].reward) < 0
+
+type slimBlock struct {
+ GasUsed uint64
+ GasLimit uint64
+ BaseFee *big.Int
+ Txs []txGasAndReward
}
// processBlock prepares a [slimBlock] from a retrieved block and list of
@@ -77,12 +67,14 @@ func processBlock(block *types.Block, receipts types.Receipts) *slimBlock {
}
sb.GasUsed = block.GasUsed()
sb.GasLimit = block.GasLimit()
- sorter := make(sortGasAndReward, len(block.Transactions()))
+ sorter := make([]txGasAndReward, len(block.Transactions()))
for i, tx := range block.Transactions() {
reward, _ := tx.EffectiveGasTip(sb.BaseFee)
sorter[i] = txGasAndReward{gasUsed: receipts[i].GasUsed, reward: reward}
}
- sort.Stable(sorter)
+ slices.SortStableFunc(sorter, func(a, b txGasAndReward) int {
+ return a.reward.Cmp(b.reward)
+ })
sb.Txs = sorter
return &sb
}
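
This replaces the sort.Interface boilerplate with a comparator-based call. A tiny standalone illustration of the convention follows, using toy data rather than the fee-history types (the plain "slices" import matches the one added above and requires Go 1.21): the comparator returns a negative, zero, or positive int, and SortStableFunc preserves the relative order of equal elements.

package main

import (
	"fmt"
	"math/big"
	"slices"
)

func main() {
	rewards := []*big.Int{big.NewInt(3), big.NewInt(1), big.NewInt(2), big.NewInt(1)}
	// Cmp already follows the negative/zero/positive convention SortStableFunc expects.
	slices.SortStableFunc(rewards, func(a, b *big.Int) int { return a.Cmp(b) })
	fmt.Println(rewards) // [1 1 2 3]
}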
diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go
index 79a33f807b..899c2eba0e 100644
--- a/eth/gasprice/gasprice.go
+++ b/eth/gasprice/gasprice.go
@@ -30,7 +30,6 @@ import (
"context"
"fmt"
"math/big"
- "sort"
"sync"
"github.com/ava-labs/avalanchego/utils/timer/mockable"
@@ -46,6 +45,7 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
+ "golang.org/x/exp/slices"
)
const (
@@ -396,12 +396,12 @@ func (oracle *Oracle) suggestDynamicFees(ctx context.Context) (*big.Int, *big.In
price := lastPrice
baseFee := lastBaseFee
if len(tipResults) > 0 {
- sort.Sort(bigIntArray(tipResults))
+ slices.SortFunc(tipResults, func(a, b *big.Int) int { return a.Cmp(b) })
price = tipResults[(len(tipResults)-1)*oracle.percentile/100]
}
if len(baseFeeResults) > 0 {
- sort.Sort(bigIntArray(baseFeeResults))
+ slices.SortFunc(baseFeeResults, func(a, b *big.Int) int { return a.Cmp(b) })
baseFee = baseFeeResults[(len(baseFeeResults)-1)*oracle.percentile/100]
}
if price.Cmp(oracle.maxPrice) > 0 {
@@ -434,9 +434,3 @@ func (oracle *Oracle) getFeeInfo(ctx context.Context, number uint64) (*feeInfo,
}
return oracle.feeInfoProvider.addHeader(ctx, header)
}
-
-type bigIntArray []*big.Int
-
-func (s bigIntArray) Len() int { return len(s) }
-func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
-func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 302a7e10de..e435fdb34f 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -176,7 +176,7 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()), true)
+ root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), true)
if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index 10a6ca4d1d..1b81756747 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -27,7 +27,6 @@
package tracers
import (
- "bytes"
"context"
"crypto/ecdsa"
"encoding/json"
@@ -35,7 +34,6 @@ import (
"fmt"
"math/big"
"reflect"
- "sort"
"sync/atomic"
"testing"
@@ -54,6 +52,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "golang.org/x/exp/slices"
)
var (
@@ -821,19 +820,13 @@ type Account struct {
addr common.Address
}
-type Accounts []Account
-
-func (a Accounts) Len() int { return len(a) }
-func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 }
-
-func newAccounts(n int) (accounts Accounts) {
+func newAccounts(n int) (accounts []Account) {
for i := 0; i < n; i++ {
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
accounts = append(accounts, Account{key: key, addr: addr})
}
- sort.Sort(accounts)
+ slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) })
return accounts
}
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 7e259ea054..b89a50a364 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -382,7 +382,6 @@ func TestInternals(t *testing.T) {
Balance: big.NewInt(500000000000000),
},
}, false)
-
evm := vm.NewEVM(context, txContext, statedb, params.TestPreSubnetEVMConfig, vm.Config{Tracer: tc.tracer})
msg := &core.Message{
To: &to,
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index 4e8316cd58..7c296f3e6a 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -102,10 +102,9 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
}
// Configure a blockchain with the given prestate
var (
- blockNumber = new(big.Int).SetUint64(uint64(test.Context.Number))
- signer = types.MakeSigner(test.Genesis.Config, blockNumber, uint64(test.Context.Time))
- origin, _ = signer.Sender(tx)
- txContext = vm.TxContext{
+ signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
+ origin, _ = signer.Sender(tx)
+ txContext = vm.TxContext{
Origin: origin,
GasPrice: tx.GasPrice(),
}
@@ -113,7 +112,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
- BlockNumber: blockNumber,
+ BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: uint64(test.Context.Time),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go
index 9c2bdac9a4..cb40e1bec8 100644
--- a/eth/tracers/js/goja.go
+++ b/eth/tracers/js/goja.go
@@ -96,7 +96,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
if !obj.Get("constructor").SameAs(bufType) {
break
}
- b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes()
+ b := obj.Export().([]byte)
return b, nil
}
return nil, errors.New("invalid buffer type")
@@ -255,7 +255,7 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr
t.ctx["value"] = valueBig
t.ctx["block"] = t.vm.ToValue(env.Context.BlockNumber.Uint64())
// Update list of precompiles based on current block
- rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time)
+ rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go
index 83e3d631ab..27d726ea97 100644
--- a/eth/tracers/logger/logger.go
+++ b/eth/tracers/logger/logger.go
@@ -157,6 +157,7 @@ func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, s
if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) {
return
}
+
memory := scope.Memory
stack := scope.Stack
contract := scope.Contract
@@ -418,6 +419,7 @@ type StructLogRes struct {
Depth int `json:"depth"`
Error string `json:"error,omitempty"`
Stack *[]string `json:"stack,omitempty"`
+ ReturnData string `json:"returnData,omitempty"`
Memory *[]string `json:"memory,omitempty"`
Storage *map[string]string `json:"storage,omitempty"`
RefundCounter uint64 `json:"refund,omitempty"`
@@ -443,6 +445,9 @@ func formatLogs(logs []StructLog) []StructLogRes {
}
formatted[index].Stack = &stack
}
+ if len(trace.ReturnData) > 0 {
+ formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String()
+ }
if trace.Memory != nil {
memory := make([]string, 0, (len(trace.Memory)+31)/32)
for i := 0; i+32 <= len(trace.Memory); i += 32 {
diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go
index 14a39356f0..50a4b23c39 100644
--- a/eth/tracers/native/4byte.go
+++ b/eth/tracers/native/4byte.go
@@ -91,7 +91,7 @@ func (t *fourByteTracer) store(id []byte, size int) {
// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
// Update list of precompiles based on current block
- rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time)
+ rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
// Save the outer calldata also
diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go
index e67bed2040..c83e24a25c 100644
--- a/eth/tracers/native/call_flat.go
+++ b/eth/tracers/native/call_flat.go
@@ -158,7 +158,7 @@ func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Trace
func (t *flatCallTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
t.tracer.CaptureStart(env, from, to, create, input, gas, value)
// Update list of precompiles based on current block
- rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Timestamp())
+ rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
@@ -259,7 +259,7 @@ func flatFromNested(input *callFrame, traceAddress []int, convertErrs bool, ctx
case vm.CREATE, vm.CREATE2:
frame = newFlatCreate(input)
case vm.SELFDESTRUCT:
- frame = newFlatSuicide(input)
+ frame = newFlatSelfdestruct(input)
case vm.CALL, vm.STATICCALL, vm.CALLCODE, vm.DELEGATECALL:
frame = newFlatCall(input)
default:
@@ -341,7 +341,7 @@ func newFlatCall(input *callFrame) *flatCallFrame {
}
}
-func newFlatSuicide(input *callFrame) *flatCallFrame {
+func newFlatSelfdestruct(input *callFrame) *flatCallFrame {
return &flatCallFrame{
Type: "suicide",
Action: flatCallAction{
diff --git a/ethclient/subnetevmclient/subnet_evm_client.go b/ethclient/subnetevmclient/subnet_evm_client.go
index cf4ed51773..1ce9a50b3f 100644
--- a/ethclient/subnetevmclient/subnet_evm_client.go
+++ b/ethclient/subnetevmclient/subnet_evm_client.go
@@ -105,6 +105,11 @@ func (ec *Client) GetProof(ctx context.Context, account common.Address, keys []s
StorageProof []storageResult `json:"storageProof"`
}
+ // Avoid keys being 'null'.
+ if keys == nil {
+ keys = []string{}
+ }
+
var res accountResult
err := ec.c.CallContext(ctx, &res, "eth_getProof", account, keys, ethclient.ToBlockNumArg(blockNumber))
// Turn hexutils back to normal datatypes
diff --git a/go.mod b/go.mod
index 4e84054be0..158237581c 100644
--- a/go.mod
+++ b/go.mod
@@ -10,18 +10,20 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0
github.com/docker/docker v1.6.2
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
- github.com/ethereum/go-ethereum v1.12.0
+ github.com/ethereum/go-ethereum v1.12.2
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08
github.com/go-cmd/cmd v1.4.1
+ github.com/golang/protobuf v1.5.3
github.com/google/uuid v1.6.0
github.com/gorilla/rpc v1.2.0
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
+ github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7
github.com/holiman/bloomfilter/v2 v2.0.3
- github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c
+ github.com/holiman/uint256 v1.2.3
github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.16
@@ -37,14 +39,15 @@ require (
github.com/status-im/keycard-go v0.2.0
github.com/stretchr/testify v1.8.4
github.com/tyler-smith/go-bip39 v1.1.0
- github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
+ github.com/urfave/cli/v2 v2.24.1
go.uber.org/goleak v1.3.0
go.uber.org/mock v0.4.0
golang.org/x/crypto v0.18.0
+ golang.org/x/exp v0.0.0-20231127185646-65229373498e
golang.org/x/sync v0.6.0
golang.org/x/sys v0.16.0
golang.org/x/text v0.14.0
- golang.org/x/time v0.1.0
+ golang.org/x/time v0.3.0
google.golang.org/protobuf v1.32.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
@@ -53,8 +56,9 @@ require (
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
- github.com/ava-labs/coreth v0.13.1-rc.5 // indirect
+ github.com/ava-labs/coreth v0.13.2-0.20240304213436-8afbf2d68461 // indirect
github.com/beorn7/perks v1.0.1 // indirect
+ github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/btcutil v1.1.3 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
@@ -63,9 +67,13 @@ require (
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect
+ github.com/consensys/bavard v0.1.13 // indirect
+ github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+ github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
+ github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -74,7 +82,6 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-cmp v0.6.0 // indirect
@@ -95,6 +102,7 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
+ github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
@@ -126,7 +134,6 @@ require (
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
- golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/term v0.16.0 // indirect
golang.org/x/tools v0.16.0 // indirect
@@ -137,4 +144,5 @@ require (
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
+ rsc.io/tmplfunc v0.0.3 // indirect
)
diff --git a/go.sum b/go.sum
index 40cbf87166..624152a4e4 100644
--- a/go.sum
+++ b/go.sum
@@ -58,11 +58,13 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/ava-labs/avalanchego v1.11.2 h1:8iodZ+RjqpRwHdiXPPtvaNt72qravge7voGzw3yPRzg=
github.com/ava-labs/avalanchego v1.11.2/go.mod h1:oTVnF9idL57J4LM/6RByTmKhI4QvV6OCnF99ysyBljE=
-github.com/ava-labs/coreth v0.13.1-rc.5 h1:YcTs9nryZLkf4gPmMyFx1TREFpDTPdg/VCNGGHSF2TY=
-github.com/ava-labs/coreth v0.13.1-rc.5/go.mod h1:4y1igTe/sFOIrpAtXoY+AdmfftNHrmrhBBRVfGCAPcw=
+github.com/ava-labs/coreth v0.13.2-0.20240304213436-8afbf2d68461 h1:SIwGF3eVEwmexLm7is/MvG7W5sbmpGXaUT6RfUPP3jw=
+github.com/ava-labs/coreth v0.13.2-0.20240304213436-8afbf2d68461/go.mod h1:v24MTMbxFSvyM7YeQFyWiXjIzVo2+UVs7tgH7xrByew=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
+github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA=
@@ -117,12 +119,18 @@ github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lg
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
+github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A=
+github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -158,8 +166,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0=
-github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs=
+github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=
+github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/go-ethereum v1.12.2 h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6gDq/6Y=
+github.com/ethereum/go-ethereum v1.12.2/go.mod h1:1cRAEV+rp/xX0zraSCBnu9Py3HQ+geRMj3HdR+k0wfI=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
@@ -289,6 +299,7 @@ github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8q
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg=
github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -316,10 +327,12 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
+github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
-github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8=
-github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
+github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
+github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
@@ -406,6 +419,9 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -539,8 +555,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
+github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU=
+github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
@@ -805,8 +821,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1013,3 +1029,5 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
+rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/internal/blocktest/test_hash.go b/internal/blocktest/test_hash.go
new file mode 100644
index 0000000000..014e9ff4b0
--- /dev/null
+++ b/internal/blocktest/test_hash.go
@@ -0,0 +1,69 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package blocktest provides shared helpers for block-related tests.
+//
+// It currently exposes a trie-free hasher that can stand in for the real
+// trie implementation when deriving transaction and receipt list hashes,
+// avoiding an import cycle in tests.
+
+package blocktest
+
+import (
+ "hash"
+
+ "github.com/ethereum/go-ethereum/common"
+ "golang.org/x/crypto/sha3"
+)
+
+// testHasher is the helper tool for transaction/receipt list hashing.
+// The original hasher is the trie; the testing hasher is used instead to
+// avoid an import cycle.
+type testHasher struct {
+ hasher hash.Hash
+}
+
+// NewHasher returns a new testHasher instance.
+func NewHasher() *testHasher {
+ return &testHasher{hasher: sha3.NewLegacyKeccak256()}
+}
+
+// Reset resets the hash state.
+func (h *testHasher) Reset() {
+ h.hasher.Reset()
+}
+
+// Update updates the hash state with the given key and value.
+func (h *testHasher) Update(key, val []byte) error {
+ h.hasher.Write(key)
+ h.hasher.Write(val)
+ return nil
+}
+
+// Hash returns the hash value.
+func (h *testHasher) Hash() common.Hash {
+ return common.BytesToHash(h.hasher.Sum(nil))
+}
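The new blocktest.NewHasher is consumed later in this patch by internal/ethapi/api_test.go when building blocks. A minimal sketch of that usage, assuming the types.NewBlock signature that appears elsewhere in this diff (header, txs, uncles, receipts, hasher); the test name and body are illustrative:

```go
package blocktest_test

import (
	"math/big"
	"testing"

	"github.com/ava-labs/subnet-evm/core/types"
	"github.com/ava-labs/subnet-evm/internal/blocktest"
)

func TestNewHasherUsage(t *testing.T) {
	// Build a one-transaction block; the trie-free hasher derives the
	// transactionsRoot without pulling in the trie package.
	tx := types.NewTx(&types.LegacyTx{Nonce: 1, Gas: 21000, GasPrice: big.NewInt(1)})
	block := types.NewBlock(&types.Header{Number: big.NewInt(1)}, []*types.Transaction{tx}, nil, nil, blocktest.NewHasher())
	if block.TxHash() == types.EmptyRootHash {
		t.Fatal("expected a non-empty transactions root")
	}
}
```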
diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go
index 442ed99197..9856302647 100644
--- a/internal/cmdtest/test_cmd.go
+++ b/internal/cmdtest/test_cmd.go
@@ -65,13 +65,13 @@ type TestCmd struct {
Err error
}
-var id int32
+var id atomic.Int32
// Run exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (tt *TestCmd) Run(name string, args ...string) {
- id := atomic.AddInt32(&id, 1)
- tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)}
+ id.Add(1)
+ tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id.Load())}
tt.cmd = &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{name}, args...),
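For context on the hunk above: Go 1.19's typed atomics replace the free functions from sync/atomic. A standalone sketch of the pattern (Add atomically increments and returns the new value, so it can also be captured directly):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

var id atomic.Int32

func nextID() int32 {
	// Replaces the older atomic.AddInt32(&id, 1) style used before this patch.
	return id.Add(1)
}

func main() {
	fmt.Println(nextID(), nextID(), nextID()) // 1 2 3
}
```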
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 0bd4675fa9..1453b92049 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -29,6 +29,7 @@ package debug
import (
"fmt"
"io"
+ "net"
"net/http"
_ "net/http/pprof"
"os"
@@ -316,7 +317,7 @@ func Setup(ctx *cli.Context) error {
port := ctx.Int(pprofPortFlag.Name)
- address := fmt.Sprintf("%s:%d", listenHost, port)
+ address := net.JoinHostPort(listenHost, fmt.Sprintf("%d", port))
StartPProf(address)
}
if len(logFile) > 0 || rotation {
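The net.JoinHostPort change above matters mainly for IPv6 listen addresses, which need bracketing that a plain "%s:%d" format string does not provide. A small illustration:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv4: both forms agree.
	fmt.Println(net.JoinHostPort("127.0.0.1", "6060")) // 127.0.0.1:6060

	// IPv6: JoinHostPort adds the required brackets, the naive format does not.
	fmt.Println(net.JoinHostPort("::1", "6060"))   // [::1]:6060
	fmt.Println(fmt.Sprintf("%s:%d", "::1", 6060)) // ::1:6060 (ambiguous)
}
```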
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index c0f3d7a69d..37e2be6ae3 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -691,46 +691,78 @@ type StorageResult struct {
Proof []string `json:"proof"`
}
+// proofList implements ethdb.KeyValueWriter and collects the proofs as
+// hex strings for delivery to the RPC caller.
+type proofList []string
+
+func (n *proofList) Put(key []byte, value []byte) error {
+ *n = append(*n, hexutil.Encode(value))
+ return nil
+}
+
+func (n *proofList) Delete(key []byte) error {
+ panic("not supported")
+}
+
// GetProof returns the Merkle-proof for a given account and optionally some storage keys.
func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) {
+ var (
+ keys = make([]common.Hash, len(storageKeys))
+ keyLengths = make([]int, len(storageKeys))
+ storageProof = make([]StorageResult, len(storageKeys))
+ storageTrie state.Trie
+ storageHash = types.EmptyRootHash
+ codeHash = types.EmptyCodeHash
+ )
+ // Deserialize all keys. This prevents state access on invalid input.
+ for i, hexKey := range storageKeys {
+ var err error
+ keys[i], keyLengths[i], err = decodeHash(hexKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
- storageTrie, err := state.StorageTrie(address)
- if err != nil {
+ if storageTrie, err = state.StorageTrie(address); err != nil {
return nil, err
}
- storageHash := types.EmptyRootHash
- codeHash := state.GetCodeHash(address)
- storageProof := make([]StorageResult, len(storageKeys))
- // if we have a storageTrie, (which means the account exists), we can update the storagehash
+ // If we have a storageTrie, the account exists and we must update
+ // the storage root hash and the code hash.
if storageTrie != nil {
storageHash = storageTrie.Hash()
- } else {
- // no storageTrie means the account does not exist, so the codeHash is the hash of an empty bytearray.
- codeHash = crypto.Keccak256Hash(nil)
- }
+ codeHash = state.GetCodeHash(address)
+ }
+ // Create the proofs for the storageKeys.
+ for i, key := range keys {
+ // Output key encoding is a bit special: if the input was a 32-byte hash, it is
+ // returned as such. Otherwise, we apply the QUANTITY encoding mandated by the
+ // JSON-RPC spec for getProof. This behavior exists to preserve backwards
+ // compatibility with older client versions.
+ var outputKey string
+ if keyLengths[i] != 32 {
+ outputKey = hexutil.EncodeBig(key.Big())
+ } else {
+ outputKey = hexutil.Encode(key[:])
+ }
- // create the proof for the storageKeys
- for i, hexKey := range storageKeys {
- key, err := decodeHash(hexKey)
- if err != nil {
- return nil, err
+ if storageTrie == nil {
+ storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}}
+ continue
}
- if storageTrie != nil {
- proof, storageError := state.GetStorageProof(address, key)
- if storageError != nil {
- return nil, storageError
- }
- storageProof[i] = StorageResult{hexKey, (*hexutil.Big)(state.GetState(address, key).Big()), toHexSlice(proof)}
- } else {
- storageProof[i] = StorageResult{hexKey, &hexutil.Big{}, []string{}}
+ var proof proofList
+ if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil {
+ return nil, err
}
+ value := (*hexutil.Big)(state.GetState(address, key).Big())
+ storageProof[i] = StorageResult{outputKey, value, proof}
}
- // create the accountProof
+ // Create the accountProof.
accountProof, proofErr := state.GetProof(address)
if proofErr != nil {
return nil, proofErr
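The output-key rule added above (32-byte inputs echoed back verbatim, shorter inputs echoed back QUANTITY-encoded) can be isolated as a small helper. A hedged sketch; encodeProofKey is illustrative and not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// encodeProofKey mirrors the backwards-compatible getProof key encoding
// described in the hunk above: full 32-byte inputs come back as 32-byte hex,
// shorter inputs come back QUANTITY-encoded (leading zeros dropped).
func encodeProofKey(key common.Hash, inputLength int) string {
	if inputLength != 32 {
		return hexutil.EncodeBig(key.Big())
	}
	return hexutil.Encode(key[:])
}

func main() {
	short := common.BytesToHash([]byte{0x01})
	fmt.Println(encodeProofKey(short, 1))  // 0x1
	fmt.Println(encodeProofKey(short, 32)) // 0x0000...0001 (full 32 bytes)
}
```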
@@ -749,7 +781,7 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
// decodeHash parses a hex-encoded 32-byte hash. The input may optionally
// be prefixed by 0x and can have a byte length up to 32.
-func decodeHash(s string) (common.Hash, error) {
+func decodeHash(s string) (h common.Hash, inputLength int, err error) {
if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
s = s[2:]
}
@@ -758,17 +790,19 @@ func decodeHash(s string) (common.Hash, error) {
}
b, err := hex.DecodeString(s)
if err != nil {
- return common.Hash{}, errors.New("hex string invalid")
+ return common.Hash{}, 0, errors.New("hex string invalid")
}
if len(b) > 32 {
- return common.Hash{}, errors.New("hex string too long, want at most 32 bytes")
+ return common.Hash{}, len(b), errors.New("hex string too long, want at most 32 bytes")
}
- return common.BytesToHash(b), nil
+ return common.BytesToHash(b), len(b), nil
}
// GetHeaderByNumber returns the requested canonical block header.
-// * When blockNr is -1 the chain head is returned.
-// * When blockNr is -2 the pending chain head is returned.
+// - When blockNr is -1 the chain pending header is returned.
+// - When blockNr is -2 the chain latest header is returned.
+// - When blockNr is -3 the chain finalized header is returned.
+// - When blockNr is -4 the chain safe header is returned.
func (s *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) {
header, err := s.b.HeaderByNumber(ctx, number)
if header != nil && err == nil {
@@ -795,8 +829,10 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m
}
// GetBlockByNumber returns the requested canonical block.
-// - When blockNr is -1 the chain head is returned.
-// - When blockNr is -2 the pending chain head is returned.
+// - When blockNr is -1 the chain pending block is returned.
+// - When blockNr is -2 the chain latest block is returned.
+// - When blockNr is -3 the chain finalized block is returned.
+// - When blockNr is -4 the chain safe block is returned.
// - When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
@@ -891,7 +927,7 @@ func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address
if state == nil || err != nil {
return nil, err
}
- key, err := decodeHash(hexKey)
+ key, _, err := decodeHash(hexKey)
if err != nil {
return nil, fmt.Errorf("unable to decode storage key: %s", err)
}
@@ -1050,33 +1086,10 @@ func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.H
return header
}
-func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
- defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
-
- state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
- if state == nil || err != nil {
- return nil, err
- }
+func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
if err := overrides.Apply(state); err != nil {
return nil, err
}
- // If the request is for the pending block, override the block timestamp, number, and estimated
- // base fee, so that the check runs as if it were run on a newly generated block.
- if blkNumber, isNum := blockNrOrHash.Number(); isNum && blkNumber == rpc.PendingBlockNumber {
- // Override header with a copy to ensure the original header is not modified
- header = types.CopyHeader(header)
- // Grab the hash of the unmodified header, so that the modified header can point to the
- // prior block as its parent.
- parentHash := header.Hash()
- header.Time = uint64(time.Now().Unix())
- header.ParentHash = parentHash
- header.Number = new(big.Int).Add(header.Number, big.NewInt(1))
- estimatedBaseFee, err := b.EstimateBaseFee(ctx)
- if err != nil {
- return nil, err
- }
- header.BaseFee = estimatedBaseFee
- }
// Setup context so it may be cancelled when the call has completed
// or, in case of unmetered gas, setup a context with a timeout.
@@ -1125,6 +1138,35 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash
return result, nil
}
+func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
+ defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
+
+ state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if state == nil || err != nil {
+ return nil, err
+ }
+
+ // If the request is for the pending block, override the block timestamp, number, and estimated
+ // base fee, so that the check runs as if it were run on a newly generated block.
+ if blkNumber, isNum := blockNrOrHash.Number(); isNum && blkNumber == rpc.PendingBlockNumber {
+ // Override header with a copy to ensure the original header is not modified
+ header = types.CopyHeader(header)
+ // Grab the hash of the unmodified header, so that the modified header can point to the
+ // prior block as its parent.
+ parentHash := header.Hash()
+ header.Time = uint64(time.Now().Unix())
+ header.ParentHash = parentHash
+ header.Number = new(big.Int).Add(header.Number, big.NewInt(1))
+ estimatedBaseFee, err := b.EstimateBaseFee(ctx)
+ if err != nil {
+ return nil, err
+ }
+ header.BaseFee = estimatedBaseFee
+ }
+
+ return doCall(ctx, b, args, state, header, overrides, blockOverrides, timeout, globalGasCap)
+}
+
func newRevertError(result *core.ExecutionResult) *revertError {
reason, errUnpack := abi.UnpackRevert(result.Revert())
err := errors.New("execution reverted")
@@ -1206,7 +1248,7 @@ func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrO
return result.Return(), result.Err
}
-func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) {
+func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) {
// Binary search the gas requirement, as it may be higher than the amount used
var (
lo uint64 = params.TxGas - 1
@@ -1248,6 +1290,10 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
if err != nil {
return 0, err
}
+ err = overrides.Apply(state)
+ if err != nil {
+ return 0, err
+ }
balance := state.GetBalance(*args.From) // from can't be nil
available := new(big.Int).Set(balance)
if args.Value != nil {
@@ -1277,10 +1323,10 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
cap = hi
// Create a helper to check if a gas allowance results in an executable transaction
- executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
+ executable := func(gas uint64, state *state.StateDB, header *types.Header) (bool, *core.ExecutionResult, error) {
args.Gas = (*hexutil.Uint64)(&gas)
- result, err := DoCall(ctx, b, args, blockNrOrHash, nil, nil, 0, gasCap)
+ result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap)
if err != nil {
if errors.Is(err, core.ErrIntrinsicGas) {
return true, nil, nil // Special case, raise gas limit
@@ -1289,10 +1335,19 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
}
return result.Failed(), result, nil
}
+ state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if state == nil || err != nil {
+ return 0, err
+ }
+ err = overrides.Apply(state)
+ if err != nil {
+ return 0, err
+ }
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
+ s := state.Copy()
mid := (hi + lo) / 2
- failed, _, err := executable(mid)
+ failed, _, err := executable(mid, s, header)
// If the error is not nil(consensus error), it means the provided message
// call or transaction will never be accepted no matter how much gas it is
@@ -1308,7 +1363,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
- failed, result, err := executable(hi)
+ failed, result, err := executable(hi, state, header)
if err != nil {
return 0, err
}
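The hunks above thread a copied StateDB through each probe because executing a call mutates state; the surrounding search itself is a standard binary search on the gas limit. A minimal, dependency-free sketch of that loop, where failed is a stand-in for the executable closure above (true means the call fails at the given gas):

```go
package main

import "fmt"

// searchGas narrows [lo, hi] until lo+1 == hi, keeping hi as the smallest
// gas amount at which the probe did not fail.
func searchGas(lo, hi uint64, failed func(gas uint64) bool) uint64 {
	for lo+1 < hi {
		mid := (hi + lo) / 2
		if failed(mid) {
			lo = mid
		} else {
			hi = mid
		}
	}
	return hi
}

func main() {
	// Pretend the call needs exactly 53000 gas.
	need := uint64(53000)
	fmt.Println(searchGas(21000-1, 8_000_000, func(gas uint64) bool { return gas < need })) // 53000
}
```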
@@ -1328,12 +1383,12 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
// EstimateGas returns an estimate of the amount of gas needed to execute the
// given transaction against the current pending block.
-func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) {
+func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) {
bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
if blockNrOrHash != nil {
bNrOrHash = *blockNrOrHash
}
- return DoEstimateGas(ctx, s.b, args, bNrOrHash, s.b.RPCGasCap())
+ return DoEstimateGas(ctx, s.b, args, bNrOrHash, overrides, s.b.RPCGasCap())
}
// RPCMarshalHeader converts the given header to the RPC output.
@@ -1350,7 +1405,6 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
"miner": head.Coinbase,
"difficulty": (*hexutil.Big)(head.Difficulty),
"extraData": hexutil.Bytes(head.Extra),
- "size": hexutil.Uint64(head.Size()),
"gasLimit": hexutil.Uint64(head.GasLimit),
"gasUsed": hexutil.Uint64(head.GasUsed),
"timestamp": hexutil.Uint64(head.Time),
@@ -1371,7 +1425,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes.
-func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) (map[string]interface{}, error) {
+func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) map[string]interface{} {
fields := RPCMarshalHeader(block.Header())
fields["size"] = hexutil.Uint64(block.Size())
@@ -1397,8 +1451,7 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
uncleHashes[i] = uncle.Hash()
}
fields["uncles"] = uncleHashes
-
- return fields, nil
+ return fields
}
// rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires
@@ -1414,16 +1467,13 @@ func (s *BlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Head
// rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires
// a `BlockchainAPI`.
func (s *BlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
- fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig())
- if err != nil {
- return nil, err
- }
+ fields := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig())
if inclTx {
// Note: Subnet-EVM enforces that the difficulty of a block is always 1, such that the total difficulty of a block
// will be equivalent to its height.
fields["totalDifficulty"] = (*hexutil.Big)(b.Number())
}
- return fields, err
+ return fields, nil
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
@@ -1447,6 +1497,7 @@ type RPCTransaction struct {
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
+ YParity *hexutil.Uint64 `json:"yParity,omitempty"`
}
// newRPCTransaction returns a transaction that will serialize to the RPC
@@ -1474,25 +1525,32 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
result.TransactionIndex = (*hexutil.Uint64)(&index)
}
+
switch tx.Type() {
case types.LegacyTxType:
// if a legacy transaction has an EIP-155 chain id, include it explicitly
if id := tx.ChainId(); id.Sign() != 0 {
result.ChainID = (*hexutil.Big)(id)
}
+
case types.AccessListTxType:
al := tx.AccessList()
+ yparity := hexutil.Uint64(v.Sign())
result.Accesses = &al
result.ChainID = (*hexutil.Big)(tx.ChainId())
+ result.YParity = &yparity
+
case types.DynamicFeeTxType:
al := tx.AccessList()
+ yparity := hexutil.Uint64(v.Sign())
result.Accesses = &al
result.ChainID = (*hexutil.Big)(tx.ChainId())
+ result.YParity = &yparity
result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
// if the transaction has been mined, compute the effective gas price
if baseFee != nil && blockHash != (common.Hash{}) {
- // price = min(tip, gasFeeCap - baseFee) + baseFee
+ // price = min(gasTipCap + baseFee, gasFeeCap)
price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
result.GasPrice = (*hexutil.Big)(price)
} else {
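The corrected comment above states the effective gas price as min(gasTipCap + baseFee, gasFeeCap). The same rule in isolation, using math.BigMin from go-ethereum's common/math package (a sketch, not part of the patch):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

// effectiveGasPrice is what a mined EIP-1559 transaction actually pays per gas:
// the tip on top of the base fee, capped by the fee cap.
func effectiveGasPrice(gasTipCap, gasFeeCap, baseFee *big.Int) *big.Int {
	return math.BigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap)
}

func main() {
	fmt.Println(effectiveGasPrice(big.NewInt(2), big.NewInt(100), big.NewInt(25))) // 27
	fmt.Println(effectiveGasPrice(big.NewInt(2), big.NewInt(26), big.NewInt(25)))  // 26 (fee cap binds)
}
```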
@@ -1589,7 +1647,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
to = crypto.CreateAddress(args.from(), uint64(*args.Nonce))
}
// Retrieve the precompiles since they don't need to be added to the access list
- precompiles := vm.ActivePrecompiles(b.ChainConfig().AvalancheRules(header.Number, header.Time))
+ precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, header.Time))
// Create an initial tracer
prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles)
@@ -1603,8 +1661,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
// Copy the original db so we don't modify it
statedb := db.Copy()
- // Set the access list tracer to the last al
-
+ // Set the access list to the last computed access list
args.AccessList = &accessList
msg, err := args.ToMessage(b.RPCGasCap(), header.BaseFee)
if err != nil {
@@ -1642,7 +1699,6 @@ type BadBlockArgs struct {
// and returns them as a JSON list of block hashes.
func (s *BlockChainAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) {
var (
- err error
badBlocks, reasons = s.b.BadBlocks()
results = make([]*BadBlockArgs, 0, len(badBlocks))
)
@@ -1656,9 +1712,7 @@ func (s *BlockChainAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, erro
} else {
blockRlp = fmt.Sprintf("%#x", rlpBytes)
}
- if blockJSON, err = RPCMarshalBlock(block, true, true, s.b.ChainConfig()); err != nil {
- blockJSON = map[string]interface{}{"error": err.Error()}
- }
+ blockJSON = RPCMarshalBlock(block, true, true, s.b.ChainConfig())
results = append(results, &BadBlockArgs{
Hash: block.Hash(),
RLP: blockRlp,
@@ -1797,7 +1851,7 @@ func (s *TransactionAPI) GetRawTransactionByHash(ctx context.Context, hash commo
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) {
tx, blockHash, blockNumber, index, err := s.b.GetTransaction(ctx, hash)
- if err != nil {
+ if tx == nil || err != nil {
// When the transaction doesn't exist, the RPC method should return JSON null
// as per specification.
return nil, nil
@@ -1806,12 +1860,11 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.
if err != nil {
return nil, err
}
-
receipts, err := s.b.GetReceipts(ctx, blockHash)
if err != nil {
return nil, err
}
- if len(receipts) <= int(index) {
+ if uint64(len(receipts)) <= index {
return nil, nil
}
receipt := receipts[index]
@@ -1840,6 +1893,7 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u
"type": hexutil.Uint(tx.Type()),
"effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice),
}
+
// Assign receipt status or post state.
if len(receipt.PostState) > 0 {
fields["root"] = hexutil.Bytes(receipt.PostState)
@@ -2203,7 +2257,6 @@ func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, err
// NetAPI offers network related RPC methods
type NetAPI struct {
- // net *p2p.Server
networkVersion uint64
}
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 45884d2995..f168777153 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -27,21 +27,16 @@
package ethapi
import (
- "bytes"
"context"
"crypto/ecdsa"
"encoding/json"
"errors"
"fmt"
- "hash"
"math/big"
"reflect"
- "sort"
"testing"
"time"
- "github.com/stretchr/testify/require"
-
"github.com/ava-labs/subnet-evm/accounts"
"github.com/ava-labs/subnet-evm/commontype"
"github.com/ava-labs/subnet-evm/consensus"
@@ -52,6 +47,7 @@ import (
"github.com/ava-labs/subnet-evm/core/state"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/core/vm"
+ "github.com/ava-labs/subnet-evm/internal/blocktest"
"github.com/ava-labs/subnet-evm/params"
"github.com/ava-labs/subnet-evm/rpc"
"github.com/ethereum/go-ethereum/common"
@@ -59,7 +55,9 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
- "golang.org/x/crypto/sha3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
)
func TestTransaction_RoundTripRpcJSON(t *testing.T) {
@@ -72,7 +70,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) {
t.Parallel()
for i, tt := range tests {
var tx2 types.Transaction
- tx, err := types.SignNewTx(key, signer, tt)
+ tx, err := types.SignNewTx(key, signer, tt.Tx)
if err != nil {
t.Fatalf("test %d: signing failed: %v", i, err)
}
@@ -85,7 +83,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) {
t.Fatalf("test %d: stx changed, want %x have %x", i, want, have)
}
- // rpcTransaction
+ // rpcTransaction
rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config)
if data, err := json.Marshal(rpcTx); err != nil {
t.Fatalf("test %d: marshalling failed; %v", i, err)
@@ -93,102 +91,259 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) {
t.Fatalf("test %d: unmarshal failed: %v", i, err)
} else if want, have := tx.Hash(), tx2.Hash(); want != have {
t.Fatalf("test %d: tx changed, want %x have %x", i, want, have)
+ } else {
+ want, have := tt.Want, string(data)
+ require.JSONEqf(t, want, have, "test %d: rpc json not match, want %s have %s", i, want, have)
}
}
}
-func allTransactionTypes(addr common.Address, config *params.ChainConfig) []types.TxData {
- return []types.TxData{
- &types.LegacyTx{
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: &addr,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- V: big.NewInt(9),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.LegacyTx{
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: nil,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.AccessListTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: &addr,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{
- types.AccessTuple{
- Address: common.Address{0x2},
- StorageKeys: []common.Hash{types.EmptyRootHash},
+type txData struct {
+ Tx types.TxData
+ Want string
+}
+
+func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txData {
+ return []txData{
+ {
+ Tx: &types.LegacyTx{
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: &addr,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ V: big.NewInt(9),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
+ },
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x3fa586d2448ae279279fa7036da74eb932763661543428c1a0aba21b95b37bdb",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": "0xdead000000000000000000000000000000000000",
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x0",
+ "chainId": "0x1",
+ "v": "0x25",
+ "r": "0xac639f4319e9268898e29444b97101f1225e2a0837151626da23e73dda2443fc",
+ "s": "0x4fcc3f4c3a75f70ee45bb42d4b0aad432cc8c0140efb3e2611d6a6dda8460907"
+ }`,
+ }, {
+ Tx: &types.LegacyTx{
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: nil,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
+ },
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x617a316c6ff7ed2aa6ead1b4bb28a1322c2156c1c72f376a976d2d2adb1748ee",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": null,
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x0",
+ "chainId": "0x1",
+ "v": "0x25",
+ "r": "0xee8e89b513778d4815ae5969f3d55e0f7590f31b08f2a2290d5bc4ae37fce299",
+ "s": "0x663db5c74c10e2b6525e7026e7cfd569b819ec91a042322655ff2b35060784b1"
+ }`,
+ },
+ {
+ Tx: &types.AccessListTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: &addr,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{
+ types.AccessTuple{
+ Address: common.Address{0x2},
+ StorageKeys: []common.Hash{types.EmptyRootHash},
+ },
},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.AccessListTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: nil,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{
- types.AccessTuple{
- Address: common.Address{0x2},
- StorageKeys: []common.Hash{types.EmptyRootHash},
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x6becb7b9c171aa0d6d0a90dcd97bc3529c4d521f9cc9b7e31616aa9afc178c10",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": "0xdead000000000000000000000000000000000000",
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x1",
+ "accessList": [
+ {
+ "address": "0x0200000000000000000000000000000000000000",
+ "storageKeys": [
+ "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ ]
+ }
+ ],
+ "chainId": "0x1",
+ "v": "0x1",
+ "r": "0xea2289ca0243beecbe69d337bbc53c618e1fb6bd2ec69fd4121df47149a4d4be",
+ "s": "0x2dc134b6bc43abbdfebef0b2d62c175459fc1e8ddff60c8e740c461d7ea1522f",
+ "yParity": "0x1"
+ }`,
+ }, {
+ Tx: &types.AccessListTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: nil,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{
+ types.AccessTuple{
+ Address: common.Address{0x2},
+ StorageKeys: []common.Hash{types.EmptyRootHash},
+ },
},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.DynamicFeeTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasTipCap: big.NewInt(6),
- GasFeeCap: big.NewInt(9),
- Gas: 7,
- To: &addr,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{
- types.AccessTuple{
- Address: common.Address{0x2},
- StorageKeys: []common.Hash{types.EmptyRootHash},
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x22fbf81bae4640511c706e2c72d2f2ef1abc1e7861f2b82c4cae5b102a40709c",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": null,
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x1",
+ "accessList": [
+ {
+ "address": "0x0200000000000000000000000000000000000000",
+ "storageKeys": [
+ "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ ]
+ }
+ ],
+ "chainId": "0x1",
+ "v": "0x1",
+ "r": "0xc50e18edd861639735ec69ca12d82fcbb2c1921d2e2a8fd3a75f408d2d4b8118",
+ "s": "0x32a908d1bc2db0229354f4dd392ffc37934e24ae8b18a620c6588c72660b6238",
+ "yParity": "0x1"
+ }`,
+ }, {
+ Tx: &types.DynamicFeeTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasTipCap: big.NewInt(6),
+ GasFeeCap: big.NewInt(9),
+ Gas: 7,
+ To: &addr,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{
+ types.AccessTuple{
+ Address: common.Address{0x2},
+ StorageKeys: []common.Hash{types.EmptyRootHash},
+ },
},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.DynamicFeeTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasTipCap: big.NewInt(6),
- GasFeeCap: big.NewInt(9),
- Gas: 7,
- To: nil,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x9",
+ "maxFeePerGas": "0x9",
+ "maxPriorityFeePerGas": "0x6",
+ "hash": "0xc5763d2ce6af3f694dcda8a9a50d4f75005a711edd382e993dd0406e0c54cfde",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": "0xdead000000000000000000000000000000000000",
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x2",
+ "accessList": [
+ {
+ "address": "0x0200000000000000000000000000000000000000",
+ "storageKeys": [
+ "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ ]
+ }
+ ],
+ "chainId": "0x1",
+ "v": "0x0",
+ "r": "0x740eb1e3bc206760182993845b7815fd8cf7a42f1a1ed26027f736e9eccfd20f",
+ "s": "0x31da567e2b3a83e58e42f7902c3706926c926625f6978c24fdaa21b9d143bbf7",
+ "yParity": "0x0"
+ }`,
+ }, {
+ Tx: &types.DynamicFeeTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasTipCap: big.NewInt(6),
+ GasFeeCap: big.NewInt(9),
+ Gas: 7,
+ To: nil,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
+ },
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x9",
+ "maxFeePerGas": "0x9",
+ "maxPriorityFeePerGas": "0x6",
+ "hash": "0x85545f69b2410640fbbb7157b9a79adece45bac4b2803733d250d049e9501a28",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": null,
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x2",
+ "accessList": [],
+ "chainId": "0x1",
+ "v": "0x1",
+ "r": "0x5004538adbe499313737033b22eb2b50a9450f02fab3971a591e6d57761b2cdf",
+ "s": "0x5f7b1f5d11bd467d84f32beb2e89629351b96c5204c4f72d5d2040bee369a73a",
+ "yParity": "0x1"
+ }`,
},
}
}
@@ -200,10 +355,7 @@ type testBackend struct {
func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {
var (
- engine = dummy.NewCoinbaseFaker()
- backend = &testBackend{
- db: rawdb.NewMemoryDatabase(),
- }
+ engine = dummy.NewCoinbaseFaker()
cacheConfig = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
@@ -212,15 +364,21 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
}
)
// Generate blocks for testing
- _, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator)
- chain, err := core.NewBlockChain(backend.db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false)
+ db, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator)
+ chain, err := core.NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, gspec.ToBlock().Hash(), false)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
if n, err := chain.InsertChain(blocks); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
- backend.chain = chain
+ for _, block := range blocks {
+ if err := chain.Accept(block); err != nil {
+ t.Fatalf("block %d: failed to accept into chain: %v", block.NumberU64(), err)
+ }
+ }
+
+ backend := &testBackend{db: db, chain: chain}
return backend
}
@@ -248,10 +406,16 @@ func (b testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types
return b.chain.GetHeaderByHash(hash), nil
}
func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {
- panic("implement me")
+ if blockNr, ok := blockNrOrHash.Number(); ok {
+ return b.HeaderByNumber(ctx, blockNr)
+ }
+ if blockHash, ok := blockNrOrHash.Hash(); ok {
+ return b.HeaderByHash(ctx, blockHash)
+ }
+ panic("unknown type rpc.BlockNumberOrHash")
}
-func (b testBackend) CurrentHeader() *types.Header { panic("implement me") }
-func (b testBackend) CurrentBlock() *types.Header { panic("implement me") }
+func (b testBackend) CurrentHeader() *types.Header { return b.chain.CurrentBlock() }
+func (b testBackend) CurrentBlock() *types.Header { return b.chain.CurrentBlock() }
func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
if number == rpc.LatestBlockNumber {
head := b.chain.CurrentBlock()
@@ -303,7 +467,6 @@ func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.R
receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config())
return receipts, nil
}
-func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { panic("implement me") }
func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) (*vm.EVM, func() error) {
vmError := func() error { return nil }
if vmConfig == nil {
@@ -332,7 +495,8 @@ func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) er
panic("implement me")
}
func (b testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
- panic("implement me")
+ tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.db, txHash)
+ return tx, blockHash, blockNumber, index, nil
}
func (b testBackend) GetPoolTransactions() (types.Transactions, error) { panic("implement me") }
func (b testBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { panic("implement me") }
@@ -340,10 +504,10 @@ func (b testBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uin
panic("implement me")
}
func (b testBackend) Stats() (pending int, queued int) { panic("implement me") }
-func (b testBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
+func (b testBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
panic("implement me")
}
-func (b testBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
+func (b testBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
panic("implement me")
}
func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription {
@@ -402,6 +566,7 @@ func TestEstimateGas(t *testing.T) {
var testSuite = []struct {
blockNumber rpc.BlockNumber
call TransactionArgs
+ overrides StateOverride
expectErr error
want uint64
}{
@@ -434,9 +599,30 @@ func TestEstimateGas(t *testing.T) {
expectErr: nil,
want: 53000,
},
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ call: TransactionArgs{},
+ overrides: StateOverride{
+ randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))},
+ },
+ expectErr: nil,
+ want: 53000,
+ },
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ call: TransactionArgs{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[1].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ },
+ overrides: StateOverride{
+ randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(0))},
+ },
+ expectErr: core.ErrInsufficientFunds,
+ },
}
for i, tc := range testSuite {
- result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber})
+ result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides)
if tc.expectErr != nil {
if err == nil {
t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr)
@@ -631,19 +817,13 @@ type Account struct {
addr common.Address
}
-type Accounts []Account
-
-func (a Accounts) Len() int { return len(a) }
-func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 }
-
-func newAccounts(n int) (accounts Accounts) {
+func newAccounts(n int) (accounts []Account) {
for i := 0; i < n; i++ {
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
accounts = append(accounts, Account{key: key, addr: addr})
}
- sort.Sort(accounts)
+ slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) })
return accounts
}
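The sort.Sort-based Accounts boilerplate removed above is replaced by golang.org/x/exp/slices, which go.mod now lists as a direct dependency. A standalone sketch of the same comparator-based sort (the account type here is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type account struct {
	name string
}

func main() {
	accs := []account{{"charlie"}, {"alice"}, {"bob"}}
	// SortFunc takes a three-way comparator returning <0, 0, or >0,
	// matching the addr.Cmp-based call in the hunk above.
	slices.SortFunc(accs, func(a, b account) int {
		return strings.Compare(a.name, b.name)
	})
	fmt.Println(accs) // [{alice} {bob} {charlie}]
}
```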
@@ -657,32 +837,8 @@ func hex2Bytes(str string) *hexutil.Bytes {
return &rpcBytes
}
-// testHasher is the helper tool for transaction/receipt list hashing.
-// The original hasher is trie, in order to get rid of import cycle,
-// use the testing hasher instead.
-type testHasher struct {
- hasher hash.Hash
-}
-
-func newHasher() *testHasher {
- return &testHasher{hasher: sha3.NewLegacyKeccak256()}
-}
-
-func (h *testHasher) Reset() {
- h.hasher.Reset()
-}
-
-func (h *testHasher) Update(key, val []byte) error {
- h.hasher.Write(key)
- h.hasher.Write(val)
- return nil
-}
-
-func (h *testHasher) Hash() common.Hash {
- return common.BytesToHash(h.hasher.Sum(nil))
-}
-
func TestRPCMarshalBlock(t *testing.T) {
+ t.Parallel()
var (
txs []*types.Transaction
to = common.BytesToAddress([]byte{0x11})
@@ -711,7 +867,7 @@ func TestRPCMarshalBlock(t *testing.T) {
}
txs = append(txs, tx)
}
- block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, newHasher())
+ block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher())
var testSuite = []struct {
inclTx bool
@@ -722,37 +878,1037 @@ func TestRPCMarshalBlock(t *testing.T) {
{
inclTx: false,
fullTx: false,
- want: `{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`,
+ want: `{
+ "difficulty": "0x0",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x64",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x296",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x0",
+ "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e",
+ "uncles": []
+ }`,
},
// only tx hashes
{
inclTx: true,
fullTx: false,
- want: `{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":["0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1"],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`,
+ want: `{
+ "difficulty": "0x0",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x64",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x296",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x0",
+ "transactions": [
+ "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605",
+ "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4",
+ "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5",
+ "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1"
+ ],
+ "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e",
+ "uncles": []
+ }`,
},
-
// full tx details
{
inclTx: true,
fullTx: true,
- want: `{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":[{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","input":"0x111111","nonce":"0x1","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x0","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","input":"0x111111","nonce":"0x2","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x1","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","input":"0x111111","nonce":"0x3","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x2","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1","input":"0x111111","nonce":"0x4","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x3","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"}],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`,
+ want: `{
+ "difficulty": "0x0",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x64",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x296",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x0",
+ "transactions": [
+ {
+ "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605",
+ "input": "0x111111",
+ "nonce": "0x1",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x0",
+ "value": "0x6f",
+ "type": "0x1",
+ "accessList": [],
+ "chainId": "0x539",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0",
+ "yParity": "0x0"
+ },
+ {
+ "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4",
+ "input": "0x111111",
+ "nonce": "0x2",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x1",
+ "value": "0x6f",
+ "type": "0x0",
+ "chainId": "0x7fffffffffffffee",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0"
+ },
+ {
+ "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5",
+ "input": "0x111111",
+ "nonce": "0x3",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x2",
+ "value": "0x6f",
+ "type": "0x1",
+ "accessList": [],
+ "chainId": "0x539",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0",
+ "yParity": "0x0"
+ },
+ {
+ "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1",
+ "input": "0x111111",
+ "nonce": "0x4",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x3",
+ "value": "0x6f",
+ "type": "0x0",
+ "chainId": "0x7fffffffffffffee",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0"
+ }
+ ],
+ "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e",
+ "uncles": []
+ }`,
},
}
for i, tc := range testSuite {
- resp, err := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.TestChainConfig)
+ resp := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.TestSubnetEVMConfig)
+ out, err := json.Marshal(resp)
if err != nil {
- t.Errorf("test %d: got error %v", i, err)
+ t.Errorf("test %d: json marshal error: %v", i, err)
continue
}
- out, err := json.Marshal(resp)
+ assert.JSONEqf(t, tc.want, string(out), "test %d", i)
+ }
+}
+
+func TestRPCGetBlockOrHeader(t *testing.T) {
+ t.Parallel()
+
+ // Initialize test accounts
+ var (
+ acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+ acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
+ acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey)
+ acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey)
+ genesis = &core.Genesis{
+ Config: params.TestSubnetEVMConfig,
+ Alloc: core.GenesisAlloc{
+ acc1Addr: {Balance: big.NewInt(params.Ether)},
+ acc2Addr: {Balance: big.NewInt(params.Ether)},
+ },
+ }
+ genBlocks = 10
+ signer = types.HomesteadSigner{}
+ tx = types.NewTx(&types.LegacyTx{
+ Nonce: 11,
+ GasPrice: big.NewInt(11111),
+ Gas: 1111,
+ To: &acc2Addr,
+ Value: big.NewInt(111),
+ Data: []byte{0x11, 0x11, 0x11},
+ })
+ pending = types.NewBlock(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, blocktest.NewHasher())
+ )
+ backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ // Transfer from account[0] to account[1]
+ // value: 1000 wei
+ // fee: 0 wei
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, acc1Key)
+ b.AddTx(tx)
+ })
+ api := NewBlockChainAPI(backend)
+ blockHashes := make([]common.Hash, genBlocks+1)
+ ctx := context.Background()
+ for i := 0; i <= genBlocks; i++ {
+ header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
if err != nil {
- t.Errorf("test %d: json marshal error: %v", i, err)
+ t.Errorf("failed to get block: %d err: %v", i, err)
+ }
+ blockHashes[i] = header.Hash()
+ }
+ pendingHash := pending.Hash()
+
+ var testSuite = []struct {
+ blockNumber rpc.BlockNumber
+ blockHash *common.Hash
+ fullTx bool
+ reqHeader bool
+ want string
+ expectErr error
+ }{
+ // 0. latest header
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01"
+ }`,
+ },
+ // 1. genesis header
+ {
+ blockNumber: rpc.BlockNumber(0),
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "difficulty": "0x20000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ }`,
+ },
+ // 2. #1 header
+ {
+ blockNumber: rpc.BlockNumber(1),
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5"
+ }`,
+ },
+ // 3. latest-1 header
+ {
+ blockNumber: rpc.BlockNumber(9),
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb"
+ }`,
+ },
+ // 4. latest+1 header
+ {
+ blockNumber: rpc.BlockNumber(11),
+ reqHeader: true,
+ want: "null",
+ },
+ // 5. pending header
+ {
+ blockNumber: rpc.PendingBlockNumber,
+ reqHeader: true,
+ want: "null",
+ },
+ // 6. latest block
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2bb",
+ "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactions": [
+ "0x71be223424ab6e3457513a760b196d43b094414c32a70ff929b2b720a16b832d"
+ ],
+ "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01",
+ "uncles": []
+ }`,
+ },
+ // 7. genesis block
+ {
+ blockNumber: rpc.BlockNumber(0),
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "difficulty": "0x20000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x201",
+ "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactions": [],
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncles": []
+ }`,
+ },
+ // 8. #1 block
+ {
+ blockNumber: rpc.BlockNumber(1),
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2bb",
+ "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactions": [
+ "0xdf92bc7c4c0341ecbdcd2a3ca7011fe9e21df4b8553bf0c8caabe6cb4a1aee26"
+ ],
+ "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5",
+ "uncles": []
+ }`,
+ },
+ // 9. latest-1 block
+ {
+ blockNumber: rpc.BlockNumber(9),
+ fullTx: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2bb",
+ "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactions": [
+ {
+ "blockHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "blockNumber": "0x9",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gas": "0x5208",
+ "gasPrice": "0x5d21dba00",
+ "hash": "0x237f95840187a93f8aaf8d6f1515f8a8ac9d9359fcb0c220cdb3d642d6b9a19a",
+ "input": "0x",
+ "nonce": "0x8",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionIndex": "0x0",
+ "value": "0x3e8",
+ "type": "0x0",
+ "v": "0x1c",
+ "r": "0xd7cdc527490b7ba29c515aae3bbe80c67729cda7f736e6515652cfc40e9da68f",
+ "s": "0x4d0a4a59bef165b16f910bdadd41efaaad1b73549bacc35eaf6d073eb1fb92b7"
+ }
+ ],
+ "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb",
+ "uncles": []
+ }`,
+ },
+ // 10. latest+1 block
+ {
+ blockNumber: rpc.BlockNumber(11),
+ fullTx: true,
+ want: "null",
+ },
+ // 11. pending block
+ {
+ blockNumber: rpc.PendingBlockNumber,
+ want: "null",
+ },
+ // 12. pending block + fullTx
+ {
+ blockNumber: rpc.PendingBlockNumber,
+ fullTx: true,
+ want: "null",
+ },
+ // 13. latest header by hash
+ {
+ blockHash: &blockHashes[len(blockHashes)-1],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01"
+ }`,
+ },
+ // 14. genesis header by hash
+ {
+ blockHash: &blockHashes[0],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "difficulty": "0x20000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ }`,
+ },
+ // 15. #1 header
+ {
+ blockHash: &blockHashes[1],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5"
+ }`,
+ },
+ // 16. latest-1 header
+ {
+ blockHash: &blockHashes[len(blockHashes)-2],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb"
+ }`,
+ },
+ // 17. empty hash
+ {
+ blockHash: &common.Hash{},
+ reqHeader: true,
+ want: "null",
+ },
+ // 18. pending hash
+ {
+ blockHash: &pendingHash,
+ reqHeader: true,
+ want: `null`,
+ },
+ // 19. latest block
+ {
+ blockHash: &blockHashes[len(blockHashes)-1],
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2bb",
+ "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactions": [
+ "0x71be223424ab6e3457513a760b196d43b094414c32a70ff929b2b720a16b832d"
+ ],
+ "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01",
+ "uncles": []
+ }`,
+ },
+ // 20. genesis block
+ {
+ blockHash: &blockHashes[0],
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "difficulty": "0x20000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x201",
+ "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactions": [],
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncles": []
+ }`,
+ },
+ // 21. #1 block
+ {
+ blockHash: &blockHashes[1],
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2bb",
+ "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactions": [
+ "0xdf92bc7c4c0341ecbdcd2a3ca7011fe9e21df4b8553bf0c8caabe6cb4a1aee26"
+ ],
+ "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5",
+ "uncles": []
+ }`,
+ },
+ // 22. latest-1 block
+ {
+ blockHash: &blockHashes[len(blockHashes)-2],
+ fullTx: true,
+ want: `{
+ "baseFeePerGas": "0x5d21dba00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0x7a1200",
+ "gasUsed": "0x5208",
+ "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2bb",
+ "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactions": [
+ {
+ "blockHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365",
+ "blockNumber": "0x9",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gas": "0x5208",
+ "gasPrice": "0x5d21dba00",
+ "hash": "0x237f95840187a93f8aaf8d6f1515f8a8ac9d9359fcb0c220cdb3d642d6b9a19a",
+ "input": "0x",
+ "nonce": "0x8",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionIndex": "0x0",
+ "value": "0x3e8",
+ "type": "0x0",
+ "v": "0x1c",
+ "r": "0xd7cdc527490b7ba29c515aae3bbe80c67729cda7f736e6515652cfc40e9da68f",
+ "s": "0x4d0a4a59bef165b16f910bdadd41efaaad1b73549bacc35eaf6d073eb1fb92b7"
+ }
+ ],
+ "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb",
+ "uncles": []
+ }`,
+ },
+ // 23. empty hash + body
+ {
+ blockHash: &common.Hash{},
+ fullTx: true,
+ want: "null",
+ },
+ // 24. pending block
+ {
+ blockHash: &pendingHash,
+ want: `null`,
+ },
+ // 25. pending block + fullTx
+ {
+ blockHash: &pendingHash,
+ fullTx: true,
+ want: `null`,
+ },
+ }
+
+ for i, tt := range testSuite {
+ var (
+ result map[string]interface{}
+ err error
+ )
+ if tt.blockHash != nil {
+ if tt.reqHeader {
+ result = api.GetHeaderByHash(context.Background(), *tt.blockHash)
+ } else {
+ result, err = api.GetBlockByHash(context.Background(), *tt.blockHash, tt.fullTx)
+ }
+ } else {
+ if tt.reqHeader {
+ result, err = api.GetHeaderByNumber(context.Background(), tt.blockNumber)
+ } else {
+ result, err = api.GetBlockByNumber(context.Background(), tt.blockNumber, tt.fullTx)
+ }
+ }
+ if tt.expectErr != nil {
+ if err == nil {
+ t.Errorf("test %d: want error %v, have nothing", i, tt.expectErr)
+ continue
+ }
+ if !errors.Is(err, tt.expectErr) {
+ t.Errorf("test %d: error mismatch, want %v, have %v", i, tt.expectErr, err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("test %d: want no error, have %v", i, err)
+ continue
+ }
+ data, err := json.Marshal(result)
+ if err != nil {
+ t.Errorf("test %d: json marshal error", i)
continue
}
- if have := string(out); have != tc.want {
- t.Errorf("test %d: want: %s have: %s", i, tc.want, have)
+ want, have := tt.want, string(data)
+ require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
+ }
+}
+
+func TestRPCGetTransactionReceipt(t *testing.T) {
+ t.Parallel()
+
+ // Initialize test accounts
+ var (
+ acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+ acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
+ acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey)
+ acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey)
+ contract = common.HexToAddress("0000000000000000000000000000000000031ec7")
+ genesis = &core.Genesis{
+ Config: params.TestSubnetEVMConfig,
+ Alloc: core.GenesisAlloc{
+ acc1Addr: {Balance: big.NewInt(params.Ether)},
+ acc2Addr: {Balance: big.NewInt(params.Ether)},
+ // // SPDX-License-Identifier: GPL-3.0
+ // pragma solidity >=0.7.0 <0.9.0;
+ //
+ // contract Token {
+ // event Transfer(address indexed from, address indexed to, uint256 value);
+ // function transfer(address to, uint256 value) public returns (bool) {
+ // emit Transfer(msg.sender, to, value);
+ // return true;
+ // }
+ // }
+ contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")},
+ },
}
+ genBlocks = 5
+ signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID)
+ txHashes = make([]common.Hash, genBlocks)
+ )
+ backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ var (
+ tx *types.Transaction
+ err error
+ )
+ switch i {
+ case 0:
+ // transfer 1000 wei
+ tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key)
+ case 1:
+ // create contract
+ tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key)
+ case 2:
+ // with logs
+ // transfer(address to, uint256 value)
+ data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:])
+ tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &contract, Gas: 60000, GasPrice: b.BaseFee(), Data: common.FromHex(data)}), signer, acc1Key)
+ case 3:
+ // dynamic fee with logs
+ // transfer(address to, uint256 value)
+ data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:])
+ fee := big.NewInt(500)
+ fee.Add(fee, b.BaseFee())
+ tx, err = types.SignTx(types.NewTx(&types.DynamicFeeTx{Nonce: uint64(i), To: &contract, Gas: 60000, Value: big.NewInt(1), GasTipCap: big.NewInt(500), GasFeeCap: fee, Data: common.FromHex(data)}), signer, acc1Key)
+ case 4:
+ // access list with contract create
+ accessList := types.AccessList{{
+ Address: contract,
+ StorageKeys: []common.Hash{{0}},
+ }}
+ tx, err = types.SignTx(types.NewTx(&types.AccessListTx{Nonce: uint64(i), To: nil, Gas: 58100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040"), AccessList: accessList}), signer, acc1Key)
+ }
+ if err != nil {
+ t.Errorf("failed to sign tx: %v", err)
+ }
+ if tx != nil {
+ b.AddTx(tx)
+ txHashes[i] = tx.Hash()
+ }
+ })
+ api := NewTransactionAPI(backend, new(AddrLocker))
+ blockHashes := make([]common.Hash, genBlocks+1)
+ ctx := context.Background()
+ for i := 0; i <= genBlocks; i++ {
+ header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
+ if err != nil {
+ t.Errorf("failed to get block: %d err: %v", i, err)
+ }
+ blockHashes[i] = header.Hash()
+ }
+
+ var testSuite = []struct {
+ txHash common.Hash
+ want string
+ }{
+ // 0. normal success
+ {
+ txHash: txHashes[0],
+ want: `{
+ "blockHash": "0xcc27e155b6eadfa892992a8cd8adaf3c929a6ec6d98c4dfbc60258883c73568e",
+ "blockNumber": "0x1",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x5208",
+ "effectiveGasPrice": "0x5d21dba00",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0x5208",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionHash": "0xdf92bc7c4c0341ecbdcd2a3ca7011fe9e21df4b8553bf0c8caabe6cb4a1aee26",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ }`,
+ },
+ // 1. create contract
+ {
+ txHash: txHashes[1],
+ want: `{
+ "blockHash": "0xbea66b509ec6e5639279ca696def697d47d0c40ecfa00bbcdb5e31a492491c83",
+ "blockNumber": "0x2",
+ "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
+ "cumulativeGasUsed": "0xcf4e",
+ "effectiveGasPrice": "0x5d21dba00",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0xcf4e",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": null,
+ "transactionHash": "0x22aa617165f83a9f8c191c2b7724ae43eeb1249bee06c98c03c7624c21d27dc8",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ }`,
+ },
+ // 2. with logs success
+ {
+ txHash: txHashes[2],
+ want: `{
+ "blockHash": "0x139eee6b02792c6bee20be4d0aa72b3876f22cf8fe8e2bf45e1a0cee94aa3cf1",
+ "blockNumber": "0x3",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x5e28",
+ "effectiveGasPrice": "0x5d21dba00",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0x5e28",
+ "logs": [
+ {
+ "address": "0x0000000000000000000000000000000000031ec7",
+ "topics": [
+ "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
+ "0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7",
+ "0x0000000000000000000000000000000000000000000000000000000000000003"
+ ],
+ "data": "0x000000000000000000000000000000000000000000000000000000000000000d",
+ "blockNumber": "0x3",
+ "transactionHash": "0x7366a7738f47e32f5b6d292ca064b6b66f295d3931533a3745975be1191fccdf",
+ "transactionIndex": "0x0",
+ "blockHash": "0x139eee6b02792c6bee20be4d0aa72b3876f22cf8fe8e2bf45e1a0cee94aa3cf1",
+ "logIndex": "0x0",
+ "removed": false
+ }
+ ],
+ "logsBloom": "0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000",
+ "status": "0x1",
+ "to": "0x0000000000000000000000000000000000031ec7",
+ "transactionHash": "0x7366a7738f47e32f5b6d292ca064b6b66f295d3931533a3745975be1191fccdf",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ }`,
+ },
+ // 3. dynamic tx with logs success
+ {
+ txHash: txHashes[3],
+ want: `{
+ "blockHash": "0xba48c351b0aa848ab2ec889f5794f0db779e0840af80472d1c29df54b22288c8",
+ "blockNumber": "0x4",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x538d",
+ "effectiveGasPrice": "0x5d21dbbf4",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0x538d",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x0",
+ "to": "0x0000000000000000000000000000000000031ec7",
+ "transactionHash": "0x4e1e9194ca6f9d4e1736e9e441f66104f273548ed6d91b236a5f9c2ea10fa06d",
+ "transactionIndex": "0x0",
+ "type": "0x2"
+ }`,
+ },
+ // 4. access list tx with create contract
+ {
+ txHash: txHashes[4],
+ want: `{
+ "blockHash": "0x83f2712dfaeab6ab6239bf060bccfc49652e4afdc3b80b22a8373816a2047bd3",
+ "blockNumber": "0x5",
+ "contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18",
+ "cumulativeGasUsed": "0xe01a",
+ "effectiveGasPrice": "0x5d21dba00",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0xe01a",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": null,
+ "transactionHash": "0x8afe030574f663fe5096371d6f58a6287bfb3e0c73a5050220f5775a08e7abc9",
+ "transactionIndex": "0x0",
+ "type": "0x1"
+ }`,
+ },
+ // 5. txhash empty
+ {
+ txHash: common.Hash{},
+ want: `null`,
+ },
+ // 6. txhash not found
+ {
+ txHash: common.HexToHash("deadbeef"),
+ want: `null`,
+ },
+ }
+
+ for i, tt := range testSuite {
+ var (
+ result interface{}
+ err error
+ )
+ result, err = api.GetTransactionReceipt(context.Background(), tt.txHash)
+ if err != nil {
+ t.Errorf("test %d: want no error, have %v", i, err)
+ continue
+ }
+ data, err := json.Marshal(result)
+ if err != nil {
+ t.Errorf("test %d: json marshal error", i)
+ continue
+ }
+ want, have := tt.want, string(data)
+ require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
}
}
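The rewritten tests above switch from byte-for-byte string comparison to testify's JSON assertions (assert.JSONEqf / require.JSONEqf), which is what allows the expected values to be rewritten as indented literals: the comparison is semantic, so whitespace and key order no longer matter. A minimal sketch of the pattern, with an illustrative test name and payload:

```go
package example

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSemanticJSONCompare(t *testing.T) {
	// Marshal whatever the API handler returned...
	data, err := json.Marshal(map[string]string{"number": "0x1", "gasUsed": "0x5208"})
	require.NoError(t, err)

	// ...and compare it semantically: indentation and key order are ignored,
	// so the expected value can be formatted for readability.
	want := `{
		"gasUsed": "0x5208",
		"number": "0x1"
	}`
	require.JSONEqf(t, want, string(data), "unexpected encoding: %s", data)
}
```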
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index cf27d124fb..cb4d6ca04e 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -58,9 +58,10 @@ type Backend interface {
ChainDb() ethdb.Database
AccountManager() *accounts.Manager
ExtRPCEnabled() bool
- RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection
- RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection
- RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs
+ RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection
+ RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection
+ RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs
+
UnprotectedAllowed(tx *types.Transaction) bool // allows only for EIP155 transactions.
// Blockchain API
@@ -89,8 +90,8 @@ type Backend interface {
GetPoolTransaction(txHash common.Hash) *types.Transaction
GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error)
Stats() (pending int, queued int)
- TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions)
- TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions)
+ TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction)
+ TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction)
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
ChainConfig() *params.ChainConfig
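The TxPoolContent / TxPoolContentFrom methods now return plain []*types.Transaction slices rather than the types.Transactions alias; since the alias is itself a slice type, most callers compile unchanged. A hypothetical consumer of the new shape (the helper name is made up, and the sketch assumes a package that already imports common and types):

```go
// poolSummary counts pending and queued transactions per sender through the
// updated Backend interface. Sketch only, not part of this change.
func poolSummary(b Backend) map[common.Address]int {
	counts := make(map[common.Address]int)
	pending, queued := b.TxPoolContent()
	for addr, txs := range pending {
		counts[addr] += len(txs)
	}
	for addr, txs := range queued {
		counts[addr] += len(txs)
	}
	return counts
}
```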
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index 1ce56fa5b4..5c6c682552 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -90,7 +90,6 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if err := args.setFeeDefaults(ctx, b); err != nil {
return err
}
-
if args.Value == nil {
args.Value = new(hexutil.Big)
}
@@ -123,7 +122,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
AccessList: args.AccessList,
}
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap())
+ estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, b.RPCGasCap())
if err != nil {
return err
}
@@ -258,7 +257,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*
gasPrice = args.GasPrice.ToInt()
gasFeeCap, gasTipCap = gasPrice, gasPrice
} else {
- // User specified 1559 gas feilds (or none), use those
+ // User specified 1559 gas fields (or none), use those
gasFeeCap = new(big.Int)
if args.MaxFeePerGas != nil {
gasFeeCap = args.MaxFeePerGas.ToInt()
diff --git a/internal/flags/categories.go b/internal/flags/categories.go
index 02d063a65a..d7500157e3 100644
--- a/internal/flags/categories.go
+++ b/internal/flags/categories.go
@@ -33,7 +33,8 @@ const (
LightCategory = "LIGHT CLIENT"
DevCategory = "DEVELOPER CHAIN"
EthashCategory = "ETHASH"
- TxPoolCategory = "TRANSACTION POOL"
+ TxPoolCategory = "TRANSACTION POOL (EVM)"
+ BlobPoolCategory = "TRANSACTION POOL (BLOB)"
PerfCategory = "PERFORMANCE TUNING"
AccountCategory = "ACCOUNT"
APICategory = "API AND CONSOLE"
diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go
index e5327d3bd3..8e23c8eeea 100644
--- a/metrics/resetting_timer.go
+++ b/metrics/resetting_timer.go
@@ -2,9 +2,10 @@ package metrics
import (
"math"
- "sort"
"sync"
"time"
+
+ "golang.org/x/exp/slices"
)
// Initial slice capacity for the values stored in a ResettingTimer
@@ -65,7 +66,7 @@ func (NilResettingTimer) Snapshot() ResettingTimer {
}
// Time is a no-op.
-func (NilResettingTimer) Time(func()) {}
+func (NilResettingTimer) Time(f func()) { f() }
// Update is a no-op.
func (NilResettingTimer) Update(time.Duration) {}
@@ -186,7 +187,7 @@ func (t *ResettingTimerSnapshot) Mean() float64 {
}
func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
- sort.Sort(Int64Slice(t.values))
+ slices.Sort(t.values)
count := len(t.values)
if count > 0 {
@@ -232,10 +233,3 @@ func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
t.calculated = true
}
-
-// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order.
-type Int64Slice []int64
-
-func (s Int64Slice) Len() int { return len(s) }
-func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git a/metrics/sample.go b/metrics/sample.go
index afcaa21184..252a878f58 100644
--- a/metrics/sample.go
+++ b/metrics/sample.go
@@ -3,9 +3,10 @@ package metrics
import (
"math"
"math/rand"
- "sort"
"sync"
"time"
+
+ "golang.org/x/exp/slices"
)
const rescaleThreshold = time.Hour
@@ -282,17 +283,17 @@ func SampleMin(values []int64) int64 {
}
// SamplePercentile returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
+func SamplePercentile(values []int64, p float64) float64 {
return SamplePercentiles(values, []float64{p})[0]
}
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+func SamplePercentiles(values []int64, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
if size > 0 {
- sort.Sort(values)
+ slices.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
@@ -633,9 +634,3 @@ func (h *expDecaySampleHeap) down(i, n int) {
i = j
}
}
-
-type int64Slice []int64
-
-func (p int64Slice) Len() int { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/metrics/timer.go b/metrics/timer.go
index a63c9dfb6c..2e1a9be472 100644
--- a/metrics/timer.go
+++ b/metrics/timer.go
@@ -123,7 +123,7 @@ func (NilTimer) Stop() {}
func (NilTimer) Sum() int64 { return 0 }
// Time is a no-op.
-func (NilTimer) Time(func()) {}
+func (NilTimer) Time(f func()) { f() }
// Update is a no-op.
func (NilTimer) Update(time.Duration) {}
diff --git a/metrics/writer.go b/metrics/writer.go
index 256fbd14c9..82434e9d1d 100644
--- a/metrics/writer.go
+++ b/metrics/writer.go
@@ -3,8 +3,10 @@ package metrics
import (
"fmt"
"io"
- "sort"
+ "strings"
"time"
+
+ "golang.org/x/exp/slices"
)
// Write sorts and writes each metric in the given registry periodically to the
@@ -18,12 +20,11 @@ func Write(r Registry, d time.Duration, w io.Writer) {
// WriteOnce sorts and writes metrics in the given registry to the given
// io.Writer.
func WriteOnce(r Registry, w io.Writer) {
- var namedMetrics namedMetricSlice
+ var namedMetrics []namedMetric
r.Each(func(name string, i interface{}) {
namedMetrics = append(namedMetrics, namedMetric{name, i})
})
-
- sort.Sort(namedMetrics)
+ slices.SortFunc(namedMetrics, namedMetric.cmp)
for _, namedMetric := range namedMetrics {
switch metric := namedMetric.m.(type) {
case Counter:
@@ -91,13 +92,6 @@ type namedMetric struct {
m interface{}
}
-// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
-type namedMetricSlice []namedMetric
-
-func (nms namedMetricSlice) Len() int { return len(nms) }
-
-func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
-
-func (nms namedMetricSlice) Less(i, j int) bool {
- return nms[i].name < nms[j].name
+func (m namedMetric) cmp(other namedMetric) int {
+ return strings.Compare(m.name, other.name)
}
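For reference, the sort.Interface boilerplate removed throughout these metrics files collapses into the generic helpers from golang.org/x/exp/slices. A minimal standalone sketch, assuming the x/exp version pinned here whose SortFunc takes an int-returning comparator (the same shape as the cmp method above):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type entry struct{ name string }

// cmp mirrors the comparator shape used above: negative, zero, or positive.
func (e entry) cmp(other entry) int { return strings.Compare(e.name, other.name) }

func main() {
	values := []int64{9, 2, 7}
	slices.Sort(values) // replaces sort.Sort(Int64Slice(values))
	fmt.Println(values) // [2 7 9]

	entries := []entry{{"zzz"}, {"bbb"}, {"fff"}}
	slices.SortFunc(entries, entry.cmp) // method expression used as comparator
	fmt.Println(entries)                // [{bbb} {fff} {zzz}]
}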
diff --git a/metrics/writer_test.go b/metrics/writer_test.go
index 1aacc28712..8376bf8975 100644
--- a/metrics/writer_test.go
+++ b/metrics/writer_test.go
@@ -1,19 +1,20 @@
package metrics
import (
- "sort"
"testing"
+
+ "golang.org/x/exp/slices"
)
func TestMetricsSorting(t *testing.T) {
- var namedMetrics = namedMetricSlice{
+ var namedMetrics = []namedMetric{
{name: "zzz"},
{name: "bbb"},
{name: "fff"},
{name: "ggg"},
}
- sort.Sort(namedMetrics)
+ slices.SortFunc(namedMetrics, namedMetric.cmp)
for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
if namedMetrics[i].name != name {
t.Fail()
diff --git a/miner/ordering.go b/miner/ordering.go
new file mode 100644
index 0000000000..70a2a42eb6
--- /dev/null
+++ b/miner/ordering.go
@@ -0,0 +1,157 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package miner
+
+import (
+ "container/heap"
+ "math/big"
+
+ "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
+)
+
+// txWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap
+type txWithMinerFee struct {
+ tx *txpool.LazyTransaction
+ from common.Address
+ fees *big.Int
+}
+
+// newTxWithMinerFee creates a wrapped transaction, calculating the effective
+// miner gasTipCap if a base fee is provided.
+// Returns error in case of a negative effective miner gasTipCap.
+func newTxWithMinerFee(tx *txpool.LazyTransaction, from common.Address, baseFee *big.Int) (*txWithMinerFee, error) {
+ tip := new(big.Int).Set(tx.GasTipCap)
+ if baseFee != nil {
+ if tx.GasFeeCap.Cmp(baseFee) < 0 {
+ return nil, types.ErrGasFeeCapTooLow
+ }
+ tip = math.BigMin(tx.GasTipCap, new(big.Int).Sub(tx.GasFeeCap, baseFee))
+ }
+ return &txWithMinerFee{
+ tx: tx,
+ from: from,
+ fees: tip,
+ }, nil
+}
+
+// txByPriceAndTime implements both the sort and the heap interface, making it useful
+// for all at once sorting as well as individually adding and removing elements.
+type txByPriceAndTime []*txWithMinerFee
+
+func (s txByPriceAndTime) Len() int { return len(s) }
+func (s txByPriceAndTime) Less(i, j int) bool {
+ // If the prices are equal, use the time the transaction was first seen for
+ // deterministic sorting
+ cmp := s[i].fees.Cmp(s[j].fees)
+ if cmp == 0 {
+ return s[i].tx.Time.Before(s[j].tx.Time)
+ }
+ return cmp > 0
+}
+func (s txByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s *txByPriceAndTime) Push(x interface{}) {
+ *s = append(*s, x.(*txWithMinerFee))
+}
+
+func (s *txByPriceAndTime) Pop() interface{} {
+ old := *s
+ n := len(old)
+ x := old[n-1]
+ old[n-1] = nil
+ *s = old[0 : n-1]
+ return x
+}
+
+// transactionsByPriceAndNonce represents a set of transactions that can return
+// transactions in a profit-maximizing sorted order, while supporting removing
+// entire batches of transactions for non-executable accounts.
+type transactionsByPriceAndNonce struct {
+ txs map[common.Address][]*txpool.LazyTransaction // Per account nonce-sorted list of transactions
+ heads txByPriceAndTime // Next transaction for each unique account (price heap)
+ signer types.Signer // Signer for the set of transactions
+ baseFee *big.Int // Current base fee
+}
+
+// newTransactionsByPriceAndNonce creates a transaction set that can retrieve
+// price sorted transactions in a nonce-honouring way.
+//
+// Note, the input map is owned by the returned set, so the caller should not
+// interact with it after providing it to the constructor.
+func newTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *transactionsByPriceAndNonce {
+ // Initialize a price and received time based heap with the head transactions
+ heads := make(txByPriceAndTime, 0, len(txs))
+ for from, accTxs := range txs {
+ wrapped, err := newTxWithMinerFee(accTxs[0], from, baseFee)
+ if err != nil {
+ delete(txs, from)
+ continue
+ }
+ heads = append(heads, wrapped)
+ txs[from] = accTxs[1:]
+ }
+ heap.Init(&heads)
+
+ // Assemble and return the transaction set
+ return &transactionsByPriceAndNonce{
+ txs: txs,
+ heads: heads,
+ signer: signer,
+ baseFee: baseFee,
+ }
+}
+
+// Peek returns the next transaction by price.
+func (t *transactionsByPriceAndNonce) Peek() *txpool.LazyTransaction {
+ if len(t.heads) == 0 {
+ return nil
+ }
+ return t.heads[0].tx
+}
+
+// Shift replaces the current best head with the next one from the same account.
+func (t *transactionsByPriceAndNonce) Shift() {
+ acc := t.heads[0].from
+ if txs, ok := t.txs[acc]; ok && len(txs) > 0 {
+ if wrapped, err := newTxWithMinerFee(txs[0], acc, t.baseFee); err == nil {
+ t.heads[0], t.txs[acc] = wrapped, txs[1:]
+ heap.Fix(&t.heads, 0)
+ return
+ }
+ }
+ heap.Pop(&t.heads)
+}
+
+// Pop removes the best transaction, *not* replacing it with the next one from
+// the same account. This should be used when a transaction cannot be executed
+// and hence all subsequent ones should be discarded from the same account.
+func (t *transactionsByPriceAndNonce) Pop() {
+ heap.Pop(&t.heads)
+}
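As a usage sketch of the heap above (a hypothetical helper, written as if it lived in package miner next to this file, since the types are unexported):

// consumeOrdered drains a transactionsByPriceAndNonce set in the intended
// Peek/Shift/Pop pattern: Shift advances to the sender's next nonce after a
// successful execution, Pop discards the sender entirely after a failure.
func consumeOrdered(set *transactionsByPriceAndNonce, process func(*txpool.LazyTransaction) error) {
	for {
		ltx := set.Peek()
		if ltx == nil {
			return // all accounts exhausted
		}
		if err := process(ltx); err != nil {
			set.Pop() // drop this sender's remaining, higher-nonce txs too
			continue
		}
		set.Shift()
	}
}

The worker changes further down follow exactly this shape, with an extra Resolve step because the pool now hands out lazy transactions.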
diff --git a/miner/ordering_ext.go b/miner/ordering_ext.go
new file mode 100644
index 0000000000..4bf4a8367c
--- /dev/null
+++ b/miner/ordering_ext.go
@@ -0,0 +1,15 @@
+package miner
+
+import (
+ "math/big"
+
+ "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type TransactionsByPriceAndNonce = transactionsByPriceAndNonce
+
+func NewTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *TransactionsByPriceAndNonce {
+ return newTransactionsByPriceAndNonce(signer, txs, baseFee)
+}
diff --git a/miner/ordering_test.go b/miner/ordering_test.go
new file mode 100644
index 0000000000..ea0f62f913
--- /dev/null
+++ b/miner/ordering_test.go
@@ -0,0 +1,198 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package miner
+
+import (
+ "crypto/ecdsa"
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+func TestTransactionPriceNonceSortLegacy(t *testing.T) {
+ testTransactionPriceNonceSort(t, nil)
+}
+
+func TestTransactionPriceNonceSort1559(t *testing.T) {
+ testTransactionPriceNonceSort(t, big.NewInt(0))
+ testTransactionPriceNonceSort(t, big.NewInt(5))
+ testTransactionPriceNonceSort(t, big.NewInt(50))
+}
+
+// Tests that transactions can be correctly sorted according to their price in
+// decreasing order, but at the same time with increasing nonces when issued by
+// the same account.
+func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
+ // Generate a batch of accounts to start with
+ keys := make([]*ecdsa.PrivateKey, 25)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ }
+ signer := types.LatestSignerForChainID(common.Big1)
+
+ // Generate a batch of transactions with overlapping values, but shifted nonces
+ groups := map[common.Address][]*txpool.LazyTransaction{}
+ expectedCount := 0
+ for start, key := range keys {
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ count := 25
+ for i := 0; i < 25; i++ {
+ var tx *types.Transaction
+ gasFeeCap := rand.Intn(50)
+ if baseFee == nil {
+ tx = types.NewTx(&types.LegacyTx{
+ Nonce: uint64(start + i),
+ To: &common.Address{},
+ Value: big.NewInt(100),
+ Gas: 100,
+ GasPrice: big.NewInt(int64(gasFeeCap)),
+ Data: nil,
+ })
+ } else {
+ tx = types.NewTx(&types.DynamicFeeTx{
+ Nonce: uint64(start + i),
+ To: &common.Address{},
+ Value: big.NewInt(100),
+ Gas: 100,
+ GasFeeCap: big.NewInt(int64(gasFeeCap)),
+ GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))),
+ Data: nil,
+ })
+ if count == 25 && int64(gasFeeCap) < baseFee.Int64() {
+ count = i
+ }
+ }
+ tx, err := types.SignTx(tx, signer, key)
+ if err != nil {
+ t.Fatalf("failed to sign tx: %s", err)
+ }
+ groups[addr] = append(groups[addr], &txpool.LazyTransaction{
+ Hash: tx.Hash(),
+ Tx: &txpool.Transaction{Tx: tx},
+ Time: tx.Time(),
+ GasFeeCap: tx.GasFeeCap(),
+ GasTipCap: tx.GasTipCap(),
+ })
+ }
+ expectedCount += count
+ }
+ // Sort the transactions and cross check the nonce ordering
+ txset := newTransactionsByPriceAndNonce(signer, groups, baseFee)
+
+ txs := types.Transactions{}
+ for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+ txs = append(txs, tx.Tx.Tx)
+ txset.Shift()
+ }
+ if len(txs) != expectedCount {
+ t.Errorf("expected %d transactions, found %d", expectedCount, len(txs))
+ }
+ for i, txi := range txs {
+ fromi, _ := types.Sender(signer, txi)
+
+ // Make sure the nonce order is valid
+ for j, txj := range txs[i+1:] {
+ fromj, _ := types.Sender(signer, txj)
+ if fromi == fromj && txi.Nonce() > txj.Nonce() {
+ t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
+ }
+ }
+ // If the next tx is from a different account, its effective tip must not exceed the current one
+ if i+1 < len(txs) {
+ next := txs[i+1]
+ fromNext, _ := types.Sender(signer, next)
+ tip, err := txi.EffectiveGasTip(baseFee)
+ nextTip, nextErr := next.EffectiveGasTip(baseFee)
+ if err != nil || nextErr != nil {
+ t.Errorf("error calculating effective tip: %v, %v", err, nextErr)
+ }
+ if fromi != fromNext && tip.Cmp(nextTip) < 0 {
+ t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
+ }
+ }
+ }
+}
+
+// Tests that if multiple transactions have the same price, the ones seen earlier
+// are prioritized to avoid network spam attacks aiming for a specific ordering.
+func TestTransactionTimeSort(t *testing.T) {
+ // Generate a batch of accounts to start with
+ keys := make([]*ecdsa.PrivateKey, 5)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ }
+ signer := types.HomesteadSigner{}
+
+ // Generate a batch of transactions with overlapping prices, but different creation times
+ groups := map[common.Address][]*txpool.LazyTransaction{}
+ for start, key := range keys {
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+
+ tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key)
+ tx.SetTime(time.Unix(0, int64(len(keys)-start)))
+
+ groups[addr] = append(groups[addr], &txpool.LazyTransaction{
+ Hash: tx.Hash(),
+ Tx: &txpool.Transaction{Tx: tx},
+ Time: tx.Time(),
+ GasFeeCap: tx.GasFeeCap(),
+ GasTipCap: tx.GasTipCap(),
+ })
+ }
+ // Sort the transactions and cross check the price and time ordering
+ txset := newTransactionsByPriceAndNonce(signer, groups, nil)
+
+ txs := types.Transactions{}
+ for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+ txs = append(txs, tx.Tx.Tx)
+ txset.Shift()
+ }
+ if len(txs) != len(keys) {
+ t.Errorf("expected %d transactions, found %d", len(keys), len(txs))
+ }
+ for i, txi := range txs {
+ fromi, _ := types.Sender(signer, txi)
+ if i+1 < len(txs) {
+ next := txs[i+1]
+ fromNext, _ := types.Sender(signer, next)
+
+ if txi.GasPrice().Cmp(next.GasPrice()) < 0 {
+ t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
+ }
+ // Make sure time order is ascending if the txs have the same gas price
+ if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.Time().After(next.Time()) {
+ t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.Time(), i+1, fromNext[:4], next.Time())
+ }
+ }
+ }
+}
diff --git a/miner/worker.go b/miner/worker.go
index a2cdaab174..8a6124c1a8 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -42,6 +42,7 @@ import (
"github.com/ava-labs/subnet-evm/consensus/dummy"
"github.com/ava-labs/subnet-evm/core"
"github.com/ava-labs/subnet-evm/core/state"
+ "github.com/ava-labs/subnet-evm/core/txpool"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/core/vm"
"github.com/ava-labs/subnet-evm/params"
@@ -58,8 +59,7 @@ const (
// environment is the worker's current environment and holds all of the current state information.
type environment struct {
- signer types.Signer
-
+ signer types.Signer
state *state.StateDB // apply state changes here
tcount int // tx count in cycle
gasPool *core.GasPool // available gas used to pack transactions
@@ -213,7 +213,7 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte
pending := w.eth.TxPool().PendingWithBaseFee(true, header.BaseFee)
// Split the pending transactions into locals and remotes
- localTxs := make(map[common.Address]types.Transactions)
+ localTxs := make(map[common.Address][]*txpool.LazyTransaction)
remoteTxs := pending
for _, account := range w.eth.TxPool().Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
@@ -222,11 +222,11 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte
}
}
if len(localTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, header.BaseFee)
+ txs := newTransactionsByPriceAndNonce(env.signer, localTxs, header.BaseFee)
w.commitTransactions(env, txs, header.Coinbase)
}
if len(remoteTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, header.BaseFee)
+ txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, header.BaseFee)
w.commitTransactions(env, txs, header.Coinbase)
}
@@ -246,14 +246,14 @@ func (w *worker) createCurrentEnvironment(predicateContext *precompileconfig.Pre
header: header,
tcount: 0,
gasPool: new(core.GasPool).AddGas(header.GasLimit),
- rules: w.chainConfig.AvalancheRules(header.Number, header.Time),
+ rules: w.chainConfig.Rules(header.Number, header.Time),
predicateContext: predicateContext,
predicateResults: predicate.NewResults(),
start: tstart,
}, nil
}
-func (w *worker) commitTransaction(env *environment, tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
+func (w *worker) commitTransaction(env *environment, tx *txpool.Transaction, coinbase common.Address) ([]*types.Log, error) {
var (
snap = env.state.Snapshot()
gp = env.gasPool.Gas()
@@ -261,33 +261,33 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction, coin
)
if env.rules.IsDurango {
- results, err := core.CheckPredicates(env.rules, env.predicateContext, tx)
+ results, err := core.CheckPredicates(env.rules, env.predicateContext, tx.Tx)
if err != nil {
- log.Debug("Transaction predicate failed verification in miner", "tx", tx.Hash(), "err", err)
+ log.Debug("Transaction predicate failed verification in miner", "tx", tx.Tx.Hash(), "err", err)
return nil, err
}
- env.predicateResults.SetTxResults(tx.Hash(), results)
+ env.predicateResults.SetTxResults(tx.Tx.Hash(), results)
blockContext = core.NewEVMBlockContextWithPredicateResults(env.header, w.chain, &coinbase, env.predicateResults)
} else {
blockContext = core.NewEVMBlockContext(env.header, w.chain, &coinbase)
}
- receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, blockContext, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
+ receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, blockContext, env.gasPool, env.state, env.header, tx.Tx, &env.header.GasUsed, *w.chain.GetVMConfig())
if err != nil {
env.state.RevertToSnapshot(snap)
env.gasPool.SetGas(gp)
- env.predicateResults.DeleteTxResults(tx.Hash())
+ env.predicateResults.DeleteTxResults(tx.Tx.Hash())
return nil, err
}
- env.txs = append(env.txs, tx)
+ env.txs = append(env.txs, tx.Tx)
env.receipts = append(env.receipts, receipt)
- env.size += tx.Size()
+ env.size += tx.Tx.Size()
return receipt.Logs, nil
}
-func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, coinbase common.Address) {
+func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce, coinbase common.Address) {
for {
// If we don't have enough gas for any further transactions then we're done.
if env.gasPool.Gas() < params.TxGas {
@@ -295,38 +295,45 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
break
}
// Retrieve the next transaction and abort if all done.
- tx := txs.Peek()
- if tx == nil {
+ ltx := txs.Peek()
+ if ltx == nil {
break
}
+ tx := ltx.Resolve()
+ if tx == nil {
+ log.Warn("Ignoring evicted transaction")
+
+ txs.Pop()
+ continue
+ }
// Abort transaction if it won't fit in the block and continue to search for a smaller
// transaction that will fit.
- if totalTxsSize := env.size + tx.Size(); totalTxsSize > targetTxsSize {
- log.Trace("Skipping transaction that would exceed target size", "hash", tx.Hash(), "totalTxsSize", totalTxsSize, "txSize", tx.Size())
+ if totalTxsSize := env.size + tx.Tx.Size(); totalTxsSize > targetTxsSize {
+ log.Trace("Skipping transaction that would exceed target size", "hash", tx.Tx.Hash(), "totalTxsSize", totalTxsSize, "txSize", tx.Tx.Size())
txs.Pop()
continue
}
// Error may be ignored here. The error has already been checked
// during transaction acceptance in the transaction pool.
- from, _ := types.Sender(env.signer, tx)
+ from, _ := types.Sender(env.signer, tx.Tx)
// Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do.
- if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
- log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
+ if tx.Tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
+ log.Trace("Ignoring reply protected transaction", "hash", tx.Tx.Hash(), "eip155", w.chainConfig.EIP155Block)
txs.Pop()
continue
}
// Start executing the transaction
- env.state.SetTxContext(tx.Hash(), env.tcount)
+ env.state.SetTxContext(tx.Tx.Hash(), env.tcount)
_, err := w.commitTransaction(env, tx, coinbase)
switch {
case errors.Is(err, core.ErrNonceTooLow):
// New head notification data race between the transaction pool and miner, shift
- log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+ log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Tx.Nonce())
txs.Shift()
case errors.Is(err, nil):
@@ -336,7 +343,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
default:
// Transaction is regarded as invalid, drop all consecutive transactions from
// the same sender because of `nonce-too-high` clause.
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+ log.Debug("Transaction failed, account skipped", "hash", tx.Tx.Hash(), "err", err)
txs.Pop()
}
}
diff --git a/node/config.go b/node/config.go
index 1d4a418cd1..bf67d774ff 100644
--- a/node/config.go
+++ b/node/config.go
@@ -60,6 +60,12 @@ type Config struct {
// InsecureUnlockAllowed allows user to unlock accounts in unsafe http environment.
InsecureUnlockAllowed bool `toml:",omitempty"`
+ // BatchRequestLimit is the maximum number of requests in a batch.
+ BatchRequestLimit int `toml:",omitempty"`
+
+ // BatchResponseMaxSize is the maximum number of bytes returned from a batched rpc call.
+ BatchResponseMaxSize int `toml:",omitempty"`
+
SubnetEVMVersion string
}
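A minimal sketch of wiring the two new batch limits (the field names come from the diff above; the numeric values are arbitrary placeholders, not defaults taken from the code):

package main

import "github.com/ava-labs/subnet-evm/node"

func main() {
	cfg := node.Config{
		BatchRequestLimit:    1000,             // max number of requests accepted in one JSON-RPC batch
		BatchResponseMaxSize: 25 * 1024 * 1024, // max bytes returned from one batched call
	}
	_ = cfg // pass cfg into node construction as usual
}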
diff --git a/params/config.go b/params/config.go
index 1b2fe0930e..3f7a29efc8 100644
--- a/params/config.go
+++ b/params/config.go
@@ -33,9 +33,7 @@ import (
"math/big"
"time"
- "github.com/ava-labs/avalanchego/snow"
"github.com/ava-labs/avalanchego/utils/constants"
- "github.com/ava-labs/avalanchego/version"
"github.com/ava-labs/subnet-evm/commontype"
"github.com/ava-labs/subnet-evm/precompile/modules"
"github.com/ava-labs/subnet-evm/precompile/precompileconfig"
@@ -155,7 +153,7 @@ var (
UpgradeConfig: UpgradeConfig{},
}
- TestRules = TestChainConfig.AvalancheRules(new(big.Int), 0)
+ TestRules = TestChainConfig.Rules(new(big.Int), 0)
)
func getUpgradeTime(networkID uint32, upgradeTimes map[uint32]time.Time) *uint64 {
@@ -167,46 +165,13 @@ func getUpgradeTime(networkID uint32, upgradeTimes map[uint32]time.Time) *uint64
return utils.NewUint64(0)
}
-// GetMandatoryNetworkUpgrades returns the mandatory network upgrades for the specified network ID.
-func GetMandatoryNetworkUpgrades(networkID uint32) MandatoryNetworkUpgrades {
- return MandatoryNetworkUpgrades{
- SubnetEVMTimestamp: utils.NewUint64(0),
- DurangoTimestamp: getUpgradeTime(networkID, version.DurangoTimes),
- }
-}
-
-// UpgradeConfig includes the following configs that may be specified in upgradeBytes:
-// - Timestamps that enable avalanche network upgrades,
-// - Enabling or disabling precompiles as network upgrades.
-type UpgradeConfig struct {
- // Config for optional timestamps that enable network upgrades.
- // Note: if OptionalUpgrades is specified in the JSON all previously activated
- // forks must be present or upgradeBytes will be rejected.
- OptionalNetworkUpgrades *OptionalNetworkUpgrades `json:"networkUpgrades,omitempty"`
-
- // Config for modifying state as a network upgrade.
- StateUpgrades []StateUpgrade `json:"stateUpgrades,omitempty"`
-
- // Config for enabling and disabling precompiles as network upgrades.
- PrecompileUpgrades []PrecompileUpgrade `json:"precompileUpgrades,omitempty"`
-}
-
-// AvalancheContext provides Avalanche specific context directly into the EVM.
-type AvalancheContext struct {
- SnowCtx *snow.Context
-}
-
// ChainConfig is the core config which determines the blockchain settings.
//
// ChainConfig is stored in the database on a per block basis. This means
// that any network, identified by its genesis block, can have its own
// set of configuration options.
type ChainConfig struct {
- AvalancheContext `json:"-"` // Avalanche specific context set during VM initialization. Not serialized.
-
- ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection
- FeeConfig commontype.FeeConfig `json:"feeConfig"` // Set the configuration for the dynamic fee algorithm
- AllowFeeRecipients bool `json:"allowFeeRecipients,omitempty"` // Allows fees to be collected by block builders.
+ ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection
HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead)
@@ -221,58 +186,16 @@ type ChainConfig struct {
IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul)
MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated)
- MandatoryNetworkUpgrades // Config for timestamps that enable mandatory network upgrades. Skip encoding/decoding directly into ChainConfig.
- OptionalNetworkUpgrades // Config for optional timestamps that enable network upgrades
- GenesisPrecompiles Precompiles `json:"-"` // Config for enabling precompiles from genesis. JSON encode/decode will be handled by the custom marshaler/unmarshaler.
- UpgradeConfig `json:"-"` // Config specified in upgradeBytes (avalanche network upgrades or enable/disabling precompiles). Skip encoding/decoding directly into ChainConfig.
-}
-
-// UnmarshalJSON parses the JSON-encoded data and stores the result in the
-// object pointed to by c.
-// This is a custom unmarshaler to handle the Precompiles field.
-// Precompiles was presented as an inline object in the JSON.
-// This custom unmarshaler ensures backwards compatibility with the old format.
-func (c *ChainConfig) UnmarshalJSON(data []byte) error {
- // Alias ChainConfig to avoid recursion
- type _ChainConfig ChainConfig
- tmp := _ChainConfig{}
- if err := json.Unmarshal(data, &tmp); err != nil {
- return err
- }
-
- // At this point we have populated all fields except PrecompileUpgrade
- *c = ChainConfig(tmp)
-
- // Unmarshal inlined PrecompileUpgrade
- return json.Unmarshal(data, &c.GenesisPrecompiles)
-}
+ MandatoryNetworkUpgrades // Config for timestamps that enable mandatory network upgrades. Skip encoding/decoding directly into ChainConfig.
+ OptionalNetworkUpgrades // Config for optional timestamps that enable network upgrades
-// MarshalJSON returns the JSON encoding of c.
-// This is a custom marshaler to handle the Precompiles field.
-func (c ChainConfig) MarshalJSON() ([]byte, error) {
- // Alias ChainConfig to avoid recursion
- type _ChainConfig ChainConfig
- tmp, err := json.Marshal(_ChainConfig(c))
- if err != nil {
- return nil, err
- }
-
- // To include PrecompileUpgrades, we unmarshal the json representing c
- // then directly add the corresponding keys to the json.
- raw := make(map[string]json.RawMessage)
- if err := json.Unmarshal(tmp, &raw); err != nil {
- return nil, err
- }
+ AvalancheContext `json:"-"` // Avalanche specific context set during VM initialization. Not serialized.
- for key, value := range c.GenesisPrecompiles {
- conf, err := json.Marshal(value)
- if err != nil {
- return nil, err
- }
- raw[key] = conf
- }
+ FeeConfig commontype.FeeConfig `json:"feeConfig"` // Set the configuration for the dynamic fee algorithm
+ AllowFeeRecipients bool `json:"allowFeeRecipients,omitempty"` // Allows fees to be collected by block builders.
- return json.Marshal(raw)
+ GenesisPrecompiles Precompiles `json:"-"` // Config for enabling precompiles from genesis. JSON encode/decode will be handled by the custom marshaler/unmarshaler.
+ UpgradeConfig `json:"-"` // Config specified in upgradeBytes (avalanche network upgrades or enable/disabling precompiles). Skip encoding/decoding directly into ChainConfig.
}
// Description returns a human-readable description of ChainConfig.
@@ -285,7 +208,7 @@ func (c *ChainConfig) Description() string {
// Create a list of forks with a short description of them. Forks that only
// makes sense for mainnet should be optional at printing to avoid bloating
// the output for testnets and private networks.
- banner += "Hard Forks:\n"
+ banner += "Hard Forks (block based):\n"
banner += fmt.Sprintf(" - Homestead: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/homestead.md)\n", c.HomesteadBlock)
banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/tangerine-whistle.md)\n", c.EIP150Block)
banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block)
@@ -297,10 +220,13 @@ func (c *ChainConfig) Description() string {
if c.MuirGlacierBlock != nil {
banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/muir-glacier.md)\n", c.MuirGlacierBlock)
}
- banner += "Mandatory Upgrades:\n"
+
+ banner += "Hard forks (timestamp based):\n"
+ banner += fmt.Sprintf(" - Cancun Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.12.0)\n", ptrToString(c.CancunTime))
+
+ banner += "Mandatory Avalanche Upgrades (timestamp based):\n"
banner += fmt.Sprintf(" - SubnetEVM Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0)\n", ptrToString(c.SubnetEVMTimestamp))
banner += fmt.Sprintf(" - Durango Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0)\n", ptrToString(c.DurangoTimestamp))
- banner += fmt.Sprintf(" - Cancun Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.12.0)\n", ptrToString(c.CancunTime))
banner += "\n"
// Add Subnet-EVM custom fields
@@ -398,7 +324,7 @@ func (c *ChainConfig) IsDurango(time uint64) bool {
// IsCancun returns whether [time] represents a block
// with a timestamp after the Cancun upgrade time.
-func (c *ChainConfig) IsCancun(time uint64) bool {
+func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
return utils.IsTimestampForked(c.CancunTime, time)
}
@@ -607,15 +533,6 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, height *big.Int, time
return nil
}
-// getOptionalNetworkUpgrades returns OptionalNetworkUpgrades from upgrade config if set there,
-// otherwise it falls back to the genesis chain config.
-func (c *ChainConfig) getOptionalNetworkUpgrades() *OptionalNetworkUpgrades {
- if upgradeConfigOverride := c.UpgradeConfig.OptionalNetworkUpgrades; upgradeConfigOverride != nil {
- return upgradeConfigOverride
- }
- return &c.OptionalNetworkUpgrades
-}
-
// isForkBlockIncompatible returns true if a fork scheduled at s1 cannot be rescheduled to
// block s2 because head is already past the fork.
func isForkBlockIncompatible(s1, s2, head *big.Int) bool {
@@ -774,13 +691,13 @@ func (c *ChainConfig) rules(num *big.Int, timestamp uint64) Rules {
IsConstantinople: c.IsConstantinople(num),
IsPetersburg: c.IsPetersburg(num),
IsIstanbul: c.IsIstanbul(num),
- IsCancun: c.IsCancun(timestamp),
+ IsCancun: c.IsCancun(num, timestamp),
}
}
-// AvalancheRules returns the Avalanche modified rules to support Avalanche
+// Rules returns the Avalanche modified rules to support Avalanche
// network upgrades
-func (c *ChainConfig) AvalancheRules(blockNum *big.Int, timestamp uint64) Rules {
+func (c *ChainConfig) Rules(blockNum *big.Int, timestamp uint64) Rules {
rules := c.rules(blockNum, timestamp)
rules.IsSubnetEVM = c.IsSubnetEVM(timestamp)
@@ -817,71 +734,11 @@ func (c *ChainConfig) AllowedFeeRecipients() bool {
return c.AllowFeeRecipients
}
-type ChainConfigWithUpgradesJSON struct {
- ChainConfig
- UpgradeConfig UpgradeConfig `json:"upgrades,omitempty"`
-}
-
-// MarshalJSON implements json.Marshaler. This is a workaround for the fact that
-// the embedded ChainConfig struct has a MarshalJSON method, which prevents
-// the default JSON marshalling from working for UpgradeConfig.
-// TODO: consider removing this method by allowing external tag for the embedded
-// ChainConfig struct.
-func (cu ChainConfigWithUpgradesJSON) MarshalJSON() ([]byte, error) {
- // embed the ChainConfig struct into the response
- chainConfigJSON, err := json.Marshal(cu.ChainConfig)
- if err != nil {
- return nil, err
- }
- if len(chainConfigJSON) > maxJSONLen {
- return nil, errors.New("value too large")
- }
-
- type upgrades struct {
- UpgradeConfig UpgradeConfig `json:"upgrades"`
- }
-
- upgradeJSON, err := json.Marshal(upgrades{cu.UpgradeConfig})
- if err != nil {
- return nil, err
- }
- if len(upgradeJSON) > maxJSONLen {
- return nil, errors.New("value too large")
- }
-
- // merge the two JSON objects
- mergedJSON := make([]byte, 0, len(chainConfigJSON)+len(upgradeJSON)+1)
- mergedJSON = append(mergedJSON, chainConfigJSON[:len(chainConfigJSON)-1]...)
- mergedJSON = append(mergedJSON, ',')
- mergedJSON = append(mergedJSON, upgradeJSON[1:]...)
- return mergedJSON, nil
-}
-
-func (cu *ChainConfigWithUpgradesJSON) UnmarshalJSON(input []byte) error {
- var cc ChainConfig
- if err := json.Unmarshal(input, &cc); err != nil {
- return err
- }
-
- type upgrades struct {
- UpgradeConfig UpgradeConfig `json:"upgrades"`
- }
-
- var u upgrades
- if err := json.Unmarshal(input, &u); err != nil {
- return err
- }
- cu.ChainConfig = cc
- cu.UpgradeConfig = u.UpgradeConfig
- return nil
-}
-
-// ToWithUpgradesJSON converts the ChainConfig to ChainConfigWithUpgradesJSON with upgrades explicitly displayed.
-// ChainConfig does not include upgrades in its JSON output.
-// This is a workaround for showing upgrades in the JSON output.
-func (c *ChainConfig) ToWithUpgradesJSON() *ChainConfigWithUpgradesJSON {
- return &ChainConfigWithUpgradesJSON{
- ChainConfig: *c,
- UpgradeConfig: c.UpgradeConfig,
+// getOptionalNetworkUpgrades returns OptionalNetworkUpgrades from upgrade config if set there,
+// otherwise it falls back to the genesis chain config.
+func (c *ChainConfig) getOptionalNetworkUpgrades() *OptionalNetworkUpgrades {
+ if upgradeConfigOverride := c.UpgradeConfig.OptionalNetworkUpgrades; upgradeConfigOverride != nil {
+ return upgradeConfigOverride
}
+ return &c.OptionalNetworkUpgrades
}
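After the AvalancheRules-to-Rules rename, callers obtain the Avalanche-aware rule set directly from Rules, as the plugin/evm hunks below do. A small sketch (hypothetical helper and package name; ChainConfig, Rules, and IsDurango are as shown in this diff):

package configcheck // hypothetical package for this sketch

import (
	"math/big"

	"github.com/ava-labs/subnet-evm/params"
)

// durangoActive reports whether the Durango upgrade is active at the given
// block number and timestamp. Rules (formerly AvalancheRules) folds the
// Avalanche upgrade schedule into the returned rule set.
func durangoActive(cfg *params.ChainConfig, number *big.Int, timestamp uint64) bool {
	rules := cfg.Rules(number, timestamp)
	return rules.IsDurango
}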
diff --git a/params/config_extra.go b/params/config_extra.go
new file mode 100644
index 0000000000..f043babb0d
--- /dev/null
+++ b/params/config_extra.go
@@ -0,0 +1,149 @@
+// (c) 2024 Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package params
+
+import (
+ "encoding/json"
+ "errors"
+
+ "github.com/ava-labs/avalanchego/snow"
+)
+
+// UpgradeConfig includes the following configs that may be specified in upgradeBytes:
+// - Timestamps that enable avalanche network upgrades,
+// - Enabling or disabling precompiles as network upgrades.
+type UpgradeConfig struct {
+ // Config for optional timestamps that enable network upgrades.
+ // Note: if OptionalUpgrades is specified in the JSON all previously activated
+ // forks must be present or upgradeBytes will be rejected.
+ OptionalNetworkUpgrades *OptionalNetworkUpgrades `json:"networkUpgrades,omitempty"`
+
+ // Config for modifying state as a network upgrade.
+ StateUpgrades []StateUpgrade `json:"stateUpgrades,omitempty"`
+
+ // Config for enabling and disabling precompiles as network upgrades.
+ PrecompileUpgrades []PrecompileUpgrade `json:"precompileUpgrades,omitempty"`
+}
+
+// AvalancheContext provides Avalanche specific context directly into the EVM.
+type AvalancheContext struct {
+ SnowCtx *snow.Context
+}
+
+// UnmarshalJSON parses the JSON-encoded data and stores the result in the
+// object pointed to by c.
+// This is a custom unmarshaler to handle the Precompiles field.
+// Precompiles was presented as an inline object in the JSON.
+// This custom unmarshaler ensures backwards compatibility with the old format.
+func (c *ChainConfig) UnmarshalJSON(data []byte) error {
+ // Alias ChainConfig to avoid recursion
+ type _ChainConfig ChainConfig
+ tmp := _ChainConfig{}
+ if err := json.Unmarshal(data, &tmp); err != nil {
+ return err
+ }
+
+ // At this point we have populated all fields except PrecompileUpgrade
+ *c = ChainConfig(tmp)
+
+ // Unmarshal inlined PrecompileUpgrade
+ return json.Unmarshal(data, &c.GenesisPrecompiles)
+}
+
+// MarshalJSON returns the JSON encoding of c.
+// This is a custom marshaler to handle the Precompiles field.
+func (c ChainConfig) MarshalJSON() ([]byte, error) {
+ // Alias ChainConfig to avoid recursion
+ type _ChainConfig ChainConfig
+ tmp, err := json.Marshal(_ChainConfig(c))
+ if err != nil {
+ return nil, err
+ }
+
+ // To include PrecompileUpgrades, we unmarshal the json representing c
+ // then directly add the corresponding keys to the json.
+ raw := make(map[string]json.RawMessage)
+ if err := json.Unmarshal(tmp, &raw); err != nil {
+ return nil, err
+ }
+
+ for key, value := range c.GenesisPrecompiles {
+ conf, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ raw[key] = conf
+ }
+
+ return json.Marshal(raw)
+}
+
+type ChainConfigWithUpgradesJSON struct {
+ ChainConfig
+ UpgradeConfig UpgradeConfig `json:"upgrades,omitempty"`
+}
+
+// MarshalJSON implements json.Marshaler. This is a workaround for the fact that
+// the embedded ChainConfig struct has a MarshalJSON method, which prevents
+// the default JSON marshalling from working for UpgradeConfig.
+// TODO: consider removing this method by allowing external tag for the embedded
+// ChainConfig struct.
+func (cu ChainConfigWithUpgradesJSON) MarshalJSON() ([]byte, error) {
+ // embed the ChainConfig struct into the response
+ chainConfigJSON, err := json.Marshal(cu.ChainConfig)
+ if err != nil {
+ return nil, err
+ }
+ if len(chainConfigJSON) > maxJSONLen {
+ return nil, errors.New("value too large")
+ }
+
+ type upgrades struct {
+ UpgradeConfig UpgradeConfig `json:"upgrades"`
+ }
+
+ upgradeJSON, err := json.Marshal(upgrades{cu.UpgradeConfig})
+ if err != nil {
+ return nil, err
+ }
+ if len(upgradeJSON) > maxJSONLen {
+ return nil, errors.New("value too large")
+ }
+
+ // merge the two JSON objects
+ mergedJSON := make([]byte, 0, len(chainConfigJSON)+len(upgradeJSON)+1)
+ mergedJSON = append(mergedJSON, chainConfigJSON[:len(chainConfigJSON)-1]...)
+ mergedJSON = append(mergedJSON, ',')
+ mergedJSON = append(mergedJSON, upgradeJSON[1:]...)
+ return mergedJSON, nil
+}
+
+func (cu *ChainConfigWithUpgradesJSON) UnmarshalJSON(input []byte) error {
+ var cc ChainConfig
+ if err := json.Unmarshal(input, &cc); err != nil {
+ return err
+ }
+
+ type upgrades struct {
+ UpgradeConfig UpgradeConfig `json:"upgrades"`
+ }
+
+ var u upgrades
+ if err := json.Unmarshal(input, &u); err != nil {
+ return err
+ }
+ cu.ChainConfig = cc
+ cu.UpgradeConfig = u.UpgradeConfig
+ return nil
+}
+
+// ToWithUpgradesJSON converts the ChainConfig to ChainConfigWithUpgradesJSON with upgrades explicitly displayed.
+// ChainConfig does not include upgrades in its JSON output.
+// This is a workaround for showing upgrades in the JSON output.
+func (c *ChainConfig) ToWithUpgradesJSON() *ChainConfigWithUpgradesJSON {
+ return &ChainConfigWithUpgradesJSON{
+ ChainConfig: *c,
+ UpgradeConfig: c.UpgradeConfig,
+ }
+}
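To see the relocated marshalers in action, a small sketch that serializes a config with its upgrades attached (assuming params.TestChainConfig is the exported test config referenced earlier in this diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ava-labs/subnet-evm/params"
)

func main() {
	// ToWithUpgradesJSON wraps the config so the normally omitted upgrade
	// config appears under an explicit "upgrades" key.
	withUpgrades := params.TestChainConfig.ToWithUpgradesJSON()

	out, err := json.Marshal(withUpgrades)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // chain config fields plus an "upgrades" object
}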
diff --git a/params/config_test.go b/params/config_test.go
index 066cc21192..1c535bcd21 100644
--- a/params/config_test.go
+++ b/params/config_test.go
@@ -157,15 +157,15 @@ func TestConfigRules(t *testing.T) {
}
var stamp uint64
- if r := c.AvalancheRules(big.NewInt(0), stamp); r.IsSubnetEVM {
+ if r := c.Rules(big.NewInt(0), stamp); r.IsSubnetEVM {
t.Errorf("expected %v to not be subnet-evm", stamp)
}
stamp = 500
- if r := c.AvalancheRules(big.NewInt(0), stamp); !r.IsSubnetEVM {
+ if r := c.Rules(big.NewInt(0), stamp); !r.IsSubnetEVM {
t.Errorf("expected %v to be subnet-evm", stamp)
}
stamp = math.MaxInt64
- if r := c.AvalancheRules(big.NewInt(0), stamp); !r.IsSubnetEVM {
+ if r := c.Rules(big.NewInt(0), stamp); !r.IsSubnetEVM {
t.Errorf("expected %v to be subnet-evm", stamp)
}
}
@@ -250,10 +250,10 @@ func TestActivePrecompiles(t *testing.T) {
},
}
- rules0 := config.AvalancheRules(common.Big0, 0)
+ rules0 := config.Rules(common.Big0, 0)
require.True(t, rules0.IsPrecompileEnabled(nativeminter.Module.Address))
- rules1 := config.AvalancheRules(common.Big0, 1)
+ rules1 := config.Rules(common.Big0, 1)
require.False(t, rules1.IsPrecompileEnabled(nativeminter.Module.Address))
}
diff --git a/params/network_upgrades.go b/params/network_upgrades.go
index 41a4366cb5..b22b9fa626 100644
--- a/params/network_upgrades.go
+++ b/params/network_upgrades.go
@@ -3,6 +3,11 @@
package params
+import (
+ "github.com/ava-labs/avalanchego/version"
+ "github.com/ava-labs/subnet-evm/utils"
+)
+
// MandatoryNetworkUpgrades contains timestamps that enable mandatory network upgrades.
// These upgrades are mandatory, meaning that if a node does not upgrade by the
// specified timestamp, it will be unable to participate in consensus.
@@ -38,6 +43,14 @@ func (m *MandatoryNetworkUpgrades) mandatoryForkOrder() []fork {
}
}
+// GetMandatoryNetworkUpgrades returns the mandatory network upgrades for the specified network ID.
+func GetMandatoryNetworkUpgrades(networkID uint32) MandatoryNetworkUpgrades {
+ return MandatoryNetworkUpgrades{
+ SubnetEVMTimestamp: utils.NewUint64(0),
+ DurangoTimestamp: getUpgradeTime(networkID, version.DurangoTimes),
+ }
+}
+
// OptionalNetworkUpgrades includes overridable and optional Subnet-EVM network upgrades.
// These can be specified in genesis and upgrade configs.
// Timestamps can be different for each subnet network.
diff --git a/params/protocol_params.go b/params/protocol_params.go
index a3cb8e2604..5668eaa8b2 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -161,9 +161,20 @@ const (
Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation
Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation
- BlobTxDataGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
- BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs
- BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price
+ // The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529,
+ // up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529
+ RefundQuotient uint64 = 2
+ RefundQuotientEIP3529 uint64 = 5
+
+ BlobTxBytesPerFieldElement = 32 // Size in bytes of a field element
+ BlobTxFieldElementsPerBlob = 4096 // Number of field elements stored in a single data blob
+ BlobTxHashVersion = 0x01 // Version byte of the commitment hash
+ BlobTxMaxBlobGasPerBlock = 1 << 19 // Maximum consumable blob gas for data blobs per block
+ BlobTxTargetBlobGasPerBlock = 1 << 18 // Target consumable blob gas for data blobs per block (for 1559-like pricing)
+ BlobTxBlobGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
+ BlobTxMinBlobGasprice = 1 // Minimum gas price for data blobs
+ BlobTxBlobGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for blob gas price
+ BlobTxPointEvaluationPrecompileGas = 50000 // Gas price for the point evaluation precompile.
)
// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
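A quick sanity check on the new blob constants, using only the values introduced above: the per-block maximum divided by the per-blob cost allows 4 blobs, and the target allows 2.

package main

import (
	"fmt"

	"github.com/ava-labs/subnet-evm/params"
)

func main() {
	maxBlobs := params.BlobTxMaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob       // (1<<19)/(1<<17) = 4
	targetBlobs := params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob // (1<<18)/(1<<17) = 2
	fmt.Println(maxBlobs, targetBlobs)                                              // 4 2
}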
diff --git a/params/version.go b/params/version.go
index b63bd113a8..f7ce5902fe 100644
--- a/params/version.go
+++ b/params/version.go
@@ -33,7 +33,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 12 // Minor version component of the current release
- VersionPatch = 0 // Patch version component of the current release
+ VersionPatch = 2 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/plugin/evm/block.go b/plugin/evm/block.go
index 0c414ebcf1..683b21d67c 100644
--- a/plugin/evm/block.go
+++ b/plugin/evm/block.go
@@ -67,7 +67,7 @@ func (b *Block) Accept(context.Context) error {
// take place before the accepted log is emitted to subscribers. Use of the
// sharedMemoryWriter ensures shared memory requests generated by
// precompiles are committed atomically with the vm's lastAcceptedKey.
- rules := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp())
+ rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp())
sharedMemoryWriter := NewSharedMemoryWriter()
if err := b.handlePrecompileAccept(rules, sharedMemoryWriter); err != nil {
return err
@@ -168,7 +168,7 @@ func (b *Block) syntacticVerify() error {
}
header := b.ethBlock.Header()
- rules := b.vm.chainConfig.AvalancheRules(header.Number, header.Time)
+ rules := b.vm.chainConfig.Rules(header.Number, header.Time)
return b.vm.syntacticBlockValidator.SyntacticVerify(b, rules)
}
@@ -182,7 +182,7 @@ func (b *Block) Verify(context.Context) error {
// ShouldVerifyWithContext implements the block.WithVerifyContext interface
func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) {
- predicates := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters
+ predicates := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters
// Short circuit early if there are no predicates to verify
if len(predicates) == 0 {
return false, nil
@@ -248,7 +248,7 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ
// verifyPredicates verifies the predicates in the block are valid according to predicateContext.
func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateContext) error {
- rules := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp())
+ rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp())
switch {
case !rules.IsDurango && rules.PredicatersExist():
diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go
index 60611dd031..0b8ac3d8b7 100644
--- a/plugin/evm/block_verification.go
+++ b/plugin/evm/block_verification.go
@@ -141,13 +141,19 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error {
}
}
- // Verify the existence / non-existence of excessDataGas
- if rules.IsCancun && ethHeader.ExcessDataGas == nil {
- return errors.New("missing excessDataGas")
+ // Verify the existence / non-existence of excessBlobGas
+ cancun := rules.IsCancun
+ if !cancun && ethHeader.ExcessBlobGas != nil {
+ return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", ethHeader.ExcessBlobGas)
}
- if !rules.IsCancun && ethHeader.ExcessDataGas != nil {
- return fmt.Errorf("invalid excessDataGas: have %d, expected nil", ethHeader.ExcessDataGas)
+ if !cancun && ethHeader.BlobGasUsed != nil {
+ return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", ethHeader.BlobGasUsed)
+ }
+ if cancun && ethHeader.ExcessBlobGas == nil {
+ return errors.New("header is missing excessBlobGas")
+ }
+ if cancun && ethHeader.BlobGasUsed == nil {
+ return errors.New("header is missing blobGasUsed")
}
-
return nil
}
diff --git a/plugin/evm/config.go b/plugin/evm/config.go
index 4871415327..5020114445 100644
--- a/plugin/evm/config.go
+++ b/plugin/evm/config.go
@@ -8,7 +8,7 @@ import (
"fmt"
"time"
- "github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/txpool/legacypool"
"github.com/ava-labs/subnet-evm/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -106,13 +106,11 @@ type Config struct {
RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"`
// Cache settings
- TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB)
- TrieCleanJournal string `json:"trie-clean-journal"` // Directory to use to save the trie clean cache (must be populated to enable journaling the trie clean cache)
- TrieCleanRejournal Duration `json:"trie-clean-rejournal"` // Frequency to re-journal the trie clean cache to disk (minimum 1 minute, must be populated to enable journaling the trie clean cache)
- TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB)
- TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB)
- TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once
- SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB)
+ TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB)
+ TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB)
+ TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB)
+ TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once
+ SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB)
// Eth Settings
Preimages bool `json:"preimages-enabled"`
@@ -239,13 +237,13 @@ func (c *Config) SetDefaults() {
c.RPCTxFeeCap = defaultRpcTxFeeCap
c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled
- c.TxPoolPriceLimit = txpool.DefaultConfig.PriceLimit
- c.TxPoolPriceBump = txpool.DefaultConfig.PriceBump
- c.TxPoolAccountSlots = txpool.DefaultConfig.AccountSlots
- c.TxPoolGlobalSlots = txpool.DefaultConfig.GlobalSlots
- c.TxPoolAccountQueue = txpool.DefaultConfig.AccountQueue
- c.TxPoolGlobalQueue = txpool.DefaultConfig.GlobalQueue
- c.TxPoolLifetime.Duration = txpool.DefaultConfig.Lifetime
+ c.TxPoolPriceLimit = legacypool.DefaultConfig.PriceLimit
+ c.TxPoolPriceBump = legacypool.DefaultConfig.PriceBump
+ c.TxPoolAccountSlots = legacypool.DefaultConfig.AccountSlots
+ c.TxPoolGlobalSlots = legacypool.DefaultConfig.GlobalSlots
+ c.TxPoolAccountQueue = legacypool.DefaultConfig.AccountQueue
+ c.TxPoolGlobalQueue = legacypool.DefaultConfig.GlobalQueue
+ c.TxPoolLifetime.Duration = legacypool.DefaultConfig.Lifetime
c.APIMaxDuration.Duration = defaultApiMaxDuration
c.WSCPURefillRate.Duration = defaultWsCpuRefillRate
diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go
index e87bb354d3..65f68fd4d2 100644
--- a/plugin/evm/gossip.go
+++ b/plugin/evm/gossip.go
@@ -133,8 +133,8 @@ func (g *GossipEthTxPool) Subscribe(ctx context.Context) {
if reset {
log.Debug("resetting bloom filter", "reason", "reached max filled ratio")
- g.mempool.IteratePending(func(tx *types.Transaction) bool {
- g.bloom.Add(&GossipEthTx{Tx: tx})
+ g.mempool.IteratePending(func(tx *txpool.Transaction) bool {
+ g.bloom.Add(&GossipEthTx{Tx: tx.Tx})
return true
})
}
@@ -147,7 +147,7 @@ func (g *GossipEthTxPool) Subscribe(ctx context.Context) {
// Add enqueues the transaction to the mempool. Subscribe should be called
// to be notified of whether the tx was actually added to the mempool.
func (g *GossipEthTxPool) Add(tx *GossipEthTx) error {
- return g.mempool.AddRemotes([]*types.Transaction{tx.Tx})[0]
+ return g.mempool.Add([]*txpool.Transaction{{Tx: tx.Tx}}, false, false)[0]
}
// Has should just return whether or not the [txID] is still in the mempool,
@@ -157,8 +157,8 @@ func (g *GossipEthTxPool) Has(txID ids.ID) bool {
}
func (g *GossipEthTxPool) Iterate(f func(tx *GossipEthTx) bool) {
- g.mempool.IteratePending(func(tx *types.Transaction) bool {
- return f(&GossipEthTx{Tx: tx})
+ g.mempool.IteratePending(func(tx *txpool.Transaction) bool {
+ return f(&GossipEthTx{Tx: tx.Tx})
})
}
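The pool API change here (AddRemotes/AddLocals folded into a single Add that takes wrapped transactions plus local and sync flags) recurs in handler.go and the tests below. A hedged sketch of the wrapping step, assuming a *txpool.TxPool as constructed in the gossip test further down:

package evmtxs // hypothetical package for this sketch

import (
	"github.com/ava-labs/subnet-evm/core/txpool"
	"github.com/ava-labs/subnet-evm/core/types"
)

// addRemoteTxs mirrors the gossip path: wrap each raw transaction and submit
// with local=false, sync=false. Locally issued transactions use (true, true),
// as the tx_gossip tests below do.
func addRemoteTxs(pool *txpool.TxPool, txs []*types.Transaction) []error {
	wrapped := make([]*txpool.Transaction, len(txs))
	for i, tx := range txs {
		wrapped[i] = &txpool.Transaction{Tx: tx}
	}
	return pool.Add(wrapped, false, false)
}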
diff --git a/plugin/evm/gossip_test.go b/plugin/evm/gossip_test.go
index e1b47ec972..b62953203a 100644
--- a/plugin/evm/gossip_test.go
+++ b/plugin/evm/gossip_test.go
@@ -14,6 +14,7 @@ import (
"github.com/ava-labs/subnet-evm/core"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/txpool"
+ "github.com/ava-labs/subnet-evm/core/txpool/legacypool"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/core/vm"
"github.com/ava-labs/subnet-evm/params"
@@ -46,8 +47,8 @@ func TestGossipSubscribe(t *testing.T) {
require.NoError(err)
txPool := setupPoolWithConfig(t, params.TestChainConfig, addr)
- defer txPool.Stop()
- txPool.SetGasPrice(common.Big1)
+ defer txPool.Close()
+ txPool.SetGasTip(common.Big1)
txPool.SetMinFee(common.Big0)
gossipTxPool, err := NewGossipEthTxPool(txPool, prometheus.NewRegistry())
@@ -97,8 +98,11 @@ func setupPoolWithConfig(t *testing.T, config *params.ChainConfig, fundedAddress
}
chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false)
require.NoError(t, err)
- testTxPoolConfig := txpool.DefaultConfig
- pool := txpool.NewTxPool(testTxPoolConfig, config, chain)
+ testTxPoolConfig := legacypool.DefaultConfig
+ legacyPool := legacypool.New(testTxPoolConfig, chain)
- return pool
+ txPool, err := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), chain, []txpool.SubPool{legacyPool})
+ require.NoError(t, err)
+
+ return txPool
}
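
For context (not part of the diff): `txpool.NewTxPool` is gone; the pool is now assembled from subpools. A hedged sketch mirroring setupPoolWithConfig above, assuming `chain` is the *core.BlockChain built by the test and `t` is the *testing.T:

cfg := legacypool.DefaultConfig
legacyPool := legacypool.New(cfg, chain)

pool, err := txpool.New(new(big.Int).SetUint64(cfg.PriceLimit), chain, []txpool.SubPool{legacyPool})
require.NoError(t, err)

defer pool.Close()          // Close replaces the old Stop
pool.SetGasTip(common.Big1) // SetGasTip replaces SetGasPrice
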
diff --git a/plugin/evm/gossiper_eth_gossiping_test.go b/plugin/evm/gossiper_eth_gossiping_test.go
index a2bb95c93b..1ce8bc753b 100644
--- a/plugin/evm/gossiper_eth_gossiping_test.go
+++ b/plugin/evm/gossiper_eth_gossiping_test.go
@@ -65,7 +65,7 @@ func getValidEthTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*type
gasPrice,
[]byte(strings.Repeat("aaaaaaaaaa", 100))),
types.HomesteadSigner{}, key)
- tx.SetFirstSeen(time.Now().Add(-1 * time.Minute))
+ tx.SetTime(time.Now().Add(-1 * time.Minute))
res[i] = tx
}
return res
@@ -92,7 +92,7 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) {
err := vm.Shutdown(context.Background())
assert.NoError(err)
}()
- vm.txPool.SetGasPrice(common.Big1)
+ vm.txPool.SetGasTip(common.Big1)
vm.txPool.SetMinFee(common.Big0)
var (
diff --git a/plugin/evm/handler.go b/plugin/evm/handler.go
index 2915d422a2..2c955b5e83 100644
--- a/plugin/evm/handler.go
+++ b/plugin/evm/handler.go
@@ -55,7 +55,11 @@ func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip
return nil
}
h.stats.IncEthTxsGossipReceived()
- errs := h.txPool.AddRemotes(txs)
+ wrapped := make([]*txpool.Transaction, len(txs))
+ for i, tx := range txs {
+ wrapped[i] = &txpool.Transaction{Tx: tx}
+ }
+ errs := h.txPool.Add(wrapped, false, false)
for i, err := range errs {
if err != nil {
log.Trace(
diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go
index fc9355ed84..c2ef03df62 100644
--- a/plugin/evm/tx_gossip_test.go
+++ b/plugin/evm/tx_gossip_test.go
@@ -27,6 +27,7 @@ import (
"google.golang.org/protobuf/proto"
+ "github.com/ava-labs/subnet-evm/core/txpool"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/utils"
)
@@ -115,7 +116,7 @@ func TestEthTxGossip(t *testing.T) {
signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key)
require.NoError(err)
- errs := vm.txPool.AddLocals([]*types.Transaction{signedTx})
+ errs := vm.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, true)
require.Len(errs, 1)
require.Nil(errs[0])
@@ -181,7 +182,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) {
require.NoError(err)
// issue a tx
- require.NoError(vm.txPool.AddLocal(signedTx))
+ require.NoError(vm.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, true)[0])
vm.ethTxPushGossiper.Get().Add(&GossipEthTx{signedTx})
sent := <-sender.SentAppGossip
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 37c4aa764e..6ac0ce42d7 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -403,8 +403,6 @@ func (vm *VM) Initialize(
vm.ethConfig.Preimages = vm.config.Preimages
vm.ethConfig.Pruning = vm.config.Pruning
vm.ethConfig.TrieCleanCache = vm.config.TrieCleanCache
- vm.ethConfig.TrieCleanJournal = vm.config.TrieCleanJournal
- vm.ethConfig.TrieCleanRejournal = vm.config.TrieCleanRejournal.Duration
vm.ethConfig.TrieDirtyCache = vm.config.TrieDirtyCache
vm.ethConfig.TrieDirtyCommitTarget = vm.config.TrieDirtyCommitTarget
vm.ethConfig.TriePrefetcherParallelism = vm.config.TriePrefetcherParallelism
@@ -546,7 +544,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash, ethConfig ethconfig.
vm.eth.SetEtherbase(ethConfig.Miner.Etherbase)
vm.txPool = vm.eth.TxPool()
vm.txPool.SetMinFee(vm.chainConfig.FeeConfig.MinBaseFee)
- vm.txPool.SetGasPrice(big.NewInt(0))
+ vm.txPool.SetGasTip(big.NewInt(0))
vm.blockChain = vm.eth.BlockChain()
vm.miner = vm.eth.Miner()
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index ea96cc7689..3cba6fd4ea 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -304,7 +304,7 @@ func TestVMUpgrades(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
_, vm, _, _ := GenesisVM(t, true, test.genesis, "", "")
- if gasPrice := vm.txPool.GasPrice(); gasPrice.Cmp(test.expectedGasPrice) != 0 {
+ if gasPrice := vm.txPool.GasTip(); gasPrice.Cmp(test.expectedGasPrice) != 0 {
t.Fatalf("Expected pool gas price to be %d but found %d", test.expectedGasPrice, gasPrice)
}
defer func() {
@@ -2082,7 +2082,7 @@ func TestBuildSubnetEVMBlock(t *testing.T) {
}
txs[i] = signedTx
}
- errs := vm.txPool.AddRemotes(txs)
+ errs := vm.txPool.AddRemotesSync(txs)
for i, err := range errs {
if err != nil {
t.Fatalf("Failed to add tx at index %d: %s", i, err)
@@ -2644,7 +2644,7 @@ func TestFeeManagerChangeFee(t *testing.T) {
t.Fatal(err)
}
- err = vm.txPool.AddRemote(signedTx2)
+ err = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2})[0]
require.ErrorIs(t, err, txpool.ErrUnderpriced)
}
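
Note (not part of the diff): `AddRemotesSync` remains available as a convenience wrapper that still accepts bare `*types.Transaction` values, and the tests above switch to it so additions are applied synchronously before assertions run. A small sketch; `pool`, `signedTx`, and `t` are assumed:

// Sketch only: synchronous remote submission during tests.
errs := pool.AddRemotesSync([]*types.Transaction{signedTx})
if err := errs[0]; err != nil && !errors.Is(err, txpool.ErrUnderpriced) {
	t.Fatalf("unexpected error adding tx: %v", err)
}
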
diff --git a/precompile/contract/interfaces.go b/precompile/contract/interfaces.go
index 20b4ab6b2f..5ac6baa486 100644
--- a/precompile/contract/interfaces.go
+++ b/precompile/contract/interfaces.go
@@ -39,8 +39,6 @@ type StateDB interface {
GetTxHash() common.Hash
- Suicide(common.Address) bool
-
Snapshot() int
RevertToSnapshot(int)
}
diff --git a/precompile/contract/mocks.go b/precompile/contract/mocks.go
index 00e726032e..6510d2d738 100644
--- a/precompile/contract/mocks.go
+++ b/precompile/contract/mocks.go
@@ -383,17 +383,3 @@ func (mr *MockStateDBMockRecorder) Snapshot() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockStateDB)(nil).Snapshot))
}
-
-// Suicide mocks base method.
-func (m *MockStateDB) Suicide(arg0 common.Address) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Suicide", arg0)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// Suicide indicates an expected call of Suicide.
-func (mr *MockStateDBMockRecorder) Suicide(arg0 any) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Suicide", reflect.TypeOf((*MockStateDB)(nil).Suicide), arg0)
-}
diff --git a/rpc/client.go b/rpc/client.go
index 989441a6f8..7a9047f310 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -43,14 +43,15 @@ import (
var (
ErrBadResult = errors.New("bad result in JSON-RPC response")
ErrClientQuit = errors.New("client is closed")
- ErrNoResult = errors.New("no result in JSON-RPC response")
+ ErrNoResult = errors.New("JSON-RPC response has no result")
+ ErrMissingBatchResponse = errors.New("response batch did not contain a response to this call")
ErrSubscriptionQueueOverflow = errors.New("subscription queue overflow")
errClientReconnected = errors.New("client reconnected")
errDead = errors.New("connection lost")
)
+// Timeouts
const (
- // Timeouts
defaultDialTimeout = 10 * time.Second // used if context has no deadline
subscribeTimeout = 10 * time.Second // overall timeout eth_subscribe, rpc_modules calls
)
@@ -93,6 +94,10 @@ type Client struct {
// This function, if non-nil, is called when the connection is lost.
reconnectFunc reconnectFunc
+ // config fields
+ batchItemLimit int
+ batchResponseMaxSize int
+
// writeConn is used for writing to the connection on the caller's goroutine. It should
// only be accessed outside of dispatch, with the write lock held. The write lock is
// taken by sending on reqInit and released by sending on reqSent.
@@ -123,7 +128,7 @@ func (c *Client) newClientConn(conn ServerCodec, apiMaxDuration, refillRate, max
ctx := context.Background()
ctx = context.WithValue(ctx, clientContextKey{}, c)
ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo())
- handler := newHandler(ctx, conn, c.idgen, c.services)
+ handler := newHandler(ctx, conn, c.idgen, c.services, c.batchItemLimit, c.batchResponseMaxSize)
// When [apiMaxDuration] or [refillRate]/[maxStored] is 0 (as is the case for
// all client invocations of this function), it is ignored.
@@ -142,14 +147,17 @@ type readOp struct {
batch bool
}
+// requestOp represents a pending request. This is used for both batch and non-batch
+// requests.
type requestOp struct {
- ids []json.RawMessage
- err error
- resp chan *jsonrpcMessage // receives up to len(ids) responses
- sub *ClientSubscription // only set for EthSubscribe requests
+ ids []json.RawMessage
+ err error
+ resp chan []*jsonrpcMessage // the response goes here
+ sub *ClientSubscription // set for Subscribe requests.
+ hadResponse bool // true when the request was responded to
}
-func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, error) {
+func (op *requestOp) wait(ctx context.Context, c *Client) ([]*jsonrpcMessage, error) {
select {
case <-ctx.Done():
// Send the timeout to dispatch so it can remove the request IDs.
@@ -225,7 +233,7 @@ func DialOptions(ctx context.Context, rawurl string, options ...ClientOption) (*
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
- return newClient(ctx, reconnect)
+ return newClient(ctx, cfg, reconnect)
}
// ClientFromContext retrieves the client from the context, if any. This can be used to perform
@@ -235,34 +243,43 @@ func ClientFromContext(ctx context.Context) (*Client, bool) {
return client, ok
}
-func newClient(initctx context.Context, connect reconnectFunc) (*Client, error) {
+func newClient(initctx context.Context, cfg *clientConfig, connect reconnectFunc) (*Client, error) {
conn, err := connect(initctx)
if err != nil {
return nil, err
}
- c := initClient(conn, randomIDGenerator(), new(serviceRegistry), 0, 0, 0)
+ c := initClient(conn, new(serviceRegistry), cfg, 0, 0, 0)
c.reconnectFunc = connect
return c, nil
}
-func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry, apiMaxDuration, refillRate, maxStored time.Duration) *Client {
+func initClient(conn ServerCodec, services *serviceRegistry, cfg *clientConfig, apiMaxDuration, refillRate, maxStored time.Duration) *Client {
_, isHTTP := conn.(*httpConn)
c := &Client{
- idgen: idgen,
- isHTTP: isHTTP,
- services: services,
- writeConn: conn,
- close: make(chan struct{}),
- closing: make(chan struct{}),
- didClose: make(chan struct{}),
- reconnected: make(chan ServerCodec),
- readOp: make(chan readOp),
- readErr: make(chan error),
- reqInit: make(chan *requestOp),
- reqSent: make(chan error, 1),
- reqTimeout: make(chan *requestOp),
- }
- if !c.isHTTP {
+ isHTTP: isHTTP,
+ services: services,
+ idgen: cfg.idgen,
+ batchItemLimit: cfg.batchItemLimit,
+ batchResponseMaxSize: cfg.batchResponseLimit,
+ writeConn: conn,
+ close: make(chan struct{}),
+ closing: make(chan struct{}),
+ didClose: make(chan struct{}),
+ reconnected: make(chan ServerCodec),
+ readOp: make(chan readOp),
+ readErr: make(chan error),
+ reqInit: make(chan *requestOp),
+ reqSent: make(chan error, 1),
+ reqTimeout: make(chan *requestOp),
+ }
+
+ // Set defaults.
+ if c.idgen == nil {
+ c.idgen = randomIDGenerator()
+ }
+
+ // Launch the main loop.
+ if !isHTTP {
go c.dispatch(conn, apiMaxDuration, refillRate, maxStored)
}
return c
@@ -339,7 +356,10 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
if err != nil {
return err
}
- op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *jsonrpcMessage, 1)}
+ op := &requestOp{
+ ids: []json.RawMessage{msg.ID},
+ resp: make(chan []*jsonrpcMessage, 1),
+ }
if c.isHTTP {
err = c.sendHTTP(ctx, op, msg)
@@ -351,9 +371,12 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
}
// dispatch has accepted the request and will close the channel when it quits.
- switch resp, err := op.wait(ctx, c); {
- case err != nil:
+ batchresp, err := op.wait(ctx, c)
+ if err != nil {
return err
+ }
+ resp := batchresp[0]
+ switch {
case resp.Error != nil:
return resp.Error
case len(resp.Result) == 0:
@@ -394,7 +417,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
)
op := &requestOp{
ids: make([]json.RawMessage, len(b)),
- resp: make(chan *jsonrpcMessage, len(b)),
+ resp: make(chan []*jsonrpcMessage, 1),
}
for i, elem := range b {
msg, err := c.newMessage(elem.Method, elem.Args...)
@@ -412,28 +435,48 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
} else {
err = c.send(ctx, op, msgs)
}
+ if err != nil {
+ return err
+ }
+
+ batchresp, err := op.wait(ctx, c)
+ if err != nil {
+ return err
+ }
// Wait for all responses to come back.
- for n := 0; n < len(b) && err == nil; n++ {
- var resp *jsonrpcMessage
- resp, err = op.wait(ctx, c)
- if err != nil {
- break
+ for n := 0; n < len(batchresp) && err == nil; n++ {
+ resp := batchresp[n]
+ if resp == nil {
+ // Ignore null responses. These can happen for batches sent via HTTP.
+ continue
}
+
// Find the element corresponding to this response.
- // The element is guaranteed to be present because dispatch
- // only sends valid IDs to our channel.
- elem := &b[byID[string(resp.ID)]]
- if resp.Error != nil {
- elem.Error = resp.Error
+ index, ok := byID[string(resp.ID)]
+ if !ok {
continue
}
- if len(resp.Result) == 0 {
+ delete(byID, string(resp.ID))
+
+ // Assign result and error.
+ elem := &b[index]
+ switch {
+ case resp.Error != nil:
+ elem.Error = resp.Error
+ case resp.Result == nil:
elem.Error = ErrNoResult
- continue
+ default:
+ elem.Error = json.Unmarshal(resp.Result, elem.Result)
}
- elem.Error = json.Unmarshal(resp.Result, elem.Result)
}
+
+ // Check that all expected responses have been received.
+ for _, index := range byID {
+ elem := &b[index]
+ elem.Error = ErrMissingBatchResponse
+ }
+
return err
}
@@ -494,7 +537,7 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf
}
op := &requestOp{
ids: []json.RawMessage{msg.ID},
- resp: make(chan *jsonrpcMessage),
+ resp: make(chan []*jsonrpcMessage, 1),
sub: newClientSubscription(c, namespace, chanVal),
}
@@ -509,6 +552,13 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf
return op.sub, nil
}
+// SupportsSubscriptions reports whether subscriptions are supported by the client
+// transport. When this returns false, Subscribe and related methods will return
+// ErrNotificationsUnsupported.
+func (c *Client) SupportsSubscriptions() bool {
+ return !c.isHTTP
+}
+
func (c *Client) newMessage(method string, paramsIn ...interface{}) (*jsonrpcMessage, error) {
msg := &jsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method}
if paramsIn != nil { // prevent sending "params":null
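
Usage note (not part of the diff): with responses now delivered as one `[]*jsonrpcMessage` slice, a batch call can partially succeed, so callers should inspect each `BatchElem.Error` rather than relying on a single returned error. A hedged sketch, assuming `ctx` and `client` are in scope and using ordinary eth_ method names as placeholders:

batch := []rpc.BatchElem{
	{Method: "eth_blockNumber", Result: new(string)},
	{Method: "eth_chainId", Result: new(string)},
}
if err := client.BatchCallContext(ctx, batch); err != nil {
	return err // transport-level failure: no responses were delivered at all
}
for i := range batch {
	switch {
	case errors.Is(batch[i].Error, rpc.ErrMissingBatchResponse):
		// the server never answered this call (e.g. a batch limit cut it off)
	case batch[i].Error != nil:
		// per-call JSON-RPC error or unmarshal failure
	}
}
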
diff --git a/rpc/client_opt.go b/rpc/client_opt.go
index c1b9931253..dfbef66b86 100644
--- a/rpc/client_opt.go
+++ b/rpc/client_opt.go
@@ -38,11 +38,18 @@ type ClientOption interface {
}
type clientConfig struct {
+ // HTTP settings
httpClient *http.Client
httpHeaders http.Header
httpAuth HTTPAuth
+ // WebSocket options
wsDialer *websocket.Dialer
+
+ // RPC handler options
+ idgen func() ID
+ batchItemLimit int
+ batchResponseLimit int
}
func (cfg *clientConfig) initHeaders() {
@@ -114,3 +121,25 @@ func WithHTTPAuth(a HTTPAuth) ClientOption {
// Usually, HTTPAuth functions will call h.Set("authorization", "...") to add
// auth information to the request.
type HTTPAuth func(h http.Header) error
+
+// WithBatchItemLimit changes the maximum number of items allowed in batch requests.
+//
+// Note: this option applies when processing incoming batch requests. It does not affect
+// batch requests sent by the client.
+func WithBatchItemLimit(limit int) ClientOption {
+ return optionFunc(func(cfg *clientConfig) {
+ cfg.batchItemLimit = limit
+ })
+}
+
+// WithBatchResponseSizeLimit changes the maximum number of response bytes that can be
+// generated for batch requests. When this limit is reached, further calls in the batch
+// will not be processed.
+//
+// Note: this option applies when processing incoming batch requests. It does not affect
+// batch requests sent by the client.
+func WithBatchResponseSizeLimit(sizeLimit int) ClientOption {
+ return optionFunc(func(cfg *clientConfig) {
+ cfg.batchResponseLimit = sizeLimit
+ })
+}
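
Example (not part of the diff): the new options are wired in through DialOptions like the existing HTTP/WS options; as the comments above note, they cap batches this end processes, not batches it sends. A sketch with a placeholder endpoint URL:

client, err := rpc.DialOptions(ctx,
	"ws://127.0.0.1:9650/ext/bc/mychain/ws", // placeholder endpoint
	rpc.WithBatchItemLimit(100),
	rpc.WithBatchResponseSizeLimit(25*1000*1000),
)
if err != nil {
	return err
}
defer client.Close()
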
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 4ec0ef4122..ede8045fac 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -38,6 +38,7 @@ import (
"reflect"
"runtime"
"strings"
+ "sync"
"testing"
"time"
@@ -177,10 +178,12 @@ func TestClientBatchRequest(t *testing.T) {
}
}
+// This checks that, for HTTP connections, the length of batch responses is validated to
+// match the request exactly.
func TestClientBatchRequest_len(t *testing.T) {
b, err := json.Marshal([]jsonrpcMessage{
- {Version: "2.0", ID: json.RawMessage("1"), Method: "foo", Result: json.RawMessage(`"0x1"`)},
- {Version: "2.0", ID: json.RawMessage("2"), Method: "bar", Result: json.RawMessage(`"0x2"`)},
+ {Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)},
+ {Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)},
})
if err != nil {
t.Fatal("failed to encode jsonrpc message:", err)
@@ -193,37 +196,102 @@ func TestClientBatchRequest_len(t *testing.T) {
}))
t.Cleanup(s.Close)
- client, err := Dial(s.URL)
- if err != nil {
- t.Fatal("failed to dial test server:", err)
- }
- defer client.Close()
-
t.Run("too-few", func(t *testing.T) {
+ client, err := Dial(s.URL)
+ if err != nil {
+ t.Fatal("failed to dial test server:", err)
+ }
+ defer client.Close()
+
batch := []BatchElem{
- {Method: "foo"},
- {Method: "bar"},
- {Method: "baz"},
+ {Method: "foo", Result: new(string)},
+ {Method: "bar", Result: new(string)},
+ {Method: "baz", Result: new(string)},
}
ctx, cancelFn := context.WithTimeout(context.Background(), time.Second)
defer cancelFn()
- if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) {
- t.Errorf("expected %q but got: %v", ErrBadResult, err)
+
+ if err := client.BatchCallContext(ctx, batch); err != nil {
+ t.Fatal("error:", err)
+ }
+ for i, elem := range batch[:2] {
+ if elem.Error != nil {
+ t.Errorf("expected no error for batch element %d, got %q", i, elem.Error)
+ }
+ }
+ for i, elem := range batch[2:] {
+ if elem.Error != ErrMissingBatchResponse {
+ t.Errorf("wrong error %q for batch element %d", elem.Error, i+2)
+ }
}
})
t.Run("too-many", func(t *testing.T) {
+ client, err := Dial(s.URL)
+ if err != nil {
+ t.Fatal("failed to dial test server:", err)
+ }
+ defer client.Close()
+
batch := []BatchElem{
- {Method: "foo"},
+ {Method: "foo", Result: new(string)},
}
ctx, cancelFn := context.WithTimeout(context.Background(), time.Second)
defer cancelFn()
- if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) {
- t.Errorf("expected %q but got: %v", ErrBadResult, err)
+
+ if err := client.BatchCallContext(ctx, batch); err != nil {
+ t.Fatal("error:", err)
+ }
+ for i, elem := range batch[:1] {
+ if elem.Error != nil {
+ t.Errorf("expected no error for batch element %d, got %q", i, elem.Error)
+ }
+ }
+ for i, elem := range batch[1:] {
+ if elem.Error != ErrMissingBatchResponse {
+ t.Errorf("wrong error %q for batch element %d", elem.Error, i+2)
+ }
}
})
}
+// This checks that the client can handle the case where the server doesn't
+// respond to all requests in a batch.
+func TestClientBatchRequestLimit(t *testing.T) {
+ server := newTestServer()
+ defer server.Stop()
+ server.SetBatchLimits(2, 100000)
+ client := DialInProc(server)
+
+ batch := []BatchElem{
+ {Method: "foo"},
+ {Method: "bar"},
+ {Method: "baz"},
+ }
+ err := client.BatchCall(batch)
+ if err != nil {
+ t.Fatal("unexpected error:", err)
+ }
+
+ // Check that the first response indicates an error with batch size.
+ var err0 Error
+ if !errors.As(batch[0].Error, &err0) {
+ t.Log("error zero:", batch[0].Error)
+ t.Fatalf("batch elem 0 has wrong error type: %T", batch[0].Error)
+ } else {
+ if err0.ErrorCode() != -32600 || err0.Error() != errMsgBatchTooLarge {
+ t.Fatalf("wrong error on batch elem zero: %v", err0)
+ }
+ }
+
+ // Check that remaining response batch elements are reported as absent.
+ for i, elem := range batch[1:] {
+ if elem.Error != ErrMissingBatchResponse {
+ t.Fatalf("batch elem %d has unexpected error: %v", i+1, elem.Error)
+ }
+ }
+}
+
func TestClientNotify(t *testing.T) {
server := newTestServer()
defer server.Stop()
@@ -251,99 +319,85 @@ func testClientCancel(transport string, t *testing.T) {
server := newTestServer()
defer server.Stop()
-}
-// func TestClientCancelIPC(t *testing.T) { testClientCancel("ipc", t) }
-
-// // This test checks that requests made through CallContext can be canceled by canceling
-// // the context.
-// func testClientCancel(transport string, t *testing.T) {
-// // These tests take a lot of time, run them all at once.
-// // You probably want to run with -parallel 1 or comment out
-// // the call to t.Parallel if you enable the logging.
-// t.Parallel()
-
-// server := newTestServer()
-// defer server.Stop()
-
-// // What we want to achieve is that the context gets canceled
-// // at various stages of request processing. The interesting cases
-// // are:
-// // - cancel during dial
-// // - cancel while performing a HTTP request
-// // - cancel while waiting for a response
-// //
-// // To trigger those, the times are chosen such that connections
-// // are killed within the deadline for every other call (maxKillTimeout
-// // is 2x maxCancelTimeout).
-// //
-// // Once a connection is dead, there is a fair chance it won't connect
-// // successfully because the accept is delayed by 1s.
-// maxContextCancelTimeout := 300 * time.Millisecond
-// fl := &flakeyListener{
-// maxAcceptDelay: 1 * time.Second,
-// maxKillTimeout: 600 * time.Millisecond,
-// }
+ // What we want to achieve is that the context gets canceled
+ // at various stages of request processing. The interesting cases
+ // are:
+ // - cancel during dial
+ // - cancel while performing a HTTP request
+ // - cancel while waiting for a response
+ //
+ // To trigger those, the times are chosen such that connections
+ // are killed within the deadline for every other call (maxKillTimeout
+ // is 2x maxCancelTimeout).
+ //
+ // Once a connection is dead, there is a fair chance it won't connect
+ // successfully because the accept is delayed by 1s.
+ maxContextCancelTimeout := 300 * time.Millisecond
+ fl := &flakeyListener{
+ maxAcceptDelay: 1 * time.Second,
+ maxKillTimeout: 600 * time.Millisecond,
+ }
+
+ var client *Client
+ switch transport {
+ case "ws", "http":
+ c, hs := httpTestClient(server, transport, fl)
+ defer hs.Close()
+ client = c
+ // case "ipc":
+ // c, l := ipcTestClient(server, fl)
+ // defer l.Close()
+ // client = c
+ default:
+ panic("unknown transport: " + transport)
+ }
-// var client *Client
-// switch transport {
-// case "ws", "http":
-// c, hs := httpTestClient(server, transport, fl)
-// defer hs.Close()
-// client = c
-// case "ipc":
-// c, l := ipcTestClient(server, fl)
-// defer l.Close()
-// client = c
-// default:
-// panic("unknown transport: " + transport)
-// }
+ // The actual test starts here.
+ var (
+ wg sync.WaitGroup
+ nreqs = 10
+ ncallers = 10
+ )
+ caller := func(index int) {
+ defer wg.Done()
+ for i := 0; i < nreqs; i++ {
+ var (
+ ctx context.Context
+ cancel func()
+ timeout = time.Duration(rand.Int63n(int64(maxContextCancelTimeout)))
+ )
+ if index < ncallers/2 {
+ // For half of the callers, create a context without deadline
+ // and cancel it later.
+ ctx, cancel = context.WithCancel(context.Background())
+ time.AfterFunc(timeout, cancel)
+ } else {
+ // For the other half, create a context with a deadline instead. This is
+ // different because the context deadline is used to set the socket write
+ // deadline.
+ ctx, cancel = context.WithTimeout(context.Background(), timeout)
+ }
-// // The actual test starts here.
-// var (
-// wg sync.WaitGroup
-// nreqs = 10
-// ncallers = 10
-// )
-// caller := func(index int) {
-// defer wg.Done()
-// for i := 0; i < nreqs; i++ {
-// var (
-// ctx context.Context
-// cancel func()
-// timeout = time.Duration(rand.Int63n(int64(maxContextCancelTimeout)))
-// )
-// if index < ncallers/2 {
-// // For half of the callers, create a context without deadline
-// // and cancel it later.
-// ctx, cancel = context.WithCancel(context.Background())
-// time.AfterFunc(timeout, cancel)
-// } else {
-// // For the other half, create a context with a deadline instead. This is
-// // different because the context deadline is used to set the socket write
-// // deadline.
-// ctx, cancel = context.WithTimeout(context.Background(), timeout)
-// }
-
-// // Now perform a call with the context.
-// // The key thing here is that no call will ever complete successfully.
-// err := client.CallContext(ctx, nil, "test_block")
-// switch {
-// case err == nil:
-// _, hasDeadline := ctx.Deadline()
-// t.Errorf("no error for call with %v wait time (deadline: %v)", timeout, hasDeadline)
-// // default:
-// // t.Logf("got expected error with %v wait time: %v", timeout, err)
-// }
-// cancel()
-// }
-// }
-// wg.Add(ncallers)
-// for i := 0; i < ncallers; i++ {
-// go caller(i)
-// }
-// wg.Wait()
-// }
+ // Now perform a call with the context.
+ // The key thing here is that no call will ever complete successfully.
+ err := client.CallContext(ctx, nil, "test_block")
+ switch {
+ case err == nil:
+ _, hasDeadline := ctx.Deadline()
+ t.Errorf("no error for call with %v wait time (deadline: %v)", timeout, hasDeadline)
+ // default:
+ // t.Logf("got expected error with %v wait time: %v", timeout, err)
+ }
+ cancel()
+ }
+ }
+ wg.Add(ncallers)
+ for i := 0; i < ncallers; i++ {
+ go caller(i)
+ }
+ wg.Wait()
+}
func TestClientSubscribeInvalidArg(t *testing.T) {
server := newTestServer()
@@ -510,7 +564,8 @@ func TestClientSubscriptionUnsubscribeServer(t *testing.T) {
defer srv.Stop()
// Create the client on the other end of the pipe.
- client, _ := newClient(context.Background(), func(context.Context) (ServerCodec, error) {
+ cfg := new(clientConfig)
+ client, _ := newClient(context.Background(), cfg, func(context.Context) (ServerCodec, error) {
return NewCodec(p2), nil
})
defer client.Close()
diff --git a/rpc/errors.go b/rpc/errors.go
index 7e39510087..44094715e1 100644
--- a/rpc/errors.go
+++ b/rpc/errors.go
@@ -68,15 +68,19 @@ var (
)
const (
- errcodeDefault = -32000
- errcodeNotificationsUnsupported = -32001
- errcodeTimeout = -32002
- errcodePanic = -32603
- errcodeMarshalError = -32603
+ errcodeDefault = -32000
+ errcodeTimeout = -32002
+ errcodeResponseTooLarge = -32003
+ errcodePanic = -32603
+ errcodeMarshalError = -32603
+
+ legacyErrcodeNotificationsUnsupported = -32001
)
const (
- errMsgTimeout = "request timed out"
+ errMsgTimeout = "request timed out"
+ errMsgResponseTooLarge = "response too large"
+ errMsgBatchTooLarge = "batch too large"
)
type methodNotFoundError struct{ method string }
@@ -87,6 +91,34 @@ func (e *methodNotFoundError) Error() string {
return fmt.Sprintf("the method %s does not exist/is not available", e.method)
}
+type notificationsUnsupportedError struct{}
+
+func (e notificationsUnsupportedError) Error() string {
+ return "notifications not supported"
+}
+
+func (e notificationsUnsupportedError) ErrorCode() int { return -32601 }
+
+// Is checks for equivalence to another error. Here we define that all errors with code
+// -32601 (method not found) are equivalent to notificationsUnsupportedError. This is
+// done to enable the following pattern:
+//
+// sub, err := client.Subscribe(...)
+// if errors.Is(err, rpc.ErrNotificationsUnsupported) {
+// // server doesn't support subscriptions
+// }
+func (e notificationsUnsupportedError) Is(other error) bool {
+ if other == (notificationsUnsupportedError{}) {
+ return true
+ }
+ rpcErr, ok := other.(Error)
+ if ok {
+ code := rpcErr.ErrorCode()
+ return code == -32601 || code == legacyErrcodeNotificationsUnsupported
+ }
+ return false
+}
+
type subscriptionNotFoundError struct{ namespace, subscription string }
func (e *subscriptionNotFoundError) ErrorCode() int { return -32601 }
diff --git a/rpc/handler.go b/rpc/handler.go
index 00bafad427..8ef948696f 100644
--- a/rpc/handler.go
+++ b/rpc/handler.go
@@ -61,17 +61,19 @@ import (
// h.removeRequestOp(op) // timeout, etc.
// }
type handler struct {
- reg *serviceRegistry
- unsubscribeCb *callback
- idgen func() ID // subscription ID generator
- respWait map[string]*requestOp // active client requests
- clientSubs map[string]*ClientSubscription // active client subscriptions
- callWG sync.WaitGroup // pending call goroutines
- rootCtx context.Context // canceled by close()
- cancelRoot func() // cancel function for rootCtx
- conn jsonWriter // where responses will be sent
- log log.Logger
- allowSubscribe bool
+ reg *serviceRegistry
+ unsubscribeCb *callback
+ idgen func() ID // subscription ID generator
+ respWait map[string]*requestOp // active client requests
+ clientSubs map[string]*ClientSubscription // active client subscriptions
+ callWG sync.WaitGroup // pending call goroutines
+ rootCtx context.Context // canceled by close()
+ cancelRoot func() // cancel function for rootCtx
+ conn jsonWriter // where responses will be sent
+ log log.Logger
+ allowSubscribe bool
+ batchRequestLimit int
+ batchResponseMaxSize int
subLock sync.Mutex
serverSubs map[ID]*Subscription
@@ -87,19 +89,21 @@ type callProc struct {
procStart time.Time
}
-func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler {
+func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int) *handler {
rootCtx, cancelRoot := context.WithCancel(connCtx)
h := &handler{
- reg: reg,
- idgen: idgen,
- conn: conn,
- respWait: make(map[string]*requestOp),
- clientSubs: make(map[string]*ClientSubscription),
- rootCtx: rootCtx,
- cancelRoot: cancelRoot,
- allowSubscribe: true,
- serverSubs: make(map[ID]*Subscription),
- log: log.Root(),
+ reg: reg,
+ idgen: idgen,
+ conn: conn,
+ respWait: make(map[string]*requestOp),
+ clientSubs: make(map[string]*ClientSubscription),
+ rootCtx: rootCtx,
+ cancelRoot: cancelRoot,
+ allowSubscribe: true,
+ serverSubs: make(map[ID]*Subscription),
+ log: log.Root(),
+ batchRequestLimit: batchRequestLimit,
+ batchResponseMaxSize: batchResponseMaxSize,
}
if conn.remoteAddr() != "" {
h.log = h.log.New("conn", conn.remoteAddr())
@@ -151,16 +155,15 @@ func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) {
b.doWrite(ctx, conn, false)
}
-// timeout sends the responses added so far. For the remaining unanswered call
-// messages, it sends a timeout error response.
-func (b *batchCallBuffer) timeout(ctx context.Context, conn jsonWriter) {
+// respondWithError sends the responses added so far. For the remaining unanswered call
+// messages, it responds with the given error.
+func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, err error) {
b.mutex.Lock()
defer b.mutex.Unlock()
for _, msg := range b.calls {
if !msg.isNotification() {
- resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
- b.resp = append(b.resp, resp)
+ b.resp = append(b.resp, msg.errorResponse(err))
}
}
b.doWrite(ctx, conn, true)
@@ -200,17 +203,24 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
})
return
}
+ // Apply limit on total number of requests.
+ if h.batchRequestLimit != 0 && len(msgs) > h.batchRequestLimit {
+ h.startCallProc(func(cp *callProc) {
+ h.respondWithBatchTooLarge(cp, msgs)
+ })
+ return
+ }
- // Handle non-call messages first:
+ // Handle non-call messages first.
+ // Here we need to find the requestOp that sent the request batch.
calls := make([]*jsonrpcMessage, 0, len(msgs))
- for _, msg := range msgs {
- if handled := h.handleImmediate(msg); !handled {
- calls = append(calls, msg)
- }
- }
+ h.handleResponses(msgs, func(msg *jsonrpcMessage) {
+ calls = append(calls, msg)
+ })
if len(calls) == 0 {
return
}
+
// Process calls on a goroutine because they may block indefinitely:
h.startCallProc(func(cp *callProc) {
var (
@@ -228,10 +238,12 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
timer = time.AfterFunc(timeout, func() {
cancel()
- callBuffer.timeout(cp.ctx, h.conn)
+ err := &internalServerError{errcodeTimeout, errMsgTimeout}
+ callBuffer.respondWithError(cp.ctx, h.conn, err)
})
}
+ responseBytes := 0
for {
// No need to handle rest of calls if timed out.
if cp.ctx.Err() != nil {
@@ -243,59 +255,86 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
}
resp := h.handleCallMsg(cp, msg)
callBuffer.pushResponse(resp)
+ if resp != nil && h.batchResponseMaxSize != 0 {
+ responseBytes += len(resp.Result)
+ if responseBytes > h.batchResponseMaxSize {
+ err := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge}
+ callBuffer.respondWithError(cp.ctx, h.conn, err)
+ break
+ }
+ }
}
if timer != nil {
timer.Stop()
}
- callBuffer.write(cp.ctx, h.conn)
+
h.addSubscriptions(cp.notifiers)
+ callBuffer.write(cp.ctx, h.conn)
for _, n := range cp.notifiers {
n.activate()
}
})
}
-// handleMsg handles a single message.
-func (h *handler) handleMsg(msg *jsonrpcMessage) {
- if ok := h.handleImmediate(msg); ok {
- return
+func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage) {
+ resp := errorMessage(&invalidRequestError{errMsgBatchTooLarge})
+ // Find the first call and add its "id" field to the error.
+ // This is the best we can do, given that the protocol doesn't have a way
+ // of reporting an error for the entire batch.
+ for _, msg := range batch {
+ if msg.isCall() {
+ resp.ID = msg.ID
+ break
+ }
}
- h.startCallProc(func(cp *callProc) {
- var (
- responded sync.Once
- timer *time.Timer
- cancel context.CancelFunc
- )
- cp.ctx, cancel = context.WithCancel(cp.ctx)
- defer cancel()
+ h.conn.writeJSONSkipDeadline(cp.ctx, []*jsonrpcMessage{resp}, true, h.deadlineContext > 0)
+}
- // Cancel the request context after timeout and send an error response. Since the
- // running method might not return immediately on timeout, we must wait for the
- // timeout concurrently with processing the request.
- if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
- timer = time.AfterFunc(timeout, func() {
- cancel()
- responded.Do(func() {
- resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
- h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0)
- })
- })
- }
+// handleMsg handles a single non-batch message.
+func (h *handler) handleMsg(msg *jsonrpcMessage) {
+ msgs := []*jsonrpcMessage{msg}
+ h.handleResponses(msgs, func(msg *jsonrpcMessage) {
+ h.startCallProc(func(cp *callProc) {
+ h.handleNonBatchCall(cp, msg)
+ })
+ })
+}
- answer := h.handleCallMsg(cp, msg)
- if timer != nil {
- timer.Stop()
- }
- h.addSubscriptions(cp.notifiers)
- if answer != nil {
+func (h *handler) handleNonBatchCall(cp *callProc, msg *jsonrpcMessage) {
+ var (
+ responded sync.Once
+ timer *time.Timer
+ cancel context.CancelFunc
+ )
+ cp.ctx, cancel = context.WithCancel(cp.ctx)
+ defer cancel()
+
+ // Cancel the request context after timeout and send an error response. Since the
+ // running method might not return immediately on timeout, we must wait for the
+ // timeout concurrently with processing the request.
+ if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
+ timer = time.AfterFunc(timeout, func() {
+ cancel()
responded.Do(func() {
- h.conn.writeJSONSkipDeadline(cp.ctx, answer, false, h.deadlineContext > 0)
+ resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
+ h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0)
})
- }
- for _, n := range cp.notifiers {
- n.activate()
- }
- })
+ })
+ }
+
+ answer := h.handleCallMsg(cp, msg)
+ if timer != nil {
+ timer.Stop()
+ }
+ h.addSubscriptions(cp.notifiers)
+ if answer != nil {
+ responded.Do(func() {
+ h.conn.writeJSONSkipDeadline(cp.ctx, answer, false, h.deadlineContext > 0)
+ })
+ }
+ for _, n := range cp.notifiers {
+ n.activate()
+ }
}
// close cancels all requests except for inflightReq and waits for
@@ -437,23 +476,60 @@ func (h *handler) startCallProc(fn func(*callProc)) {
}
}
-// handleImmediate executes non-call messages. It returns false if the message is a
-// call or requires a reply.
-func (h *handler) handleImmediate(msg *jsonrpcMessage) bool {
- execStart := time.Now()
- switch {
- case msg.isNotification():
- if strings.HasSuffix(msg.Method, notificationMethodSuffix) {
- h.handleSubscriptionResult(msg)
- return true
+// handleResponses processes method call responses.
+func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) {
+ var resolvedops []*requestOp
+ handleResp := func(msg *jsonrpcMessage) {
+ op := h.respWait[string(msg.ID)]
+ if op == nil {
+ h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID})
+ return
+ }
+ resolvedops = append(resolvedops, op)
+ delete(h.respWait, string(msg.ID))
+
+ // For subscription responses, start the subscription if the server
+ // indicates success. EthSubscribe gets unblocked in either case through
+ // the op.resp channel.
+ if op.sub != nil {
+ if msg.Error != nil {
+ op.err = msg.Error
+ } else {
+ op.err = json.Unmarshal(msg.Result, &op.sub.subid)
+ if op.err == nil {
+ go op.sub.run()
+ h.clientSubs[op.sub.subid] = op.sub
+ }
+ }
+ }
+
+ if !op.hadResponse {
+ op.hadResponse = true
+ op.resp <- batch
}
- return false
- case msg.isResponse():
- h.handleResponse(msg)
- h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(execStart))
- return true
- default:
- return false
+ }
+
+ for _, msg := range batch {
+ start := time.Now()
+ switch {
+ case msg.isResponse():
+ handleResp(msg)
+ h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start))
+
+ case msg.isNotification():
+ if strings.HasSuffix(msg.Method, notificationMethodSuffix) {
+ h.handleSubscriptionResult(msg)
+ continue
+ }
+ handleCall(msg)
+
+ default:
+ handleCall(msg)
+ }
+ }
+
+ for _, op := range resolvedops {
+ h.removeRequestOp(op)
}
}
@@ -469,33 +545,6 @@ func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) {
}
}
-// handleResponse processes method call responses.
-func (h *handler) handleResponse(msg *jsonrpcMessage) {
- op := h.respWait[string(msg.ID)]
- if op == nil {
- h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID})
- return
- }
- delete(h.respWait, string(msg.ID))
- // For normal responses, just forward the reply to Call/BatchCall.
- if op.sub == nil {
- op.resp <- msg
- return
- }
- // For subscription responses, start the subscription if the server
- // indicates success. EthSubscribe gets unblocked in either case through
- // the op.resp channel.
- defer close(op.resp)
- if msg.Error != nil {
- op.err = msg.Error
- return
- }
- if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil {
- go op.sub.run()
- h.clientSubs[op.sub.subid] = op.sub
- }
-}
-
// handleCallMsg executes a call message and returns the answer.
func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
// [callStart] is the time the message was enqueued for handler processing
@@ -514,6 +563,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess
h.handleCall(ctx, msg)
h.log.Debug("Served "+msg.Method, "execTime", time.Since(execStart), "procTime", time.Since(procStart), "totalTime", time.Since(callStart))
return nil
+
case msg.isCall():
resp := h.handleCall(ctx, msg)
var ctx []interface{}
@@ -528,8 +578,10 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess
h.log.Debug("Served "+msg.Method, ctx...)
}
return resp
+
case msg.hasValidID():
return msg.errorResponse(&invalidRequestError{"invalid request"})
+
default:
return errorMessage(&invalidRequestError{"invalid request"})
}
@@ -549,12 +601,14 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage
if callb == nil {
return msg.errorResponse(&methodNotFoundError{method: msg.Method})
}
+
args, err := parsePositionalArguments(msg.Params, callb.argTypes)
if err != nil {
return msg.errorResponse(&invalidParamsError{err.Error()})
}
start := time.Now()
answer := h.runMethod(cp.ctx, msg, callb, args)
+
// Collect the statistics for RPC calls if metrics is enabled.
// We only care about pure rpc call. Filter out subscription.
if callb != h.unsubscribeCb {
@@ -569,16 +623,14 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage
updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start))
}
}
+
return answer
}
// handleSubscribe processes *_subscribe method calls.
func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
if !h.allowSubscribe {
- return msg.errorResponse(&internalServerError{
- code: errcodeNotificationsUnsupported,
- message: ErrNotificationsUnsupported.Error(),
- })
+ return msg.errorResponse(ErrNotificationsUnsupported)
}
// Subscription method name is first argument.
diff --git a/rpc/http.go b/rpc/http.go
index 56fea59f2c..a3ff1eac7f 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -153,7 +153,7 @@ func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) {
var cfg clientConfig
cfg.httpClient = client
fn := newClientTransportHTTP(endpoint, &cfg)
- return newClient(context.Background(), fn)
+ return newClient(context.Background(), &cfg, fn)
}
func newClientTransportHTTP(endpoint string, cfg *clientConfig) reconnectFunc {
@@ -190,11 +190,12 @@ func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) e
}
defer respBody.Close()
- var respmsg jsonrpcMessage
- if err := json.NewDecoder(respBody).Decode(&respmsg); err != nil {
+ var resp jsonrpcMessage
+ batch := [1]*jsonrpcMessage{&resp}
+ if err := json.NewDecoder(respBody).Decode(&resp); err != nil {
return err
}
- op.resp <- &respmsg
+ op.resp <- batch[:]
return nil
}
@@ -205,16 +206,12 @@ func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*jsonr
return err
}
defer respBody.Close()
- var respmsgs []jsonrpcMessage
+
+ var respmsgs []*jsonrpcMessage
if err := json.NewDecoder(respBody).Decode(&respmsgs); err != nil {
return err
}
- if len(respmsgs) != len(msgs) {
- return fmt.Errorf("batch has %d requests but response has %d: %w", len(msgs), len(respmsgs), ErrBadResult)
- }
- for i := 0; i < len(respmsgs); i++ {
- op.resp <- &respmsgs[i]
- }
+ op.resp <- respmsgs
return nil
}
@@ -342,10 +339,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
connInfo.HTTP.UserAgent = r.Header.Get("User-Agent")
ctx := r.Context()
ctx = context.WithValue(ctx, peerInfoContextKey{}, connInfo)
+
// All checks passed, create a codec that reads directly from the request body
// until EOF, writes the response to w, and orders the server to process a
// single request.
-
w.Header().Set("content-type", contentType)
codec := newHTTPServerConn(r, w)
defer codec.close()
diff --git a/rpc/inproc.go b/rpc/inproc.go
index e008fd8804..6165af0a96 100644
--- a/rpc/inproc.go
+++ b/rpc/inproc.go
@@ -34,7 +34,8 @@ import (
// DialInProc attaches an in-process connection to the given RPC server.
func DialInProc(handler *Server) *Client {
initctx := context.Background()
- c, _ := newClient(initctx, func(context.Context) (ServerCodec, error) {
+ cfg := new(clientConfig)
+ c, _ := newClient(initctx, cfg, func(context.Context) (ServerCodec, error) {
p1, p2 := net.Pipe()
go handler.ServeCodec(NewCodec(p1), 0, 0, 0, 0)
return NewCodec(p2), nil
diff --git a/rpc/server.go b/rpc/server.go
index 13adf8112c..a993fbe96e 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -57,9 +57,11 @@ type Server struct {
idgen func() ID
maximumDuration time.Duration
- mutex sync.Mutex
- codecs map[ServerCodec]struct{}
- run atomic.Bool
+ mutex sync.Mutex
+ codecs map[ServerCodec]struct{}
+ run atomic.Bool
+ batchItemLimit int
+ batchResponseLimit int
}
// NewServer creates a new server instance with no registered handlers.
@@ -81,6 +83,17 @@ func NewServer(maximumDuration time.Duration) *Server {
return server
}
+// SetBatchLimits sets limits applied to batch requests. There are two limits: 'itemLimit'
+// is the maximum number of items in a batch. 'maxResponseSize' is the maximum number of
+// response bytes across all requests in a batch.
+//
+// This method should be called before processing any requests via ServeCodec, ServeHTTP,
+// ServeListener etc.
+func (s *Server) SetBatchLimits(itemLimit, maxResponseSize int) {
+ s.batchItemLimit = itemLimit
+ s.batchResponseLimit = maxResponseSize
+}
+
// RegisterName creates a service for the given receiver type under the given name. When no
// methods on the given receiver match the criteria to be either a RPC method or a
// subscription an error is returned. Otherwise a new service is created and added to the
@@ -102,7 +115,12 @@ func (s *Server) ServeCodec(codec ServerCodec, options CodecOption, apiMaxDurati
}
defer s.untrackCodec(codec)
- c := initClient(codec, s.idgen, &s.services, apiMaxDuration, refillRate, maxStored)
+ cfg := &clientConfig{
+ idgen: s.idgen,
+ batchItemLimit: s.batchItemLimit,
+ batchResponseLimit: s.batchResponseLimit,
+ }
+ c := initClient(codec, &s.services, cfg, apiMaxDuration, refillRate, maxStored)
<-codec.closed()
c.Close()
}
@@ -134,7 +152,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) {
return
}
- h := newHandler(ctx, codec, s.idgen, &s.services)
+ h := newHandler(ctx, codec, s.idgen, &s.services, s.batchItemLimit, s.batchResponseLimit)
h.deadlineContext = s.maximumDuration
h.allowSubscribe = false
defer h.close(io.EOF, nil)
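
Server-side usage (not part of the diff): the limits are opt-in and must be set before any codec is served. A minimal sketch with a toy service; note this fork's NewServer takes a maximum request duration:

type EchoService struct{}

func (s *EchoService) Echo(str string) string { return str }

func newLimitedServer() *rpc.Server {
	srv := rpc.NewServer(0)               // 0 disables the per-request duration cap
	srv.SetBatchLimits(100, 25*1000*1000) // at most 100 calls and ~25MB of result bytes per batch
	if err := srv.RegisterName("echo", new(EchoService)); err != nil {
		panic(err)
	}
	return srv // *rpc.Server also implements http.Handler via ServeHTTP
}
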
diff --git a/rpc/server_test.go b/rpc/server_test.go
index e3b26623e1..7702002085 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -79,6 +79,7 @@ func TestServer(t *testing.T) {
func runTestScript(t *testing.T, file string) {
server := newTestServer()
+ server.SetBatchLimits(4, 100000)
content, err := os.ReadFile(file)
if err != nil {
t.Fatal(err)
@@ -160,3 +161,41 @@ func runTestScript(t *testing.T, file string) {
// }
// }
// }
+
+func TestServerBatchResponseSizeLimit(t *testing.T) {
+ server := newTestServer()
+ defer server.Stop()
+ server.SetBatchLimits(100, 60)
+ var (
+ batch []BatchElem
+ client = DialInProc(server)
+ )
+ for i := 0; i < 5; i++ {
+ batch = append(batch, BatchElem{
+ Method: "test_echo",
+ Args: []any{"x", 1},
+ Result: new(echoResult),
+ })
+ }
+ if err := client.BatchCall(batch); err != nil {
+ t.Fatal("error sending batch:", err)
+ }
+ for i := range batch {
+ // We expect the first two queries to be ok, but after that the size limit takes effect.
+ if i < 2 {
+ if batch[i].Error != nil {
+ t.Fatalf("batch elem %d has unexpected error: %v", i, batch[i].Error)
+ }
+ continue
+ }
+ // After two, we expect an error.
+ re, ok := batch[i].Error.(Error)
+ if !ok {
+ t.Fatalf("batch elem %d has wrong error: %v", i, batch[i].Error)
+ }
+ wantedCode := errcodeResponseTooLarge
+ if re.ErrorCode() != wantedCode {
+ t.Errorf("batch elem %d wrong error code, have %d want %d", i, re.ErrorCode(), wantedCode)
+ }
+ }
+}
diff --git a/rpc/subscription.go b/rpc/subscription.go
index 3544a69ffb..1174e7e2c0 100644
--- a/rpc/subscription.go
+++ b/rpc/subscription.go
@@ -42,8 +42,17 @@ import (
)
var (
- // ErrNotificationsUnsupported is returned when the connection doesn't support notifications
- ErrNotificationsUnsupported = errors.New("notifications not supported")
+ // ErrNotificationsUnsupported is returned by the client when the connection doesn't
+ // support notifications. You can use this error value to check for subscription
+ // support like this:
+ //
+ // sub, err := client.EthSubscribe(ctx, channel, "newHeads", true)
+ // if errors.Is(err, rpc.ErrNotificationsUnsupported) {
+ // // Server does not support subscriptions, fall back to polling.
+ // }
+ //
+ ErrNotificationsUnsupported = notificationsUnsupportedError{}
+
// ErrSubscriptionNotFound is returned when the notification for the given id is not found
ErrSubscriptionNotFound = errors.New("subscription not found")
)
diff --git a/rpc/testdata/invalid-batch-toolarge.js b/rpc/testdata/invalid-batch-toolarge.js
new file mode 100644
index 0000000000..218fea58aa
--- /dev/null
+++ b/rpc/testdata/invalid-batch-toolarge.js
@@ -0,0 +1,13 @@
+// This file checks the behavior of the batch item limit code.
+// In tests, the batch item limit is set to 4. So to trigger the error,
+// all batches in this file have 5 elements.
+
+// For batches that do not contain any calls, a response message with "id" == null
+// is returned.
+
+--> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}]
+<-- [{"jsonrpc":"2.0","id":null,"error":{"code":-32600,"message":"batch too large"}}]
+
+// For batches with at least one call, the call's "id" is used.
+--> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","id":3,"method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}]
+<-- [{"jsonrpc":"2.0","id":3,"error":{"code":-32600,"message":"batch too large"}}]
diff --git a/rpc/websocket.go b/rpc/websocket.go
index d753d2667b..b43b1b5a1a 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -211,7 +211,7 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale
if err != nil {
return nil, err
}
- return newClient(ctx, connect)
+ return newClient(ctx, cfg, connect)
}
// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server
@@ -228,7 +228,7 @@ func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error
if err != nil {
return nil, err
}
- return newClient(ctx, connect)
+ return newClient(ctx, cfg, connect)
}
func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, error) {
diff --git a/scripts/avalanche_header.txt b/scripts/avalanche_header.txt
new file mode 100644
index 0000000000..c848a208bd
--- /dev/null
+++ b/scripts/avalanche_header.txt
@@ -0,0 +1,10 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
diff --git a/scripts/build_test.sh b/scripts/build_test.sh
index bd2ef3903b..9e6fb61e42 100755
--- a/scripts/build_test.sh
+++ b/scripts/build_test.sh
@@ -24,4 +24,4 @@ source "$SUBNET_EVM_PATH"/scripts/constants.sh
# parallelism, and test coverage.
# DO NOT RUN tests from the top level "tests" directory since they are run by ginkgo
# shellcheck disable=SC2046
-go test -coverprofile=coverage.out -covermode=atomic -timeout="30m" "$@" $(go list ./... | grep -v github.com/ava-labs/subnet-evm/tests)
+go test -shuffle=on -race -coverprofile=coverage.out -covermode=atomic -timeout="30m" "$@" $(go list ./... | grep -v github.com/ava-labs/subnet-evm/tests)
diff --git a/scripts/format_add_avalanche_header.sh b/scripts/format_add_avalanche_header.sh
new file mode 100755
index 0000000000..5c58906bf8
--- /dev/null
+++ b/scripts/format_add_avalanche_header.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+set -x
+
+script_dir=$(dirname "$0")
+
+sed_command="1{/The go-ethereum Authors/{r ${script_dir}/avalanche_header.txt
+ N
+ }
+}"
+sed -i '' -e "${sed_command}" "$@"
\ No newline at end of file
diff --git a/scripts/format_as_fork.sh b/scripts/format_as_fork.sh
new file mode 100755
index 0000000000..1be9ce6248
--- /dev/null
+++ b/scripts/format_as_fork.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+set -x
+
+script_dir=$(dirname "$0")
+
+commit_msg_remove_header="format: remove avalanche header"
+commit_msg_remove_upstream="format: remove upstream go-ethereum"
+commit_msg_rename_packages_as_fork="format: rename packages as fork"
+
+make_commit() {
+ if git diff-index --cached --quiet HEAD --; then
+ echo "No changes to commit."
+ else
+ git commit -m "$1"
+ fi
+}
+
+revert_by_message() {
+ hash=$(git log --grep="$1" --format="%H" -n 1)
+ git revert --no-edit "$hash"
+}
+
+if git status --porcelain | grep -q '^ M'; then
+ echo "There are edited files in the repository. Please commit or stash them before running this script."
+ exit 1
+fi
+
+upstream_dirs=$(sed -e 's/"github.com\/ethereum\/go-ethereum\/\(.*\)"/\1/' "${script_dir}"/geth-allowed-packages.txt | xargs)
+for dir in ${upstream_dirs}; do
+ if [ -d "${dir}" ]; then
+ git rm -r "${dir}"
+ fi
+done
+git clean -df -- "${upstream_dirs}"
+make_commit "${commit_msg_remove_upstream}"
+
+sed_command='s!\([^/]\)github.com/ethereum/go-ethereum!\1github.com/ava-labs/subnet-evm!g'
+find . \( -name '*.go' -o -name 'go.mod' -o -name 'build_test.sh' \) -exec sed -i '' -e "${sed_command}" {} \;
+for dir in ${upstream_dirs}; do
+ sed_command="s!\"github.com/ava-labs/subnet-evm/${dir}\"!\"github.com/ethereum/go-ethereum/${dir}\"!g"
+ find . -name '*.go' -exec sed -i '' -e "${sed_command}" {} \;
+done
+go get github.com/ethereum/go-ethereum@"$1"
+gofmt -w .
+go mod tidy
+git add -u .
+make_commit "${commit_msg_rename_packages_as_fork}"
+
+revert_by_message "${commit_msg_remove_header}"
\ No newline at end of file
diff --git a/scripts/format_as_upstream.sh b/scripts/format_as_upstream.sh
new file mode 100755
index 0000000000..c4ba234609
--- /dev/null
+++ b/scripts/format_as_upstream.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+set -x
+
+script_dir=$(dirname "$0")
+
+commit_msg_remove_header="format: remove avalanche header"
+commit_msg_add_upstream="format: add upstream go-ethereum"
+commit_msg_rename_packages_to_upstream="format: rename packages to upstream"
+
+make_commit() {
+ if git diff-index --cached --quiet HEAD --; then
+ echo "No changes to commit."
+ else
+ git commit -m "$1"
+ fi
+}
+
+if git status --porcelain | grep -q '^ M'; then
+ echo "There are edited files in the repository. Please commit or stash them before running this script."
+ exit 1
+fi
+
+sed_command='/\/\/ (c) [0-9]*\(-[0-9]*\)\{0,1\}, Ava Labs, Inc.$/,+9d'
+find . -name '*.go' -exec sed -i '' -e "${sed_command}" {} \;
+git add -u .
+make_commit "${commit_msg_remove_header}"
+
+upstream_tag=$(grep -o 'github.com/ethereum/go-ethereum v.*' go.mod | cut -f2 -d' ')
+upstream_dirs=$(sed -e 's/"github.com\/ethereum\/go-ethereum\/\(.*\)"/\1/' "${script_dir}"/geth-allowed-packages.txt | xargs)
+upstream_dirs_array=()
+IFS=" " read -r -a upstream_dirs_array <<< "$upstream_dirs"
+
+git clean -f "${upstream_dirs_array[@]}"
+git checkout "${upstream_tag}" -- "${upstream_dirs_array[@]}"
+git add "${upstream_dirs_array[@]}"
+make_commit "${commit_msg_add_upstream}"
+
+sed_command='s!\([^/]\)github.com/ava-labs/subnet-evm!\1github.com/ethereum/go-ethereum!g'
+find . \( -name '*.go' -o -name 'go.mod' -o -name 'build_test.sh' \) -exec sed -i '' -e "${sed_command}" {} \;
+gofmt -w .
+go mod tidy
+git add -u .
+make_commit "${commit_msg_rename_packages_to_upstream}"
\ No newline at end of file
diff --git a/scripts/geth-allowed-packages.txt b/scripts/geth-allowed-packages.txt
index e39828ed57..c295d5d044 100644
--- a/scripts/geth-allowed-packages.txt
+++ b/scripts/geth-allowed-packages.txt
@@ -10,6 +10,7 @@
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bls12381"
"github.com/ethereum/go-ethereum/crypto/bn256"
+"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go
index 634b9be4fb..e199d55d58 100644
--- a/sync/handlers/leafs_request.go
+++ b/sync/handlers/leafs_request.go
@@ -92,8 +92,8 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N
// TODO: We should know the state root that accounts correspond to,
// as this information will be necessary to access storage tries when
// the trie is path based.
- stateRoot := common.Hash{}
- t, err := trie.New(trie.StorageTrieID(stateRoot, leafsRequest.Account, leafsRequest.Root), lrh.trieDB)
+ // stateRoot := common.Hash{}
+ t, err := trie.New(trie.TrieID(leafsRequest.Root), lrh.trieDB)
if err != nil {
log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err)
lrh.stats.IncMissingRoot()
@@ -332,14 +332,14 @@ func (rb *responseBuilder) generateRangeProof(start []byte, keys [][]byte) (*mem
start = bytes.Repeat([]byte{0x00}, rb.keyLength)
}
- if err := rb.t.Prove(start, 0, proof); err != nil {
+ if err := rb.t.Prove(start, proof); err != nil {
_ = proof.Close() // closing memdb does not error
return nil, err
}
if len(keys) > 0 {
// If there is a non-zero number of keys, set [end] for the range proof to the last key.
end := keys[len(keys)-1]
- if err := rb.t.Prove(end, 0, proof); err != nil {
+ if err := rb.t.Prove(end, proof); err != nil {
_ = proof.Close() // closing memdb does not error
return nil, err
}
@@ -422,7 +422,11 @@ func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool,
defer func() { rb.trieReadTime += time.Since(startTime) }()
// create iterator to iterate the trie
- it := trie.NewIterator(rb.t.NodeIterator(rb.nextKey()))
+ nodeIt, err := rb.t.NodeIterator(rb.nextKey())
+ if err != nil {
+ return false, err
+ }
+ it := trie.NewIterator(nodeIt)
more := false
for it.Next() {
// if we're at the end, break this loop
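
The two call-site changes above, Prove losing its fromLevel argument and NodeIterator returning an error, follow the trie API updates made later in this diff. A minimal sketch of the resulting caller pattern (helper name and wiring are illustrative, not part of the patch):

```go
// Sketch only: illustrative helper, not code from the patch.
package example

import (
	"github.com/ava-labs/subnet-evm/trie"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// proveAndIterate records a proof for key and then walks the trie leaves,
// using the updated Prove and NodeIterator signatures.
func proveAndIterate(tr *trie.Trie, key []byte) error {
	proof := memorydb.New()
	if err := tr.Prove(key, proof); err != nil { // fromLevel argument removed
		return err
	}
	nodeIt, err := tr.NodeIterator(nil) // now returns an error as well
	if err != nil {
		return err
	}
	it := trie.NewIterator(nodeIt)
	for it.Next() {
		_ = it.Key // leaf key/value available here
	}
	return it.Err
}
```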
diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go
index c2b8c33aae..cd1758a561 100644
--- a/sync/handlers/leafs_request_test.go
+++ b/sync/handlers/leafs_request_test.go
@@ -20,7 +20,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert"
)
@@ -473,15 +472,12 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) {
}
// modify one entry of 1 in 4 segments
if i%(segmentLen*4) == 0 {
- var acc snapshot.Account
- if err := rlp.DecodeBytes(it.Account(), &acc); err != nil {
+ acc, err := types.FullAccount(it.Account())
+ if err != nil {
t.Fatalf("could not parse snapshot account: %v", err)
}
acc.Nonce++
- bytes, err := rlp.EncodeToBytes(acc)
- if err != nil {
- t.Fatalf("coult not encode snapshot account to bytes: %v", err)
- }
+ bytes := types.SlimAccountRLP(*acc)
rawdb.WriteAccountSnapshot(memdb, it.Hash(), bytes)
}
i++
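
The snapshot account helpers used above now live in core/types instead of core/state/snapshot. A hedged sketch of the decode-modify-encode round trip the test performs (function and variable names are illustrative):

```go
// Sketch only: demonstrates the relocated helpers, not code from the patch.
package example

import (
	"github.com/ava-labs/subnet-evm/core/rawdb"
	"github.com/ava-labs/subnet-evm/core/types"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// bumpSnapshotNonce decodes a slim snapshot account, increments its nonce and
// writes it back, mirroring the test modification above.
func bumpSnapshotNonce(db ethdb.KeyValueStore, accHash common.Hash) error {
	acc, err := types.FullAccount(rawdb.ReadAccountSnapshot(db, accHash))
	if err != nil {
		return err
	}
	acc.Nonce++
	rawdb.WriteAccountSnapshot(db, accHash, types.SlimAccountRLP(*acc))
	return nil
}
```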
diff --git a/sync/statesync/sync_helpers.go b/sync/statesync/sync_helpers.go
index c11844cf55..45cfc02b90 100644
--- a/sync/statesync/sync_helpers.go
+++ b/sync/statesync/sync_helpers.go
@@ -5,7 +5,6 @@ package statesync
import (
"github.com/ava-labs/subnet-evm/core/rawdb"
- "github.com/ava-labs/subnet-evm/core/state/snapshot"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/trie"
"github.com/ethereum/go-ethereum/common"
@@ -15,14 +14,18 @@ import (
// writeAccountSnapshot stores the account represented by [acc] to the snapshot at [accHash], using
// SlimAccountRLP format (omitting empty code/storage).
func writeAccountSnapshot(db ethdb.KeyValueWriter, accHash common.Hash, acc types.StateAccount) {
- slimAccount := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+ slimAccount := types.SlimAccountRLP(acc)
rawdb.WriteAccountSnapshot(db, accHash, slimAccount)
}
// writeAccountStorageSnapshotFromTrie iterates the trie at [storageTrie] and copies all entries
// to the storage snapshot for [accountHash].
func writeAccountStorageSnapshotFromTrie(batch ethdb.Batch, batchSize int, accountHash common.Hash, storageTrie *trie.Trie) error {
- it := trie.NewIterator(storageTrie.NodeIterator(nil))
+ nodeIt, err := storageTrie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
+ it := trie.NewIterator(nodeIt)
for it.Next() {
rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(it.Key), it.Value)
if batch.ValueSize() > batchSize {
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index 7e7845e6e6..6f8e81f0be 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -441,7 +441,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- it := trie.NewIterator(tr.NodeIterator(nil))
+ nodeIt, err := tr.NodeIterator(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ it := trie.NewIterator(nodeIt)
accountsWithStorage := 0
// keep track of storage tries we delete trie nodes from
diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go
index f606e7e9df..a319e34c08 100644
--- a/sync/statesync/test_sync.go
+++ b/sync/statesync/test_sync.go
@@ -10,7 +10,6 @@ import (
"github.com/ava-labs/subnet-evm/accounts/keystore"
"github.com/ava-labs/subnet-evm/core/rawdb"
- "github.com/ava-labs/subnet-evm/core/state/snapshot"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/sync/syncutils"
"github.com/ava-labs/subnet-evm/trie"
@@ -48,7 +47,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database
}
// check snapshot consistency
snapshotVal := rawdb.ReadAccountSnapshot(clientDB, accHash)
- expectedSnapshotVal := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+ expectedSnapshotVal := types.SlimAccountRLP(acc)
assert.Equal(t, expectedSnapshotVal, snapshotVal)
// check code consistency
diff --git a/sync/syncutils/iterators.go b/sync/syncutils/iterators.go
index c546cccd37..45752ca72f 100644
--- a/sync/syncutils/iterators.go
+++ b/sync/syncutils/iterators.go
@@ -5,6 +5,7 @@ package syncutils
import (
"github.com/ava-labs/subnet-evm/core/state/snapshot"
+ "github.com/ava-labs/subnet-evm/core/types"
"github.com/ethereum/go-ethereum/ethdb"
)
@@ -26,7 +27,7 @@ func (it *AccountIterator) Next() bool {
return false
}
for it.AccountIterator.Next() {
- it.val, it.err = snapshot.FullAccountRLP(it.Account())
+ it.val, it.err = types.FullAccountRLP(it.Account())
return it.err == nil
}
it.val = nil
diff --git a/sync/syncutils/test_trie.go b/sync/syncutils/test_trie.go
index 08c7516100..3714055f85 100644
--- a/sync/syncutils/test_trie.go
+++ b/sync/syncutils/test_trie.go
@@ -35,8 +35,9 @@ func GenerateTrie(t *testing.T, trieDB *trie.Database, numKeys int, keySize int)
keys, values := FillTrie(t, numKeys, keySize, testTrie)
// Commit the root to [trieDB]
- root, nodes := testTrie.Commit(false)
- err := trieDB.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, err := testTrie.Commit(false)
+ assert.NoError(t, err)
+ err = trieDB.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
assert.NoError(t, err)
err = trieDB.Commit(root, false)
assert.NoError(t, err)
@@ -82,8 +83,16 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database,
t.Fatalf("error creating trieB, root=%s, err=%v", root, err)
}
- itA := trie.NewIterator(trieA.NodeIterator(nil))
- itB := trie.NewIterator(trieB.NodeIterator(nil))
+ nodeItA, err := trieA.NodeIterator(nil)
+ if err != nil {
+ t.Fatalf("error creating node iterator for trieA, root=%s, err=%v", root, err)
+ }
+ nodeItB, err := trieB.NodeIterator(nil)
+ if err != nil {
+ t.Fatalf("error creating node iterator for trieB, root=%s, err=%v", root, err)
+ }
+ itA := trie.NewIterator(nodeItA)
+ itB := trie.NewIterator(nodeItB)
count := 0
for itA.Next() && itB.Next() {
count++
@@ -107,7 +116,10 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database,
func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) {
// Delete some trie nodes
batch := diskdb.NewBatch()
- nodeIt := tr.NodeIterator(nil)
+ nodeIt, err := tr.NodeIterator(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
count := 0
for nodeIt.Next(true) {
count++
@@ -169,8 +181,11 @@ func FillAccounts(
accounts[key] = &acc
}
- newRoot, nodes := tr.Commit(false)
- if err := trieDB.Update(newRoot, root, trienode.NewWithNodeSet(nodes)); err != nil {
+ newRoot, nodes, err := tr.Commit(false)
+ if err != nil {
+ t.Fatalf("error committing trie: %v", err)
+ }
+ if err := trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
t.Fatalf("error updating trieDB: %v", err)
}
if err := trieDB.Commit(newRoot, false); err != nil {
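
Trie.Commit now returns an error as well, and Database.Update takes a block number plus an optional state set. A condensed sketch of the commit sequence used by these helpers, with the zero block number and nil state set exactly as in the hunks above (helper name is illustrative):

```go
// Sketch only: mirrors the commit sequence shown above.
package example

import (
	"github.com/ava-labs/subnet-evm/core/types"
	"github.com/ava-labs/subnet-evm/trie"
	"github.com/ava-labs/subnet-evm/trie/trienode"
	"github.com/ethereum/go-ethereum/common"
)

// commitTrie folds the dirty nodes of a freshly built trie into trieDB and
// persists them to disk.
func commitTrie(trieDB *trie.Database, tr *trie.Trie) (common.Hash, error) {
	root, nodes, err := tr.Commit(false) // Commit now also returns an error
	if err != nil {
		return common.Hash{}, err
	}
	// Update takes the block number and an optional state set (nil here).
	// EmptyRootHash is the parent because the trie was created from scratch.
	if err := trieDB.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
		return common.Hash{}, err
	}
	return root, trieDB.Commit(root, false)
}
```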
diff --git a/tests/init.go b/tests/init.go
index 6d2d5bae9e..3cbd4d3511 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -197,6 +197,22 @@ var Forks = map[string]*params.ChainConfig{
DurangoTimestamp: utils.NewUint64(0),
},
},
+ "Cancun": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MandatoryNetworkUpgrades: params.MandatoryNetworkUpgrades{
+ SubnetEVMTimestamp: utils.NewUint64(0),
+ DurangoTimestamp: utils.NewUint64(0),
+ CancunTime: utils.NewUint64(0),
+ },
+ },
}
// AvailableForks returns the set of defined fork names
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 9b44009172..848038397c 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -111,6 +111,19 @@ type stTransaction struct {
GasLimit []uint64 `json:"gasLimit"`
Value []string `json:"value"`
PrivateKey []byte `json:"secretKey"`
+ BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
+ BlobGasFeeCap *big.Int `json:"maxFeePerBlobGas,omitempty"`
+}
+
+// nolint: unused
+type stTransactionMarshaling struct {
+ GasPrice *math.HexOrDecimal256
+ MaxFeePerGas *math.HexOrDecimal256
+ MaxPriorityFeePerGas *math.HexOrDecimal256
+ Nonce math.HexOrDecimal64
+ GasLimit []math.HexOrDecimal64
+ PrivateKey hexutil.Bytes
+ BlobGasFeeCap *math.HexOrDecimal256
}
// GetChainConfig takes a fork definition and returns a chain config.
@@ -200,13 +213,18 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo
}
post := t.json.Post[subtest.Fork][subtest.Index]
// N.B: We need to do this in a two-step process, because the first Commit takes care
- // of suicides, and we need to touch the coinbase _after_ it has potentially suicided.
+ // of self-destructs, and we need to touch the coinbase _after_ it has potentially self-destructed.
if root != common.Hash(post.Root) {
return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) {
return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
+ // Re-init the post-state instance for further operation
+ statedb, err = state.New(root, statedb.Database(), snaps)
+ if err != nil {
+ return nil, nil, err
+ }
return snaps, statedb, nil
}
@@ -267,14 +285,12 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
}
// Add 0-value mining reward. This only makes a difference in the cases
// where
- // - the coinbase suicided, or
+ // - the coinbase self-destructed, or
// - there are only 'bad' transactions, which aren't executed. In those cases,
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
statedb.AddBalance(block.Coinbase(), new(big.Int))
// Commit block
- statedb.Commit(config.IsEIP158(block.Number()), false)
- // And _now_ get the state root
- root := statedb.IntermediateRoot(config.IsEIP158(block.Number()))
+ root, _ := statedb.Commit(block.NumberU64(), config.IsEIP158(block.Number()), false)
return snaps, statedb, root, err
}
@@ -290,7 +306,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(false, false)
+ root, _ := statedb.Commit(0, false, false)
var snaps *snapshot.Tree
if snapshotter {
@@ -393,16 +409,18 @@ func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Mess
}
msg := &core.Message{
- From: from,
- To: to,
- Nonce: tx.Nonce,
- Value: value,
- GasLimit: gasLimit,
- GasPrice: gasPrice,
- GasFeeCap: tx.MaxFeePerGas,
- GasTipCap: tx.MaxPriorityFeePerGas,
- Data: data,
- AccessList: accessList,
+ From: from,
+ To: to,
+ Nonce: tx.Nonce,
+ Value: value,
+ GasLimit: gasLimit,
+ GasPrice: gasPrice,
+ GasFeeCap: tx.MaxFeePerGas,
+ GasTipCap: tx.MaxPriorityFeePerGas,
+ Data: data,
+ AccessList: accessList,
+ BlobHashes: tx.BlobVersionedHashes,
+ BlobGasFeeCap: tx.BlobGasFeeCap,
}
return msg, nil
}
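
StateDB.Commit now takes the block number and returns the state root directly, so the separate IntermediateRoot call is gone, and the harness re-opens the state at that root. A hedged sketch of that commit-and-reopen pattern (the trailing boolean is passed through as in the hunk above; names are illustrative):

```go
// Sketch only: mirrors the commit-and-reopen sequence used in the test harness.
package example

import (
	"math/big"

	"github.com/ava-labs/subnet-evm/core/state"
	"github.com/ava-labs/subnet-evm/core/state/snapshot"
	"github.com/ava-labs/subnet-evm/params"
)

// commitAndReopen commits the state for the given block number and returns a
// fresh StateDB opened at the resulting root.
func commitAndReopen(statedb *state.StateDB, snaps *snapshot.Tree, config *params.ChainConfig, number *big.Int) (*state.StateDB, error) {
	root, err := statedb.Commit(number.Uint64(), config.IsEIP158(number), false)
	if err != nil {
		return nil, err
	}
	return state.New(root, statedb.Database(), snaps)
}
```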
diff --git a/trie/committer.go b/trie/committer.go
index b06a8b2c53..1ce9ccf33d 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -141,22 +141,15 @@ func (c *committer) store(path []byte, n node) node {
// The node is embedded in its parent, in other words, this node
// will not be stored in the database independently, mark it as
// deleted only if the node was existent in database before.
- prev, ok := c.tracer.accessList[string(path)]
+ _, ok := c.tracer.accessList[string(path)]
if ok {
- c.nodes.AddNode(path, trienode.NewWithPrev(common.Hash{}, nil, prev))
+ c.nodes.AddNode(path, trienode.NewDeleted())
}
return n
}
// Collect the dirty node to nodeset for return.
- var (
- nhash = common.BytesToHash(hash)
- node = trienode.NewWithPrev(
- nhash,
- nodeToBytes(n),
- c.tracer.accessList[string(path)],
- )
- )
- c.nodes.AddNode(path, node)
+ nhash := common.BytesToHash(hash)
+ c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n)))
// Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store value in fullNode. The key
diff --git a/trie/database_wrap.go b/trie/database.go
similarity index 76%
rename from trie/database_wrap.go
rename to trie/database.go
index dab26df692..4be40dc49f 100644
--- a/trie/database_wrap.go
+++ b/trie/database.go
@@ -18,15 +18,14 @@ package trie
import (
"errors"
- "runtime"
- "time"
"github.com/ava-labs/subnet-evm/trie/triedb/hashdb"
+ "github.com/ava-labs/subnet-evm/trie/triedb/pathdb"
"github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
"github.com/ava-labs/subnet-evm/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/log"
)
const (
@@ -35,10 +34,13 @@ const (
// Config defines all necessary options for database.
type Config struct {
- Cache int // Memory allowance (MB) to use for caching trie nodes in memory
- Journal string // Journal of clean cache to survive node restarts
- Preimages bool // Flag whether the preimage of trie key is recorded
- StatsPrefix string // Prefix for cache stats (disabled if empty)
+ Cache int // Memory allowance (MB) to use for caching trie nodes in memory
+ Preimages bool // Flag whether the preimage of trie key is recorded
+ StatsPrefix string // Prefix for cache stats (disabled if empty)
+ PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet.
+
+ // Testing hooks
+ OnCommit func(states *triestate.Set) // Hook invoked when commit is performed
}
// backend defines the methods needed to access/update trie nodes in different
@@ -58,8 +60,10 @@ type backend interface {
// Update performs a state transition by committing dirty nodes contained
// in the given set in order to update state from the specified parent to
// the specified root.
- Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error
- UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error
+ //
+ // The passed-in maps (nodes, states) will be retained to avoid copying
+ // everything. Therefore, these maps must not be changed afterwards.
+ Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error
// Commit writes all relevant trie nodes belonging to the specified state
// to disk. Report specifies whether logs will be displayed in info level.
@@ -82,7 +86,6 @@ type cache interface {
type Database struct {
config *Config // Configuration for trie database
diskdb ethdb.Database // Persistent database to store the snapshot
- cleans cache // Megabytes permitted using for read caches
preimages *preimageStore // The store for caching preimages
backend backend // The backend for managing trie nodes
}
@@ -90,10 +93,6 @@ type Database struct {
// prepare initializes the database with provided configs, but the
// database backend is still left as nil.
func prepare(diskdb ethdb.Database, config *Config) *Database {
- var cleans cache
- if config != nil && config.Cache > 0 {
- cleans = utils.NewMeteredCache(config.Cache*1024*1024, config.Journal, config.StatsPrefix, cacheStatsUpdateFrequency)
- }
var preimages *preimageStore
if config != nil && config.Preimages {
preimages = newPreimageStore(diskdb)
@@ -101,7 +100,6 @@ func prepare(diskdb ethdb.Database, config *Config) *Database {
return &Database{
config: config,
diskdb: diskdb,
- cleans: cleans,
preimages: preimages,
}
}
@@ -116,33 +114,53 @@ func NewDatabase(diskdb ethdb.Database) *Database {
// The path-based scheme is not activated yet, always initialized with legacy
// hash-based scheme by default.
func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
+ var cleans cache
+ if config != nil && config.Cache != 0 {
+ cleans = utils.NewMeteredCache(config.Cache*1024*1024, config.StatsPrefix, cacheStatsUpdateFrequency)
+ }
db := prepare(diskdb, config)
- db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+ db.backend = hashdb.New(diskdb, cleans, mptResolver{})
return db
}
// Reader returns a reader for accessing all trie nodes with provided state root.
-// Nil is returned in case the state is not available.
-func (db *Database) Reader(blockRoot common.Hash) Reader {
- return db.backend.(*hashdb.Database).Reader(blockRoot)
+// An error will be returned if the requested state is not available.
+func (db *Database) Reader(blockRoot common.Hash) (Reader, error) {
+ switch b := db.backend.(type) {
+ case *hashdb.Database:
+ return b.Reader(blockRoot)
+ case *pathdb.Database:
+ return b.Reader(blockRoot)
+ }
+ return nil, errors.New("unknown backend")
}
// Update performs a state transition by committing dirty nodes contained in the
// given set in order to update state from the specified parent to the specified
// root. The held pre-images accumulated up to this point will be flushed in case
// the size exceeds the threshold.
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+//
+// The passed-in maps (nodes, states) will be retained to avoid copying everything.
+// Therefore, these maps must not be changed afterwards.
+func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ if db.config != nil && db.config.OnCommit != nil {
+ db.config.OnCommit(states)
+ }
if db.preimages != nil {
db.preimages.commit(false)
}
- return db.backend.Update(root, parent, nodes)
+ return db.backend.Update(root, parent, block, nodes, states)
}
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
if db.preimages != nil {
db.preimages.commit(false)
}
- return db.backend.UpdateAndReferenceRoot(root, parent, nodes)
+ hdb, ok := db.backend.(*hashdb.Database)
+ if ok {
+ return hdb.UpdateAndReferenceRoot(root, parent, block, nodes, states)
+ }
+ return db.backend.Update(root, parent, block, nodes, states)
}
// Commit iterates over all the children of a particular node, writes them out
@@ -184,49 +202,14 @@ func (db *Database) Scheme() string {
// It is meant to be called when closing the blockchain object, so that all
// resources held can be released correctly.
func (db *Database) Close() error {
- if db.preimages != nil {
- db.preimages.commit(true)
- }
+ db.WritePreimages()
return db.backend.Close()
}
-// saveCache saves clean state cache to given directory path
-// using specified CPU cores.
-func (db *Database) saveCache(dir string, threads int) error {
- if db.cleans == nil {
- return nil
- }
- log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)
-
- start := time.Now()
- err := db.cleans.SaveToFileConcurrent(dir, threads)
- if err != nil {
- log.Error("Failed to persist clean trie cache", "error", err)
- return err
- }
- log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
- return nil
-}
-
-// SaveCache atomically saves fast cache data to the given dir using all
-// available CPU cores.
-func (db *Database) SaveCache(dir string) error {
- return db.saveCache(dir, runtime.GOMAXPROCS(0))
-}
-
-// SaveCachePeriodically atomically saves fast cache data to the given dir with
-// the specified interval. All dump operation will only use a single CPU core.
-func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- db.saveCache(dir, 1)
- case <-stopCh:
- return
- }
+// WritePreimages flushes all accumulated preimages to disk forcibly.
+func (db *Database) WritePreimages() {
+ if db.preimages != nil {
+ db.preimages.commit(true)
}
}
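
Reader now signals an unavailable state through an error rather than a nil return, so callers check the error before asking for nodes. A brief sketch matching the test usage later in this diff (helper name is illustrative):

```go
// Sketch only: illustrative helper, not code from the patch.
package example

import (
	"github.com/ava-labs/subnet-evm/trie"
	"github.com/ethereum/go-ethereum/common"
)

// readNode resolves a single trie node for the state rooted at stateRoot.
func readNode(db *trie.Database, stateRoot, owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
	reader, err := db.Reader(stateRoot) // errors if the state is unavailable
	if err != nil {
		return nil, err
	}
	return reader.Node(owner, path, hash)
}
```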
diff --git a/trie/database_test.go b/trie/database_test.go
index 19394b55fe..a3621392f6 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -29,6 +29,7 @@ package trie
import (
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/trie/triedb/hashdb"
+ "github.com/ava-labs/subnet-evm/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/ethdb"
)
@@ -36,10 +37,9 @@ import (
func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
db := prepare(diskdb, nil)
if scheme == rawdb.HashScheme {
- db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+ db.backend = hashdb.New(diskdb, nil, mptResolver{})
+ } else {
+ db.backend = pathdb.New(diskdb, &pathdb.Config{}) // disable clean/dirty cache
}
- //} else {
- // db.backend = snap.New(diskdb, db.cleans, nil)
- //}
return db
}
diff --git a/trie/errors.go b/trie/errors.go
index b6f90132b6..307a5f8747 100644
--- a/trie/errors.go
+++ b/trie/errors.go
@@ -27,11 +27,17 @@
package trie
import (
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
)
+// ErrCommitted is returned when an already committed trie is requested for usage.
+// The potential usages can be `Get`, `Update`, `Delete`, `NodeIterator`, `Prove`
+// and so on.
+var ErrCommitted = errors.New("trie is already committed")
+
// MissingNodeError is returned by the trie functions (Get, Update, Delete)
// in the case where a trie node is not present in the local database. It contains
// information necessary for retrieving the missing node.
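
ErrCommitted is what Prove and the other accessors return once the trie has been committed, as the proof.go change later in this diff shows. A small, purely illustrative sketch of how a caller might detect it:

```go
// Sketch only: shows how the new sentinel error can be detected.
package example

import (
	"errors"

	"github.com/ava-labs/subnet-evm/trie"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// proveIfUsable builds a proof for key unless the trie was already committed.
func proveIfUsable(tr *trie.Trie, key []byte) (*memorydb.Database, error) {
	proof := memorydb.New()
	err := tr.Prove(key, proof)
	if errors.Is(err, trie.ErrCommitted) {
		// The trie has been committed; reopen it at the new root before proving.
		return nil, err
	}
	if err != nil {
		return nil, err
	}
	return proof, nil
}
```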
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 4e8f956d28..6e14e9b29c 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -44,7 +44,7 @@ import (
func TestEmptyIterator(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- iter := trie.NodeIterator(nil)
+ iter := trie.MustNodeIterator(nil)
seen := make(map[string]struct{})
for iter.Next(true) {
@@ -72,12 +72,12 @@ func TestIterator(t *testing.T) {
all[val.k] = val.v
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
found[string(it.Key)] = string(it.Value)
}
@@ -94,6 +94,10 @@ type kv struct {
t bool
}
+func (k *kv) cmp(other *kv) int {
+ return bytes.Compare(k.k, other.k)
+}
+
func TestIteratorLargeData(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
@@ -107,7 +111,7 @@ func TestIteratorLargeData(t *testing.T) {
vals[string(value2.k)] = value2
}
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
vals[string(it.Key)].t = true
}
@@ -136,7 +140,7 @@ type iterationElement struct {
// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
testNodeIteratorCoverage(t, rawdb.HashScheme)
- //testNodeIteratorCoverage(t, rawdb.PathScheme)
+ testNodeIteratorCoverage(t, rawdb.PathScheme)
}
func testNodeIteratorCoverage(t *testing.T, scheme string) {
@@ -145,7 +149,7 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) {
// Gather all the node hashes found by the iterator
var elements = make(map[common.Hash]iterationElement)
- for it := trie.NodeIterator(nil); it.Next(true); {
+ for it := trie.MustNodeIterator(nil); it.Next(true); {
if it.Hash() != (common.Hash{}) {
elements[it.Hash()] = iterationElement{
hash: it.Hash(),
@@ -155,8 +159,12 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) {
}
}
// Cross check the hashes and the database itself
+ reader, err := nodeDb.Reader(trie.Hash())
+ if err != nil {
+ t.Fatalf("state is not available %x", trie.Hash())
+ }
for _, element := range elements {
- if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil {
+ if blob, err := reader.Node(common.Hash{}, element.path, element.hash); err != nil {
t.Errorf("failed to retrieve reported node %x: %v", element.hash, err)
} else if !bytes.Equal(blob, element.blob) {
t.Errorf("node blob is different, want %v got %v", element.blob, blob)
@@ -216,19 +224,19 @@ func TestIteratorSeek(t *testing.T) {
}
// Seek to the middle.
- it := NewIterator(trie.NodeIterator([]byte("fab")))
+ it := NewIterator(trie.MustNodeIterator([]byte("fab")))
if err := checkIteratorOrder(testdata1[4:], it); err != nil {
t.Fatal(err)
}
// Seek to a non-existent key.
- it = NewIterator(trie.NodeIterator([]byte("barc")))
+ it = NewIterator(trie.MustNodeIterator([]byte("barc")))
if err := checkIteratorOrder(testdata1[1:], it); err != nil {
t.Fatal(err)
}
// Seek beyond the end.
- it = NewIterator(trie.NodeIterator([]byte("z")))
+ it = NewIterator(trie.MustNodeIterator([]byte("z")))
if err := checkIteratorOrder(nil, it); err != nil {
t.Fatal(err)
}
@@ -256,8 +264,8 @@ func TestDifferenceIterator(t *testing.T) {
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootA, nodesA := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
+ rootA, nodesA, _ := triea.Commit(false)
+ dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -265,12 +273,12 @@ func TestDifferenceIterator(t *testing.T) {
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootB, nodesB := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
+ rootB, nodesB, _ := trieb.Commit(false)
+ dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
- di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
+ di, _ := NewDifferenceIterator(triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil))
it := NewIterator(di)
for it.Next() {
found[string(it.Key)] = string(it.Value)
@@ -298,8 +306,8 @@ func TestUnionIterator(t *testing.T) {
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootA, nodesA := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
+ rootA, nodesA, _ := triea.Commit(false)
+ dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -307,11 +315,11 @@ func TestUnionIterator(t *testing.T) {
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootB, nodesB := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
+ rootB, nodesB, _ := trieb.Commit(false)
+ dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
- di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
+ di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)})
it := NewIterator(di)
all := []struct{ k, v string }{
@@ -350,15 +358,15 @@ func TestIteratorNoDups(t *testing.T) {
for _, val := range testdata1 {
tr.MustUpdate([]byte(val.k), []byte(val.v))
}
- checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
+ checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
}
// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
func TestIteratorContinueAfterError(t *testing.T) {
testIteratorContinueAfterError(t, false, rawdb.HashScheme)
testIteratorContinueAfterError(t, true, rawdb.HashScheme)
- // testIteratorContinueAfterError(t, false, rawdb.PathScheme)
- // testIteratorContinueAfterError(t, true, rawdb.PathScheme)
+ testIteratorContinueAfterError(t, false, rawdb.PathScheme)
+ testIteratorContinueAfterError(t, true, rawdb.PathScheme)
}
func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
@@ -369,13 +377,13 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
for _, val := range testdata1 {
tr.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := tr.Commit(false)
- tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := tr.Commit(false)
+ tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
tdb.Commit(root, false)
}
tr, _ = New(TrieID(root), tdb)
- wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
+ wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
var (
paths [][]byte
@@ -434,7 +442,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
}
// Iterate until the error is hit.
seen := make(map[string]bool)
- it := tr.NodeIterator(nil)
+ it := tr.MustNodeIterator(nil)
checkIteratorNoDups(t, it, seen)
missing, ok := it.Error().(*MissingNodeError)
if !ok || missing.NodeHash != rhash {
@@ -463,8 +471,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
func TestIteratorContinueAfterSeekError(t *testing.T) {
testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme)
testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme)
- // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
- // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
+ testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
+ testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
}
func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) {
@@ -479,14 +487,14 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
for _, val := range testdata1 {
ctr.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := ctr.Commit(false)
+ root, nodes, _ := ctr.Commit(false)
for path, n := range nodes.Nodes {
if n.Hash == barNodeHash {
barNodePath = []byte(path)
break
}
}
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
triedb.Commit(root, false)
}
@@ -502,7 +510,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
}
// Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing.
- it := tr.NodeIterator([]byte("bars"))
+ it := tr.MustNodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError)
if !ok {
t.Fatal("want MissingNodeError, got", it.Error())
@@ -536,7 +544,7 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in
func TestIteratorNodeBlob(t *testing.T) {
testIteratorNodeBlob(t, rawdb.HashScheme)
- //testIteratorNodeBlob(t, rawdb.PathScheme)
+ testIteratorNodeBlob(t, rawdb.PathScheme)
}
type loggingDb struct {
@@ -606,9 +614,12 @@ func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) {
val = crypto.Keccak256(val)
trie.MustUpdate(key, val)
}
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Commit(root, false)
+
// Return the generated trie
+ trie, _ = NewStateTrie(TrieID(root), triedb)
return triedb, trie, logDb
}
@@ -620,8 +631,8 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
// Do a seek operation
trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885"))
// master: 24 get operations
- // this pr: 5 get operations
- if have, want := logDb.getCount, uint64(5); have != want {
+ // this pr: 6 get operations
+ if have, want := logDb.getCount, uint64(6); have != want {
t.Fatalf("Too many lookups during seek, have %d want %d", have, want)
}
}
@@ -646,13 +657,13 @@ func testIteratorNodeBlob(t *testing.T, scheme string) {
all[val.k] = val.v
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
triedb.Commit(root, false)
var found = make(map[common.Hash][]byte)
trie, _ = New(TrieID(root), triedb)
- it := trie.NodeIterator(nil)
+ it := trie.MustNodeIterator(nil)
for it.Next(true) {
if it.Hash() == (common.Hash{}) {
continue
diff --git a/trie/proof.go b/trie/proof.go
index a90d76bb15..be0e8bc5c9 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -43,7 +43,11 @@ import (
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
-func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
+func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
// Collect all nodes on the path to key.
var (
prefix []byte
@@ -91,10 +95,6 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
defer returnHasherToPool(hasher)
for i, n := range nodes {
- if fromLevel > 0 {
- fromLevel--
- continue
- }
var hn node
n, hn = hasher.proofHash(n)
if hash, ok := hn.(hashNode); ok || i == 0 {
@@ -117,8 +117,8 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
-func (t *StateTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
- return t.trie.Prove(key, fromLevel, proofDb)
+func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+ return t.trie.Prove(key, proofDb)
}
// VerifyProof checks merkle proofs. The given proof must contain the value for
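
With fromLevel removed, a range proof is built by proving only the two edge keys and passing the result to VerifyRangeProof, as the tests below do repeatedly. A condensed, illustrative sketch of that flow:

```go
// Sketch only: condensed from the test patterns that follow.
package example

import (
	"github.com/ava-labs/subnet-evm/trie"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// verifyRange proves the first and last key of a sorted, non-empty key/value
// range and verifies the range against the given root. The returned boolean
// reports whether more elements exist to the right of the range.
func verifyRange(tr *trie.Trie, root common.Hash, keys, vals [][]byte) (bool, error) {
	proof := memorydb.New()
	if err := tr.Prove(keys[0], proof); err != nil {
		return false, err
	}
	if err := tr.Prove(keys[len(keys)-1], proof); err != nil {
		return false, err
	}
	return trie.VerifyRangeProof(root, keys[0], keys[len(keys)-1], keys, vals, proof)
}
```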
diff --git a/trie/proof_test.go b/trie/proof_test.go
index b62668810c..42d6fda662 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -32,13 +32,13 @@ import (
"encoding/binary"
"fmt"
mrand "math/rand"
- "sort"
"testing"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "golang.org/x/exp/slices"
)
// Prng is a pseudo random number generator seeded by strong randomness.
@@ -67,13 +67,13 @@ func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database {
// Create a direct trie based Merkle prover
provers = append(provers, func(key []byte) *memorydb.Database {
proof := memorydb.New()
- trie.Prove(key, 0, proof)
+ trie.Prove(key, proof)
return proof
})
// Create a leaf iterator based Merkle prover
provers = append(provers, func(key []byte) *memorydb.Database {
proof := memorydb.New()
- if it := NewIterator(trie.NodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
+ if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
for _, p := range it.Prove() {
proof.Put(crypto.Keccak256(p), p)
}
@@ -160,7 +160,7 @@ func TestMissingKeyProof(t *testing.T) {
for i, key := range []string{"a", "j", "l", "z"} {
proof := memorydb.New()
- trie.Prove([]byte(key), 0, proof)
+ trie.Prove([]byte(key), proof)
if proof.Len() != 1 {
t.Errorf("test %d: proof should have one element", i)
@@ -175,30 +175,24 @@ func TestMissingKeyProof(t *testing.T) {
}
}
-type entrySlice []*kv
-
-func (p entrySlice) Len() int { return len(p) }
-func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
-func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
// TestRangeProof tests normal range proof with both edge proofs
// as the existent proof. The test cases are generated randomly.
func TestRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
for i := 0; i < 500; i++ {
start := mrand.Intn(len(entries))
end := mrand.Intn(len(entries)-start) + start + 1
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -218,11 +212,11 @@ func TestRangeProof(t *testing.T) {
// The test cases are generated randomly.
func TestRangeProofWithNonExistentProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
for i := 0; i < 500; i++ {
start := mrand.Intn(len(entries))
end := mrand.Intn(len(entries)-start) + start + 1
@@ -246,10 +240,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
if bytes.Compare(last, entries[end-1].k) < 0 {
continue
}
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -267,10 +261,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
proof := memorydb.New()
first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var k [][]byte
@@ -290,21 +284,21 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
// - There exists a gap between the last element and the right edge proof
func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// Case 1
start, end := 100, 200
first := decreaseKey(common.CopyBytes(entries[start].k))
proof := memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
start = 105 // Gap created
@@ -323,10 +317,10 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
start, end = 100, 200
last := increaseKey(common.CopyBytes(entries[end-1].k))
proof = memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
end = 195 // Capped slice
@@ -347,17 +341,17 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
// non-existent one.
func TestOneElementRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// One element with existent edge proof, both edge proofs
// point to the SAME key.
start := 1000
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
_, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -369,10 +363,10 @@ func TestOneElementRangeProof(t *testing.T) {
start = 1000
first := decreaseKey(common.CopyBytes(entries[start].k))
proof = memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -384,10 +378,10 @@ func TestOneElementRangeProof(t *testing.T) {
start = 1000
last := increaseKey(common.CopyBytes(entries[start].k))
proof = memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -399,10 +393,10 @@ func TestOneElementRangeProof(t *testing.T) {
start = 1000
first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k))
proof = memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -418,10 +412,10 @@ func TestOneElementRangeProof(t *testing.T) {
first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last = entry.k
proof = memorydb.New()
- if err := tinyTrie.Prove(first, 0, proof); err != nil {
+ if err := tinyTrie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := tinyTrie.Prove(last, 0, proof); err != nil {
+ if err := tinyTrie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof)
@@ -434,11 +428,11 @@ func TestOneElementRangeProof(t *testing.T) {
// The edge proofs can be nil.
func TestAllElementsProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var k [][]byte
var v [][]byte
@@ -453,10 +447,10 @@ func TestAllElementsProof(t *testing.T) {
// With edge proofs, it should still work.
proof := memorydb.New()
- if err := trie.Prove(entries[0].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[0].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[len(entries)-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof)
@@ -468,10 +462,10 @@ func TestAllElementsProof(t *testing.T) {
proof = memorydb.New()
first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof)
@@ -484,21 +478,21 @@ func TestAllElementsProof(t *testing.T) {
func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- var entries entrySlice
+ var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
for _, pos := range cases {
proof := memorydb.New()
- if err := trie.Prove(common.Hash{}.Bytes(), 0, proof); err != nil {
+ if err := trie.Prove(common.Hash{}.Bytes(), proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[pos].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[pos].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
k := make([][]byte, 0)
@@ -519,22 +513,22 @@ func TestSingleSideRangeProof(t *testing.T) {
func TestReverseSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- var entries entrySlice
+ var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
for _, pos := range cases {
proof := memorydb.New()
- if err := trie.Prove(entries[pos].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[pos].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- if err := trie.Prove(last.Bytes(), 0, proof); err != nil {
+ if err := trie.Prove(last.Bytes(), proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
k := make([][]byte, 0)
@@ -555,20 +549,20 @@ func TestReverseSingleSideRangeProof(t *testing.T) {
// The prover is expected to detect the error.
func TestBadRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
for i := 0; i < 500; i++ {
start := mrand.Intn(len(entries))
end := mrand.Intn(len(entries)-start) + start + 1
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -634,10 +628,10 @@ func TestGappedRangeProof(t *testing.T) {
}
first, last := 2, 8
proof := memorydb.New()
- if err := trie.Prove(entries[first].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[first].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[last-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[last-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -658,11 +652,11 @@ func TestGappedRangeProof(t *testing.T) {
// TestSameSideProofs tests the element is not in the range covered by proofs
func TestSameSideProofs(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
pos := 1000
first := decreaseKey(common.CopyBytes(entries[pos].k))
@@ -670,10 +664,10 @@ func TestSameSideProofs(t *testing.T) {
last := decreaseKey(common.CopyBytes(entries[pos].k))
proof := memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
@@ -686,10 +680,10 @@ func TestSameSideProofs(t *testing.T) {
last = increaseKey(last)
proof = memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
@@ -700,13 +694,13 @@ func TestSameSideProofs(t *testing.T) {
func TestHasRightElement(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- var entries entrySlice
+ var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []struct {
start int
@@ -734,23 +728,23 @@ func TestHasRightElement(t *testing.T) {
)
if c.start == -1 {
firstKey, start = common.Hash{}.Bytes(), 0
- if err := trie.Prove(firstKey, 0, proof); err != nil {
+ if err := trie.Prove(firstKey, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
} else {
firstKey = entries[c.start].k
- if err := trie.Prove(entries[c.start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[c.start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
}
if c.end == -1 {
lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries)
- if err := trie.Prove(lastKey, 0, proof); err != nil {
+ if err := trie.Prove(lastKey, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
} else {
lastKey = entries[c.end-1].k
- if err := trie.Prove(entries[c.end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
}
@@ -774,11 +768,11 @@ func TestHasRightElement(t *testing.T) {
// The first edge proof must be a non-existent proof.
func TestEmptyRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []struct {
pos int
@@ -790,7 +784,7 @@ func TestEmptyRangeProof(t *testing.T) {
for _, c := range cases {
proof := memorydb.New()
first := increaseKey(common.CopyBytes(entries[c.pos].k))
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
_, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof)
@@ -809,11 +803,11 @@ func TestEmptyRangeProof(t *testing.T) {
func TestBloatedProof(t *testing.T) {
// Use a small trie
trie, kvs := nonRandomTrie(100)
- var entries entrySlice
+ var entries []*kv
for _, kv := range kvs {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var keys [][]byte
var vals [][]byte
@@ -821,7 +815,7 @@ func TestBloatedProof(t *testing.T) {
// In the 'malicious' case, we add proofs for every single item
// (but only one key/value pair used as leaf)
for i, entry := range entries {
- trie.Prove(entry.k, 0, proof)
+ trie.Prove(entry.k, proof)
if i == 50 {
keys = append(keys, entry.k)
vals = append(vals, entry.v)
@@ -830,8 +824,8 @@ func TestBloatedProof(t *testing.T) {
// For reference, we use the same function, but _only_ prove the first
// and last element
want := memorydb.New()
- trie.Prove(keys[0], 0, want)
- trie.Prove(keys[len(keys)-1], 0, want)
+ trie.Prove(keys[0], want)
+ trie.Prove(keys[len(keys)-1], want)
if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof); err != nil {
t.Fatalf("expected bloated proof to succeed, got %v", err)
@@ -843,11 +837,11 @@ func TestBloatedProof(t *testing.T) {
// noop technically, but practically should be rejected.
func TestEmptyValueRangeProof(t *testing.T) {
trie, values := randomTrie(512)
- var entries entrySlice
+ var entries []*kv
for _, kv := range values {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// Create a new entry with a slightly modified key
mid := len(entries) / 2
@@ -864,10 +858,10 @@ func TestEmptyValueRangeProof(t *testing.T) {
start, end := 1, len(entries)-1
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -887,11 +881,11 @@ func TestEmptyValueRangeProof(t *testing.T) {
// practically should be rejected.
func TestAllElementsEmptyValueRangeProof(t *testing.T) {
trie, values := randomTrie(512)
- var entries entrySlice
+ var entries []*kv
for _, kv := range values {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// Create a new entry with a slightly modified key
mid := len(entries) / 2
@@ -959,7 +953,7 @@ func BenchmarkProve(b *testing.B) {
for i := 0; i < b.N; i++ {
kv := vals[keys[i%len(keys)]]
proofs := memorydb.New()
- if trie.Prove(kv.k, 0, proofs); proofs.Len() == 0 {
+ if trie.Prove(kv.k, proofs); proofs.Len() == 0 {
b.Fatalf("zero length proof for %x", kv.k)
}
}
@@ -973,7 +967,7 @@ func BenchmarkVerifyProof(b *testing.B) {
for k := range vals {
keys = append(keys, k)
proof := memorydb.New()
- trie.Prove([]byte(k), 0, proof)
+ trie.Prove([]byte(k), proof)
proofs = append(proofs, proof)
}
@@ -993,19 +987,19 @@ func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b,
func benchmarkVerifyRangeProof(b *testing.B, size int) {
trie, vals := randomTrie(8192)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
start := 2
end := start + size
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
b.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
b.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -1030,11 +1024,11 @@ func BenchmarkVerifyRangeNoProof1000(b *testing.B) { benchmarkVerifyRangeNoProof
func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
trie, vals := randomTrie(size)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var keys [][]byte
var values [][]byte
@@ -1104,10 +1098,10 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
proof := memorydb.New()
start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- if err := trie.Prove(start, 0, proof); err != nil {
+ if err := trie.Prove(start, proof); err != nil {
t.Fatalf("failed to prove start: %v", err)
}
- if err := trie.Prove(end, 0, proof); err != nil {
+ if err := trie.Prove(end, proof); err != nil {
t.Fatalf("failed to prove end: %v", err)
}
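
A caller-side sketch (not part of the diff) of the updated proof helpers: Prove loses its fromLevel argument and the tests now sort []*kv with slices.SortFunc instead of sort.Sort(entrySlice). Hypothetical helper; assumes the trie package test context (memorydb, slices and the kv helper with its cmp method, as used above).

// proveRange proves the edge keys of a sorted range and verifies it.
func proveRange(tr *Trie, vals map[string]*kv, start, end int) error {
	var entries []*kv
	for _, e := range vals {
		entries = append(entries, e)
	}
	slices.SortFunc(entries, (*kv).cmp) // replaces sort.Sort(entrySlice)

	proof := memorydb.New()
	if err := tr.Prove(entries[start].k, proof); err != nil { // fromLevel argument is gone
		return err
	}
	if err := tr.Prove(entries[end-1].k, proof); err != nil {
		return err
	}
	var keys, values [][]byte
	for _, e := range entries[start:end] {
		keys = append(keys, e.k)
		values = append(values, e.v)
	}
	_, err := VerifyRangeProof(tr.Hash(), keys[0], keys[len(keys)-1], keys, values, proof)
	return err
}
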
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 21c4f83075..ef29bb8404 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -96,7 +96,12 @@ func (t *StateTrie) MustGet(key []byte) []byte {
// If the specified storage slot is not in the trie, nil will be returned.
// If a trie node is not found in the database, a MissingNodeError is returned.
func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
- return t.trie.Get(t.hashKey(key))
+ enc, err := t.trie.Get(t.hashKey(key))
+ if err != nil || len(enc) == 0 {
+ return nil, err
+ }
+ _, content, _, err := rlp.Split(enc)
+ return content, err
}
// GetAccount attempts to retrieve an account with provided account address.
@@ -158,7 +163,8 @@ func (t *StateTrie) MustUpdate(key, value []byte) {
// If a node is not found in the database, a MissingNodeError is returned.
func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
hk := t.hashKey(key)
- err := t.trie.Update(hk, value)
+ v, _ := rlp.EncodeToBytes(value)
+ err := t.trie.Update(hk, v)
if err != nil {
return err
}
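
The two storage accessors above now form an RLP round trip: UpdateStorage writes the slot value RLP-encoded and GetStorage strips that encoding again via rlp.Split. A minimal sketch of the symmetry (hypothetical helper, assumes the go-ethereum rlp package):

// roundTripSlot mirrors the new StateTrie storage encoding: values go in
// RLP-wrapped and come back out as the original raw bytes.
func roundTripSlot(value []byte) ([]byte, error) {
	enc, err := rlp.EncodeToBytes(value) // what UpdateStorage now stores
	if err != nil {
		return nil, err
	}
	_, content, _, err := rlp.Split(enc) // what GetStorage now peels off again
	return content, err
}
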
@@ -180,6 +186,10 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil
}
+func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
+ return nil
+}
+
// MustDelete removes any existing value for key from the trie. This function
// will omit any encountered error but just print out an error message.
func (t *StateTrie) MustDelete(key []byte) {
@@ -223,7 +233,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte {
// All cached preimages will be also flushed if preimages recording is enabled.
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
-func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
if t.preimages != nil {
@@ -254,12 +264,18 @@ func (t *StateTrie) Copy() *StateTrie {
}
}
-// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration
-// starts at the key after the given start key.
-func (t *StateTrie) NodeIterator(start []byte) NodeIterator {
+// NodeIterator returns an iterator that returns nodes of the underlying trie.
+// Iteration starts at the key after the given start key.
+func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) {
return t.trie.NodeIterator(start)
}
+// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
+// error but just print out an error message.
+func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator {
+ return t.trie.MustNodeIterator(start)
+}
+
// hashKey returns the hash of key as an ephemeral buffer.
// The caller must not hold onto the return value because it will become
// invalid on the next call to hashKey or secKey.
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index ff17ed2ddf..99935378e0 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -70,8 +70,8 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
trie.MustUpdate(key, val)
}
}
- root, nodes := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
+ root, nodes, _ := trie.Commit(false)
+ if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
// Re-create the trie based on the new state
diff --git a/trie/sync_test.go b/trie/sync_test.go
index fcf4863a02..1fda202276 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -62,8 +62,8 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str
trie.MustUpdate(key, val)
}
}
- root, nodes := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
+ root, nodes, _ := trie.Commit(false)
+ if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
if err := triedb.Commit(root, false); err != nil {
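
The test updates above all follow one pattern: Commit now returns an error as a third value, and the trie database's Update takes a block number plus a *triestate.Set (nil in the tests above). A hedged sketch of the adapted call site (helper name hypothetical):

// commitAndUpdate commits a trie and feeds the resulting node set into the database.
func commitAndUpdate(tr *StateTrie, triedb *Database, parent common.Hash, block uint64) (common.Hash, error) {
	root, nodes, err := tr.Commit(false) // third return value is new
	if err != nil {
		return common.Hash{}, err
	}
	if nodes != nil { // nil when nothing changed
		if err := triedb.Update(root, parent, block, trienode.NewWithNodeSet(nodes), nil); err != nil {
			return common.Hash{}, err
		}
	}
	return root, nil
}
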
diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go
new file mode 100644
index 0000000000..88411efec5
--- /dev/null
+++ b/trie/testutil/utils.go
@@ -0,0 +1,71 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package testutil
+
+import (
+ crand "crypto/rand"
+ "encoding/binary"
+ mrand "math/rand"
+
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+// prng is a pseudo random number generator seeded by strong randomness
+// (crypto/rand) at startup.
+var prng = initRand()
+
+func initRand() *mrand.Rand {
+ var seed [8]byte
+ crand.Read(seed[:])
+ rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
+ return rnd
+}
+
+// RandBytes generates a random byte slice with specified length.
+func RandBytes(n int) []byte {
+ r := make([]byte, n)
+ prng.Read(r)
+ return r
+}
+
+// RandomHash generates a random blob of data and returns it as a hash.
+func RandomHash() common.Hash {
+ return common.BytesToHash(RandBytes(common.HashLength))
+}
+
+// RandomAddress generates a random blob of data and returns it as an address.
+func RandomAddress() common.Address {
+ return common.BytesToAddress(RandBytes(common.AddressLength))
+}
+
+// RandomNode generates a random node.
+func RandomNode() *trienode.Node {
+ val := RandBytes(100)
+ return trienode.New(crypto.Keccak256Hash(val), val)
+}
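
A short usage sketch for the new helpers (hypothetical snippet, not part of the diff; assumes the common, trienode and testutil imports):

// exampleFixtures seeds test data: a fake owner hash, a fake address, and a
// random node whose Hash is the Keccak256 of its 100-byte blob.
func exampleFixtures() (common.Hash, common.Address, *trienode.Node) {
	return testutil.RandomHash(), testutil.RandomAddress(), testutil.RandomNode()
}
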
diff --git a/trie/tracer.go b/trie/tracer.go
index e847050805..5786af4d3e 100644
--- a/trie/tracer.go
+++ b/trie/tracer.go
@@ -17,7 +17,6 @@
package trie
import (
- "github.com/ava-labs/subnet-evm/trie/trienode"
"github.com/ethereum/go-ethereum/common"
)
@@ -114,16 +113,18 @@ func (t *tracer) copy() *tracer {
}
}
-// markDeletions puts all tracked deletions into the provided nodeset.
-func (t *tracer) markDeletions(set *trienode.NodeSet) {
+// deletedNodes returns a list of node paths which are deleted from the trie.
+func (t *tracer) deletedNodes() []string {
+ var paths []string
for path := range t.deletes {
// It's possible a few deleted nodes were embedded
// in their parent before, the deletions can be no
// effect by deleting nothing, filter them out.
- prev, ok := t.accessList[path]
+ _, ok := t.accessList[path]
if !ok {
continue
}
- set.AddNode([]byte(path), trienode.NewWithPrev(common.Hash{}, nil, prev))
+ paths = append(paths, path)
}
+ return paths
}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index d5a55e38ce..06e48578d7 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -70,8 +70,8 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
}
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) {
@@ -136,8 +136,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -151,8 +151,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustUpdate([]byte(val.k), randBytes(32))
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -169,8 +169,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
keys = append(keys, string(key))
trie.MustUpdate(key, randBytes(32))
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -184,8 +184,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, key := range keys {
trie.MustUpdate([]byte(key), nil)
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -199,8 +199,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustUpdate([]byte(val.k), nil)
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -218,22 +218,22 @@ func TestAccessListLeak(t *testing.T) {
for _, val := range standard {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
var cases = []struct {
op func(tr *Trie)
}{
{
func(tr *Trie) {
- it := tr.NodeIterator(nil)
+ it := tr.MustNodeIterator(nil)
for it.Next(true) {
}
},
},
{
func(tr *Trie) {
- it := NewIterator(tr.NodeIterator(nil))
+ it := NewIterator(tr.MustNodeIterator(nil))
for it.Next() {
}
},
@@ -241,7 +241,7 @@ func TestAccessListLeak(t *testing.T) {
{
func(tr *Trie) {
for _, val := range standard {
- tr.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase())
+ tr.Prove([]byte(val.k), rawdb.NewMemoryDatabase())
}
},
},
@@ -268,8 +268,8 @@ func TestTinyTree(t *testing.T) {
for _, val := range tiny {
trie.MustUpdate([]byte(val.k), randBytes(32))
}
- root, set := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
+ root, set, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil)
parent := root
trie, _ = New(TrieID(root), db)
@@ -277,8 +277,8 @@ func TestTinyTree(t *testing.T) {
for _, val := range tiny {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, set = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(set))
+ root, set, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(set), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil {
@@ -300,7 +300,7 @@ func compareSet(setA, setB map[string]struct{}) bool {
func forNodes(tr *Trie) map[string][]byte {
var (
- it = tr.NodeIterator(nil)
+ it = tr.MustNodeIterator(nil)
nodes = make(map[string][]byte)
)
for it.Next(true) {
@@ -319,7 +319,7 @@ func iterNodes(db *Database, root common.Hash) map[string][]byte {
func forHashedNodes(tr *Trie) map[string][]byte {
var (
- it = tr.NodeIterator(nil)
+ it = tr.MustNodeIterator(nil)
nodes = make(map[string][]byte)
)
for it.Next(true) {
diff --git a/trie/trie.go b/trie/trie.go
index 2104fd1a83..168f2b9730 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -49,6 +49,10 @@ type Trie struct {
root node
owner common.Hash
+ // Flag whether the commit operation is already performed. If so the
+ // trie is not usable (latest state is invisible).
+ committed bool
+
// Keep track of the number leaves which have been inserted since the last
// hashing operation. This number will not directly map to the number of
// actually unhashed nodes.
@@ -70,11 +74,12 @@ func (t *Trie) newFlag() nodeFlag {
// Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie {
return &Trie{
- root: t.root,
- owner: t.owner,
- unhashed: t.unhashed,
- reader: t.reader,
- tracer: t.tracer.copy(),
+ root: t.root,
+ owner: t.owner,
+ committed: t.committed,
+ unhashed: t.unhashed,
+ reader: t.reader,
+ tracer: t.tracer.copy(),
}
}
@@ -84,7 +89,7 @@ func (t *Trie) Copy() *Trie {
// zero hash or the sha3 hash of an empty string, then trie is initially
// empty, otherwise, the root node must be present in database or returns
// a MissingNodeError if not.
-func New(id *ID, db NodeReader) (*Trie, error) {
+func New(id *ID, db *Database) (*Trie, error) {
reader, err := newTrieReader(id.StateRoot, id.Owner, db)
if err != nil {
return nil, err
@@ -110,10 +115,24 @@ func NewEmpty(db *Database) *Trie {
return tr
}
+// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
+// error but just print out an error message.
+func (t *Trie) MustNodeIterator(start []byte) NodeIterator {
+ it, err := t.NodeIterator(start)
+ if err != nil {
+ log.Error("Unhandled trie error in Trie.NodeIterator", "err", err)
+ }
+ return it
+}
+
// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
// the key after the given start key.
-func (t *Trie) NodeIterator(start []byte) NodeIterator {
- return newNodeIterator(t, start)
+func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, ErrCommitted
+ }
+ return newNodeIterator(t, start), nil
}
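
Callers of NodeIterator now have two styles: handle the error explicitly, or use MustNodeIterator in tests where a logged error is acceptable. A hedged sketch of the explicit form (hypothetical function):

// walkTrie iterates the whole trie, surfacing errors instead of logging them.
func walkTrie(tr *Trie) error {
	it, err := tr.NodeIterator(nil) // fails with ErrCommitted once the trie has been committed
	if err != nil {
		return err
	}
	for it.Next(true) {
		// inspect it.Path(), it.Hash(), it.Leaf(), ...
	}
	return it.Error()
}
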
// MustGet is a wrapper of Get and will omit any encountered error but just
@@ -132,6 +151,10 @@ func (t *Trie) MustGet(key []byte) []byte {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) Get(key []byte) ([]byte, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, ErrCommitted
+ }
value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0)
if err == nil && didResolve {
t.root = newroot
@@ -191,6 +214,10 @@ func (t *Trie) MustGetNode(path []byte) ([]byte, int) {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) GetNode(path []byte) ([]byte, int, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, 0, ErrCommitted
+ }
item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0)
if err != nil {
return nil, resolved, err
@@ -283,6 +310,10 @@ func (t *Trie) MustUpdate(key, value []byte) {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) Update(key, value []byte) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
return t.update(key, value)
}
@@ -397,6 +428,10 @@ func (t *Trie) MustDelete(key []byte) {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) Delete(key []byte) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
t.unhashed++
k := keybytesToHex(key)
_, n, err := t.delete(t.root, nil, k)
@@ -582,17 +617,25 @@ func (t *Trie) Hash() common.Hash {
// The returned nodeset can be nil if the trie is clean (nothing to commit).
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
-func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
defer t.tracer.reset()
-
- nodes := trienode.NewNodeSet(t.owner)
- t.tracer.markDeletions(nodes)
-
+ defer func() {
+ t.committed = true
+ }()
// Trie is empty and can be classified into two types of situations:
- // - The trie was empty and no update happens
- // - The trie was non-empty and all nodes are dropped
+ // (a) The trie was empty and no update happens => return nil
+ // (b) The trie was non-empty and all nodes are dropped => return
+ // the node set that includes all deleted nodes
if t.root == nil {
- return types.EmptyRootHash, nodes
+ paths := t.tracer.deletedNodes()
+ if len(paths) == 0 {
+ return types.EmptyRootHash, nil, nil // case (a)
+ }
+ nodes := trienode.NewNodeSet(t.owner)
+ for _, path := range paths {
+ nodes.AddNode([]byte(path), trienode.NewDeleted())
+ }
+ return types.EmptyRootHash, nodes, nil // case (b)
}
// Derive the hash for all dirty nodes first. We hold the assumption
// in the following procedure that all nodes are hashed.
@@ -604,10 +647,14 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
// Replace the root node with the origin hash in order to
// ensure all resolved nodes are dropped after the commit.
t.root = hashedNode
- return rootHash, nil
+ return rootHash, nil, nil
+ }
+ nodes := trienode.NewNodeSet(t.owner)
+ for _, path := range t.tracer.deletedNodes() {
+ nodes.AddNode([]byte(path), trienode.NewDeleted())
}
t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
- return rootHash, nodes
+ return rootHash, nodes, nil
}
// hashRoot calculates the root hash of the given trie
@@ -631,4 +678,5 @@ func (t *Trie) Reset() {
t.owner = common.Hash{}
t.unhashed = 0
t.tracer.reset()
+ t.committed = false
}
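
A hedged sketch of the new lifecycle guard introduced by the committed flag: once Commit has run, the same Trie instance rejects reads, writes and iteration with ErrCommitted, so callers must re-open a fresh trie from the committed root (hypothetical test-style helper):

// commitGuardExample shows that a committed trie is no longer usable.
func commitGuardExample(tr *Trie) error {
	if _, _, err := tr.Commit(false); err != nil {
		return err
	}
	if err := tr.Update([]byte("k"), []byte("v")); !errors.Is(err, ErrCommitted) {
		return fmt.Errorf("expected ErrCommitted on write, got %v", err)
	}
	if _, err := tr.NodeIterator(nil); !errors.Is(err, ErrCommitted) {
		return fmt.Errorf("expected ErrCommitted on iteration, got %v", err)
	}
	return nil // a fresh trie must be built via New(TrieID(root), db) to continue
}
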
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 1112f9d245..64ba0f14b1 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -27,26 +27,24 @@
package trie
import (
- "fmt"
-
+ "github.com/ava-labs/subnet-evm/core/types"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
)
// Reader wraps the Node method of a backing trie store.
type Reader interface {
- // Node retrieves the RLP-encoded trie node blob with the provided trie
- // identifier, node path and the corresponding node hash. No error will
- // be returned if the node is not found.
+ // Node retrieves the trie node blob with the provided trie identifier, node path and
+ // the corresponding node hash. No error will be returned if the node is not found.
+ //
+ // When looking up nodes in the account trie, 'owner' is the zero hash. For contract
+ // storage trie nodes, 'owner' is the hash of the account address that contains the
+ // storage.
+ //
+ // TODO(rjl493456442): remove the 'hash' parameter, it's redundant in PBSS.
Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
-// NodeReader wraps all the necessary functions for accessing trie node.
-type NodeReader interface {
- // Reader returns a reader for accessing all trie nodes with provided
- // state root. Nil is returned in case the state is not available.
- Reader(root common.Hash) Reader
-}
-
// trieReader is a wrapper of the underlying node reader. It's not safe
// for concurrent usage.
type trieReader struct {
@@ -56,10 +54,16 @@ type trieReader struct {
}
// newTrieReader initializes the trie reader with the given node reader.
-func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) {
- reader := db.Reader(stateRoot)
- if reader == nil {
- return nil, fmt.Errorf("state not found #%x", stateRoot)
+func newTrieReader(stateRoot, owner common.Hash, db *Database) (*trieReader, error) {
+ if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
+ if stateRoot == (common.Hash{}) {
+ log.Error("Zero state root hash!")
+ }
+ return &trieReader{owner: owner}, nil
+ }
+ reader, err := db.Reader(stateRoot)
+ if err != nil {
+ return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err}
}
return &trieReader{owner: owner, reader: reader}, nil
}
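
With Reader returning an error, an unavailable state now surfaces as a MissingNodeError when the trie is opened; a hedged sketch of the caller-visible effect (hypothetical helper):

// expectMissingState opens a trie at a root that is not in the database and
// checks that the error is reported as a MissingNodeError.
func expectMissingState(db *Database, unknownRoot common.Hash) error {
	_, err := New(TrieID(unknownRoot), db)
	var missing *MissingNodeError
	if !errors.As(err, &missing) {
		return fmt.Errorf("expected MissingNodeError for %x, got %v", unknownRoot, err)
	}
	return nil
}
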
diff --git a/trie/trie_test.go b/trie/trie_test.go
index f986f8128a..82db275e3d 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -87,9 +87,9 @@ func TestMissingRoot(t *testing.T) {
func TestMissingNode(t *testing.T) {
testMissingNode(t, false, rawdb.HashScheme)
- //testMissingNode(t, false, rawdb.PathScheme)
+ testMissingNode(t, false, rawdb.PathScheme)
testMissingNode(t, true, rawdb.HashScheme)
- //testMissingNode(t, true, rawdb.PathScheme)
+ testMissingNode(t, true, rawdb.PathScheme)
}
func testMissingNode(t *testing.T, memonly bool, scheme string) {
@@ -99,8 +99,8 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
trie := NewEmpty(triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
require.NoError(t, triedb.Commit(root, false))
@@ -188,7 +188,7 @@ func TestInsert(t *testing.T) {
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
- root, _ = trie.Commit(false)
+ root, _, _ = trie.Commit(false)
if root != exp {
t.Errorf("case 2: exp %x got %x", exp, root)
}
@@ -213,8 +213,8 @@ func TestGet(t *testing.T) {
if i == 1 {
return
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
}
}
@@ -285,8 +285,8 @@ func TestReplication(t *testing.T) {
for _, val := range vals {
updateString(trie, val.k, val.v)
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// create a new trie on top of the database and check that lookups work.
trie2, err := New(TrieID(root), db)
@@ -298,14 +298,14 @@ func TestReplication(t *testing.T) {
t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
}
}
- hash, nodes := trie2.Commit(false)
+ hash, nodes, _ := trie2.Commit(false)
if hash != root {
t.Errorf("root failure. expected %x got %x", root, hash)
}
// recreate the trie after commit
if nodes != nil {
- db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
}
trie2, err = New(TrieID(hash), db)
if err != nil {
@@ -433,44 +433,44 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
if !ok || n.IsDeleted() {
return errors.New("expect new node")
}
- if len(n.Prev) > 0 {
- return errors.New("unexpected origin value")
- }
+ //if len(n.Prev) > 0 {
+ // return errors.New("unexpected origin value")
+ //}
}
// Check deletion set
- for path, blob := range deletes {
+ for path := range deletes {
n, ok := set.Nodes[path]
if !ok || !n.IsDeleted() {
return errors.New("expect deleted node")
}
- if len(n.Prev) == 0 {
- return errors.New("expect origin value")
- }
- if !bytes.Equal(n.Prev, blob) {
- return errors.New("invalid origin value")
- }
+ //if len(n.Prev) == 0 {
+ // return errors.New("expect origin value")
+ //}
+ //if !bytes.Equal(n.Prev, blob) {
+ // return errors.New("invalid origin value")
+ //}
}
// Check update set
- for path, blob := range updates {
+ for path := range updates {
n, ok := set.Nodes[path]
if !ok || n.IsDeleted() {
return errors.New("expect updated node")
}
- if len(n.Prev) == 0 {
- return errors.New("expect origin value")
- }
- if !bytes.Equal(n.Prev, blob) {
- return errors.New("invalid origin value")
- }
+ //if len(n.Prev) == 0 {
+ // return errors.New("expect origin value")
+ //}
+ //if !bytes.Equal(n.Prev, blob) {
+ // return errors.New("invalid origin value")
+ //}
}
return nil
}
func runRandTest(rt randTest) bool {
var scheme = rawdb.HashScheme
- //if rand.Intn(2) == 0 {
- // scheme = rawdb.PathScheme
- //}
+ if rand.Intn(2) == 0 {
+ scheme = rawdb.PathScheme
+ }
var (
origin = types.EmptyRootHash
triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme)
@@ -500,7 +500,7 @@ func runRandTest(rt randTest) bool {
continue
}
proofDb := rawdb.NewMemoryDatabase()
- err := tr.Prove(step.key, 0, proofDb)
+ err := tr.Prove(step.key, proofDb)
if err != nil {
rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err)
}
@@ -511,9 +511,9 @@ func runRandTest(rt randTest) bool {
case opHash:
tr.Hash()
case opCommit:
- root, nodes := tr.Commit(true)
+ root, nodes, _ := tr.Commit(true)
if nodes != nil {
- triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, origin, 0, trienode.NewWithNodeSet(nodes), nil)
}
newtr, err := New(TrieID(root), triedb)
if err != nil {
@@ -531,7 +531,7 @@ func runRandTest(rt randTest) bool {
origin = root
case opItercheckhash:
checktr := NewEmpty(triedb)
- it := NewIterator(tr.NodeIterator(nil))
+ it := NewIterator(tr.MustNodeIterator(nil))
for it.Next() {
checktr.MustUpdate(it.Key, it.Value)
}
@@ -540,8 +540,8 @@ func runRandTest(rt randTest) bool {
}
case opNodeDiff:
var (
- origIter = origTrie.NodeIterator(nil)
- curIter = tr.NodeIterator(nil)
+ origIter = origTrie.MustNodeIterator(nil)
+ curIter = tr.MustNodeIterator(nil)
origSeen = make(map[string]struct{})
curSeen = make(map[string]struct{})
)
@@ -727,7 +727,7 @@ func TestTinyTrie(t *testing.T) {
t.Errorf("3: got %x, exp %x", root, exp)
}
checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
checktr.MustUpdate(it.Key, it.Value)
}
@@ -751,7 +751,7 @@ func TestCommitAfterHash(t *testing.T) {
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
- root, _ = trie.Commit(false)
+ root, _, _ = trie.Commit(false)
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
@@ -854,8 +854,8 @@ func TestCommitSequence(t *testing.T) {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Flush trie -> database
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -895,8 +895,8 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
trie.MustUpdate(key, val)
}
// Flush trie -> database
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -934,9 +934,9 @@ func TestCommitSequenceStackTrie(t *testing.T) {
stTrie.Update(key, val)
}
// Flush trie -> database
- root, nodes := trie.Commit(false)
+ root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
@@ -982,9 +982,9 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie.Update(key, []byte{0x1})
stTrie.Update(key, []byte{0x1})
// Flush trie -> database
- root, nodes := trie.Commit(false)
+ root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
@@ -1155,8 +1155,8 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
h := trie.Hash()
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
b.StartTimer()
triedb.Dereference(h)
b.StopTimer()
diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go
index cf9a6f2b17..e25e9c1ea4 100644
--- a/trie/triedb/hashdb/database.go
+++ b/trie/triedb/hashdb/database.go
@@ -28,6 +28,7 @@ package hashdb
import (
"errors"
+ "fmt"
"reflect"
"sync"
"time"
@@ -36,6 +37,7 @@ import (
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/metrics"
"github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
@@ -43,35 +45,35 @@ import (
)
var (
- memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
- memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
- memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
- memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)
-
- memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
- memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
- memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
- memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)
-
- memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/size", nil)
- memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/childsize", nil)
- memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("trie/memcache/dirty/nodes", nil)
-
- memcacheFlushMeter = metrics.NewRegisteredMeter("trie/memcache/flush/count", nil)
- memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
- memcacheFlushLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/locktime", nil)
- memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
- memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)
-
- memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
- memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
- memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)
-
- memcacheCommitMeter = metrics.NewRegisteredMeter("trie/memcache/commit/count", nil)
- memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
- memcacheCommitLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/locktime", nil)
- memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
- memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
+ memcacheCleanHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil)
+ memcacheCleanMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil)
+ memcacheCleanReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil)
+ memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil)
+
+ memcacheDirtyHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil)
+ memcacheDirtyMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil)
+ memcacheDirtyReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil)
+ memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil)
+
+ memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("hashdb/memcache/dirty/size", nil)
+ memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("hashdb/memcache/dirty/childsize", nil)
+ memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("hashdb/memcache/dirty/nodes", nil)
+
+ memcacheFlushMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/count", nil)
+ memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil)
+ memcacheFlushLockTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/locktime", nil)
+ memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil)
+ memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil)
+
+ memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil)
+ memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil)
+ memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil)
+
+ memcacheCommitMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/count", nil)
+ memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil)
+ memcacheCommitLockTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/locktime", nil)
+ memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil)
+ memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil)
)
// ChildResolver defines the required method to decode the provided
@@ -84,6 +86,7 @@ type cache interface {
HasGet([]byte, []byte) ([]byte, bool)
Del([]byte)
Set([]byte, []byte)
+ SaveToFileConcurrent(string, int) error
}
// Database is an intermediate write layer between the trie data structures and
@@ -257,6 +260,7 @@ func (db *Database) Reference(child common.Hash, parent common.Hash) {
db.reference(child, parent)
}
+// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
// If the node does not exist, it's a node pulled from disk, skip
node, ok := db.dirties[child]
@@ -303,7 +307,7 @@ func (db *Database) Dereference(root common.Hash) {
memcacheDirtyNodesGauge.Update(int64(len(db.dirties)))
memcacheGCTimeTimer.Update(time.Since(start))
- memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
@@ -460,7 +464,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
memcacheFlushMeter.Mark(1)
memcacheFlushTimeTimer.Update(time.Since(start))
memcacheFlushLockTimeTimer.Update(lockTime + time.Since(lockStart))
- memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))
log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
@@ -511,7 +515,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
memcacheCommitMeter.Mark(1)
memcacheCommitTimeTimer.Update(time.Since(start))
memcacheCommitLockTimeTimer.Update(lockTime + time.Since(lockStart))
- memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
logger := log.Info
@@ -609,7 +613,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
// Update inserts the dirty nodes in provided nodeset into database and link the
// account trie with multiple storage tries if necessary.
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// Ensure the parent state is present and signal a warning if not.
if parent != types.EmptyRootHash {
if blob, _ := db.Node(parent); len(blob) == 0 {
@@ -625,7 +629,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode
// UpdateAndReferenceRoot inserts the dirty nodes in provided nodeset into
// database and links the account trie with multiple storage tries if necessary,
// then adds a reference [from] root to the metaroot while holding the db's lock.
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// Ensure the parent state is present and signal a warning if not.
if parent != types.EmptyRootHash {
if blob, _ := db.Node(parent); len(blob) == 0 {
@@ -706,8 +710,12 @@ func (db *Database) Scheme() string {
}
// Reader retrieves a node reader belonging to the given state root.
-func (db *Database) Reader(root common.Hash) *reader {
- return &reader{db: db}
+// An error will be returned if the requested state is not available.
+func (db *Database) Reader(root common.Hash) (*reader, error) {
+ if _, err := db.Node(root); err != nil {
+ return nil, fmt.Errorf("state %#x is not available, %v", root, err)
+ }
+ return &reader{db: db}, nil
}
// reader is a state reader of Database which implements the Reader interface.
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
new file mode 100644
index 0000000000..17f27e3a25
--- /dev/null
+++ b/trie/triedb/pathdb/database.go
@@ -0,0 +1,373 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/params"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// maxDiffLayers is the maximum diff layers allowed in the layer tree.
+const maxDiffLayers = 128
+
+// layer is the interface implemented by all state layers which includes some
+// public methods and some additional methods for internal usage.
+type layer interface {
+ // Node retrieves the trie node with the node info. An error will be returned
+ // if the read operation exits abnormally. For example, if the layer is already
+ // stale, or the associated state is regarded as corrupted. Notably, no error
+ // will be returned if the requested node is not found in database.
+ Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
+
+ // rootHash returns the root hash for which this layer was made.
+ rootHash() common.Hash
+
+ // stateID returns the associated state id of layer.
+ stateID() uint64
+
+ // parentLayer returns the parent layer of this one, or nil if the disk layer was reached.
+ parentLayer() layer
+
+ // update creates a new layer on top of the existing layer diff tree with
+ // the provided dirty trie nodes along with the state change set.
+ //
+ // Note, the maps are retained by the method to avoid copying everything.
+ update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer
+
+ // journal commits an entire diff hierarchy to disk into a single journal entry.
+ // This is meant to be used during shutdown to persist the layer without
+ // flattening everything down (bad for reorgs).
+ journal(w io.Writer) error
+}
+
+// Config contains the settings for database.
+type Config struct {
+ StateLimit uint64 // Number of recent blocks to maintain state history for
+ CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes
+ DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes
+ ReadOnly bool // Flag whether the database is opened in read only mode.
+}
+
+var (
+ // defaultCleanSize is the default memory allowance of clean cache.
+ defaultCleanSize = 16 * 1024 * 1024
+
+ // defaultBufferSize is the default memory allowance of node buffer
+ // that aggregates the writes from above until it's flushed into the
+ // disk. Do not increase the buffer size arbitrarily, otherwise the
+ // system pause time will increase when the database writes happen.
+ defaultBufferSize = 128 * 1024 * 1024
+)
+
+// Defaults contains default settings for Ethereum mainnet.
+var Defaults = &Config{
+ StateLimit: params.FullImmutabilityThreshold,
+ CleanSize: defaultCleanSize,
+ DirtySize: defaultBufferSize,
+}
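
A hedged sketch of opening a path-based database over an in-memory key-value store with a trimmed Config (the sizes are arbitrary illustrations, not recommendations):

// openPathDB builds a pathdb.Database backed by a memory database for tests.
func openPathDB() *Database {
	disk := rawdb.NewMemoryDatabase()
	return New(disk, &Config{
		CleanSize: 16 * 1024 * 1024, // clean node cache
		DirtySize: 32 * 1024 * 1024, // dirty node buffer, flushed to disk when full
	})
}
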
+
+// Database is a multiple-layered structure for maintaining in-memory trie nodes.
+// It consists of one persistent base layer backed by a key-value store, on top
+// of which arbitrarily many in-memory diff layers are stacked. The memory diffs
+// can form a tree with branching, but the disk layer is singleton and common to
+// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can
+// be applied to roll back. The deepest reorg that can be handled depends on the
+// number of state histories tracked on disk.
+//
+// At most one readable and writable database can be opened at the same time in
+// the whole system which ensures that only one database writer can operate disk
+// state. Unexpected open operations can cause the system to panic.
+type Database struct {
+ // readOnly is the flag whether the mutation is allowed to be applied.
+ // It will be set automatically when the database is journaled during
+ // the shutdown to reject all following unexpected mutations.
+ readOnly bool // Indicator if database is opened in read only mode
+ bufferSize int // Memory allowance (in bytes) for caching dirty nodes
+ config *Config // Configuration for database
+ diskdb ethdb.Database // Persistent storage for matured trie nodes
+ tree *layerTree // The group for all known layers
+ lock sync.RWMutex // Lock to prevent mutations from happening at the same time
+
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests
+}
+
+// New attempts to load an already existing layer from a persistent key-value
+// store (with a number of memory layers from a journal). If the journal is not
+// matched with the base persistent layer, all the recorded diff layers are discarded.
+func New(diskdb ethdb.Database, config *Config) *Database {
+ if config == nil {
+ config = Defaults
+ }
+ db := &Database{
+ readOnly: config.ReadOnly,
+ bufferSize: config.DirtySize,
+ config: config,
+ diskdb: diskdb,
+ }
+ // Construct the layer tree by resolving the in-disk singleton state
+ // and in-memory layer journal.
+ db.tree = newLayerTree(db.loadLayers())
+
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // Open the freezer for state history if the passed database contains an
+ // ancient store. Otherwise, all the relevant functionalities are disabled.
+ //
+ // Because the freezer can only be opened once at the same time, this
+ // mechanism also ensures that at most one **non-readOnly** database
+ // is opened at the same time to prevent accidental mutation.
+ //if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly {
+ // freezer, err := rawdb.NewStateHistoryFreezer(ancient, false)
+ // if err != nil {
+ // log.Crit("Failed to open state history freezer", "err", err)
+ // }
+ // db.freezer = freezer
+
+ // // Truncate the extra state histories above in freezer in case
+ // // it's not aligned with the disk layer.
+ // pruned, err := truncateFromHead(db.diskdb, freezer, db.tree.bottom().stateID())
+ // if err != nil {
+ // log.Crit("Failed to truncate extra state histories", "err", err)
+ // }
+ // if pruned != 0 {
+ // log.Warn("Truncated extra state histories", "number", pruned)
+ // }
+ //}
+ log.Warn("Path-based state scheme is an experimental feature")
+ return db
+}
+
+// Reader retrieves a layer belonging to the given state root.
+func (db *Database) Reader(root common.Hash) (layer, error) {
+ l := db.tree.get(root)
+ if l == nil {
+ return nil, fmt.Errorf("state %#x is not available", root)
+ }
+ return l, nil
+}
+
+// Update adds a new layer into the tree, if that can be linked to an existing
+// old parent. It is disallowed to insert a disk layer (the origin of all). Apart
+// from that this function will flatten the extra diff layers at bottom into disk
+// to only keep 128 diff layers in memory by default.
+//
+// The passed in maps(nodes, states) will be retained to avoid copying everything.
+// Therefore, these maps must not be changed afterwards.
+func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ // Hold the lock to prevent concurrent mutations.
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil {
+ return err
+ }
+ // Keep 128 diff layers in the memory, persistent layer is 129th.
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
+ // - head-128 layer(disk layer) is paired with HEAD-128 state
+ return db.tree.cap(root, maxDiffLayers)
+}
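
In other words: after block N is pushed, diff layers cover roots N down to N-127 and the disk layer sits at N-128. A hedged sketch of the push-then-flatten flow used by the tests further below (hypothetical helper):

// pushAndFlatten adds one block's changes and then flattens the tree for shutdown/tests.
func pushAndFlatten(db *Database, root, parent common.Hash, block uint64,
	nodes *trienode.MergedNodeSet, states *triestate.Set) error {
	// Adds a new diff layer; anything deeper than 128 layers is flattened into disk automatically.
	if err := db.Update(root, parent, block, nodes, states); err != nil {
		return err
	}
	// Explicitly flatten everything below root into the disk layer.
	return db.Commit(root, false)
}
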
+
+// Commit traverses downwards the layer tree from a specified layer with the
+// provided state root and all the layers below are flattened downwards. It
+// can be used alone and mostly for test purposes.
+func (db *Database) Commit(root common.Hash, report bool) error {
+ // Hold the lock to prevent concurrent mutations.
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ return db.tree.cap(root, 0)
+}
+
+// Reset rebuilds the database with the specified state as the base.
+//
+// - if target state is empty, clear the stored state and all layers on top
+// - if target state is non-empty, ensure the stored state matches with it
+// and clear all other layers on top.
+func (db *Database) Reset(root common.Hash) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ batch := db.diskdb.NewBatch()
+ root = types.TrieRootHash(root)
+ if root == types.EmptyRootHash {
+ // Empty state is requested as the target, nuke out
+ // the root node and leave all others as dangling.
+ rawdb.DeleteAccountTrieNode(batch, nil)
+ } else {
+ // Ensure the requested state is existent before any
+ // action is applied.
+ _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ if hash != root {
+ return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
+ }
+ }
+ // Mark the disk layer as stale before applying any mutation.
+ db.tree.bottom().markStale()
+
+ // Drop the stale state journal in persistent database and
+ // reset the persistent state id back to zero.
+ rawdb.DeleteTrieJournal(batch)
+ rawdb.WritePersistentStateID(batch, 0)
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // Clean up all state histories in freezer. Theoretically
+ // all root->id mappings should be removed as well. Since
+ // mappings can be huge and might take a while to clear
+ // them, just leave them in disk and wait for overwriting.
+ // if db.freezer != nil {
+ // if err := db.freezer.Reset(); err != nil {
+ // return err
+ // }
+ // }
+ // Re-construct a new disk layer backed by persistent state
+ // with **empty clean cache and node buffer**.
+ dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+ db.tree.reset(dl)
+ log.Info("Rebuilt trie database", "root", root)
+ return nil
+}
+
+// Recover rolls the database back to a specified historical point.
+// The state is supported as the rollback destination only if it is a
+// canonical state and the corresponding trie histories exist.
+func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error {
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ return errors.New("state rollback is non-supported")
+}
+
+// Recoverable returns the indicator if the specified state is recoverable.
+func (db *Database) Recoverable(root common.Hash) bool {
+ // Ensure the requested state is a known state.
+ root = types.TrieRootHash(root)
+ id := rawdb.ReadStateID(db.diskdb, root)
+ if id == nil {
+ return false
+ }
+ // Recoverable state must be below the disk layer. The recoverable
+ // state only refers to state that is currently not available,
+ // but can be restored by applying state history.
+ dl := db.tree.bottom()
+ if *id >= dl.stateID() {
+ return false
+ }
+ return false
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // Ensure the requested state is a canonical state and all state
+ // histories in range [id+1, disklayer.ID] are present and complete.
+ // parent := root
+ // return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error {
+ // if m.parent != parent {
+ // return errors.New("unexpected state history")
+ // }
+ // if len(m.incomplete) > 0 {
+ // return errors.New("incomplete state history")
+ // }
+ // parent = m.root
+ // return nil
+ // }) == nil
+}
+
+// Close closes the trie database and the held freezer.
+func (db *Database) Close() error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ db.readOnly = true
+ return nil
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // if db.freezer == nil {
+ // return nil
+ // }
+ // return db.freezer.Close()
+}
+
+// Size returns the current storage size of the memory cache in front of the
+// persistent database layer.
+func (db *Database) Size() (size common.StorageSize) {
+ db.tree.forEach(func(layer layer) {
+ if diff, ok := layer.(*diffLayer); ok {
+ size += common.StorageSize(diff.memory)
+ }
+ if disk, ok := layer.(*diskLayer); ok {
+ size += disk.size()
+ }
+ })
+ return size
+}
+
+// Initialized returns an indicator if the state data is already
+// initialized in path-based scheme.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ var inited bool
+ db.tree.forEach(func(layer layer) {
+ if layer.rootHash() != types.EmptyRootHash {
+ inited = true
+ }
+ })
+ return inited
+}
+
+// SetBufferSize sets the node buffer size to the provided value (in bytes).
+func (db *Database) SetBufferSize(size int) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ db.bufferSize = size
+ return db.tree.bottom().setBufferSize(db.bufferSize)
+}
+
+// Scheme returns the node scheme used in the database.
+func (db *Database) Scheme() string {
+ return rawdb.PathScheme
+}
diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go
new file mode 100644
index 0000000000..2948e22140
--- /dev/null
+++ b/trie/triedb/pathdb/database_test.go
@@ -0,0 +1,563 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "testing"
+
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie/testutil"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/stretchr/testify/require"
+)
+
+func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) {
+ h, err := newTestHasher(addrHash, root, cleans)
+ if err != nil {
+ panic(fmt.Errorf("failed to create hasher, err: %w", err))
+ }
+ for key, val := range dirties {
+ if len(val) == 0 {
+ h.Delete(key.Bytes())
+ } else {
+ h.Update(key.Bytes(), val)
+ }
+ }
+ return h.Commit(false)
+}
+
+func generateAccount(storageRoot common.Hash) types.StateAccount {
+ return types.StateAccount{
+ Nonce: uint64(rand.Intn(100)),
+ Balance: big.NewInt(rand.Int63()),
+ CodeHash: testutil.RandBytes(32),
+ Root: storageRoot,
+ }
+}
+
+const (
+ createAccountOp int = iota
+ modifyAccountOp
+ deleteAccountOp
+ opLen
+)
+
+type genctx struct {
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+ accountOrigin map[common.Address][]byte
+ storageOrigin map[common.Address]map[common.Hash][]byte
+ nodes *trienode.MergedNodeSet
+}
+
+func newCtx() *genctx {
+ return &genctx{
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountOrigin: make(map[common.Address][]byte),
+ storageOrigin: make(map[common.Address]map[common.Hash][]byte),
+ nodes: trienode.NewMergedNodeSet(),
+ }
+}
+
+type tester struct {
+ db *Database
+ roots []common.Hash
+ preimages map[common.Hash]common.Address
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+
+ // state snapshots
+ snapAccounts map[common.Hash]map[common.Hash][]byte
+ snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
+}
+
+func newTester(t *testing.T) *tester {
+ var (
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+ disk = rawdb.NewMemoryDatabase()
+ db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024})
+ obj = &tester{
+ db: db,
+ preimages: make(map[common.Hash]common.Address),
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
+ snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte),
+ }
+ )
+ for i := 0; i < 2*128; i++ {
+ var parent = types.EmptyRootHash
+ if len(obj.roots) != 0 {
+ parent = obj.roots[len(obj.roots)-1]
+ }
+ root, nodes, states := obj.generate(parent)
+ if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
+ panic(fmt.Errorf("failed to update state changes, err: %w", err))
+ }
+ obj.roots = append(obj.roots, root)
+ }
+ return obj
+}
+
+func (t *tester) release() {
+ t.db.Close()
+ t.db.diskdb.Close()
+}
+
+func (t *tester) randAccount() (common.Address, []byte) {
+ for addrHash, account := range t.accounts {
+ return t.preimages[addrHash], account
+ }
+ return common.Address{}, nil
+}
+
+func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for i := 0; i < 10; i++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ hash := testutil.RandomHash()
+
+ storage[hash] = v
+ origin[hash] = nil
+ }
+ root, set := updateTrie(addrHash, types.EmptyRootHash, storage, nil)
+
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for hash, val := range t.storages[addrHash] {
+ origin[hash] = val
+ storage[hash] = nil
+
+ if len(origin) == 3 {
+ break
+ }
+ }
+ for i := 0; i < 3; i++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ hash := testutil.RandomHash()
+
+ storage[hash] = v
+ origin[hash] = nil
+ }
+ root, set := updateTrie(crypto.Keccak256Hash(addr.Bytes()), root, storage, t.storages[addrHash])
+
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for hash, val := range t.storages[addrHash] {
+ origin[hash] = val
+ storage[hash] = nil
+ }
+ root, set := updateTrie(addrHash, root, storage, t.storages[addrHash])
+ if root != types.EmptyRootHash {
+ panic("failed to clear storage trie")
+ }
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) {
+ var (
+ ctx = newCtx()
+ dirties = make(map[common.Hash]struct{})
+ )
+ for i := 0; i < 20; i++ {
+ switch rand.Intn(opLen) {
+ case createAccountOp:
+ // account creation
+ addr := testutil.RandomAddress()
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := t.accounts[addrHash]; ok {
+ continue
+ }
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ root := t.generateStorage(ctx, addr)
+ ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
+ ctx.accountOrigin[addr] = nil
+ t.preimages[addrHash] = addr
+
+ case modifyAccountOp:
+ // account mutation
+ addr, account := t.randAccount()
+ if addr == (common.Address{}) {
+ continue
+ }
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ acct, _ := types.FullAccount(account)
+ stRoot := t.mutateStorage(ctx, addr, acct.Root)
+ newAccount := types.SlimAccountRLP(generateAccount(stRoot))
+
+ ctx.accounts[addrHash] = newAccount
+ ctx.accountOrigin[addr] = account
+
+ case deleteAccountOp:
+ // account deletion
+ addr, account := t.randAccount()
+ if addr == (common.Address{}) {
+ continue
+ }
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ acct, _ := types.FullAccount(account)
+ if acct.Root != types.EmptyRootHash {
+ t.clearStorage(ctx, addr, acct.Root)
+ }
+ ctx.accounts[addrHash] = nil
+ ctx.accountOrigin[addr] = account
+ }
+ }
+ root, set := updateTrie(common.Hash{}, parent, ctx.accounts, t.accounts)
+ ctx.nodes.Merge(set)
+
+ // Save state snapshot before commit
+ t.snapAccounts[parent] = copyAccounts(t.accounts)
+ t.snapStorages[parent] = copyStorages(t.storages)
+
+ // Commit all changes to live state set
+ for addrHash, account := range ctx.accounts {
+ if len(account) == 0 {
+ delete(t.accounts, addrHash)
+ } else {
+ t.accounts[addrHash] = account
+ }
+ }
+ for addrHash, slots := range ctx.storages {
+ if _, ok := t.storages[addrHash]; !ok {
+ t.storages[addrHash] = make(map[common.Hash][]byte)
+ }
+ for sHash, slot := range slots {
+ if len(slot) == 0 {
+ delete(t.storages[addrHash], sHash)
+ } else {
+ t.storages[addrHash][sHash] = slot
+ }
+ }
+ }
+ return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil)
+}
+
+// lastHash returns the latest root hash, or empty if nothing is cached.
+func (t *tester) lastHash() common.Hash {
+ if len(t.roots) == 0 {
+ return common.Hash{}
+ }
+ return t.roots[len(t.roots)-1]
+}
+
+func (t *tester) verifyState(root common.Hash) error {
+ reader, err := t.db.Reader(root)
+ if err != nil {
+ return err
+ }
+ _, err = reader.Node(common.Hash{}, nil, root)
+ if err != nil {
+ return errors.New("root node is not available")
+ }
+ for addrHash, account := range t.snapAccounts[root] {
+ blob, err := reader.Node(common.Hash{}, addrHash.Bytes(), crypto.Keccak256Hash(account))
+ if err != nil || !bytes.Equal(blob, account) {
+ return fmt.Errorf("account is mismatched: %w", err)
+ }
+ }
+ for addrHash, slots := range t.snapStorages[root] {
+ for hash, slot := range slots {
+ blob, err := reader.Node(addrHash, hash.Bytes(), crypto.Keccak256Hash(slot))
+ if err != nil || !bytes.Equal(blob, slot) {
+ return fmt.Errorf("slot is mismatched: %w", err)
+ }
+ }
+ }
+ return nil
+}
+
+// bottomIndex returns the index of the current disk layer.
+func (t *tester) bottomIndex() int {
+ bottom := t.db.tree.bottom()
+ for i := 0; i < len(t.roots); i++ {
+ if t.roots[i] == bottom.rootHash() {
+ return i
+ }
+ }
+ return -1
+}
+
+func TestDatabaseRollback(t *testing.T) {
+ // Verify state histories
+ tester := newTester(t)
+ defer tester.release()
+
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // if err := tester.verifyHistory(); err != nil {
+ // t.Fatalf("Invalid state history, err: %v", err)
+ // }
+ // Revert database from top to bottom
+ for i := tester.bottomIndex(); i >= 0; i-- {
+ root := tester.roots[i]
+ parent := types.EmptyRootHash
+ if i > 0 {
+ parent = tester.roots[i-1]
+ }
+ loader := newHashLoader(tester.snapAccounts[root], tester.snapStorages[root])
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // if err := tester.db.Recover(parent, loader); err != nil {
+ // t.Fatalf("Failed to revert db, err: %v", err)
+ // }
+ require.ErrorContains(t, tester.db.Recover(parent, loader), "state rollback is non-supported")
+ tester.verifyState(parent)
+ }
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // if tester.db.tree.len() != 1 {
+ // t.Fatal("Only disk layer is expected")
+ // }
+}
+
+func TestDatabaseRecoverable(t *testing.T) {
+ var (
+ tester = newTester(t)
+ index = tester.bottomIndex()
+ )
+ defer tester.release()
+
+ var cases = []struct {
+ root common.Hash
+ expect bool
+ }{
+ // Unknown state should be unrecoverable
+ {common.Hash{0x1}, false},
+
+ // Initial state should be recoverable
+ {types.EmptyRootHash, true},
+
+ // Initial state should be recoverable
+ {common.Hash{}, true},
+
+ // Layers below current disk layer are recoverable
+ {tester.roots[index-1], true},
+
+ // Disklayer itself is not recoverable, since it's
+ // available for accessing.
+ {tester.roots[index], false},
+
+ // Layers above current disk layer are not recoverable
+ // since they are available for accessing.
+ {tester.roots[index+1], false},
+ }
+ for i, c := range cases {
+ result := tester.db.Recoverable(c.root)
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // originally was `result != c.expect`
+ if result != false {
+ t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result)
+ }
+ }
+}
+
+func TestReset(t *testing.T) {
+ var (
+ tester = newTester(t)
+ // index = tester.bottomIndex()
+ )
+ defer tester.release()
+
+ // Reset database to unknown target, should reject it
+ if err := tester.db.Reset(testutil.RandomHash()); err == nil {
+ t.Fatal("Failed to reject invalid reset")
+ }
+ // Reset database to state persisted in the disk
+ if err := tester.db.Reset(types.EmptyRootHash); err != nil {
+ t.Fatalf("Failed to reset database %v", err)
+ }
+ // Ensure journal is deleted from disk
+ if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
+ t.Fatal("Failed to clean journal")
+ }
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // Ensure all trie histories are removed
+ // for i := 0; i <= index; i++ {
+ // _, err := readHistory(tester.db.freezer, uint64(i+1))
+ // if err == nil {
+ // t.Fatalf("Failed to clean state history, index %d", i+1)
+ // }
+ // }
+ // Verify layer tree structure, single disk layer is expected
+ if tester.db.tree.len() != 1 {
+ t.Fatalf("Extra layer kept %d", tester.db.tree.len())
+ }
+ if tester.db.tree.bottom().rootHash() != types.EmptyRootHash {
+ t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash())
+ }
+}
+
+func TestCommit(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Commit(tester.lastHash(), false); err != nil {
+ t.Fatalf("Failed to cap database, err: %v", err)
+ }
+ // Verify layer tree structure, single disk layer is expected
+ if tester.db.tree.len() != 1 {
+ t.Fatal("Layer tree structure is invalid")
+ }
+ if tester.db.tree.bottom().rootHash() != tester.lastHash() {
+ t.Fatal("Layer tree structure is invalid")
+ }
+ // Verify states
+ if err := tester.verifyState(tester.lastHash()); err != nil {
+ t.Fatalf("State is invalid, err: %v", err)
+ }
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // Verify state histories
+ // if err := tester.verifyHistory(); err != nil {
+ // t.Fatalf("State history is invalid, err: %v", err)
+ // }
+}
+
+func TestJournal(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Journal(tester.lastHash()); err != nil {
+ t.Errorf("Failed to journal, err: %v", err)
+ }
+ tester.db.Close()
+ tester.db = New(tester.db.diskdb, nil)
+
+ // Verify states including disk layer and all diff on top.
+ for i := 0; i < len(tester.roots); i++ {
+ if i >= tester.bottomIndex() {
+ if err := tester.verifyState(tester.roots[i]); err != nil {
+ t.Fatalf("Invalid state, err: %v", err)
+ }
+ continue
+ }
+ if err := tester.verifyState(tester.roots[i]); err == nil {
+ t.Fatal("Unexpected state")
+ }
+ }
+}
+
+func TestCorruptedJournal(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Journal(tester.lastHash()); err != nil {
+ t.Errorf("Failed to journal, err: %v", err)
+ }
+ tester.db.Close()
+ _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
+
+ // Mutate the journal in disk, it should be regarded as invalid
+ blob := rawdb.ReadTrieJournal(tester.db.diskdb)
+ blob[0] = 1
+ rawdb.WriteTrieJournal(tester.db.diskdb, blob)
+
+ // Verify states, all not-yet-written states should be discarded
+ tester.db = New(tester.db.diskdb, nil)
+ for i := 0; i < len(tester.roots); i++ {
+ if tester.roots[i] == root {
+ if err := tester.verifyState(root); err != nil {
+ t.Fatalf("Disk state is corrupted, err: %v", err)
+ }
+ continue
+ }
+ if err := tester.verifyState(tester.roots[i]); err == nil {
+ t.Fatal("Unexpected state")
+ }
+ }
+}
+
+// copyAccounts returns a deep-copied account set of the provided one.
+func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte {
+ copied := make(map[common.Hash][]byte, len(set))
+ for key, val := range set {
+ copied[key] = common.CopyBytes(val)
+ }
+ return copied
+}
+
+// copyStorages returns a deep-copied storage set of the provided one.
+func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+ copied := make(map[common.Hash]map[common.Hash][]byte, len(set))
+ for addrHash, subset := range set {
+ copied[addrHash] = make(map[common.Hash][]byte, len(subset))
+ for key, val := range subset {
+ copied[addrHash][key] = common.CopyBytes(val)
+ }
+ }
+ return copied
+}
diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go
new file mode 100644
index 0000000000..52f08bf46a
--- /dev/null
+++ b/trie/triedb/pathdb/difflayer.go
@@ -0,0 +1,184 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// diffLayer represents a collection of modifications made to the in-memory tries
+// along with associated state changes after running a block on top.
+//
+// The goal of a diff layer is to act as a journal, tracking recent modifications
+// made to the state, that have not yet graduated into a semi-immutable state.
+type diffLayer struct {
+ // Immutables
+ root common.Hash // Root hash to which this layer diff belongs
+ id uint64 // Corresponding state id
+ block uint64 // Associated block number
+ nodes map[common.Hash]map[string]*trienode.Node // Cached trie nodes indexed by owner and path
+ states *triestate.Set // Associated state change set for building history
+ memory uint64 // Approximate guess as to how much memory we use
+
+ parent layer // Parent layer modified by this one, never nil, **can be changed**
+ lock sync.RWMutex // Lock used to protect parent
+}
+
+// newDiffLayer creates a new diff layer on top of an existing layer.
+func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ var (
+ size int64
+ count int
+ )
+ dl := &diffLayer{
+ root: root,
+ id: id,
+ block: block,
+ nodes: nodes,
+ states: states,
+ parent: parent,
+ }
+ for _, subset := range nodes {
+ for path, n := range subset {
+ dl.memory += uint64(n.Size() + len(path))
+ size += int64(len(n.Blob) + len(path))
+ }
+ count += len(subset)
+ }
+ if states != nil {
+ dl.memory += uint64(states.Size())
+ }
+ dirtyWriteMeter.Mark(size)
+ diffLayerNodesMeter.Mark(int64(count))
+ diffLayerBytesMeter.Mark(int64(dl.memory))
+ log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory))
+ return dl
+}
+
+// rootHash implements the layer interface, returning the root hash of
+// corresponding state.
+func (dl *diffLayer) rootHash() common.Hash {
+ return dl.root
+}
+
+// stateID implements the layer interface, returning the state id of the layer.
+func (dl *diffLayer) stateID() uint64 {
+ return dl.id
+}
+
+// parentLayer implements the layer interface, returning the parent layer on
+// which this diff layer is stacked.
+func (dl *diffLayer) parentLayer() layer {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.parent
+}
+
+// node retrieves the node with the provided node information. It's the internal
+// version of the Node function, additionally tracking the depth of the accessed
+// layer. No error will be returned if the node is not found.
+func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, depth int) ([]byte, error) {
+ // Hold the lock, ensure the parent won't be changed during the
+ // state accessing.
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // If the trie node is known locally, return it
+ subset, ok := dl.nodes[owner]
+ if ok {
+ n, ok := subset[string(path)]
+ if ok {
+ // If the trie node is not hash matched, or marked as removed,
+ // bubble up an error here. It shouldn't happen at all.
+ if n.Hash != hash {
+ dirtyFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+ return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path)
+ }
+ dirtyHitMeter.Mark(1)
+ dirtyNodeHitDepthHist.Update(int64(depth))
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
+ return n.Blob, nil
+ }
+ }
+ // Trie node unknown to this layer, resolve from parent
+ if diff, ok := dl.parent.(*diffLayer); ok {
+ return diff.node(owner, path, hash, depth+1)
+ }
+ // Failed to resolve through diff layers, fallback to disk layer
+ return dl.parent.Node(owner, path, hash)
+}
+
+// Node implements the layer interface, retrieving the trie node blob with the
+// provided node information. No error will be returned if the node is not found.
+func (dl *diffLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ return dl.node(owner, path, hash, 0)
+}
+
+// update implements the layer interface, creating a new layer on top of the
+// existing layer tree with the specified data items.
+func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ return newDiffLayer(dl, root, id, block, nodes, states)
+}
+
+// persist flushes the diff layer and all its parent layers to disk layer.
+func (dl *diffLayer) persist(force bool) (layer, error) {
+ if parent, ok := dl.parentLayer().(*diffLayer); ok {
+ // Hold the lock to prevent any read operation until the new
+ // parent is linked correctly.
+ dl.lock.Lock()
+
+ // The merging of diff layers starts at the bottom-most layer,
+ // therefore we recurse down here, flattening on the way up
+ // (diffToDisk).
+ result, err := parent.persist(force)
+ if err != nil {
+ dl.lock.Unlock()
+ return nil, err
+ }
+ dl.parent = result
+ dl.lock.Unlock()
+ }
+ return diffToDisk(dl, force)
+}
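+
+// For illustration: given a stack disk <- d1 <- d2 <- d3, calling d3.persist
+// first recurses into d2 and then d1; d1 is merged into the disk layer via
+// diffToDisk, yielding a new disk layer that becomes d2's parent, and the same
+// happens for d2 and finally for d3, whose merged disk layer is returned.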
+
+// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
+// it. The method will panic if called onto a non-bottom-most diff layer.
+func diffToDisk(layer *diffLayer, force bool) (layer, error) {
+ disk, ok := layer.parentLayer().(*diskLayer)
+ if !ok {
+ panic(fmt.Sprintf("unknown layer type: %T", layer.parentLayer()))
+ }
+ return disk.commit(layer, force)
+}
diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go
new file mode 100644
index 0000000000..5520a5779e
--- /dev/null
+++ b/trie/triedb/pathdb/difflayer_test.go
@@ -0,0 +1,180 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/trie/testutil"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func emptyLayer() *diskLayer {
+ return &diskLayer{
+ db: New(rawdb.NewMemoryDatabase(), nil),
+ buffer: newNodeBuffer(defaultBufferSize, nil, 0),
+ }
+}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ava-labs/subnet-evm/trie
+// BenchmarkSearch128Layers
+// BenchmarkSearch128Layers-8 243826 4755 ns/op
+func BenchmarkSearch128Layers(b *testing.B) { benchmarkSearch(b, 0, 128) }
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ava-labs/subnet-evm/trie
+// BenchmarkSearch512Layers
+// BenchmarkSearch512Layers-8 49686 24256 ns/op
+func BenchmarkSearch512Layers(b *testing.B) { benchmarkSearch(b, 0, 512) }
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ava-labs/subnet-evm/trie
+// BenchmarkSearch1Layer
+// BenchmarkSearch1Layer-8 14062725 88.40 ns/op
+func BenchmarkSearch1Layer(b *testing.B) { benchmarkSearch(b, 127, 128) }
+
+func benchmarkSearch(b *testing.B, depth int, total int) {
+ var (
+ npath []byte
+ nhash common.Hash
+ nblob []byte
+ )
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer, index int) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ if npath == nil && depth == index {
+ npath = common.CopyBytes(path)
+ nblob = common.CopyBytes(node.Blob)
+ nhash = node.Hash
+ }
+ }
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ var layer layer
+ layer = emptyLayer()
+ for i := 0; i < total; i++ {
+ layer = fill(layer, i)
+ }
+ b.ResetTimer()
+
+ var (
+ have []byte
+ err error
+ )
+ for i := 0; i < b.N; i++ {
+ have, err = layer.Node(common.Hash{}, npath, nhash)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ if !bytes.Equal(have, nblob) {
+ b.Fatalf("have %x want %x", have, nblob)
+ }
+}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ava-labs/subnet-evm/trie
+// BenchmarkPersist
+// BenchmarkPersist-8 10 111252975 ns/op
+func BenchmarkPersist(b *testing.B) {
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ }
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ var layer layer
+ layer = emptyLayer()
+ for i := 1; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.StartTimer()
+
+ dl, ok := layer.(*diffLayer)
+ if !ok {
+ break
+ }
+ dl.persist(false)
+ }
+}
+
+// BenchmarkJournal benchmarks the performance for journaling the layers.
+//
+// BenchmarkJournal
+// BenchmarkJournal-8 10 110969279 ns/op
+func BenchmarkJournal(b *testing.B) {
+ b.SkipNow()
+
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ }
+ // TODO(rjl493456442) a non-nil state set is expected.
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ var layer layer
+ layer = emptyLayer()
+ for i := 0; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ layer.journal(new(bytes.Buffer))
+ }
+}
diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go
new file mode 100644
index 0000000000..aa3287ccfd
--- /dev/null
+++ b/trie/triedb/pathdb/disklayer.go
@@ -0,0 +1,308 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "golang.org/x/crypto/sha3"
+)
+
+// diskLayer is a low level persistent layer built on top of a key-value store.
+type diskLayer struct {
+ root common.Hash // Immutable, root hash for which this layer was made
+ id uint64 // Immutable, corresponding state id
+ db *Database // Path-based trie database
+ cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
+ buffer *nodebuffer // Node buffer to aggregate writes
+ stale bool // Signals that the layer became stale (state progressed)
+ lock sync.RWMutex // Lock used to protect stale flag
+}
+
+// newDiskLayer creates a new disk layer based on the given arguments.
+func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer {
+ // Initialize a clean cache if the memory allowance is not zero
+ // or reuse the provided cache if it is not nil (inherited from
+ // the original disk layer).
+ if cleans == nil && db.config.CleanSize != 0 {
+ cleans = fastcache.New(db.config.CleanSize)
+ }
+ return &diskLayer{
+ root: root,
+ id: id,
+ db: db,
+ cleans: cleans,
+ buffer: buffer,
+ }
+}
+
+// rootHash implements the layer interface, returning the root hash of the
+// corresponding state.
+func (dl *diskLayer) rootHash() common.Hash {
+ return dl.root
+}
+
+// stateID implements the layer interface, returning the state id of disk layer.
+func (dl *diskLayer) stateID() uint64 {
+ return dl.id
+}
+
+// parentLayer implements the layer interface, returning nil as there's no layer
+// below the disk.
+func (dl *diskLayer) parentLayer() layer {
+ return nil
+}
+
+// isStale returns whether this layer has become stale (was flattened across) or if
+// it's still live.
+func (dl *diskLayer) isStale() bool {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.stale
+}
+
+// markStale sets the stale flag as true.
+func (dl *diskLayer) markStale() {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ if dl.stale {
+ panic("triedb disk layer is stale") // we've committed into the same base from two children, boom
+ }
+ dl.stale = true
+}
+
+// Node implements the layer interface, retrieving the trie node with the
+// provided node info. No error will be returned if the node is not found.
+func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return nil, errSnapshotStale
+ }
+ // Try to retrieve the trie node from the not-yet-written
+ // node buffer first. Note the buffer is lock free since
+ // it's impossible to mutate the buffer before tagging the
+ // layer as stale.
+ n, err := dl.buffer.node(owner, path, hash)
+ if err != nil {
+ return nil, err
+ }
+ if n != nil {
+ dirtyHitMeter.Mark(1)
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
+ return n.Blob, nil
+ }
+ dirtyMissMeter.Mark(1)
+
+ // Try to retrieve the trie node from the clean memory cache
+ key := cacheKey(owner, path)
+ if dl.cleans != nil {
+ if blob := dl.cleans.Get(nil, key); len(blob) > 0 {
+ h := newHasher()
+ defer h.release()
+
+ got := h.hash(blob)
+ if got == hash {
+ cleanHitMeter.Mark(1)
+ cleanReadMeter.Mark(int64(len(blob)))
+ return blob, nil
+ }
+ cleanFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got)
+ }
+ cleanMissMeter.Mark(1)
+ }
+ // Try to retrieve the trie node from the disk.
+ var (
+ nBlob []byte
+ nHash common.Hash
+ )
+ if owner == (common.Hash{}) {
+ nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path)
+ } else {
+ nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
+ }
+ if nHash != hash {
+ diskFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
+ return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
+ }
+ if dl.cleans != nil && len(nBlob) > 0 {
+ dl.cleans.Set(key, nBlob)
+ cleanWriteMeter.Mark(int64(len(nBlob)))
+ }
+ return nBlob, nil
+}
+
+// update implements the layer interface, returning a new diff layer on top
+// with the given state set.
+func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ return newDiffLayer(dl, root, id, block, nodes, states)
+}
+
+// commit merges the given bottom-most diff layer into the node buffer
+// and returns a newly constructed disk layer. Note the current disk
+// layer must be tagged as stale first to prevent re-access.
+func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ // NOTE(freezer): This is disabled since we do not have a freezer.
+ // Construct and store the state history first. If crash happens
+ // after storing the state history but without flushing the
+ // corresponding states(journal), the stored state history will
+ // be truncated in the next restart.
+ // if dl.db.freezer != nil {
+ // err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit)
+ // if err != nil {
+ // return nil, err
+ // }
+ // }
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.stale = true
+
+ // Store the root->id lookup afterwards. All stored lookups are
+ // identified by the **unique** state root: it's impossible for two
+ // non-adjacent blocks in the same chain to have the same root.
+ if dl.id == 0 {
+ rawdb.WriteStateID(dl.db.diskdb, dl.root, 0)
+ }
+ rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())
+
+ // Construct a new disk layer by merging the nodes from the provided
+ // diff layer, and flush the content in disk layer if there are too
+ // many nodes cached. The clean cache is inherited from the original
+ // disk layer for reusing.
+ ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes))
+ err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force)
+ if err != nil {
+ return nil, err
+ }
+ return ndl, nil
+}
+
+// nolint: unused
+// revert applies the given state history and returns a reverted disk layer.
+func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer, error) {
+ if h.meta.root != dl.rootHash() {
+ return nil, errUnexpectedHistory
+ }
+ // Reject if the provided state history is incomplete. This happens
+ // when a contract with a very large storage is self-destructed and the
+ // full change set couldn't be recorded due to memory limitations.
+ if len(h.meta.incomplete) > 0 {
+ return nil, errors.New("incomplete state history")
+ }
+ if dl.id == 0 {
+ return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
+ }
+ // Apply the reverse state changes upon the current state. This must
+ // be done before holding the lock in order to access state in "this"
+ // layer.
+ nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader)
+ if err != nil {
+ return nil, err
+ }
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ dl.stale = true
+
+ // The state change may be applied to the node buffer or to the
+ // persistent state, depending on whether the node buffer is empty.
+ // If the buffer is not empty, the state transition that needs to be
+ // reverted is still cached there and hasn't been flushed yet;
+ // otherwise, the persistent state is manipulated directly.
+ if !dl.buffer.empty() {
+ err := dl.buffer.revert(dl.db.diskdb, nodes)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ batch := dl.db.diskdb.NewBatch()
+ writeNodes(batch, nodes, dl.cleans)
+ rawdb.WritePersistentStateID(batch, dl.id-1)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write states", "err", err)
+ }
+ }
+ return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil
+}
+
+// setBufferSize sets the node buffer size to the provided value.
+func (dl *diskLayer) setBufferSize(size int) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return errSnapshotStale
+ }
+ return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id)
+}
+
+// size returns the approximate size of cached nodes in the disk layer.
+func (dl *diskLayer) size() common.StorageSize {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return 0
+ }
+ return common.StorageSize(dl.buffer.size)
+}
+
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+ hasherPool.Put(h)
+}
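+
+// The pooled hasher is used as follows elsewhere in this file (see Node above):
+//
+//	h := newHasher()
+//	defer h.release()
+//	got := h.hash(blob)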
diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go
new file mode 100644
index 0000000000..be6bf6c36e
--- /dev/null
+++ b/trie/triedb/pathdb/errors.go
@@ -0,0 +1,63 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ // errSnapshotReadOnly is returned if the database is opened in read only mode
+ // and mutation is requested.
+ errSnapshotReadOnly = errors.New("read only")
+
+ // errSnapshotStale is returned from data accessors if the underlying
+ // layer had been invalidated due to the chain progressing forward far enough
+ // to not maintain the layer's original state.
+ errSnapshotStale = errors.New("layer stale")
+
+ // nolint: unused
+ // errUnexpectedHistory is returned if an unmatched state history is applied
+ // to the database for state rollback.
+ errUnexpectedHistory = errors.New("unexpected state history")
+
+ // nolint: unused
+ // errStateUnrecoverable is returned if state is required to be reverted to
+ // a destination without associated state history available.
+ errStateUnrecoverable = errors.New("state is unrecoverable")
+
+ // errUnexpectedNode is returned if the node requested at the specified
+ // path does not match the expected hash.
+ errUnexpectedNode = errors.New("unexpected node")
+)
+
+func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error {
+ return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash)
+}
diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go
new file mode 100644
index 0000000000..83fc385185
--- /dev/null
+++ b/trie/triedb/pathdb/history.go
@@ -0,0 +1,496 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "golang.org/x/exp/slices"
+)
+
+// State history records the state changes involved in executing a block. The
+// state can be reverted to the previous version by applying the associated
+// history object (state reverse diff). State history objects are kept to
+// guarantee that the system can perform state rollbacks in case of deep reorg.
+//
+// Each state transition will generate a state history object. Note that not
+// every block has a corresponding state history object: if a block performs
+// no state changes whatsoever, no history is created for it. Each state history
+// has a sequentially increasing number acting as its unique identifier.
+//
+// The state history is written to disk (ancient store) when the corresponding
+// diff layer is merged into the disk layer. At the same time, the system can
+// prune the oldest histories according to the configured retention limit.
+//
+// Disk State
+// ^
+// |
+// +------------+ +---------+ +---------+ +---------+
+// | Init State |---->| State 1 |---->| ... |---->| State n |
+// +------------+ +---------+ +---------+ +---------+
+//
+// +-----------+ +------+ +-----------+
+// | History 1 |----> | ... |---->| History n |
+// +-----------+ +------+ +-----------+
+//
+// # Rollback
+//
+// If the system wants to roll back to a previous state n, it needs to ensure
+// that all history objects from n+1 up to the current disk layer exist. The
+// history objects are applied to the state in reverse order, starting from the
+// current disk layer.
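+//
+// As a rough sketch (illustrative pseudo-code only; readHistory is part of the
+// freezer-backed flow that is disabled in this port):
+//
+//	// roll the disk layer back from state id n to target id m (m < n)
+//	for id := n; id > m; id-- {
+//		h := readHistory(id)             // load history object id
+//		disk, _ = disk.revert(h, loader) // apply the reverse diff
+//	}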
+
+const (
+ accountIndexSize = common.AddressLength + 13 // The length of encoded account index
+ slotIndexSize = common.HashLength + 5 // The length of encoded slot index
+ historyMetaSize = 9 + 2*common.HashLength // The length of fixed size part of meta object
+
+ stateHistoryVersion = uint8(0) // initial version of state history structure.
+)
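+
+// For reference, the fixed sizes above decompose as follows (see the encode
+// methods below for the exact field order):
+//
+//	accountIndexSize = 20 (address) + 1 (length) + 4 (offset) + 4 (storageOffset) + 4 (storageSlots) = 33
+//	slotIndexSize    = 32 (slot key hash) + 1 (length) + 4 (offset)                                  = 37
+//	historyMetaSize  = 1 (version) + 32 (parent root) + 32 (post-state root) + 8 (block number)      = 73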
+
+// Each state history entry consists of five elements:
+//
+// # metadata
+// This object contains a few meta fields, such as the associated state root,
+// block number, version tag and so on. This object may also contain an extra
+// list of account hashes, meaning that the storage changes belonging to those
+// accounts are incomplete due to a large contract destruction. Such an
+// incomplete history cannot be used for rollback or for serving archive state
+// requests.
+//
+// # account index
+// This object contains index information for an account. For example, offset
+// and length indicate where the data belonging to the account is located,
+// while storageOffset and storageSlots indicate where the account's storage
+// modifications are located.
+//
+// The size of each account index is *fixed*, and all indexes are sorted
+// lexicographically. Thus binary search can be performed to quickly locate a
+// specific account.
+//
+// # account data
+// Account data is a concatenated byte stream composed of all account data.
+// The data of an individual account can be resolved via the offset and length
+// indicated by the corresponding account index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | length
+// offset |----------------+
+// v v
+// +----------------+----------------+----------------+----------------+
+// | Account data 1 | Account data 2 | ... | Account data N |
+// +----------------+----------------+----------------+----------------+
+//
+// # storage index
+// This object is similar to the account index. It is also fixed size and
+// contains the location info of the storage slot data.
+//
+// # storage data
+// Storage data is a concatenated byte stream composed of all storage slot data.
+// The data of an individual slot can be resolved via the location info
+// indicated by the corresponding account index and storage slot index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | storage slots
+// storage offset |-----------------------------------------------------+
+// v v
+// +-----------------+-----------------+-----------------+
+// | storage index 1 | storage index 2 | storage index 3 |
+// +-----------------+-----------------+-----------------+
+// | length
+// offset |-------------+
+// v v
+// +-------------+
+// | slot data 1 |
+// +-------------+
+
+// accountIndex describes the metadata belonging to an account.
+type accountIndex struct {
+ address common.Address // The address of account
+ length uint8 // The length of account data, size limited by 255
+ offset uint32 // The offset of item in account data table
+ storageOffset uint32 // The offset of storage index in storage index table
+ storageSlots uint32 // The number of mutated storage slots belonging to the account
+}
+
+// encode packs account index into byte stream.
+func (i *accountIndex) encode() []byte {
+ var buf [accountIndexSize]byte
+ copy(buf[:], i.address.Bytes())
+ buf[common.AddressLength] = i.length
+ binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset)
+ binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset)
+ binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots)
+ return buf[:]
+}
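+
+// The resulting 33-byte layout produced by encode (and consumed by decode) is:
+//
+//	[0, 20)  account address
+//	[20]     account data length
+//	[21, 25) offset into the account data table
+//	[25, 29) offset into the storage index table
+//	[29, 33) number of mutated storage slots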
+
+// decode unpacks account index from byte stream.
+func (i *accountIndex) decode(blob []byte) {
+ i.address = common.BytesToAddress(blob[:common.AddressLength])
+ i.length = blob[common.AddressLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
+ i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
+ i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
+}
+
+// slotIndex describes the metadata belonging to a storage slot.
+type slotIndex struct {
+ hash common.Hash // The hash of slot key
+ length uint8 // The length of storage slot, up to 32 bytes defined in protocol
+ offset uint32 // The offset of item in storage slot data table
+}
+
+// encode packs slot index into byte stream.
+func (i *slotIndex) encode() []byte {
+ var buf [slotIndexSize]byte
+ copy(buf[:common.HashLength], i.hash.Bytes())
+ buf[common.HashLength] = i.length
+ binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
+ return buf[:]
+}
+
+// decode unpacks the slot index from the byte stream.
+func (i *slotIndex) decode(blob []byte) {
+ i.hash = common.BytesToHash(blob[:common.HashLength])
+ i.length = blob[common.HashLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
+}
+
+// meta describes the metadata of a state history object.
+type meta struct {
+ version uint8 // version tag of history object
+ parent common.Hash // prev-state root before the state transition
+ root common.Hash // post-state root after the state transition
+ block uint64 // associated block number
+ incomplete []common.Address // list of addresses whose storage sets are incomplete
+}
+
+// encode packs the meta object into byte stream.
+func (m *meta) encode() []byte {
+ buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength)
+ buf[0] = m.version
+ copy(buf[1:1+common.HashLength], m.parent.Bytes())
+ copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
+ binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
+ for i, h := range m.incomplete {
+ copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes())
+ }
+ return buf[:]
+}
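+
+// The encoded stream starts with a fixed 73-byte prefix:
+//
+//	[0]      version tag
+//	[1, 33)  parent state root
+//	[33, 65) post-state root
+//	[65, 73) block number
+//
+// followed by zero or more 20-byte addresses whose storage sets are incomplete.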
+
+// decode unpacks the meta object from byte stream.
+func (m *meta) decode(blob []byte) error {
+ if len(blob) < 1 {
+ return fmt.Errorf("no version tag")
+ }
+ switch blob[0] {
+ case stateHistoryVersion:
+ if len(blob) < historyMetaSize {
+ return fmt.Errorf("invalid state history meta, len: %d", len(blob))
+ }
+ if (len(blob)-historyMetaSize)%common.AddressLength != 0 {
+ return fmt.Errorf("corrupted state history meta, len: %d", len(blob))
+ }
+ m.version = blob[0]
+ m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
+ m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
+ m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
+ for pos := historyMetaSize; pos < len(blob); {
+ m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength]))
+ pos += common.AddressLength
+ }
+ return nil
+ default:
+ return fmt.Errorf("unknown version %d", blob[0])
+ }
+}
+
+// history represents a set of state changes belonging to a block along with
+// the metadata including the state roots involved in the state transition.
+// State history objects on disk are linked with each other by a unique id
+// (an 8-byte integer); the oldest state history object can be pruned on demand
+// in order to control the storage size.
+type history struct {
+ meta *meta // Meta data of history
+ accounts map[common.Address][]byte // Account data keyed by account address
+ accountList []common.Address // Sorted account address list
+ storages map[common.Address]map[common.Hash][]byte // Storage data keyed by account address and slot hash
+ storageList map[common.Address][]common.Hash // Sorted slot hash list
+}
+
+// newHistory constructs the state history object with provided state change set.
+func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history {
+ var (
+ accountList []common.Address
+ storageList = make(map[common.Address][]common.Hash)
+ incomplete []common.Address
+ )
+ for addr := range states.Accounts {
+ accountList = append(accountList, addr)
+ }
+ slices.SortFunc(accountList, common.Address.Cmp)
+
+ for addr, slots := range states.Storages {
+ slist := make([]common.Hash, 0, len(slots))
+ for slotHash := range slots {
+ slist = append(slist, slotHash)
+ }
+ slices.SortFunc(slist, common.Hash.Cmp)
+ storageList[addr] = slist
+ }
+ for addr := range states.Incomplete {
+ incomplete = append(incomplete, addr)
+ }
+ slices.SortFunc(incomplete, common.Address.Cmp)
+
+ return &history{
+ meta: &meta{
+ version: stateHistoryVersion,
+ parent: parent,
+ root: root,
+ block: block,
+ incomplete: incomplete,
+ },
+ accounts: states.Accounts,
+ accountList: accountList,
+ storages: states.Storages,
+ storageList: storageList,
+ }
+}
+
+// encode serializes the state history and returns four byte streams representing
+// the concatenated account data, storage data, account indexes and storage
+// indexes respectively.
+func (h *history) encode() ([]byte, []byte, []byte, []byte) {
+ var (
+ slotNumber uint32 // the number of processed slots
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account index
+ storageIndexes []byte // the buffer for concatenated storage index
+ )
+ for _, addr := range h.accountList {
+ accIndex := accountIndex{
+ address: addr,
+ length: uint8(len(h.accounts[addr])),
+ offset: uint32(len(accountData)),
+ }
+ slots, exist := h.storages[addr]
+ if exist {
+ // Encode storage slots in order
+ for _, slotHash := range h.storageList[addr] {
+ sIndex := slotIndex{
+ hash: slotHash,
+ length: uint8(len(slots[slotHash])),
+ offset: uint32(len(storageData)),
+ }
+ storageData = append(storageData, slots[slotHash]...)
+ storageIndexes = append(storageIndexes, sIndex.encode()...)
+ }
+ // Fill up the storage meta in account index
+ accIndex.storageOffset = slotNumber
+ accIndex.storageSlots = uint32(len(slots))
+ slotNumber += uint32(len(slots))
+ }
+ accountData = append(accountData, h.accounts[addr]...)
+ accountIndexes = append(accountIndexes, accIndex.encode()...)
+ }
+ return accountData, storageData, accountIndexes, storageIndexes
+}
+
+// decoder wraps the byte streams for decoding with extra meta fields.
+type decoder struct {
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account index
+ storageIndexes []byte // the buffer for concatenated storage index
+
+ lastAccount *common.Address // the address of last resolved account
+ lastAccountRead uint32 // the read-cursor position of account data
+ lastSlotIndexRead uint32 // the read-cursor position of storage slot index
+ lastSlotDataRead uint32 // the read-cursor position of storage slot data
+}
+
+// verify validates the provided byte streams for decoding state history. A few
+// checks will be performed to quickly detect data corruption. The byte stream
+// is regarded as corrupted if:
+//
+// - account indexes buffer is empty (an empty state set is invalid)
+// - account indexes/storage indexes buffer is not aligned
+//
+// note, these situations are allowed:
+//
+// - empty account data: the account previously did not exist
+// - empty storage set: no slots were modified
+func (r *decoder) verify() error {
+ if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 {
+ return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
+ }
+ if len(r.storageIndexes)%slotIndexSize != 0 {
+ return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
+ }
+ return nil
+}
+
+// readAccount parses the account from the byte stream at the specified position.
+func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
+ // Decode account index from the index byte stream.
+ var index accountIndex
+ if (pos+1)*accountIndexSize > len(r.accountIndexes) {
+ return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+ }
+ index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
+
+ // Perform validation before parsing account data, ensure
+ // - account is sorted in order in byte stream
+ // - account data is strictly encoded with no gap inside
+ // - account data is not out-of-slice
+ if r.lastAccount != nil { // zero address is possible
+ if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
+ return accountIndex{}, nil, errors.New("account is not in order")
+ }
+ }
+ if index.offset != r.lastAccountRead {
+ return accountIndex{}, nil, errors.New("account data buffer is gaped")
+ }
+ last := index.offset + uint32(index.length)
+ if uint32(len(r.accountData)) < last {
+ return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+ }
+ data := r.accountData[index.offset:last]
+
+ r.lastAccount = &index.address
+ r.lastAccountRead = last
+
+ return index, data, nil
+}
+
+// readStorage parses the storage slots from the byte stream for the specified account.
+func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
+ var (
+ last common.Hash
+ list []common.Hash
+ storage = make(map[common.Hash][]byte)
+ )
+ for j := 0; j < int(accIndex.storageSlots); j++ {
+ var (
+ index slotIndex
+ start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
+ end = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
+ )
+ // Perform validation before parsing storage slot data, ensure
+ // - slot index is not out-of-slice
+ // - slot data is not out-of-slice
+ // - slot is sorted in order in byte stream
+ // - slot indexes are strictly encoded with no gap inside
+ // - slot data is strictly encoded with no gap inside
+ if start != r.lastSlotIndexRead {
+ return nil, nil, errors.New("storage index buffer is gapped")
+ }
+ if uint32(len(r.storageIndexes)) < end {
+ return nil, nil, errors.New("storage index buffer is corrupted")
+ }
+ index.decode(r.storageIndexes[start:end])
+
+ if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
+ return nil, nil, errors.New("storage slot is not in order")
+ }
+ if index.offset != r.lastSlotDataRead {
+ return nil, nil, errors.New("storage data buffer is gapped")
+ }
+ sEnd := index.offset + uint32(index.length)
+ if uint32(len(r.storageData)) < sEnd {
+ return nil, nil, errors.New("storage data buffer is corrupted")
+ }
+ storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
+ list = append(list, index.hash)
+
+ last = index.hash
+ r.lastSlotIndexRead = end
+ r.lastSlotDataRead = sEnd
+ }
+ return list, storage, nil
+}
+
+// decode deserializes the account and storage data from the provided byte stream.
+func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
+ var (
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ accountList []common.Address
+ storageList = make(map[common.Address][]common.Hash)
+
+ r = &decoder{
+ accountData: accountData,
+ storageData: storageData,
+ accountIndexes: accountIndexes,
+ storageIndexes: storageIndexes,
+ }
+ )
+ if err := r.verify(); err != nil {
+ return err
+ }
+ for i := 0; i < len(accountIndexes)/accountIndexSize; i++ {
+ // Resolve account first
+ accIndex, accData, err := r.readAccount(i)
+ if err != nil {
+ return err
+ }
+ accounts[accIndex.address] = accData
+ accountList = append(accountList, accIndex.address)
+
+ // Resolve storage slots
+ slotList, slotData, err := r.readStorage(accIndex)
+ if err != nil {
+ return err
+ }
+ if len(slotList) > 0 {
+ storageList[accIndex.address] = slotList
+ storages[accIndex.address] = slotData
+ }
+ }
+ h.accounts = accounts
+ h.accountList = accountList
+ h.storages = storages
+ h.storageList = storageList
+ return nil
+}
diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go
new file mode 100644
index 0000000000..abf40c2838
--- /dev/null
+++ b/trie/triedb/pathdb/history_test.go
@@ -0,0 +1,171 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie/testutil"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// randomStateSet generates a random state change set.
+func randomStateSet(n int) *triestate.Set {
+ var (
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ )
+ for i := 0; i < n; i++ {
+ addr := testutil.RandomAddress()
+ storages[addr] = make(map[common.Hash][]byte)
+ for j := 0; j < 3; j++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ storages[addr][testutil.RandomHash()] = v
+ }
+ account := generateAccount(types.EmptyRootHash)
+ accounts[addr] = types.SlimAccountRLP(account)
+ }
+ return triestate.New(accounts, storages, nil)
+}
+
+func makeHistory() *history {
+ return newHistory(testutil.RandomHash(), types.EmptyRootHash, 0, randomStateSet(3))
+}
+
+// nolint: unused
+func makeHistories(n int) []*history {
+ var (
+ parent = types.EmptyRootHash
+ result []*history
+ )
+ for i := 0; i < n; i++ {
+ root := testutil.RandomHash()
+ h := newHistory(root, parent, uint64(i), randomStateSet(3))
+ parent = root
+ result = append(result, h)
+ }
+ return result
+}
+
+func TestEncodeDecodeHistory(t *testing.T) {
+ var (
+ m meta
+ dec history
+ obj = makeHistory()
+ )
+ // check if metadata can be correctly encoded/decoded
+ blob := obj.meta.encode()
+ if err := m.decode(blob); err != nil {
+ t.Fatalf("Failed to decode %v", err)
+ }
+ if !reflect.DeepEqual(&m, obj.meta) {
+ t.Fatal("meta is mismatched")
+ }
+
+ // check if account/storage data can be correctly encoded/decoded
+ accountData, storageData, accountIndexes, storageIndexes := obj.encode()
+ if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
+ t.Fatalf("Failed to decode, err: %v", err)
+ }
+ if !compareSet(dec.accounts, obj.accounts) {
+ t.Fatal("account data is mismatched")
+ }
+ if !compareStorages(dec.storages, obj.storages) {
+ t.Fatal("storage data is mismatched")
+ }
+ if !compareList(dec.accountList, obj.accountList) {
+ t.Fatal("account list is mismatched")
+ }
+ if !compareStorageList(dec.storageList, obj.storageList) {
+ t.Fatal("storage list is mismatched")
+ }
+}
+
+func compareSet[k comparable](a, b map[k][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for key, valA := range a {
+ valB, ok := b[key]
+ if !ok {
+ return false
+ }
+ if !bytes.Equal(valA, valB) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareList[k comparable](a, b []k) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for h, subA := range a {
+ subB, ok := b[h]
+ if !ok {
+ return false
+ }
+ if !compareSet(subA, subB) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareStorageList(a, b map[common.Address][]common.Hash) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for h, la := range a {
+ lb, ok := b[h]
+ if !ok {
+ return false
+ }
+ if !compareList(la, lb) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go
new file mode 100644
index 0000000000..d35f00bab6
--- /dev/null
+++ b/trie/triedb/pathdb/journal.go
@@ -0,0 +1,388 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+ errMissJournal = errors.New("journal not found")
+ errMissVersion = errors.New("version not found")
+ errUnexpectedVersion = errors.New("unexpected journal version")
+ errMissDiskRoot = errors.New("disk layer root not found")
+ errUnmatchedJournal = errors.New("unmatched journal")
+)
+
+const journalVersion uint64 = 0
+
+// journalNode represents a trie node persisted in the journal.
+type journalNode struct {
+ Path []byte // Path of the node in the trie
+ Blob []byte // RLP-encoded trie node blob, nil means the node is deleted
+}
+
+// journalNodes represents a list of trie nodes belonging to a single account
+// or to the main account trie.
+type journalNodes struct {
+ Owner common.Hash
+ Nodes []journalNode
+}
+
+// journalAccounts represents a list of accounts belonging to the layer.
+type journalAccounts struct {
+ Addresses []common.Address
+ Accounts [][]byte
+}
+
+// journalStorage represents a list of storage slots belonging to an account.
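+// The Incomplete flag indicates the storage set is incomplete due to a large
+// deletion (see triestate.Set).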
+type journalStorage struct {
+ Incomplete bool
+ Account common.Address
+ Hashes []common.Hash
+ Slots [][]byte
+}
+
+// loadJournal tries to parse the layer journal from the disk.
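+// The journal layout is: version, disk layer root, the disk layer itself
+// (root, state id, buffered nodes), followed by zero or more diff layers.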
+func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
+ journal := rawdb.ReadTrieJournal(db.diskdb)
+ if len(journal) == 0 {
+ return nil, errMissJournal
+ }
+ r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+ // Firstly, resolve the first element as the journal version
+ version, err := r.Uint64()
+ if err != nil {
+ return nil, errMissVersion
+ }
+ if version != journalVersion {
+ return nil, fmt.Errorf("%w want %d got %d", errUnexpectedVersion, journalVersion, version)
+ }
+ // Secondly, resolve the disk layer root, ensuring it's continuous with the
+ // disk layer. At this point the journal version is known to be correct, so
+ // everything else is expected to resolve properly.
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ return nil, errMissDiskRoot
+ }
+ // The journal does not match the persistent state, discard it. This can
+ // happen when the node crashes without persisting the journal.
+ if !bytes.Equal(root.Bytes(), diskRoot.Bytes()) {
+ return nil, fmt.Errorf("%w want %x got %x", errUnmatchedJournal, root, diskRoot)
+ }
+ // Load the disk layer from the journal
+ base, err := db.loadDiskLayer(r)
+ if err != nil {
+ return nil, err
+ }
+ // Load all the diff layers from the journal
+ head, err := db.loadDiffLayer(base, r)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("Loaded layer journal", "diskroot", diskRoot, "diffhead", head.rootHash())
+ return head, nil
+}
+
+// loadLayers loads a pre-existing state layer backed by a key-value store.
+func (db *Database) loadLayers() layer {
+ // Retrieve the root node of persistent state.
+ _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ root = types.TrieRootHash(root)
+
+ // Load the layers by resolving the journal
+ head, err := db.loadJournal(root)
+ if err == nil {
+ return head
+ }
+ // The journal is missing or does not match the persistent state, discard
+ // it. Log the discarded journal, but avoid showing useless information
+ // when the db is created from scratch.
+ if !(root == types.EmptyRootHash && errors.Is(err, errMissJournal)) {
+ log.Info("Failed to load journal, discard it", "err", err)
+ }
+ // Return single layer with persistent state.
+ return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+}
+
+// loadDiskLayer reads the binary blob from the layer journal, reconstructing
+// a new disk layer on it.
+func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
+ // Resolve disk layer root
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ return nil, fmt.Errorf("load disk root: %v", err)
+ }
+ // Resolve the state id of the disk layer; it can differ from the persistent
+ // id tracked on disk, and the distance between them is the number of
+ // transitions aggregated in the disk layer.
+ var id uint64
+ if err := r.Decode(&id); err != nil {
+ return nil, fmt.Errorf("load state id: %v", err)
+ }
+ stored := rawdb.ReadPersistentStateID(db.diskdb)
+ if stored > id {
+ return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id)
+ }
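+ // The resolved id may run ahead of the persisted one; the gap is the number
+ // of transitions still aggregated in the node buffer.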
+ // Resolve nodes cached in node buffer
+ var encoded []journalNodes
+ if err := r.Decode(&encoded); err != nil {
+ return nil, fmt.Errorf("load disk nodes: %v", err)
+ }
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ for _, entry := range encoded {
+ subset := make(map[string]*trienode.Node)
+ for _, n := range entry.Nodes {
+ if len(n.Blob) > 0 {
+ subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+ } else {
+ subset[string(n.Path)] = trienode.NewDeleted()
+ }
+ }
+ nodes[entry.Owner] = subset
+ }
+ // Calculate the internal state transitions by id difference.
+ base := newDiskLayer(root, id, db, nil, newNodeBuffer(db.bufferSize, nodes, id-stored))
+ return base, nil
+}
+
+// loadDiffLayer reads the next sections of a layer journal, reconstructing a new
+// diff and verifying that it can be linked to the requested parent.
+func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
+ // Read the next diff journal entry
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ // The first read may fail with EOF, marking the end of the journal
+ if err == io.EOF {
+ return parent, nil
+ }
+ return nil, fmt.Errorf("load diff root: %v", err)
+ }
+ var block uint64
+ if err := r.Decode(&block); err != nil {
+ return nil, fmt.Errorf("load block number: %v", err)
+ }
+ // Read in-memory trie nodes from journal
+ var encoded []journalNodes
+ if err := r.Decode(&encoded); err != nil {
+ return nil, fmt.Errorf("load diff nodes: %v", err)
+ }
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ for _, entry := range encoded {
+ subset := make(map[string]*trienode.Node)
+ for _, n := range entry.Nodes {
+ if len(n.Blob) > 0 {
+ subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+ } else {
+ subset[string(n.Path)] = trienode.NewDeleted()
+ }
+ }
+ nodes[entry.Owner] = subset
+ }
+ // Read state changes from journal
+ var (
+ jaccounts journalAccounts
+ jstorages []journalStorage
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ incomplete = make(map[common.Address]struct{})
+ )
+ if err := r.Decode(&jaccounts); err != nil {
+ return nil, fmt.Errorf("load diff accounts: %v", err)
+ }
+ for i, addr := range jaccounts.Addresses {
+ accounts[addr] = jaccounts.Accounts[i]
+ }
+ if err := r.Decode(&jstorages); err != nil {
+ return nil, fmt.Errorf("load diff storages: %v", err)
+ }
+ for _, entry := range jstorages {
+ set := make(map[common.Hash][]byte)
+ for i, h := range entry.Hashes {
+ if len(entry.Slots[i]) > 0 {
+ set[h] = entry.Slots[i]
+ } else {
+ set[h] = nil
+ }
+ }
+ if entry.Incomplete {
+ incomplete[entry.Account] = struct{}{}
+ }
+ storages[entry.Account] = set
+ }
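+ // Recurse with the freshly built diff layer as the parent until EOF is hit.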
+ return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r)
+}
+
+// journal implements the layer interface, marshaling the unflushed trie nodes
+// along with the layer metadata into the provided byte buffer.
+func (dl *diskLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // Ensure the layer didn't get stale
+ if dl.stale {
+ return errSnapshotStale
+ }
+ // Step one, write the disk root into the journal.
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ // Step two, write the corresponding state id into the journal
+ if err := rlp.Encode(w, dl.id); err != nil {
+ return err
+ }
+ // Step three, write all unwritten nodes into the journal
+ nodes := make([]journalNodes, 0, len(dl.buffer.nodes))
+ for owner, subset := range dl.buffer.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(dl.buffer.nodes))
+ return nil
+}
+
+// journal implements the layer interface, writing the memory layer contents
+// into a buffer to be stored in the database as the layer journal.
+func (dl *diffLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // journal the parent first
+ if err := dl.parent.journal(w); err != nil {
+ return err
+ }
+ // Everything below was journaled, persist this layer too
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ if err := rlp.Encode(w, dl.block); err != nil {
+ return err
+ }
+ // Write the accumulated trie nodes into buffer
+ nodes := make([]journalNodes, 0, len(dl.nodes))
+ for owner, subset := range dl.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ // Write the accumulated state changes into buffer
+ var jacct journalAccounts
+ for addr, account := range dl.states.Accounts {
+ jacct.Addresses = append(jacct.Addresses, addr)
+ jacct.Accounts = append(jacct.Accounts, account)
+ }
+ if err := rlp.Encode(w, jacct); err != nil {
+ return err
+ }
+ storage := make([]journalStorage, 0, len(dl.states.Storages))
+ for addr, slots := range dl.states.Storages {
+ entry := journalStorage{Account: addr}
+ if _, ok := dl.states.Incomplete[addr]; ok {
+ entry.Incomplete = true
+ }
+ for slotHash, slot := range slots {
+ entry.Hashes = append(entry.Hashes, slotHash)
+ entry.Slots = append(entry.Slots, slot)
+ }
+ storage = append(storage, entry)
+ }
+ if err := rlp.Encode(w, storage); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block, "nodes", len(dl.nodes))
+ return nil
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the layers without
+// flattening everything down (bad for reorgs). It also marks the database as
+// read-only to prevent any following mutation to disk.
+func (db *Database) Journal(root common.Hash) error {
+ // Retrieve the head layer to journal from.
+ l := db.tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ // Run the journaling
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ // Firstly, write out the journal metadata
+ journal := new(bytes.Buffer)
+ if err := rlp.Encode(journal, journalVersion); err != nil {
+ return err
+ }
+ // The state stored on disk might be empty, convert the
+ // root to the empty root in this case.
+ _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ diskroot = types.TrieRootHash(diskroot)
+
+ // Secondly, write out the on-disk state root, ensuring all layers
+ // on top are continuous with it.
+ if err := rlp.Encode(journal, diskroot); err != nil {
+ return err
+ }
+ // Finally write out the journal of each layer in reverse order.
+ if err := l.journal(journal); err != nil {
+ return err
+ }
+ // Store the journal into the database and return
+ rawdb.WriteTrieJournal(db.diskdb, journal.Bytes())
+
+ // Set the db in read only mode to reject all following mutations
+ db.readOnly = true
+ log.Info("Stored journal in triedb", "disk", diskroot, "size", common.StorageSize(journal.Len()))
+ return nil
+}
diff --git a/trie/triedb/pathdb/layertree.go b/trie/triedb/pathdb/layertree.go
new file mode 100644
index 0000000000..58b112c6bb
--- /dev/null
+++ b/trie/triedb/pathdb/layertree.go
@@ -0,0 +1,224 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// layerTree is a group of state layers identified by the state root.
+// This structure defines a few basic operations for manipulating
+// state layers linked with each other in a tree structure. It's
+// thread-safe to use. However, callers need to ensure the thread-safety
+// of the referenced layer by themselves.
+type layerTree struct {
+ lock sync.RWMutex
+ layers map[common.Hash]layer
+}
+
+// newLayerTree constructs the layerTree with the given head layer.
+func newLayerTree(head layer) *layerTree {
+ tree := new(layerTree)
+ tree.reset(head)
+ return tree
+}
+
+// reset initializes the layerTree by the given head layer.
+// All the ancestors will be iterated out and linked in the tree.
+func (tree *layerTree) reset(head layer) {
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ var layers = make(map[common.Hash]layer)
+ for head != nil {
+ layers[head.rootHash()] = head
+ head = head.parentLayer()
+ }
+ tree.layers = layers
+}
+
+// get retrieves a layer belonging to the given state root.
+func (tree *layerTree) get(root common.Hash) layer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return tree.layers[types.TrieRootHash(root)]
+}
+
+// forEach iterates the stored layers inside and applies the
+// given callback on them.
+func (tree *layerTree) forEach(onLayer func(layer)) {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ for _, layer := range tree.layers {
+ onLayer(layer)
+ }
+}
+
+// len returns the number of layers cached.
+func (tree *layerTree) len() int {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return len(tree.layers)
+}
+
+// add inserts a new layer into the tree if it can be linked to an existing old parent.
+func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ // Reject noop updates to avoid self-loops. This is a special case that can
+ // happen for clique networks and proof-of-stake networks where empty blocks
+ // don't modify the state (0 block subsidy).
+ //
+ // Although we could silently ignore this internally, it should be the caller's
+ // responsibility to avoid even attempting to insert such a layer.
+ root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot)
+ if root == parentRoot {
+ return errors.New("layer cycle")
+ }
+ parent := tree.get(parentRoot)
+ if parent == nil {
+ return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot)
+ }
+ l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states)
+
+ tree.lock.Lock()
+ tree.layers[l.rootHash()] = l
+ tree.lock.Unlock()
+ return nil
+}
+
+// cap traverses downwards the diff tree until the number of allowed diff layers
+// is crossed. All diffs beyond the permitted number are flattened downwards.
+func (tree *layerTree) cap(root common.Hash, layers int) error {
+ // Retrieve the head layer to cap from
+ root = types.TrieRootHash(root)
+ l := tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ diff, ok := l.(*diffLayer)
+ if !ok {
+ return fmt.Errorf("triedb layer [%#x] is disk layer", root)
+ }
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ // If full commit was requested, flatten the diffs and merge onto disk
+ if layers == 0 {
+ base, err := diff.persist(true)
+ if err != nil {
+ return err
+ }
+ // Replace the entire layer tree with the flat base
+ tree.layers = map[common.Hash]layer{base.rootHash(): base}
+ return nil
+ }
+ // Dive until we run out of layers or reach the persistent database
+ for i := 0; i < layers-1; i++ {
+ // If we still have diff layers below, continue down
+ if parent, ok := diff.parentLayer().(*diffLayer); ok {
+ diff = parent
+ } else {
+ // Diff stack too shallow, return without modifications
+ return nil
+ }
+ }
+ // We're out of layers, flatten anything below, stopping if it's the disk or if
+ // the memory limit is not yet exceeded.
+ switch parent := diff.parentLayer().(type) {
+ case *diskLayer:
+ return nil
+
+ case *diffLayer:
+ // Hold the lock to prevent any read operations until the new
+ // parent is linked correctly.
+ diff.lock.Lock()
+
+ base, err := parent.persist(false)
+ if err != nil {
+ diff.lock.Unlock()
+ return err
+ }
+ tree.layers[base.rootHash()] = base
+ diff.parent = base
+
+ diff.lock.Unlock()
+
+ default:
+ panic(fmt.Sprintf("unknown data layer in triedb: %T", parent))
+ }
+ // Remove any layer that is stale or links into a stale layer
+ children := make(map[common.Hash][]common.Hash)
+ for root, layer := range tree.layers {
+ if dl, ok := layer.(*diffLayer); ok {
+ parent := dl.parentLayer().rootHash()
+ children[parent] = append(children[parent], root)
+ }
+ }
+ var remove func(root common.Hash)
+ remove = func(root common.Hash) {
+ delete(tree.layers, root)
+ for _, child := range children[root] {
+ remove(child)
+ }
+ delete(children, root)
+ }
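+ // Only branches that were not re-parented onto the new base still link into
+ // the stale disk layer; drop them together with it.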
+ for root, layer := range tree.layers {
+ if dl, ok := layer.(*diskLayer); ok && dl.isStale() {
+ remove(root)
+ }
+ }
+ return nil
+}
+
+// bottom returns the bottom-most disk layer in this tree.
+func (tree *layerTree) bottom() *diskLayer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ if len(tree.layers) == 0 {
+ return nil // Shouldn't happen, empty tree
+ }
+ // pick a random one as the entry point
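+ // Every layer chains down to the single disk layer at the bottom, so any
+ // entry point leads to the same result.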
+ var current layer
+ for _, layer := range tree.layers {
+ current = layer
+ break
+ }
+ for current.parentLayer() != nil {
+ current = current.parentLayer()
+ }
+ return current.(*diskLayer)
+}
diff --git a/trie/triedb/pathdb/metrics.go b/trie/triedb/pathdb/metrics.go
new file mode 100644
index 0000000000..27dfe7fede
--- /dev/null
+++ b/trie/triedb/pathdb/metrics.go
@@ -0,0 +1,61 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import "github.com/ava-labs/subnet-evm/metrics"
+
+// nolint: unused
+var (
+ cleanHitMeter = metrics.NewRegisteredMeter("pathdb/clean/hit", nil)
+ cleanMissMeter = metrics.NewRegisteredMeter("pathdb/clean/miss", nil)
+ cleanReadMeter = metrics.NewRegisteredMeter("pathdb/clean/read", nil)
+ cleanWriteMeter = metrics.NewRegisteredMeter("pathdb/clean/write", nil)
+
+ dirtyHitMeter = metrics.NewRegisteredMeter("pathdb/dirty/hit", nil)
+ dirtyMissMeter = metrics.NewRegisteredMeter("pathdb/dirty/miss", nil)
+ dirtyReadMeter = metrics.NewRegisteredMeter("pathdb/dirty/read", nil)
+ dirtyWriteMeter = metrics.NewRegisteredMeter("pathdb/dirty/write", nil)
+ dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
+ dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
+ diskFalseMeter = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
+
+ commitTimeTimer = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
+ commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
+ commitBytesMeter = metrics.NewRegisteredMeter("pathdb/commit/bytes", nil)
+
+ gcNodesMeter = metrics.NewRegisteredMeter("pathdb/gc/nodes", nil)
+ gcBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/bytes", nil)
+
+ diffLayerBytesMeter = metrics.NewRegisteredMeter("pathdb/diff/bytes", nil)
+ diffLayerNodesMeter = metrics.NewRegisteredMeter("pathdb/diff/nodes", nil)
+
+ historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
+ historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
+ historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil)
+)
diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go
new file mode 100644
index 0000000000..9a0ce7fb02
--- /dev/null
+++ b/trie/triedb/pathdb/nodebuffer.go
@@ -0,0 +1,287 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ava-labs/subnet-evm/core/rawdb"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// nodebuffer is a collection of modified trie nodes used to aggregate disk
+// writes. The content of the nodebuffer must be checked before falling back
+// to disk (since it is essentially not-yet-written data).
+type nodebuffer struct {
+ layers uint64 // The number of diff layers aggregated inside
+ size uint64 // The size of aggregated writes
+ limit uint64 // The maximum memory allowance in bytes
+ nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path
+}
+
+// newNodeBuffer initializes the node buffer with the provided nodes.
+func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer {
+ if nodes == nil {
+ nodes = make(map[common.Hash]map[string]*trienode.Node)
+ }
+ var size uint64
+ for _, subset := range nodes {
+ for path, n := range subset {
+ size += uint64(len(n.Blob) + len(path))
+ }
+ }
+ return &nodebuffer{
+ layers: layers,
+ nodes: nodes,
+ size: size,
+ limit: uint64(limit),
+ }
+}
+
+// node retrieves the trie node with given node info.
+func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) {
+ subset, ok := b.nodes[owner]
+ if !ok {
+ return nil, nil
+ }
+ n, ok := subset[string(path)]
+ if !ok {
+ return nil, nil
+ }
+ if n.Hash != hash {
+ dirtyFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+ return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path)
+ }
+ return n, nil
+}
+
+// commit merges the dirty nodes into the nodebuffer. This operation won't take
+// the ownership of the nodes map which belongs to the bottom-most diff layer.
+// It will just hold the node references from the given map which are safe to
+// copy.
+func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer {
+ var (
+ delta int64
+ overwrite int64
+ overwriteSize int64
+ )
+ for owner, subset := range nodes {
+ current, exist := b.nodes[owner]
+ if !exist {
+ // Allocate a new map for the subset instead of claiming it directly
+ // from the passed map to avoid potential concurrent map read/write.
+ // The nodes belonging to the original diff layer are still accessible
+ // even after merging, thus the ownership of the nodes map should remain
+ // with the original layer and any mutation of it must be prevented.
+ current = make(map[string]*trienode.Node)
+ for path, n := range subset {
+ current[path] = n
+ delta += int64(len(n.Blob) + len(path))
+ }
+ b.nodes[owner] = current
+ continue
+ }
+ for path, n := range subset {
+ if orig, exist := current[path]; !exist {
+ delta += int64(len(n.Blob) + len(path))
+ } else {
+ delta += int64(len(n.Blob) - len(orig.Blob))
+ overwrite++
+ overwriteSize += int64(len(orig.Blob) + len(path))
+ }
+ current[path] = n
+ }
+ b.nodes[owner] = current
+ }
+ b.updateSize(delta)
+ b.layers++
+ gcNodesMeter.Mark(overwrite)
+ gcBytesMeter.Mark(overwriteSize)
+ return b
+}
+
+// nolint: unused
+// revert is the reverse operation of commit. It also merges the provided nodes
+// into the nodebuffer; the difference is that the provided node set should
+// revert the changes made by the last state transition.
+func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
+ // Short circuit if no embedded state transition to revert.
+ if b.layers == 0 {
+ return errStateUnrecoverable
+ }
+ b.layers--
+
+ // Reset the entire buffer if only a single transition left.
+ if b.layers == 0 {
+ b.reset()
+ return nil
+ }
+ var delta int64
+ for owner, subset := range nodes {
+ current, ok := b.nodes[owner]
+ if !ok {
+ panic(fmt.Sprintf("non-existent subset (%x)", owner))
+ }
+ for path, n := range subset {
+ orig, ok := current[path]
+ if !ok {
+ // There is a special case in MPT that one child is removed from
+ // a fullNode which only has two children, and then a new child
+ // with different position is immediately inserted into the fullNode.
+ // In this case, the clean child of the fullNode will also be
+ // marked as dirty because of node collapse and expansion.
+ //
+ // In case of database rollback, don't panic if this "clean"
+ // node occurs which is not present in buffer.
+ var nhash common.Hash
+ if owner == (common.Hash{}) {
+ _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path))
+ } else {
+ _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
+ }
+ // Ignore the clean node in the case described above.
+ if nhash == n.Hash {
+ continue
+ }
+ panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
+ }
+ current[path] = n
+ delta += int64(len(n.Blob)) - int64(len(orig.Blob))
+ }
+ }
+ b.updateSize(delta)
+ return nil
+}
+
+// updateSize updates the total cache size by the given delta.
+func (b *nodebuffer) updateSize(delta int64) {
+ size := int64(b.size) + delta
+ if size >= 0 {
+ b.size = uint64(size)
+ return
+ }
+ s := b.size
+ b.size = 0
+ log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta))
+}
+
+// reset cleans up the node buffer.
+func (b *nodebuffer) reset() {
+ b.layers = 0
+ b.size = 0
+ b.nodes = make(map[common.Hash]map[string]*trienode.Node)
+}
+
+// nolint: unused
+// empty returns an indicator whether the nodebuffer contains any state transition.
+func (b *nodebuffer) empty() bool {
+ return b.layers == 0
+}
+
+// setSize sets the buffer size to the provided number, and invokes a flush
+// operation if the current memory usage exceeds the new limit.
+func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error {
+ b.limit = uint64(size)
+ return b.flush(db, clean, id, false)
+}
+
+// flush persists the in-memory dirty trie nodes to disk if the configured
+// memory threshold is reached. Note, all data must be written atomically.
+func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error {
+ if b.size <= b.limit && !force {
+ return nil
+ }
+ // Ensure the target state id is aligned with the internal counter.
+ head := rawdb.ReadPersistentStateID(db)
+ if head+b.layers != id {
+ return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
+ }
+ var (
+ start = time.Now()
+ batch = db.NewBatchWithSize(int(b.size))
+ )
+ nodes := writeNodes(batch, b.nodes, clean)
+ rawdb.WritePersistentStateID(batch, id)
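+ // The nodes and the new persistent state id go into the same batch, so they
+ // are committed to disk atomically below.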
+
+ // Flush all mutations in a single batch
+ size := batch.ValueSize()
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ commitBytesMeter.Mark(int64(size))
+ commitNodesMeter.Mark(int64(nodes))
+ commitTimeTimer.UpdateSince(start)
+ log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
+ b.reset()
+ return nil
+}
+
+// writeNodes writes the trie nodes into the provided database batch.
+// Note this function will also inject all the newly written nodes
+// into the clean cache.
+func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) {
+ for owner, subset := range nodes {
+ for path, n := range subset {
+ if n.IsDeleted() {
+ if owner == (common.Hash{}) {
+ rawdb.DeleteAccountTrieNode(batch, []byte(path))
+ } else {
+ rawdb.DeleteStorageTrieNode(batch, owner, []byte(path))
+ }
+ if clean != nil {
+ clean.Del(cacheKey(owner, []byte(path)))
+ }
+ } else {
+ if owner == (common.Hash{}) {
+ rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob)
+ } else {
+ rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob)
+ }
+ if clean != nil {
+ clean.Set(cacheKey(owner, []byte(path)), n.Blob)
+ }
+ }
+ }
+ total += len(subset)
+ }
+ return total
+}
+
+// cacheKey constructs the unique key of the clean cache.
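+// Account trie nodes are keyed by path alone, while storage trie nodes are
+// prefixed with the owner hash to separate them from account trie nodes.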
+func cacheKey(owner common.Hash, path []byte) []byte {
+ if owner == (common.Hash{}) {
+ return path
+ }
+ return append(owner.Bytes(), path...)
+}
diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go
new file mode 100644
index 0000000000..71b845d2ad
--- /dev/null
+++ b/trie/triedb/pathdb/testutils.go
@@ -0,0 +1,166 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ava-labs/subnet-evm/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "golang.org/x/exp/slices"
+)
+
+// testHasher is a test utility for computing the root hash of a batch of state
+// elements. The hash algorithm sorts all the elements in lexicographical
+// order, concatenates each key and value in turn, and hashes the concatenated
+// bytes. Besides the root hash, a nodeset containing all the changes made to
+// the hasher is returned once Commit is called.
+type testHasher struct {
+ owner common.Hash // owner identifier
+ root common.Hash // original root
+ dirties map[common.Hash][]byte // dirty states
+ cleans map[common.Hash][]byte // clean states
+}
+
+// newTestHasher constructs a hasher object with provided states.
+func newTestHasher(owner common.Hash, root common.Hash, cleans map[common.Hash][]byte) (*testHasher, error) {
+ if cleans == nil {
+ cleans = make(map[common.Hash][]byte)
+ }
+ if got, _ := hash(cleans); got != root {
+ return nil, fmt.Errorf("state root mismatched, want: %x, got: %x", root, got)
+ }
+ return &testHasher{
+ owner: owner,
+ root: root,
+ dirties: make(map[common.Hash][]byte),
+ cleans: cleans,
+ }, nil
+}
+
+// Get returns the value for key stored in the trie.
+func (h *testHasher) Get(key []byte) ([]byte, error) {
+ hash := common.BytesToHash(key)
+ val, ok := h.dirties[hash]
+ if ok {
+ return val, nil
+ }
+ return h.cleans[hash], nil
+}
+
+// Update associates key with value in the trie.
+func (h *testHasher) Update(key, value []byte) error {
+ h.dirties[common.BytesToHash(key)] = common.CopyBytes(value)
+ return nil
+}
+
+// Delete removes any existing value for key from the trie.
+func (h *testHasher) Delete(key []byte) error {
+ h.dirties[common.BytesToHash(key)] = nil
+ return nil
+}
+
+// Commit computes the new hash of the states and returns the set with all
+// state changes.
+func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+ var (
+ nodes = make(map[common.Hash][]byte)
+ set = trienode.NewNodeSet(h.owner)
+ )
+ for hash, val := range h.cleans {
+ nodes[hash] = val
+ }
+ for hash, val := range h.dirties {
+ nodes[hash] = val
+ if bytes.Equal(val, h.cleans[hash]) {
+ continue
+ }
+ if len(val) == 0 {
+ set.AddNode(hash.Bytes(), trienode.NewDeleted())
+ } else {
+ set.AddNode(hash.Bytes(), trienode.New(crypto.Keccak256Hash(val), val))
+ }
+ }
+ root, blob := hash(nodes)
+
+ // Include the dirty root node as well.
+ if root != types.EmptyRootHash && root != h.root {
+ set.AddNode(nil, trienode.New(root, blob))
+ }
+ if root == types.EmptyRootHash && h.root != types.EmptyRootHash {
+ set.AddNode(nil, trienode.NewDeleted())
+ }
+ return root, set
+}
+
+// hash performs the hash computation upon the provided states.
+func hash(states map[common.Hash][]byte) (common.Hash, []byte) {
+ var hs []common.Hash
+ for hash := range states {
+ hs = append(hs, hash)
+ }
+ slices.SortFunc(hs, common.Hash.Cmp)
+
+ var input []byte
+ for _, hash := range hs {
+ if len(states[hash]) == 0 {
+ continue
+ }
+ input = append(input, hash.Bytes()...)
+ input = append(input, states[hash]...)
+ }
+ if len(input) == 0 {
+ return types.EmptyRootHash, nil
+ }
+ return crypto.Keccak256Hash(input), input
+}
+
+type hashLoader struct {
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+}
+
+func newHashLoader(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *hashLoader {
+ return &hashLoader{
+ accounts: accounts,
+ storages: storages,
+ }
+}
+
+// OpenTrie opens the main account trie.
+func (l *hashLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
+ return newTestHasher(common.Hash{}, root, l.accounts)
+}
+
+// OpenStorageTrie opens the storage trie of an account.
+func (l *hashLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
+ return newTestHasher(addrHash, root, l.storages[addrHash])
+}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index 8152eab6c0..98d5588b6d 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -25,8 +25,8 @@ import (
)
// Node is a wrapper which contains the encoded blob of the trie node and its
-// unique hash identifier. It is general enough that can be used to represent
-// trie nodes corresponding to different trie implementations.
+// node hash. It is general enough that it can be used to represent trie nodes
+// corresponding to different trie implementations.
type Node struct {
Hash common.Hash // Node hash, empty for deleted node
Blob []byte // Encoded node blob, nil for the deleted node
@@ -42,35 +42,13 @@ func (n *Node) IsDeleted() bool {
return n.Hash == (common.Hash{})
}
-// WithPrev wraps the Node with the previous node value attached.
-type WithPrev struct {
- *Node
- Prev []byte // Encoded original value, nil means it's non-existent
-}
-
-// Unwrap returns the internal Node object.
-func (n *WithPrev) Unwrap() *Node {
- return n.Node
-}
-
-// Size returns the total memory size used by this node. It overloads
-// the function in Node by counting the size of previous value as well.
-func (n *WithPrev) Size() int {
- return n.Node.Size() + len(n.Prev)
-}
-
// New constructs a node with provided node information.
func New(hash common.Hash, blob []byte) *Node {
return &Node{Hash: hash, Blob: blob}
}
-// NewWithPrev constructs a node with provided node information.
-func NewWithPrev(hash common.Hash, blob []byte, prev []byte) *WithPrev {
- return &WithPrev{
- Node: New(hash, blob),
- Prev: prev,
- }
-}
+// NewDeleted constructs a node which is deleted.
+func NewDeleted() *Node { return New(common.Hash{}, nil) }
// leaf represents a trie leaf node
type leaf struct {
@@ -83,7 +61,7 @@ type leaf struct {
type NodeSet struct {
Owner common.Hash
Leaves []*leaf
- Nodes map[string]*WithPrev
+ Nodes map[string]*Node
updates int // the count of updated and inserted nodes
deletes int // the count of deleted nodes
}
@@ -93,26 +71,26 @@ type NodeSet struct {
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
Owner: owner,
- Nodes: make(map[string]*WithPrev),
+ Nodes: make(map[string]*Node),
}
}
// ForEachWithOrder iterates the nodes with the order from bottom to top,
// right to left, nodes with the longest path will be iterated first.
func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
- var paths sort.StringSlice
+ var paths []string
for path := range set.Nodes {
paths = append(paths, path)
}
- // Bottom-up, longest path first
- sort.Sort(sort.Reverse(paths))
+ // Bottom-up, the longest path first
+ sort.Sort(sort.Reverse(sort.StringSlice(paths)))
for _, path := range paths {
- callback(path, set.Nodes[path].Unwrap())
+ callback(path, set.Nodes[path])
}
}
// AddNode adds the provided node into set.
-func (set *NodeSet) AddNode(path []byte, n *WithPrev) {
+func (set *NodeSet) AddNode(path []byte, n *Node) {
if n.IsDeleted() {
set.deletes += 1
} else {
@@ -121,6 +99,26 @@ func (set *NodeSet) AddNode(path []byte, n *WithPrev) {
set.Nodes[string(path)] = n
}
+// Merge adds a set of nodes into the set.
+func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
+ if set.Owner != owner {
+ return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
+ }
+ for path, node := range nodes {
+ prev, ok := set.Nodes[path]
+ if ok {
+ // overwrite happens, revoke the counter
+ if prev.IsDeleted() {
+ set.deletes -= 1
+ } else {
+ set.updates -= 1
+ }
+ }
+ set.AddNode([]byte(path), node)
+ }
+ return nil
+}
+
// AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can
// we get rid of it?
func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
@@ -150,16 +148,11 @@ func (set *NodeSet) Summary() string {
for path, n := range set.Nodes {
// Deletion
if n.IsDeleted() {
- fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev)
+ fmt.Fprintf(out, " [-]: %x\n", path)
continue
}
- // Insertion
- if len(n.Prev) == 0 {
- fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash)
- continue
- }
- // Update
- fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev)
+ // Insertion or update
+ fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
}
}
for _, n := range set.Leaves {
@@ -188,10 +181,19 @@ func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
// Merge merges the provided dirty nodes of a trie into the set. The assumption
// is held that no duplicated set belonging to the same trie will be merged twice.
func (set *MergedNodeSet) Merge(other *NodeSet) error {
- _, present := set.Sets[other.Owner]
+ subset, present := set.Sets[other.Owner]
if present {
- return fmt.Errorf("duplicate trie for owner %#x", other.Owner)
+ return subset.Merge(other.Owner, other.Nodes)
}
set.Sets[other.Owner] = other
return nil
}
+
+// Flatten returns a two-dimensional map for internal nodes.
+func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node {
+ nodes := make(map[common.Hash]map[string]*Node)
+ for owner, set := range set.Sets {
+ nodes[owner] = set.Nodes
+ }
+ return nodes
+}
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
new file mode 100644
index 0000000000..6504ac6518
--- /dev/null
+++ b/trie/triestate/state.go
@@ -0,0 +1,277 @@
+// (c) 2024, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package triestate
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ava-labs/subnet-evm/core/types"
+ "github.com/ava-labs/subnet-evm/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+// Trie is an Ethereum state trie, which can be implemented by an Ethereum
+// Merkle Patricia tree or a Verkle tree.
+type Trie interface {
+ // Get returns the value for key stored in the trie.
+ Get(key []byte) ([]byte, error)
+
+ // Update associates key with value in the trie.
+ Update(key, value []byte) error
+
+ // Delete removes any existing value for key from the trie.
+ Delete(key []byte) error
+
+ // Commit the trie and returns a set of dirty nodes generated along with
+ // the new root hash.
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
+}
+
+// TrieLoader wraps functions to load tries.
+type TrieLoader interface {
+ // OpenTrie opens the main account trie.
+ OpenTrie(root common.Hash) (Trie, error)
+
+ // OpenStorageTrie opens the storage trie of an account.
+ OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
+}
+
+// Set represents a collection of mutated states during a state transition.
+// The value refers to the original content of state before the transition
+// is made. Nil means that the state was not present previously.
+type Set struct {
+ Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
+ Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
+ Incomplete map[common.Address]struct{} // Indicator whether the storage is incomplete due to large deletion
+ size common.StorageSize // Approximate size of set
+}
+
+// New constructs the state set with provided data.
+func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
+ return &Set{
+ Accounts: accounts,
+ Storages: storages,
+ Incomplete: incomplete,
+ }
+}
+
+// Size returns the approximate memory size occupied by the set.
+func (s *Set) Size() common.StorageSize {
+ if s.size != 0 {
+ return s.size
+ }
+ for _, account := range s.Accounts {
+ s.size += common.StorageSize(common.AddressLength + len(account))
+ }
+ for _, slots := range s.Storages {
+ for _, val := range slots {
+ s.size += common.StorageSize(common.HashLength + len(val))
+ }
+ s.size += common.StorageSize(common.AddressLength)
+ }
+ s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
+ return s.size
+}
+
+// context wraps all fields for executing state diffs.
+type context struct {
+ prevRoot common.Hash
+ postRoot common.Hash
+ accounts map[common.Address][]byte
+ storages map[common.Address]map[common.Hash][]byte
+ accountTrie Trie
+ nodes *trienode.MergedNodeSet
+}
+
+// Apply traverses the provided state diffs, applies them to the associated
+// post-state and returns the generated dirty trie nodes. The state can be
+// loaded via the provided trie loader.
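+// Note the diffs are applied in reverse: the root produced by re-committing
+// the tries must equal the prev-state root, effectively rolling the
+// transition back.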
+func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
+ tr, err := loader.OpenTrie(postRoot)
+ if err != nil {
+ return nil, err
+ }
+ ctx := &context{
+ prevRoot: prevRoot,
+ postRoot: postRoot,
+ accounts: accounts,
+ storages: storages,
+ accountTrie: tr,
+ nodes: trienode.NewMergedNodeSet(),
+ }
+ for addr, account := range accounts {
+ var err error
+ if len(account) == 0 {
+ err = deleteAccount(ctx, loader, addr)
+ } else {
+ err = updateAccount(ctx, loader, addr)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to revert state, err: %w", err)
+ }
+ }
+ root, result := tr.Commit(false)
+ if root != prevRoot {
+ return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
+ }
+ if err := ctx.nodes.Merge(result); err != nil {
+ return nil, err
+ }
+ return ctx.nodes.Flatten(), nil
+}
+
+// updateAccount handles an account that was present in the prev-state and may
+// or may not exist in the post-state. It applies the reverse diff and verifies
+// that the storage root matches the one in the prev-state account.
+func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+ // The account was present in prev-state, decode it from the
+ // 'slim-rlp' format bytes.
+ h := newHasher()
+ defer h.release()
+
+ addrHash := h.hash(addr.Bytes())
+ prev, err := types.FullAccount(ctx.accounts[addr])
+ if err != nil {
+ return err
+ }
+ // The account may or may not exist in the post-state, try to
+ // load it and decode it if it's found.
+ blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ post := types.NewEmptyStateAccount()
+ if len(blob) != 0 {
+ if err := rlp.DecodeBytes(blob, &post); err != nil {
+ return err
+ }
+ }
+ // Apply all storage changes into the post-state storage trie.
+ st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+ if err != nil {
+ return err
+ }
+ for key, val := range ctx.storages[addr] {
+ var err error
+ if len(val) == 0 {
+ err = st.Delete(key.Bytes())
+ } else {
+ err = st.Update(key.Bytes(), val)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ root, result := st.Commit(false)
+ if root != prev.Root {
+ return errors.New("failed to reset storage trie")
+ }
+	// The returned set can be nil if the storage trie is not changed
+	// at all.
+ if result != nil {
+ if err := ctx.nodes.Merge(result); err != nil {
+ return err
+ }
+ }
+ // Write the prev-state account into the main trie
+ full, err := rlp.EncodeToBytes(prev)
+ if err != nil {
+ return err
+ }
+ return ctx.accountTrie.Update(addrHash.Bytes(), full)
+}
+
+// deleteAccount handles an account that was not present in prev-state but is
+// expected to exist in post-state. It applies the reverse diff and verifies
+// that the account and storage are wiped out correctly.
+func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+	// The account must exist in post-state; load it.
+ h := newHasher()
+ defer h.release()
+
+ addrHash := h.hash(addr.Bytes())
+ blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ if len(blob) == 0 {
+ return fmt.Errorf("account is non-existent %#x", addrHash)
+ }
+ var post types.StateAccount
+ if err := rlp.DecodeBytes(blob, &post); err != nil {
+ return err
+ }
+ st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+ if err != nil {
+ return err
+ }
+ for key, val := range ctx.storages[addr] {
+ if len(val) != 0 {
+ return errors.New("expect storage deletion")
+ }
+ if err := st.Delete(key.Bytes()); err != nil {
+ return err
+ }
+ }
+ root, result := st.Commit(false)
+ if root != types.EmptyRootHash {
+ return errors.New("failed to clear storage trie")
+ }
+	// The returned set can be nil if the storage trie is not changed
+	// at all.
+ if result != nil {
+ if err := ctx.nodes.Merge(result); err != nil {
+ return err
+ }
+ }
+ // Delete the post-state account from the main trie.
+ return ctx.accountTrie.Delete(addrHash.Bytes())
+}
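A small, purely hypothetical pre-check can make the deletion invariant explicit before a diff reaches Apply: for a fully deleted account, every recorded slot must carry an empty value, mirroring the "expect storage deletion" guard above. The helper below is not part of the diff; it assumes it would sit in the same package, where fmt and common are already imported.

```go
// validateDeletionDiff is a hypothetical sanity check: a storage diff that
// accompanies a full account deletion may only contain deletion markers
// (empty values), otherwise deleteAccount rejects it.
func validateDeletionDiff(storage map[common.Hash][]byte) error {
	for key, val := range storage {
		if len(val) != 0 {
			return fmt.Errorf("slot %#x carries a non-empty value in a deletion diff", key)
		}
	}
	return nil
}
```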
+
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+ hasherPool.Put(h)
+}
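The pooled hasher is only an allocation optimisation: each call produces the same digest as a one-shot keccak256 over the input. The standalone sketch below (go-ethereum's crypto and common packages) shows the equivalent computation used for address hashes, without the sync.Pool reuse.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	addr := common.HexToAddress("0x0100000000000000000000000000000000000000")
	// Equivalent to hasher.hash(addr.Bytes()) above, minus the pooled keccak state.
	addrHash := crypto.Keccak256Hash(addr.Bytes())
	fmt.Println("address hash:", addrHash.Hex())
}
```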
diff --git a/utils/metered_cache.go b/utils/metered_cache.go
index 17c86bdaa2..d554dcb13a 100644
--- a/utils/metered_cache.go
+++ b/utils/metered_cache.go
@@ -5,15 +5,11 @@ package utils
import (
"fmt"
- "os"
- "path/filepath"
"sync/atomic"
"time"
"github.com/VictoriaMetrics/fastcache"
"github.com/ava-labs/subnet-evm/metrics"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/log"
)
// MeteredCache wraps *fastcache.Cache and periodically pulls stats from it.
@@ -35,37 +31,15 @@ type MeteredCache struct {
updateFrequency uint64
}
-func dirSize(path string) (int64, error) {
- var size int64
- err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() {
- size += info.Size()
- }
- return nil
- })
- return size, err
-}
-
// NewMeteredCache returns a new MeteredCache that will update stats to the
// provided namespace once per each [updateFrequency] operations.
// Note: if [updateFrequency] is passed as 0, it will be treated as 1.
-func NewMeteredCache(size int, journal string, namespace string, updateFrequency uint64) *MeteredCache {
- var cache *fastcache.Cache
- if journal == "" {
- cache = fastcache.New(size)
- } else {
- dirSize, err := dirSize(journal)
- log.Info("attempting to load cache from disk", "path", journal, "dirSize", common.StorageSize(dirSize), "err", err)
- cache = fastcache.LoadFromFileOrNew(journal, size)
- }
+func NewMeteredCache(size int, namespace string, updateFrequency uint64) *MeteredCache {
if updateFrequency == 0 {
updateFrequency = 1 // avoid division by zero
}
mc := &MeteredCache{
- Cache: cache,
+ Cache: fastcache.New(size),
namespace: namespace,
updateFrequency: updateFrequency,
}
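With the journal path removed, NewMeteredCache always builds an in-memory fastcache instance, so callers simply drop the second argument. A hedged usage sketch follows; the size, namespace, and update frequency are illustrative, and the Set/HasGet calls assume the usual fastcache-style accessors exposed by MeteredCache.

```go
package main

import "github.com/ava-labs/subnet-evm/utils"

func main() {
	// 256 MiB in-memory cache; metrics pushed to the "example/cache" namespace
	// once per 1024 cache operations (a frequency of 0 is treated as 1).
	cache := utils.NewMeteredCache(256*1024*1024, "example/cache", 1024)
	cache.Set([]byte("key"), []byte("value"))
	if v, ok := cache.HasGet(nil, []byte("key")); ok {
		_ = v // use the cached value
	}
}
```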
diff --git a/warp/aggregator/mock_signature_getter.go b/warp/aggregator/mock_signature_getter.go
index f00bb920fa..537e3ae2e1 100644
--- a/warp/aggregator/mock_signature_getter.go
+++ b/warp/aggregator/mock_signature_getter.go
@@ -8,8 +8,8 @@ import (
context "context"
reflect "reflect"
- bls "github.com/ava-labs/avalanchego/utils/crypto/bls"
ids "github.com/ava-labs/avalanchego/ids"
+ bls "github.com/ava-labs/avalanchego/utils/crypto/bls"
warp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
gomock "go.uber.org/mock/gomock"
)