
Commit

Merge branch 'zkevm' into CDK-382-cdk-erigon-rpc-node-handle-rollbacks
Stefan-Ethernal committed Aug 28, 2024
2 parents ec3f836 + 423754c commit 84c3991
Showing 19 changed files with 117 additions and 176 deletions.
2 changes: 1 addition & 1 deletion cmd/rpcdaemon/commands/zkevm_api.go
@@ -646,7 +646,7 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B
}

// local exit root
localExitRoot, err := utils.GetBatchLocalExitRoot(batchNo, hermezDb, tx)
localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorageForLatestBlock(batchNo, hermezDb, tx)
if err != nil {
return nil, err
}
6 changes: 3 additions & 3 deletions cmd/utils/flags.go
@@ -560,17 +560,17 @@ var (
DataStreamWriteTimeout = cli.DurationFlag{
Name: "zkevm.data-stream-writeTimeout",
Usage: "Define the TCP write timeout when sending data to a datastream client",
Value: 5 * time.Second,
Value: 20 * time.Second,
}
DataStreamInactivityTimeout = cli.DurationFlag{
Name: "zkevm.data-stream-inactivity-timeout",
Usage: "Define the inactivity timeout when interacting with a data stream server",
Value: 10 * time.Second,
Value: 10 * time.Minute,
}
DataStreamInactivityCheckInterval = cli.DurationFlag{
Name: "zkevm.data-stream-inactivity-check-interval",
Usage: "Define the inactivity check interval timeout when interacting with a data stream server",
Value: 2 * time.Second,
Value: 5 * time.Minute,
}
Limbo = cli.BoolFlag{
Name: "zkevm.limbo",
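For context on the three timeout defaults raised above, here is a minimal, standalone sketch of how a duration flag like these is declared and read. It uses github.com/urfave/cli/v2 purely for illustration, which may differ from the cli package actually vendored in cdk-erigon; only the flag name, usage string, and new default value are taken from the diff, everything else is assumed.

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			// Default mirrors the new value in the diff; override on the
			// command line, e.g. --zkevm.data-stream-inactivity-timeout=10m
			&cli.DurationFlag{
				Name:  "zkevm.data-stream-inactivity-timeout",
				Usage: "Define the inactivity timeout when interacting with a data stream server",
				Value: 10 * time.Minute,
			},
		},
		Action: func(cCtx *cli.Context) error {
			// Reads the flag (or its default) as a time.Duration.
			fmt.Println("inactivity timeout:", cCtx.Duration("zkevm.data-stream-inactivity-timeout"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

Operators would override such a default with a Go duration string (for example "20s" or "5m"); the sketch only shows the declaration/read pattern, not cdk-erigon's actual flag wiring.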
1 change: 0 additions & 1 deletion core/vm/eips_zkevm.go
@@ -44,7 +44,6 @@ func enable2929_zkevm(jt *JumpTable) {
// factor here
jt[SELFDESTRUCT].constantGas = params.SelfdestructGasEIP150
jt[SELFDESTRUCT].dynamicGas = gasSelfdestructEIP2929_zkevm
jt[SENDALL].dynamicGas = gasSelfdestructEIP2929_zkevm
}

func enable3529_zkevm(jt *JumpTable) {
2 changes: 0 additions & 2 deletions core/vm/interpreter_zkevm.go
@@ -66,8 +66,6 @@ func shouldExecuteLastOpCode(op OpCode) bool {
fallthrough
case CREATE2:
fallthrough
case SENDALL:
fallthrough
case SLOAD:
fallthrough
case SSTORE:
9 changes: 1 addition & 8 deletions core/vm/jump_table_zkevm.go
@@ -38,15 +38,8 @@ func newForkID4InstructionSet() JumpTable {

instructionSet[EXTCODEHASH].execute = opExtCodeHash_zkevm

instructionSet[SENDALL] = &operation{
execute: opSendAll_zkevm,
dynamicGas: gasSelfdestruct_zkevm,
numPop: 1,
numPush: 0,
}

// SELFDESTRUCT is replaces by SENDALL
instructionSet[SELFDESTRUCT] = instructionSet[SENDALL]
instructionSet[SELFDESTRUCT].execute = opSendAll_zkevm

validateAndFillMaxStack(&instructionSet)
return instructionSet
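The hunk above drops the dedicated SENDALL table entry and instead points SELFDESTRUCT's existing entry at opSendAll_zkevm. A small self-contained sketch of the difference, using deliberately simplified stand-in types rather than the real JumpTable/operation definitions (which also carry dynamicGas, numPop, numPush, and so on):

package main

import "fmt"

// Hypothetical, stripped-down stand-ins for the real jump table types.
type operation struct{ name string }

type JumpTable [256]*operation

func main() {
	const SELFDESTRUCT, SENDALL = 0xff, 0xfb

	// Old approach (removed in this commit): register a separate SENDALL
	// entry and alias SELFDESTRUCT to the very same *operation.
	var jt JumpTable
	jt[SENDALL] = &operation{name: "opSendAll_zkevm"}
	jt[SELFDESTRUCT] = jt[SENDALL]
	fmt.Println(jt[SELFDESTRUCT] == jt[SENDALL]) // true: one shared entry for both opcodes

	// New approach: keep SELFDESTRUCT's own entry and just swap its
	// behaviour in place, so no extra opcode slot is populated.
	var jt2 JumpTable
	jt2[SELFDESTRUCT] = &operation{name: "opSelfdestruct"}
	jt2[SELFDESTRUCT].name = "opSendAll_zkevm"
	fmt.Println(jt2[SENDALL] == nil) // true: 0xfb has no entry of its own
}

Assuming the sketch reflects the real change and no other fork registers 0xfb, the practical effect is that SELFDESTRUCT (0xff) keeps its own entry with send-all semantics while 0xfb no longer resolves to a distinct operation.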
7 changes: 2 additions & 5 deletions core/vm/opcodes_zkevm.go
@@ -1,12 +1,9 @@
package vm

const (
SENDALL OpCode = 0xfb
)
const ()

// adding extra opcodes dynamically to keep separate from the main codebase
// that simplifies rebasing new versions of Erigon
func init() {
opCodeToString[SENDALL] = "SENDALL"
stringToOp["SENDALL"] = SENDALL

}
1 change: 0 additions & 1 deletion core/vm/zk_counters.go
@@ -347,7 +347,6 @@ func SimpleCounterOperations(cc *CounterCollector) *[256]executionFunc {
CREATE2: cc.opCreate2,
RETURN: cc.opReturn,
REVERT: cc.opRevert,
SENDALL: cc.opSendAll,
SELFDESTRUCT: cc.opSendAll,
INVALID: cc.opInvalid,
ADDRESS: cc.opAddress,
5 changes: 2 additions & 3 deletions zk/datastream/server/data_stream_server.go
@@ -17,7 +17,6 @@ import (

type DbReader interface {
GetL2BlockNosByBatch(batchNo uint64) ([]uint64, error)
GetLocalExitRootForBatchNo(batchNo uint64) (libcommon.Hash, error)
GetBatchGlobalExitRootsProto(lastBatchNumber, batchNumber uint64) ([]types.GerUpdateProto, error)
GetForkId(batchNumber uint64) (uint64, error)
GetBlockGlobalExitRoot(blockNumber uint64) (libcommon.Hash, error)
@@ -194,7 +193,7 @@ func createBlockWithBatchCheckStreamEntriesProto(
}
// the genesis we insert fully, so we would have to skip closing it
if !shouldSkipBatchEndEntry {
localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorage(batchNumber, reader, tx)
localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorageForLatestBlock(batchNumber, reader, tx)
if err != nil {
return nil, err
}
@@ -383,7 +382,7 @@ func BuildWholeBatchStreamEntriesProto(
}

// the genesis we insert fully, so we would have to skip closing it
localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorage(batchNumber, reader, tx)
localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorageForLatestBlock(batchNumber, reader, tx)
if err != nil {
return nil, err
}
2 changes: 1 addition & 1 deletion zk/datastream/server/data_stream_server_utils.go
@@ -176,7 +176,7 @@ func createBatchStartEntriesProto(
}

// seal off the last batch
if localExitRoot, err = utils.GetBatchLocalExitRootFromSCStorage(workingBatch, reader, tx); err != nil {
if localExitRoot, err = utils.GetBatchLocalExitRootFromSCStorageForLatestBlock(workingBatch, reader, tx); err != nil {
return nil, err
}
entries.Add(newBatchEndProto(localExitRoot, root, workingBatch))
2 changes: 1 addition & 1 deletion zk/datastream/server/datastream_populate.go
@@ -386,7 +386,7 @@ func (srv *DataStreamServer) WriteGenesisToStream(
l2Block := newL2BlockProto(genesis, genesis.Hash().Bytes(), batchNo, ger, 0, 0, common.Hash{}, 0, common.Hash{})
batchStart := newBatchStartProto(batchNo, srv.chainId, GenesisForkId, datastream.BatchType_BATCH_TYPE_REGULAR)

ler, err := utils.GetBatchLocalExitRoot(0, reader, tx)
ler, err := utils.GetBatchLocalExitRootFromSCStorageForLatestBlock(0, reader, tx)
if err != nil {
return err
}
10 changes: 10 additions & 0 deletions zk/debug_tools/datastream-correctness-check/main.go
@@ -34,6 +34,8 @@ func main() {
var lastBlockRoot common.Hash
progressBatch := uint64(0)
progressBlock := uint64(0)
lastSeenBatch := uint64(0)
lastSeenBlock := uint64(0)

function := func(file *types.FileEntry) error {
switch file.EntryType {
@@ -65,6 +67,10 @@
if err != nil {
return err
}
if lastSeenBatch+1 != batchStart.Number {
return fmt.Errorf("unexpected batch %d, expected %d", batchStart.Number, lastSeenBatch+1)
}
lastSeenBatch = batchStart.Number
progressBatch = batchStart.Number
if previousFile != nil {
if previousFile.EntryType != types.BookmarkEntryType {
@@ -106,6 +112,10 @@
if err != nil {
return err
}
if l2Block.L2BlockNumber > 0 && lastSeenBlock+1 != l2Block.L2BlockNumber {
return fmt.Errorf("unexpected block %d, expected %d", l2Block.L2BlockNumber, lastSeenBlock+1)
}
lastSeenBlock = l2Block.L2BlockNumber
progressBlock = l2Block.L2BlockNumber
if previousFile != nil {
if previousFile.EntryType != types.BookmarkEntryType && !previousFile.IsL2BlockEnd() {
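The additions above make the correctness checker fail fast when the datastream skips or repeats a batch or block number. A minimal sketch of the same continuity-check pattern in isolation; the helper name and the simulated input are illustrative, not from the repo, and the helper applies the "skip zero" tolerance that the diff applies only to the block check:

package main

import (
	"fmt"
	"log"
)

// checkContinuity mirrors the pattern added in the diff: every newly seen
// number must be exactly one greater than the previous one. A value of 0 is
// tolerated, as the block check in the diff does for pre-genesis entries.
func checkContinuity(lastSeen *uint64, current uint64, label string) error {
	if current > 0 && *lastSeen+1 != current {
		return fmt.Errorf("unexpected %s %d, expected %d", label, current, *lastSeen+1)
	}
	*lastSeen = current
	return nil
}

func main() {
	lastSeenBatch, lastSeenBlock := uint64(0), uint64(0)

	// Simulated stream contents: batch numbers are contiguous, block numbers
	// have a gap at 3 that should be flagged.
	batches := []uint64{1, 2, 3}
	blocks := []uint64{1, 2, 4}

	for _, b := range batches {
		if err := checkContinuity(&lastSeenBatch, b, "batch"); err != nil {
			log.Fatal(err)
		}
	}
	for _, b := range blocks {
		if err := checkContinuity(&lastSeenBlock, b, "block"); err != nil {
			log.Println(err) // prints: unexpected block 4, expected 3
			return
		}
	}
}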
18 changes: 1 addition & 17 deletions zk/hermez_db/db.go
@@ -38,7 +38,7 @@ const BLOCK_L1_BLOCK_HASHES = "block_l1_block_hashes" // block
const L1_BLOCK_HASH_GER = "l1_block_hash_ger" // l1 block hash -> GER
const INTERMEDIATE_TX_STATEROOTS = "hermez_intermediate_tx_stateRoots" // l2blockno -> stateRoot
const BATCH_WITNESSES = "hermez_batch_witnesses" // batch number -> witness
const BATCH_COUNTERS = "hermez_batch_counters" // batch number -> counters
const BATCH_COUNTERS = "hermez_batch_counters" // block number -> counters
const L1_BATCH_DATA = "l1_batch_data" // batch number -> l1 batch data from transaction call data
const REUSED_L1_INFO_TREE_INDEX = "reused_l1_info_tree_index" // block number => const 1
const LATEST_USED_GER = "latest_used_ger" // batch number -> GER latest used GER
@@ -47,8 +45,6 @@ const SMT_DEPTHS = "smt_depths" // block
const L1_INFO_LEAVES = "l1_info_leaves" // l1 info tree index -> l1 info tree leaf
const L1_INFO_ROOTS = "l1_info_roots" // root hash -> l1 info tree index
const INVALID_BATCHES = "invalid_batches" // batch number -> true
const BATCH_PARTIALLY_PROCESSED = "batch_partially_processed" // batch number -> true
const LOCAL_EXIT_ROOTS = "local_exit_roots" // batch number -> local exit root
const ROllUP_TYPES_FORKS = "rollup_types_forks" // rollup type id -> fork id
const FORK_HISTORY = "fork_history" // index -> fork id + last verified batch
const JUST_UNWOUND = "just_unwound" // batch number -> true
@@ -86,8 +84,6 @@ var HermezDbTables = []string{
L1_INFO_LEAVES,
L1_INFO_ROOTS,
INVALID_BATCHES,
BATCH_PARTIALLY_PROCESSED,
LOCAL_EXIT_ROOTS,
ROllUP_TYPES_FORKS,
FORK_HISTORY,
JUST_UNWOUND,
@@ -1610,18 +1606,6 @@ func (db *HermezDbReader) GetInvalidBatch(batchNo uint64) (bool, error) {
return len(v) > 0, nil
}

func (db *HermezDb) WriteLocalExitRootForBatchNo(batchNo uint64, root common.Hash) error {
return db.tx.Put(LOCAL_EXIT_ROOTS, Uint64ToBytes(batchNo), root.Bytes())
}

func (db *HermezDbReader) GetLocalExitRootForBatchNo(batchNo uint64) (common.Hash, error) {
v, err := db.tx.GetOne(LOCAL_EXIT_ROOTS, Uint64ToBytes(batchNo))
if err != nil {
return common.Hash{}, err
}
return common.BytesToHash(v), nil
}

func (db *HermezDb) WriteRollupType(rollupType, forkId uint64) error {
return db.tx.Put(ROllUP_TYPES_FORKS, Uint64ToBytes(rollupType), Uint64ToBytes(forkId))
}
12 changes: 0 additions & 12 deletions zk/stages/stage_batches.go
@@ -77,7 +77,6 @@ type HermezDb interface {
WriteBlockL1InfoTreeIndex(blockNumber uint64, l1Index uint64) error
WriteBlockL1InfoTreeIndexProgress(blockNumber uint64, l1Index uint64) error
WriteLatestUsedGer(blockNo uint64, ger common.Hash) error
WriteLocalExitRootForBatchNo(batchNo uint64, localExitRoot common.Hash) error
}

type DatastreamClient interface {
@@ -310,9 +309,6 @@ LOOP:
if entry.StateRoot != lastBlockRoot {
log.Warn(fmt.Sprintf("[%s] batch end state root mismatches last block's: %x, expected: %x", logPrefix, entry.StateRoot, lastBlockRoot))
}
if err := writeBatchEnd(hermezDb, entry); err != nil {
return fmt.Errorf("write batch end error: %v", err)
}
case *types.FullL2Block:
log.Debug(fmt.Sprintf("[%s] Retrieved %d (%s) block from stream", logPrefix, entry.L2BlockNumber, entry.L2Blockhash.String()))
if cfg.zkCfg.SyncLimit > 0 && entry.L2BlockNumber >= cfg.zkCfg.SyncLimit {
@@ -854,14 +850,6 @@ func PruneBatchesStage(s *stagedsync.PruneState, tx kv.RwTx, cfg BatchesCfg, ctx
return nil
}

func writeBatchEnd(hermezDb HermezDb, batchEnd *types.BatchEnd) (err error) {
// utils.CalculateAccInputHash(oldAccInputHash, batchStart., l1InfoRoot common.Hash, timestampLimit uint64, sequencerAddr common.Address, forcedBlockhashL1 common.Hash)
if batchEnd.LocalExitRoot != emptyHash {
err = hermezDb.WriteLocalExitRootForBatchNo(batchEnd.Number, batchEnd.LocalExitRoot)
}
return
}

// writeL2Block writes L2Block to ErigonDb and HermezDb
// writes header, body, forkId and blockBatch
func writeL2Block(eriDb ErigonDb, hermezDb HermezDb, l2Block *types.FullL2Block, highestL1InfoTreeIndex uint64) error {
10 changes: 9 additions & 1 deletion zk/stages/stage_l1syncer.go
@@ -6,6 +6,7 @@ import (
"fmt"
"time"

"github.com/VictoriaMetrics/metrics"
"github.com/gateway-fm/cdk-erigon-lib/kv"
"github.com/ledgerwatch/log/v3"

@@ -41,7 +42,11 @@ type IL1Syncer interface {
Stop()
}

var ErrStateRootMismatch = errors.New("state root mismatch")
var (
ErrStateRootMismatch = errors.New("state root mismatch")

lastCheckedL1BlockCounter = metrics.GetOrCreateCounter(`last_checked_l1_block`)
)

type L1SyncerCfg struct {
db kv.RwDB
@@ -172,6 +177,9 @@ Loop:
}

latestCheckedBlock := cfg.syncer.GetLastCheckedL1Block()

lastCheckedL1BlockCounter.Set(latestCheckedBlock)

if highestWrittenL1BlockNo > l1BlockProgress {
log.Info(fmt.Sprintf("[%s] Saving L1 syncer progress", logPrefix), "latestCheckedBlock", latestCheckedBlock, "newVerificationsCount", newVerificationsCount, "newSequencesCount", newSequencesCount, "highestWrittenL1BlockNo", highestWrittenL1BlockNo)

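The new lastCheckedL1BlockCounter records how far the L1 syncer has scanned, overwritten each loop iteration rather than incremented. A minimal sketch of the VictoriaMetrics/metrics pattern used here, with an assumed block value and an assumed /metrics endpoint for scraping; the stage itself only calls Set, and how metrics are exposed is outside this diff:

package main

import (
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

// Same pattern as in the diff: a process-wide counter whose value is
// replaced with the latest checked L1 block number.
var lastCheckedL1BlockCounter = metrics.GetOrCreateCounter(`last_checked_l1_block`)

func main() {
	// Hypothetical value; in the stage this comes from cfg.syncer.GetLastCheckedL1Block().
	var latestCheckedBlock uint64 = 19_500_000
	lastCheckedL1BlockCounter.Set(latestCheckedBlock)

	// Expose the registered metrics in Prometheus text format.
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, false)
	})
	_ = http.ListenAndServe(":8080", nil)
}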
36 changes: 31 additions & 5 deletions zk/stages/stage_sequence_execute.go
@@ -88,10 +88,13 @@ func SpawnSequencingStage(
// if we identify any. During normal operation this function will simply check and move on without performing
// any action.
if !batchState.isAnyRecovery() {
isUnwinding, err := handleBatchEndChecks(batchContext, batchState, executionAt, u)
if err != nil || isUnwinding {
isUnwinding, err := alignExecutionToDatastream(batchContext, batchState, executionAt, u)
if err != nil {
return err
}
if isUnwinding {
return sdb.tx.Commit()
}
}

tryHaltSequencer(batchContext, batchState.batchNumber)
@@ -137,6 +140,7 @@

log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber))

var allConditionsOK bool
for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ {
log.Info(fmt.Sprintf("[%s] Starting block %d (forkid %v)...", logPrefix, blockNumber, batchState.forkId))
logTicker.Reset(10 * time.Second)
@@ -223,12 +227,18 @@
return err
}
} else if !batchState.isL1Recovery() {
batchState.blockState.transactionsForInclusion, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions)
batchState.blockState.transactionsForInclusion, allConditionsOK, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions)
if err != nil {
return err
}

if len(batchState.blockState.transactionsForInclusion) == 0 {
time.Sleep(batchContext.cfg.zk.SequencerTimeoutOnEmptyTxPool)
if allConditionsOK {
time.Sleep(batchContext.cfg.zk.SequencerTimeoutOnEmptyTxPool)
} else {
time.Sleep(batchContext.cfg.zk.SequencerTimeoutOnEmptyTxPool / 5) // we do not need to sleep too long for txpool not ready
}

} else {
log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(batchState.blockState.transactionsForInclusion))
}
@@ -375,9 +385,25 @@ func SpawnSequencingStage(
return err
}

if err = runBatchLastSteps(batchContext, batchState.batchNumber, block.NumberU64(), batchCounters); err != nil {
/*
if adding something below that line we must ensure
- it is also handled property in processInjectedInitialBatch
- it is also handled property in alignExecutionToDatastream
- it is also handled property in doCheckForBadBatch
- it is unwound correctly
*/

if err := finalizeLastBatchInDatastream(batchContext, batchState.batchNumber, block.NumberU64()); err != nil {
return err
}

// TODO: It is 99% sure that there is no need to write this in any of processInjectedInitialBatch, alignExecutionToDatastream, doCheckForBadBatch but it is worth double checknig
// the unwind of this value is handed by UnwindExecutionStageDbWrites
if _, err := rawdb.IncrementStateVersionByBlockNumberIfNeeded(batchContext.sdb.tx, block.NumberU64()); err != nil {
return fmt.Errorf("writing plain state version: %w", err)
}

log.Info(fmt.Sprintf("[%s] Finish batch %d...", batchContext.s.LogPrefix(), batchState.batchNumber))

return sdb.tx.Commit()
}