feat: reduce block interval poc #265

Draft
wants to merge 9 commits into base: develop
2 changes: 1 addition & 1 deletion op-batcher/batcher/channel_builder.go
@@ -76,7 +76,7 @@ type ChannelBuilder struct {
outputBytes int
}

// newChannelBuilder creates a new channel builder or returns an error if the
// NewChannelBuilder creates a new channel builder or returns an error if the
// channel out could not be created.
// it acts as a factory for either a span or singular channel out
func NewChannelBuilder(cfg ChannelConfig, rollupCfg rollup.Config, latestL1OriginBlockNum uint64) (*ChannelBuilder, error) {
7 changes: 7 additions & 0 deletions op-batcher/batcher/channel_manager.go
@@ -362,11 +362,18 @@ func (s *channelManager) AddL2Block(block *types.Block) error {
}

func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo) eth.L2BlockRef {
milliPart := uint64(0)
@joeylichang (Collaborator) commented on Mar 7, 2025:
The L2BlockRef returned by l2BlockRefFromBlockAndL1Info is only used for metrics, so adding the milliPart here is unnecessary.

if block.MixDigest() != (common.Hash{}) {
// The L2 millisecond part is encoded in the highest 2 bytes of MixDigest.
milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1])
}

return eth.L2BlockRef{
Hash: block.Hash(),
Number: block.NumberU64(),
ParentHash: block.ParentHash(),
Time: block.Time(),
MilliTime: milliPart,
L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number},
SequenceNumber: l1info.SequenceNumber,
}
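For clarity, here is a minimal sketch (not part of this diff) of the MixDigest convention assumed above: the highest two bytes of the digest carry the millisecond remainder of the block timestamp. The helper names decodeMilliPart/encodeMilliPart are hypothetical and purely illustrative.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

// decodeMilliPart reads the millisecond remainder from the highest two bytes
// of a mix digest; an all-zero digest means no milli-part (0).
func decodeMilliPart(mixDigest common.Hash) uint64 {
    if mixDigest == (common.Hash{}) {
        return 0
    }
    return uint64(mixDigest[0])*256 + uint64(mixDigest[1])
}

// encodeMilliPart writes a millisecond remainder (0..999) into the highest
// two bytes of an otherwise-zero digest.
func encodeMilliPart(milliPart uint64) common.Hash {
    var h common.Hash
    h[0] = byte(milliPart >> 8)
    h[1] = byte(milliPart & 0xff)
    return h
}

func main() {
    d := encodeMilliPart(750)
    fmt.Println(decodeMilliPart(d)) // 750
}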
36 changes: 34 additions & 2 deletions op-chain-ops/genesis/config.go
@@ -305,6 +305,31 @@ type DeployConfig struct {
UseInterop bool `json:"useInterop,omitempty"`
}

func (d *DeployConfig) L1MillisecondBlockInterval() uint64 {
// convert seconds to milliseconds
return d.L1BlockTime * 1000
}

func (d *DeployConfig) L2MillisecondBlockInterval() uint64 {
if d.L2BlockTime > 3 {
// already in milliseconds
return d.L2BlockTime
}
// convert seconds to milliseconds
return d.L2BlockTime * 1000
}

// L2SecondBlockInterval is only used by unit and e2e tests.
// TODO: the unit and e2e tests need to be refined later.
func (d *DeployConfig) L2SecondBlockInterval() uint64 {
if d.L2BlockTime <= 3 {
// already in seconds
return d.L2BlockTime
}
// convert milliseconds to seconds
return d.L2BlockTime / 1000
}
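A hedged usage sketch of the interval helpers above, assuming only the exported uint64 L1BlockTime/L2BlockTime fields already used in this file. Note that L2SecondBlockInterval truncates sub-second intervals to 0 because of integer division.

package main

import (
    "fmt"

    "github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
)

func main() {
    // Legacy-style config: both block times expressed in seconds.
    legacy := &genesis.DeployConfig{L1BlockTime: 12, L2BlockTime: 2}
    fmt.Println(legacy.L1MillisecondBlockInterval()) // 12000
    fmt.Println(legacy.L2MillisecondBlockInterval()) // 2000 (2 <= 3, treated as seconds)
    fmt.Println(legacy.L2SecondBlockInterval())      // 2

    // New-style config: L2BlockTime expressed in milliseconds.
    fast := &genesis.DeployConfig{L1BlockTime: 3, L2BlockTime: 500}
    fmt.Println(fast.L2MillisecondBlockInterval()) // 500 (> 3, already milliseconds)
    fmt.Println(fast.L2SecondBlockInterval())      // 0 (integer division truncates)
}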

// Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy
// which makes it easier to maintain, we do not need efficiency in this case.
func (d *DeployConfig) Copy() *DeployConfig {
@@ -434,9 +459,15 @@ func (d *DeployConfig) Check() error {
return fmt.Errorf("%w: GovernanceToken owner cannot be address(0)", ErrInvalidDeployConfig)
}
}
if d.L2BlockTime <= 3 {
// TODO: too many tests depend on this; temporary workaround.
// convert the L2 block time from seconds to milliseconds
d.L2BlockTime = d.L2BlockTime * 1000
}

// L2 block time must always be smaller than L1 block time
if d.L1BlockTime < d.L2BlockTime {
return fmt.Errorf("L2 block time (%d) is larger than L1 block time (%d)", d.L2BlockTime, d.L1BlockTime)
if d.L1MillisecondBlockInterval() < d.L2MillisecondBlockInterval() {
return fmt.Errorf("L2 block interval ms (%d) is larger than L1 block interval ms (%d)", d.L2MillisecondBlockInterval(), d.L1MillisecondBlockInterval())
}
if d.RequiredProtocolVersion == (params.ProtocolVersion{}) {
log.Warn("RequiredProtocolVersion is empty")
@@ -585,6 +616,7 @@ func (d *DeployConfig) DeltaTime(genesisTime uint64) *uint64 {
return &v
}

// TODO: decide whether the fork offsets need to use millisecond timestamps
func (d *DeployConfig) EcotoneTime(genesisTime uint64) *uint64 {
if d.L2GenesisEcotoneTimeOffset == nil {
return nil
2 changes: 1 addition & 1 deletion op-e2e/actions/dencun_fork_test.go
@@ -124,7 +124,7 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) {
cancunOffset := hexutil.Uint64(0)
dp.DeployConfig.L1CancunTimeOffset = &cancunOffset
// This test will fork on the second block
offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime * 2)
offset := hexutil.Uint64(dp.DeployConfig.L2SecondBlockInterval() * 2)
dp.DeployConfig.L2GenesisCanyonTimeOffset = &offset
dp.DeployConfig.L2GenesisDeltaTimeOffset = &offset
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
4 changes: 2 additions & 2 deletions op-e2e/actions/l2_sequencer_test.go
@@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
origin := miner.l1Chain.CurrentBlock()

// L2 makes blocks to catch up
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.SecondBlockInterval() < origin.Time {
makeL2BlockWithAliceTx()
require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
}
@@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t)

// Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.SecondBlockInterval() <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) {
sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
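The test above calls SecondBlockInterval() on rollup.Config, which is not shown in this diff. A plausible sketch of such helpers, assuming Config.BlockTime stores milliseconds whenever it is greater than 3 (the same heuristic DeployConfig uses); the type and method names below are illustrative, not the repo's actual code.

package main

import "fmt"

// cfg mirrors only the field of rollup.Config that matters here.
type cfg struct{ BlockTime uint64 }

// MillisecondBlockInterval: values > 3 are assumed to already be milliseconds,
// smaller values are legacy second-granularity block times.
func (c cfg) MillisecondBlockInterval() uint64 {
    if c.BlockTime > 3 {
        return c.BlockTime
    }
    return c.BlockTime * 1000
}

// SecondBlockInterval truncates to whole seconds.
func (c cfg) SecondBlockInterval() uint64 {
    return c.MillisecondBlockInterval() / 1000
}

func main() {
    fmt.Println(cfg{BlockTime: 2}.MillisecondBlockInterval())   // 2000
    fmt.Println(cfg{BlockTime: 500}.MillisecondBlockInterval()) // 500
    fmt.Println(cfg{BlockTime: 500}.SecondBlockInterval())      // 0
}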
4 changes: 2 additions & 2 deletions op-e2e/actions/user_test.go
@@ -119,10 +119,10 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) {
dp.DeployConfig.L2GenesisFjordTimeOffset = test.fjordTime

if test.canyonTime != nil {
require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime), "canyon fork must be aligned")
require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2SecondBlockInterval()), "canyon fork must be aligned")
}
if test.ecotoneTime != nil {
require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime), "ecotone fork must be aligned")
require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2SecondBlockInterval()), "ecotone fork must be aligned")
}

sd := e2eutils.Setup(t, dp, defaultAlloc)
5 changes: 5 additions & 0 deletions op-e2e/e2eutils/setup.go
@@ -187,6 +187,11 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
PlasmaConfig: pcfg,
}

if rollupCfg.BlockTime <= 3 {
// convert the block time to milliseconds
rollupCfg.BlockTime = rollupCfg.BlockTime * 1000
}

require.NoError(t, rollupCfg.Check())

// Sanity check that the config is correct
10 changes: 5 additions & 5 deletions op-e2e/op_geth.go
@@ -211,8 +211,8 @@ func (d *OpGeth) StartBlockBuilding(ctx context.Context, attrs *eth.PayloadAttri

// CreatePayloadAttributes creates a valid PayloadAttributes containing a L1Info deposit transaction followed by the supplied transactions.
func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.PayloadAttributes, error) {
timestamp := d.L2Head.Timestamp + 2
l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp))
milliTimestamp := d.L2Head.MillisecondTimestamp() + 2*1000 // 2000 millisecond block interval
l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, milliTimestamp)
if err != nil {
return nil, err
}
@@ -228,17 +228,17 @@ func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Payloa
}

var withdrawals *types.Withdrawals
if d.L2ChainConfig.IsCanyon(uint64(timestamp)) {
if d.L2ChainConfig.IsCanyon(milliTimestamp / 1000) {
withdrawals = &types.Withdrawals{}
}

var parentBeaconBlockRoot *common.Hash
if d.L2ChainConfig.IsEcotone(uint64(timestamp)) {
if d.L2ChainConfig.IsEcotone(milliTimestamp / 1000) {
parentBeaconBlockRoot = d.L1Head.ParentBeaconRoot()
}

attrs := eth.PayloadAttributes{
Timestamp: timestamp,
Timestamp: eth.Uint64Quantity(milliTimestamp / 1000),
Transactions: txBytes,
NoTxPool: true,
GasLimit: (*eth.Uint64Quantity)(&d.SystemConfig.GasLimit),
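CreatePayloadAttributes above relies on a MillisecondTimestamp() helper on the L2 head that is not shown in this diff. Based on the MixDigest convention used in the batcher, a plausible sketch is the following; the blockRef type and field names are hypothetical, not confirmed by the PR.

package main

import "fmt"

// blockRef combines the second-granularity Time with the milli-part carried
// in the highest two bytes of MixDigest/PrevRandao.
type blockRef struct {
    Time      uint64 // seconds
    MilliPart uint64 // 0..999, decoded from the highest 2 bytes of MixDigest
}

func (b blockRef) MillisecondTimestamp() uint64 {
    return b.Time*1000 + b.MilliPart
}

func main() {
    head := blockRef{Time: 1_700_000_000, MilliPart: 250}
    milli := head.MillisecondTimestamp() + 2*1000 // next payload, 2000 ms later
    fmt.Println(milli/1000, milli%1000)           // whole-second part and milli remainder
}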
5 changes: 5 additions & 0 deletions op-e2e/setup.go
@@ -547,6 +547,11 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
return nil, err
}
sys.RollupConfig = &defaultConfig
if sys.RollupConfig.BlockTime <= 3 {
// TODO: too many tests depend on this; temporary workaround.
// convert the block time to milliseconds
sys.RollupConfig.BlockTime = sys.RollupConfig.BlockTime * 1000
}

// Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2
bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LevelInfo).New("role", "l1_cl"),
2 changes: 1 addition & 1 deletion op-e2e/system_adminrpc_test.go
@@ -56,7 +56,7 @@ func TestStopStartSequencer(t *testing.T) {
require.False(t, active, "sequencer should be inactive")

blockBefore := latestBlock(t, l2Seq)
time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime+1) * time.Second)
time.Sleep(time.Duration(cfg.DeployConfig.L2SecondBlockInterval()+1) * time.Second)
blockAfter := latestBlock(t, l2Seq)
require.Equal(t, blockAfter, blockBefore, "Chain advanced after stopping sequencer")

2 changes: 1 addition & 1 deletion op-node/p2p/sync.go
@@ -878,7 +878,7 @@ func (srv *ReqRespServer) handleSyncRequest(ctx context.Context, stream network.
if req < srv.cfg.Genesis.L2.Number {
return req, fmt.Errorf("cannot serve request for L2 block %d before genesis %d: %w", req, srv.cfg.Genesis.L2.Number, invalidRequestErr)
}
max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().Unix()))
max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().UnixMilli()))
if err != nil {
return req, fmt.Errorf("cannot determine max target block number to verify request: %w", invalidRequestErr)
}
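The change above passes a millisecond timestamp to TargetBlockNumber, which implies the helper now divides by a millisecond block interval. A hedged sketch of the arithmetic (the real method lives on rollup.Config and is not shown in this diff; the function below is a stand-in):

package main

import (
    "fmt"
    "time"
)

// targetBlockNumber returns the highest block number that can exist at nowMilli,
// given the L2 genesis block number, its millisecond timestamp, and the
// millisecond block interval.
func targetBlockNumber(genesisNum, genesisMilli, blockIntervalMilli, nowMilli uint64) uint64 {
    if nowMilli < genesisMilli {
        return genesisNum
    }
    return genesisNum + (nowMilli-genesisMilli)/blockIntervalMilli
}

func main() {
    now := uint64(time.Now().UnixMilli())
    // e.g. genesis at block 0, 10 seconds ago, 500 ms blocks -> ~20 blocks.
    fmt.Println(targetBlockNumber(0, now-10_000, 500, now))
}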
2 changes: 1 addition & 1 deletion op-node/rollup/chain_spec_test.go
@@ -33,7 +33,7 @@ var testConfig = Config{
GasLimit: 30_000_000,
},
},
BlockTime: 2,
BlockTime: 2000,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
33 changes: 17 additions & 16 deletions op-node/rollup/derive/attributes.go
@@ -5,14 +5,13 @@ import (
"fmt"
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"

"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/bsc"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)

var (
@@ -85,6 +84,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
return nil, NewCriticalError(fmt.Errorf("failed to derive some deposits: %w", err))
}
// apply sysCfg changes
// TODO: may need to pass the L1 origin milli-timestamp later if IsEcotone() switches to milli-timestamps
if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, ba.rollupCfg, info.Time()); err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to apply derived L1 sysCfg updates: %w", err))
}
@@ -107,7 +107,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex

// Calculate bsc block base fee
var l1BaseFee *big.Int
if ba.rollupCfg.IsSnow(l2Parent.Time + ba.rollupCfg.BlockTime) {
if ba.rollupCfg.IsSnow((l2Parent.MillisecondTimestamp() + ba.rollupCfg.MillisecondBlockInterval()) / 1000) {
l1BaseFee, err = SnowL1GasPrice(ctx, ba, epoch)
if err != nil {
return nil, err
@@ -124,29 +124,29 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
l1Info = bsc.NewBlockInfoBSCWrapper(l1Info, l1BaseFee)

// Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2
nextL2Time := l2Parent.Time + ba.rollupCfg.BlockTime
if nextL2Time < l1Info.Time() {
nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.MillisecondBlockInterval()
if nextL2MilliTime < l1Info.MillisecondTimestamp() {
return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d",
l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time()))
l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MillisecondTimestamp()))
}

var upgradeTxs []hexutil.Bytes
if ba.rollupCfg.IsEcotoneActivationBlock(nextL2Time) {
if ba.rollupCfg.IsEcotoneActivationBlock(nextL2MilliTime / 1000) {
upgradeTxs, err = EcotoneNetworkUpgradeTransactions()
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to build ecotone network upgrade txs: %w", err))
}
}

if ba.rollupCfg.IsFjordActivationBlock(nextL2Time) {
if ba.rollupCfg.IsFjordActivationBlock(nextL2MilliTime / 1000) {
fjord, err := FjordNetworkUpgradeTransactions()
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to build fjord network upgrade txs: %w", err))
}
upgradeTxs = append(upgradeTxs, fjord...)
}

l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time)
l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2MilliTime)
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err))
}
@@ -157,28 +157,29 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
txs = append(txs, upgradeTxs...)

var withdrawals *types.Withdrawals
if ba.rollupCfg.IsCanyon(nextL2Time) {
if ba.rollupCfg.IsCanyon(nextL2MilliTime / 1000) {
withdrawals = &types.Withdrawals{}
}

var parentBeaconRoot *common.Hash
if ba.rollupCfg.IsEcotone(nextL2Time) {
if ba.rollupCfg.IsEcotone(nextL2MilliTime / 1000) {
parentBeaconRoot = l1Info.ParentBeaconRoot()
if parentBeaconRoot == nil { // default to zero hash if there is no beacon-block-root available
parentBeaconRoot = new(common.Hash)
}
}

return &eth.PayloadAttributes{
Timestamp: hexutil.Uint64(nextL2Time),
pa := &eth.PayloadAttributes{
PrevRandao: eth.Bytes32(l1Info.MixDigest()),
SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr,
Transactions: txs,
NoTxPool: true,
GasLimit: (*eth.Uint64Quantity)(&sysConfig.GasLimit),
Withdrawals: withdrawals,
ParentBeaconBlockRoot: parentBeaconRoot,
}, nil
}
pa.SetMillisecondTimestamp(nextL2MilliTime)
@joeylichang (Collaborator) commented on Mar 7, 2025:
It may be better for SetMillisecondTimestamp(common.Hash, milliseconds) to first write the milliseconds into the common.Hash, and then use the return value to initialize eth.PayloadAttributes at line 173.

return pa, nil
}

func (ba *FetchingAttributesBuilder) CachePayloadByHash(payload *eth.ExecutionPayloadEnvelope) bool {
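Throughout PreparePayloadAttributes the millisecond time is divided by 1000 before the fork checks, which still consume second-granularity timestamps. A small hypothetical helper that makes that split explicit (illustrative only; the real behavior of SetMillisecondTimestamp is not shown in this diff):

package main

import "fmt"

// splitMilliTimestamp splits a millisecond timestamp into the whole-second
// part (what fork checks such as IsCanyon/IsEcotone still consume) and the
// sub-second remainder (what the MixDigest/PrevRandao bytes carry).
func splitMilliTimestamp(milli uint64) (sec uint64, milliPart uint64) {
    return milli / 1000, milli % 1000
}

func main() {
    sec, part := splitMilliTimestamp(1_700_000_000_750)
    fmt.Println(sec, part) // 1700000000 750
}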
2 changes: 1 addition & 1 deletion op-node/rollup/derive/attributes_queue.go
@@ -88,7 +88,7 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Sing
return nil, NewResetError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash))
}
// sanity check timestamp
if expected := l2SafeHead.Time + aq.config.BlockTime; expected != batch.Timestamp {
if expected := l2SafeHead.MillisecondTimestamp() + aq.config.MillisecondBlockInterval(); expected != batch.Timestamp {
return nil, NewResetError(fmt.Errorf("valid batch has bad timestamp %d, expected %d", batch.Timestamp, expected))
}
fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
14 changes: 7 additions & 7 deletions op-node/rollup/derive/batch_queue.go
@@ -96,7 +96,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si
if len(bq.nextSpan) > 0 {
// There are cached singular batches derived from the span batch.
// Check if the next cached batch matches the given parent block.
if bq.nextSpan[0].Timestamp == parent.Time+bq.config.BlockTime {
if bq.nextSpan[0].Timestamp == parent.MillisecondTimestamp()+bq.config.MillisecondBlockInterval() {
@joeylichang (Collaborator) commented on Mar 7, 2025:
If the Timestamp is changed from seconds to milliseconds, add a note that this only takes effect after the hard fork; before that it remains in seconds.

// Pop first one and return.
nextBatch := bq.popNextBatch(parent)
// len(bq.nextSpan) == 0 means it's the last batch of the span.
@@ -257,7 +257,7 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, paren
// Find the first-seen batch that matches all validity conditions.
// We may not have sufficient information to proceed filtering, and then we stop.
// There may be none: in that case we force-create an empty batch
nextTimestamp := parent.Time + bq.config.BlockTime
nextMilliTimestamp := parent.MillisecondTimestamp() + bq.config.MillisecondBlockInterval()
var nextBatch *BatchWithL1InclusionBlock

// Go over all batches, in order of inclusion, and find the first batch we can accept.
@@ -304,7 +304,7 @@ batchLoop:
firstOfEpoch := epoch.Number == parent.L1Origin.Number+1

bq.log.Trace("Potentially generating an empty batch",
"expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "nextTimestamp", nextTimestamp,
"expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "next_ms_timestamp", nextMilliTimestamp,
"epoch_time", epoch.Time, "len_l1_blocks", len(bq.l1Blocks), "firstOfEpoch", firstOfEpoch)

if !forceEmptyBatches {
@@ -321,20 +321,20 @@ batchLoop:
// Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin,
// to preserve that L2 time >= L1 time. If this is the first block of the epoch, always generate a
// batch to ensure that we at least have one batch per epoch.
if nextTimestamp < nextEpoch.Time || firstOfEpoch {
bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp)
if nextMilliTimestamp < nextEpoch.MillisecondTimestamp() || firstOfEpoch {
bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextMilliTimestamp)
return &SingularBatch{
ParentHash: parent.Hash,
EpochNum: rollup.Epoch(epoch.Number),
EpochHash: epoch.Hash,
Timestamp: nextTimestamp,
Timestamp: nextMilliTimestamp,
Transactions: nil,
}, nil
}

// At this point we have auto generated every batch for the current epoch
// that we can, so we can advance to the next epoch.
bq.log.Trace("Advancing internal L1 blocks", "next_timestamp", nextTimestamp, "next_epoch_time", nextEpoch.Time)
bq.log.Trace("Advancing internal L1 blocks", "next_ms_timestamp", nextMilliTimestamp, "next_epoch_ms_time", nextEpoch.MillisecondTimestamp())
bq.l1Blocks = bq.l1Blocks[1:]
return nil, io.EOF
}
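A worked example of the empty-batch generation above, assuming a 500 ms L2 block interval: starting from a parent at T ms, candidate timestamps advance by 500 ms, and an empty batch qualifies while the candidate is still before the next epoch's millisecond time (the first block of an epoch is always generated regardless). A minimal sketch of that condition, with hypothetical helper names:

package main

import "fmt"

// emptyBatchTimestamps lists the millisecond timestamps of the empty batches
// that would be generated between parentMilli and nextEpochMilli, mirroring
// the "nextMilliTimestamp < nextEpoch.MillisecondTimestamp()" condition above.
// Note the real loop produces one batch per call; this just enumerates the
// qualifying timestamps.
func emptyBatchTimestamps(parentMilli, nextEpochMilli, intervalMilli uint64) []uint64 {
    var out []uint64
    for ts := parentMilli + intervalMilli; ts < nextEpochMilli; ts += intervalMilli {
        out = append(out, ts)
    }
    return out
}

func main() {
    // Parent at 10_000 ms, next epoch at 12_000 ms, 500 ms blocks:
    // empty batches at 10_500, 11_000, 11_500.
    fmt.Println(emptyBatchTimestamps(10_000, 12_000, 500))
}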