Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: reduce block interval poc #265

Draft
wants to merge 9 commits into
base: develop
Choose a base branch
from
2 changes: 2 additions & 0 deletions op-batcher/batcher/channel_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/holiman/uint256"
)

var ErrReorg = errors.New("block does not extend existing chain")
Expand Down Expand Up @@ -367,6 +368,7 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo
Number: block.NumberU64(),
ParentHash: block.ParentHash(),
Time: block.Time(),
MilliPartTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), // adapts millisecond part
L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number},
SequenceNumber: l1info.SequenceNumber,
}
Expand Down
8 changes: 6 additions & 2 deletions op-chain-ops/genesis/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ type DeployConfig struct {
L1ChainID uint64 `json:"l1ChainID"`
// L2ChainID is the chain ID of the L2 chain.
L2ChainID uint64 `json:"l2ChainID"`
// L2BlockTime is the number of seconds between each L2 block.
// L2BlockTime is the interval between each L2 block, in milliseconds (values <= 3 are treated as seconds and converted).
L2BlockTime uint64 `json:"l2BlockTime"`
// FinalizationPeriodSeconds represents the number of seconds before an output is considered
// finalized. This impacts the amount of time that withdrawals take to finalize and is
Expand Down Expand Up @@ -434,8 +434,12 @@ func (d *DeployConfig) Check() error {
return fmt.Errorf("%w: GovernanceToken owner cannot be address(0)", ErrInvalidDeployConfig)
}
}
if d.L2BlockTime <= 3 {
// Convert the L2 block interval from seconds to milliseconds.
d.L2BlockTime = d.L2BlockTime * 1000
}
// L2 block time must always be smaller than L1 block time
if d.L1BlockTime < d.L2BlockTime {
if d.L1BlockTime*1000 < d.L2BlockTime { // TODO: temporary adjustment — the L1 interval is in seconds while the L2 interval is in milliseconds.
return fmt.Errorf("L2 block time (%d) is larger than L1 block time (%d)", d.L2BlockTime, d.L1BlockTime)
}
if d.RequiredProtocolVersion == (params.ProtocolVersion{}) {
Expand Down
6 changes: 3 additions & 3 deletions op-e2e/actions/blocktime_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,9 @@ func BatchInLastPossibleBlocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
applyDeltaTimeOffset(dp, deltaTimeOffset)
dp.DeployConfig.SequencerWindowSize = 4
dp.DeployConfig.L2BlockTime = 2
dp.DeployConfig.L2BlockTime = 2 // second

sd := e2eutils.Setup(t, dp, defaultAlloc)
sd := e2eutils.Setup(t, dp, defaultAlloc) // dp.DeployConfig.L2BlockTime will be changed to 2000
log := testlog.Logger(t, log.LevelDebug)

sd, _, miner, sequencer, sequencerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log)
Expand Down Expand Up @@ -162,7 +162,7 @@ func LargeL1Gaps(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp.DeployConfig.SequencerWindowSize = 4
dp.DeployConfig.MaxSequencerDrift = 32
applyDeltaTimeOffset(dp, deltaTimeOffset)
sd := e2eutils.Setup(t, dp, defaultAlloc)
sd := e2eutils.Setup(t, dp, defaultAlloc) // dp.DeployConfig.L2BlockTime = 2000
log := testlog.Logger(t, log.LevelDebug)

sd, _, miner, sequencer, sequencerEngine, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
Expand Down
5 changes: 4 additions & 1 deletion op-e2e/actions/dencun_fork_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,10 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) {
cancunOffset := hexutil.Uint64(0)
dp.DeployConfig.L1CancunTimeOffset = &cancunOffset
// This test will fork on the second block
offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime * 2)
if dp.DeployConfig.L2BlockTime <= 3 {
dp.DeployConfig.L2BlockTime = dp.DeployConfig.L2BlockTime * 1000 // millisecond
}
offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime / 1000 * 2) // second
dp.DeployConfig.L2GenesisCanyonTimeOffset = &offset
dp.DeployConfig.L2GenesisDeltaTimeOffset = &offset
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
Expand Down
4 changes: 2 additions & 2 deletions op-e2e/actions/l2_sequencer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
origin := miner.l1Chain.CurrentBlock()

// L2 makes blocks to catch up
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime/1000 < origin.Time {
makeL2BlockWithAliceTx()
require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
}
Expand All @@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t)

// Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime/1000 <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) {
sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
Expand Down
8 changes: 6 additions & 2 deletions op-e2e/actions/user_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,11 +118,15 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) {
dp.DeployConfig.L2GenesisEcotoneTimeOffset = test.ecotoneTime
dp.DeployConfig.L2GenesisFjordTimeOffset = test.fjordTime

if dp.DeployConfig.L2BlockTime <= 3 {
dp.DeployConfig.L2BlockTime = dp.DeployConfig.L2BlockTime * 1000
}

if test.canyonTime != nil {
require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime), "canyon fork must be aligned")
require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime/1000), "canyon fork must be aligned")
}
if test.ecotoneTime != nil {
require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime), "ecotone fork must be aligned")
require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime/1000), "ecotone fork must be aligned")
}

sd := e2eutils.Setup(t, dp, defaultAlloc)
Expand Down
5 changes: 5 additions & 0 deletions op-e2e/e2eutils/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,11 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
PlasmaConfig: pcfg,
}

if rollupCfg.BlockTime <= 3 {
// Convert the block time to a millisecond timestamp.
rollupCfg.BlockTime = rollupCfg.BlockTime * 1000
}

require.NoError(t, rollupCfg.Check())

// Sanity check that the config is correct
Expand Down
2 changes: 1 addition & 1 deletion op-e2e/op_geth.go
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ func (d *OpGeth) StartBlockBuilding(ctx context.Context, attrs *eth.PayloadAttri
// CreatePayloadAttributes creates a valid PayloadAttributes containing a L1Info deposit transaction followed by the supplied transactions.
func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.PayloadAttributes, error) {
timestamp := d.L2Head.Timestamp + 2
l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp))
l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp*1000) /*ms*/)
if err != nil {
return nil, err
}
Expand Down
3 changes: 3 additions & 0 deletions op-e2e/op_geth_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -435,6 +435,9 @@ func TestRegolith(t *testing.T) {
// We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime
if cfg.DeployConfig.L2BlockTime <= 3 {
cfg.DeployConfig.L2BlockTime = cfg.DeployConfig.L2BlockTime * 1000 // ms
}

ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
Expand Down
4 changes: 4 additions & 0 deletions op-e2e/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -547,6 +547,10 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
return nil, err
}
sys.RollupConfig = &defaultConfig
if sys.RollupConfig.BlockTime <= 3 {
// Convert the block time to a millisecond timestamp.
sys.RollupConfig.BlockTime = sys.RollupConfig.BlockTime * 1000
}

// Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2
bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LevelInfo).New("role", "l1_cl"),
Expand Down
2 changes: 1 addition & 1 deletion op-e2e/system_adminrpc_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ func TestStopStartSequencer(t *testing.T) {
require.False(t, active, "sequencer should be inactive")

blockBefore := latestBlock(t, l2Seq)
time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime+1) * time.Second)
time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime/1000+1) * time.Second)
blockAfter := latestBlock(t, l2Seq)
require.Equal(t, blockAfter, blockBefore, "Chain advanced after stopping sequencer")

Expand Down
2 changes: 1 addition & 1 deletion op-node/p2p/sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -878,7 +878,7 @@ func (srv *ReqRespServer) handleSyncRequest(ctx context.Context, stream network.
if req < srv.cfg.Genesis.L2.Number {
return req, fmt.Errorf("cannot serve request for L2 block %d before genesis %d: %w", req, srv.cfg.Genesis.L2.Number, invalidRequestErr)
}
max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().Unix()))
max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().UnixMilli()))
if err != nil {
return req, fmt.Errorf("cannot determine max target block number to verify request: %w", invalidRequestErr)
}
Expand Down
2 changes: 1 addition & 1 deletion op-node/rollup/chain_spec_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ var testConfig = Config{
GasLimit: 30_000_000,
},
},
BlockTime: 2,
BlockTime: 2000,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
Expand Down
23 changes: 12 additions & 11 deletions op-node/rollup/derive/attributes.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"

"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/bsc"
Expand Down Expand Up @@ -107,7 +108,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex

// Calculate bsc block base fee
var l1BaseFee *big.Int
if ba.rollupCfg.IsSnow(l2Parent.Time + ba.rollupCfg.BlockTime) {
if ba.rollupCfg.IsSnow((l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime) / 1000) {
l1BaseFee, err = SnowL1GasPrice(ctx, ba, epoch)
if err != nil {
return nil, err
Expand All @@ -124,29 +125,29 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
l1Info = bsc.NewBlockInfoBSCWrapper(l1Info, l1BaseFee)

// Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2
nextL2Time := l2Parent.Time + ba.rollupCfg.BlockTime
if nextL2Time < l1Info.Time() {
nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime
if nextL2MilliTime < l1Info.MilliTime() {
return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d",
l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time()))
l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MilliTime()))
}

var upgradeTxs []hexutil.Bytes
if ba.rollupCfg.IsEcotoneActivationBlock(nextL2Time) {
if ba.rollupCfg.IsEcotoneActivationBlock(nextL2MilliTime / 1000) {
upgradeTxs, err = EcotoneNetworkUpgradeTransactions()
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to build ecotone network upgrade txs: %w", err))
}
}

if ba.rollupCfg.IsFjordActivationBlock(nextL2Time) {
if ba.rollupCfg.IsFjordActivationBlock(nextL2MilliTime / 1000) {
fjord, err := FjordNetworkUpgradeTransactions()
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to build fjord network upgrade txs: %w", err))
}
upgradeTxs = append(upgradeTxs, fjord...)
}

l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time)
l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2MilliTime)
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err))
}
Expand All @@ -157,21 +158,21 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
txs = append(txs, upgradeTxs...)

var withdrawals *types.Withdrawals
if ba.rollupCfg.IsCanyon(nextL2Time) {
if ba.rollupCfg.IsCanyon(nextL2MilliTime / 1000) {
withdrawals = &types.Withdrawals{}
}

var parentBeaconRoot *common.Hash
if ba.rollupCfg.IsEcotone(nextL2Time) {
if ba.rollupCfg.IsEcotone(nextL2MilliTime / 1000) {
parentBeaconRoot = l1Info.ParentBeaconRoot()
if parentBeaconRoot == nil { // default to zero hash if there is no beacon-block-root available
parentBeaconRoot = new(common.Hash)
}
}

return &eth.PayloadAttributes{
Timestamp: hexutil.Uint64(nextL2Time),
PrevRandao: eth.Bytes32(l1Info.MixDigest()),
Timestamp: hexutil.Uint64(nextL2MilliTime / 1000), // second part
PrevRandao: uint256.NewInt(nextL2MilliTime % 1000).Bytes32(), // millisecond part
SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr,
Transactions: txs,
NoTxPool: true,
Expand Down
2 changes: 1 addition & 1 deletion op-node/rollup/derive/attributes_queue.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Sing
return nil, NewResetError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash))
}
// sanity check timestamp
if expected := l2SafeHead.Time + aq.config.BlockTime; expected != batch.Timestamp {
if expected := l2SafeHead.MillisecondTimestamp() + aq.config.BlockTime; expected != batch.Timestamp {
return nil, NewResetError(fmt.Errorf("valid batch has bad timestamp %d, expected %d", batch.Timestamp, expected))
}
fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
Expand Down
14 changes: 7 additions & 7 deletions op-node/rollup/derive/batch_queue.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si
if len(bq.nextSpan) > 0 {
// There are cached singular batches derived from the span batch.
// Check if the next cached batch matches the given parent block.
if bq.nextSpan[0].Timestamp == parent.Time+bq.config.BlockTime {
if bq.nextSpan[0].Timestamp == parent.MillisecondTimestamp()+bq.config.BlockTime {
// Pop first one and return.
nextBatch := bq.popNextBatch(parent)
// len(bq.nextSpan) == 0 means it's the last batch of the span.
Expand Down Expand Up @@ -257,7 +257,7 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, paren
// Find the first-seen batch that matches all validity conditions.
// We may not have sufficient information to proceed filtering, and then we stop.
// There may be none: in that case we force-create an empty batch
nextTimestamp := parent.Time + bq.config.BlockTime
nextMilliTimestamp := parent.MillisecondTimestamp() + bq.config.BlockTime
var nextBatch *BatchWithL1InclusionBlock

// Go over all batches, in order of inclusion, and find the first batch we can accept.
Expand Down Expand Up @@ -304,7 +304,7 @@ batchLoop:
firstOfEpoch := epoch.Number == parent.L1Origin.Number+1

bq.log.Trace("Potentially generating an empty batch",
"expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "nextTimestamp", nextTimestamp,
"expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "next_ms_timestamp", nextMilliTimestamp,
"epoch_time", epoch.Time, "len_l1_blocks", len(bq.l1Blocks), "firstOfEpoch", firstOfEpoch)

if !forceEmptyBatches {
Expand All @@ -321,20 +321,20 @@ batchLoop:
// Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin,
// to preserve that L2 time >= L1 time. If this is the first block of the epoch, always generate a
// batch to ensure that we at least have one batch per epoch.
if nextTimestamp < nextEpoch.Time || firstOfEpoch {
bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp)
if nextMilliTimestamp < nextEpoch.MilliTimestamp() || firstOfEpoch {
bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextMilliTimestamp)
return &SingularBatch{
ParentHash: parent.Hash,
EpochNum: rollup.Epoch(epoch.Number),
EpochHash: epoch.Hash,
Timestamp: nextTimestamp,
Timestamp: nextMilliTimestamp,
Transactions: nil,
}, nil
}

// At this point we have auto generated every batch for the current epoch
// that we can, so we can advance to the next epoch.
bq.log.Trace("Advancing internal L1 blocks", "next_timestamp", nextTimestamp, "next_epoch_time", nextEpoch.Time)
bq.log.Trace("Advancing internal L1 blocks", "next_ms_timestamp", nextMilliTimestamp, "next_epoch_ms_time", nextEpoch.MilliTimestamp())
bq.l1Blocks = bq.l1Blocks[1:]
return nil, io.EOF
}
Loading
Loading