refactor(protocol): use require-statement for clarity (#18038)
dantaik authored Sep 5, 2024
1 parent 32d7c90 commit ac95a08
Showing 9 changed files with 96 additions and 109 deletions.
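The pattern applied across all nine files is the same: each `if (condition) revert SomeError();` guard becomes `require(oppositeCondition, SomeError());`, so every check reads as the invariant that must hold while the custom error and its revert data stay unchanged. A minimal sketch of the before/after shapes, assuming a Solidity version that accepts a custom error as the second argument of require (roughly 0.8.26 and later); the contract and error names below are illustrative, not taken from the Taiko protocol:

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.27;

// Illustrative sketch only; RequireRefactorSketch and NotOwner are placeholder names.
contract RequireRefactorSketch {
    error NotOwner();

    address public owner = msg.sender;

    function beforeStyle() external view {
        // Old style: guard on the failure condition, then revert explicitly.
        if (msg.sender != owner) revert NotOwner();
    }

    function afterStyle() external view {
        // New style: state the condition that must hold; the revert data is identical.
        require(msg.sender == owner, NotOwner());
    }
}

Both functions revert with the same NotOwner() selector, so callers and off-chain error decoding are unaffected; only the readability of the guard changes.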
2 changes: 1 addition & 1 deletion packages/protocol/contracts/L1/TaikoL1.sol
@@ -25,7 +25,7 @@ contract TaikoL1 is EssentialContract, ITaikoL1, TaikoEvents {
     uint256[50] private __gap;
 
     modifier whenProvingNotPaused() {
-        if (state.slotB.provingPaused) revert LibProving.L1_PROVING_PAUSED();
+        require(!state.slotB.provingPaused, LibProving.L1_PROVING_PAUSED());
         _;
     }
 
46 changes: 22 additions & 24 deletions packages/protocol/contracts/L1/libs/LibProposing.sol
@@ -57,9 +57,8 @@ library LibProposing {
         public
         returns (TaikoData.BlockMetadataV2[] memory metas_)
     {
-        if (_paramsArr.length == 0 || _paramsArr.length != _txListArr.length) {
-            revert L1_INVALID_PARAMS();
-        }
+        require(_paramsArr.length != 0, L1_INVALID_PARAMS());
+        require(_paramsArr.length == _txListArr.length, L1_INVALID_PARAMS());
 
         metas_ = new TaikoData.BlockMetadataV2[](_paramsArr.length);
 
@@ -118,9 +117,10 @@
 
         // It's essential to ensure that the ring buffer for proposed blocks
         // still has space for at least one more block.
-        if (local.b.numBlocks >= local.b.lastVerifiedBlockId + _config.blockMaxProposals + 1) {
-            revert L1_TOO_MANY_BLOCKS();
-        }
+        require(
+            local.b.numBlocks < local.b.lastVerifiedBlockId + _config.blockMaxProposals + 1,
+            L1_TOO_MANY_BLOCKS()
+        );
 
         if (_params.length != 0) {
             local.params = abi.decode(_params, (TaikoData.BlockParamsV2));
@@ -147,32 +147,30 @@
         // We only allow the L1 block to be 2 epochs old.
         // The other constraint is that the L1 block number needs to be larger than or equal
         // the one in the previous L2 block.
-        if (
-            local.params.anchorBlockId + _config.maxAnchorHeightOffset < block.number //
-                || local.params.anchorBlockId >= block.number
-                || local.params.anchorBlockId < parentBlk.proposedIn
-        ) {
-            revert L1_INVALID_ANCHOR_BLOCK();
-        }
+        require(
+            local.params.anchorBlockId + _config.maxAnchorHeightOffset >= block.number,
+            L1_INVALID_ANCHOR_BLOCK()
+        );
+        require(local.params.anchorBlockId < block.number, L1_INVALID_ANCHOR_BLOCK());
+        require(local.params.anchorBlockId >= parentBlk.proposedIn, L1_INVALID_ANCHOR_BLOCK());
 
         // Verify the passed in timestamp.
         // We only allow the timestamp to be 2 epochs old.
         // The other constraint is that the timestamp needs to be larger than or equal the
         // one in the previous L2 block.
-        if (
-            local.params.timestamp + _config.maxAnchorHeightOffset * 12 < block.timestamp
-                || local.params.timestamp > block.timestamp
-                || local.params.timestamp < parentBlk.proposedAt
-        ) {
-            revert L1_INVALID_TIMESTAMP();
-        }
+        require(
+            local.params.timestamp + _config.maxAnchorHeightOffset * 12 >= block.timestamp,
+            L1_INVALID_TIMESTAMP()
+        );
+        require(local.params.timestamp <= block.timestamp, L1_INVALID_TIMESTAMP());
+        require(local.params.timestamp >= parentBlk.proposedAt, L1_INVALID_TIMESTAMP());
 
         // Check if parent block has the right meta hash. This is to allow the proposer to make
         // sure the block builds on the expected latest chain state.
         if (local.params.parentMetaHash == 0) {
             local.params.parentMetaHash = parentBlk.metaHash;
-        } else if (local.params.parentMetaHash != parentBlk.metaHash) {
-            revert L1_UNEXPECTED_PARENT();
+        } else {
+            require(local.params.parentMetaHash == parentBlk.metaHash, L1_UNEXPECTED_PARENT());
         }
 
         // Initialize metadata to compute a metaHash, which forms a part of
@@ -209,14 +207,14 @@
 
         // Update certain meta fields
         if (meta_.blobUsed) {
-            if (!LibNetwork.isDencunSupported(block.chainid)) revert L1_BLOB_NOT_AVAILABLE();
+            require(LibNetwork.isDencunSupported(block.chainid), L1_BLOB_NOT_AVAILABLE());
 
             // Always use the first blob in this transaction. If the
             // proposeBlock functions are called more than once in the same
             // L1 transaction, these multiple L2 blocks will share the same
             // blob.
             meta_.blobHash = blobhash(local.params.blobIndex);
-            if (meta_.blobHash == 0) revert L1_BLOB_NOT_FOUND();
+            require(meta_.blobHash != 0, L1_BLOB_NOT_FOUND());
         } else {
             meta_.blobHash = keccak256(_txList);
             emit CalldataTxList(meta_.id, _txList);
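Several of the LibProposing checks above also split one compound guard into multiple require statements: `if (a || b || c) revert E();` is equivalent, by De Morgan's law, to requiring `!a`, `!b`, and `!c` separately. A small sketch of how the anchor-block check decomposes, with placeholder names rather than the real TaikoData fields:

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.27;

// Illustrative sketch; InvalidAnchorBlock and the parameter names are placeholders.
library SplitConditionSketch {
    error InvalidAnchorBlock();

    // Old form (one guard):
    //   if (anchorId + maxOffset < current || anchorId >= current || anchorId < parentProposedIn) {
    //       revert InvalidAnchorBlock();
    //   }
    // New form: each negated disjunct becomes its own require.
    function checkAnchor(
        uint256 anchorId,
        uint256 maxOffset,
        uint256 current,
        uint256 parentProposedIn
    )
        internal
        pure
    {
        require(anchorId + maxOffset >= current, InvalidAnchorBlock());
        require(anchorId < current, InvalidAnchorBlock());
        require(anchorId >= parentProposedIn, InvalidAnchorBlock());
    }
}

Because all three requires raise the same error, whichever check fails first produces the same revert data as the original compound guard.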
56 changes: 25 additions & 31 deletions packages/protocol/contracts/L1/libs/LibProving.sol
@@ -86,7 +86,7 @@
     /// @param _state Current TaikoData.State.
     /// @param _pause The pause status.
     function pauseProving(TaikoData.State storage _state, bool _pause) internal {
-        if (_state.slotB.provingPaused == _pause) revert L1_INVALID_PAUSE_STATUS();
+        require(_state.slotB.provingPaused != _pause, L1_INVALID_PAUSE_STATUS());
         _state.slotB.provingPaused = _pause;
 
         if (!_pause) {
@@ -115,14 +115,13 @@
     )
         public
     {
-        if (_blockIds.length == 0 || _blockIds.length != _inputs.length) {
-            revert L1_INVALID_PARAMS();
-        }
+        require(_blockIds.length != 0, L1_INVALID_PARAMS());
+        require(_blockIds.length == _inputs.length, L1_INVALID_PARAMS());
 
         TaikoData.TierProof memory batchProof;
         if (_batchProof.length != 0) {
             batchProof = abi.decode(_batchProof, (TaikoData.TierProof));
-            if (batchProof.tier == 0) revert L1_INVALID_TIER();
+            require(batchProof.tier != 0, L1_INVALID_TIER());
         }
 
         IVerifier.ContextV2[] memory ctxs = new IVerifier.ContextV2[](_blockIds.length);
@@ -141,8 +140,8 @@
                 if (!batchVerifierNameSet) {
                     batchVerifierNameSet = true;
                     batchVerifierName = _verifierName;
-                } else if (batchVerifierName != _verifierName) {
-                    revert L1_DIFF_VERIFIER();
+                } else {
+                    require(batchVerifierName == _verifierName, L1_DIFF_VERIFIER());
                 }
             }
         }
@@ -203,19 +202,18 @@
             local.proof = _batchProof;
         }
 
-        if (_blockId != local.meta.id) revert LibUtils.L1_INVALID_BLOCK_ID();
+        require(_blockId == local.meta.id, LibUtils.L1_INVALID_BLOCK_ID());
 
         // Make sure parentHash is not zero
         // To contest an existing transition, simply use any non-zero value as
         // the blockHash and stateRoot.
-        if (ctx_.tran.parentHash == 0 || ctx_.tran.blockHash == 0 || ctx_.tran.stateRoot == 0) {
-            revert L1_INVALID_TRANSITION();
-        }
+        require(ctx_.tran.parentHash != 0, L1_INVALID_TRANSITION());
+        require(ctx_.tran.blockHash != 0, L1_INVALID_TRANSITION());
+        require(ctx_.tran.stateRoot != 0, L1_INVALID_TRANSITION());
 
         // Check that the block has been proposed but has not yet been verified.
-        if (local.meta.id <= local.b.lastVerifiedBlockId || local.meta.id >= local.b.numBlocks) {
-            revert LibUtils.L1_INVALID_BLOCK_ID();
-        }
+        require(local.meta.id > local.b.lastVerifiedBlockId, LibUtils.L1_INVALID_BLOCK_ID());
+        require(local.meta.id < local.b.numBlocks, LibUtils.L1_INVALID_BLOCK_ID());
 
         local.slot = local.meta.id % _config.blockRingBufferSize;
         TaikoData.BlockV2 storage blk = _state.blocks[local.slot];
@@ -242,7 +240,7 @@
         // caution.
         {
             bytes32 metaHash = keccak256(abi.encode(local.meta));
-            if (local.metaHash != metaHash) revert LibUtils.L1_BLOCK_MISMATCH();
+            require(local.metaHash == metaHash, LibUtils.L1_BLOCK_MISMATCH());
         }
 
         // Each transition is uniquely identified by the parentHash, with the
@@ -254,12 +252,9 @@
 
         // The new proof must meet or exceed the minimum tier required by the
        // block or the previous proof; it cannot be on a lower tier.
-        if (
-            local.proof.tier == 0 || local.proof.tier < local.meta.minTier
-                || local.proof.tier < ts.tier
-        ) {
-            revert L1_INVALID_TIER();
-        }
+        require(local.proof.tier != 0, L1_INVALID_TIER());
+        require(local.proof.tier >= local.meta.minTier, L1_INVALID_TIER());
+        require(local.proof.tier >= ts.tier, L1_INVALID_TIER());
 
         // Retrieve the tier configurations. If the tier is not supported, the
         // subsequent action will result in a revert.
@@ -284,7 +279,7 @@
             local.tier.contestBond != 0 && ts.contester == address(0) && local.tid == 1
                 && ts.tier == 0 && local.inProvingWindow
         ) {
-            if (msg.sender != local.assignedProver) revert L1_NOT_ASSIGNED_PROVER();
+            require(msg.sender == local.assignedProver, L1_NOT_ASSIGNED_PROVER());
         }
         // We must verify the proof, and any failure in proof verification will
         // result in a revert.
@@ -345,7 +340,7 @@
         } else {
             // New transition and old transition on the same tier - and if this transaction tries to
             // prove the same, it reverts
-            if (local.sameTransition) revert L1_ALREADY_PROVED();
+            require(!local.sameTransition, L1_ALREADY_PROVED());
 
             if (local.isTopTier) {
                 // The top tier prover re-proves.
@@ -366,17 +361,16 @@
                 });
             } else {
                 // Contesting but not on the highest tier
-                if (ts.contester != address(0)) revert L1_ALREADY_CONTESTED();
+                require(ts.contester == address(0), L1_ALREADY_CONTESTED());
 
                 // Making it a non-sliding window, relative when ts.timestamp was registered (or to
                 // lastUnpaused if that one is bigger)
-                if (
-                    LibUtils.isPostDeadline(
+                require(
+                    !LibUtils.isPostDeadline(
                         ts.timestamp, local.b.lastUnpausedAt, local.tier.cooldownWindow
-                    )
-                ) {
-                    revert L1_CANNOT_CONTEST();
-                }
+                    ),
+                    L1_CANNOT_CONTEST()
+                );
 
                 // _checkIfContestable(/*_state,*/ tier.cooldownWindow, ts.timestamp);
                 // Burn the contest bond from the prover.
@@ -520,7 +514,7 @@
                 LibBonds.creditBond(_state, _ts.contester, _ts.contestBond + reward * 3);
             }
         } else {
-            if (_local.sameTransition) revert L1_ALREADY_PROVED();
+            require(!_local.sameTransition, L1_ALREADY_PROVED());
 
             // The code below will be executed if
             // - 1) the transition is proved for the fist time, or
13 changes: 7 additions & 6 deletions packages/protocol/contracts/L1/libs/LibUtils.sol
@@ -34,7 +34,7 @@ library LibUtils {
     /// @param _state The state to initialize.
     /// @param _genesisBlockHash The block hash of the genesis block.
     function init(TaikoData.State storage _state, bytes32 _genesisBlockHash) internal {
-        if (_genesisBlockHash == 0) revert L1_INVALID_GENESIS_HASH();
+        require(_genesisBlockHash != 0, L1_INVALID_GENESIS_HASH());
         // Initialize state
         _state.slotA.genesisHeight = uint64(block.number);
         _state.slotA.genesisTimestamp = uint64(block.timestamp);
@@ -78,7 +78,7 @@
     {
         slot_ = _blockId % _config.blockRingBufferSize;
         blk_ = _state.blocks[slot_];
-        if (blk_.blockId != _blockId) revert L1_INVALID_BLOCK_ID();
+        require(blk_.blockId == _blockId, L1_INVALID_BLOCK_ID());
     }
 
     /// @dev Retrieves a block's block hash and state root.
@@ -128,7 +128,8 @@
     {
         (TaikoData.BlockV2 storage blk, uint64 slot) = getBlock(_state, _config, _blockId);
 
-        if (_tid == 0 || _tid >= blk.nextTransitionId) revert L1_TRANSITION_NOT_FOUND();
+        require(_tid != 0, L1_TRANSITION_NOT_FOUND());
+        require(_tid < blk.nextTransitionId, L1_TRANSITION_NOT_FOUND());
         return _state.transitions[slot][_tid];
     }
 
@@ -152,7 +153,7 @@
         (TaikoData.BlockV2 storage blk, uint64 slot) = getBlock(_state, _config, _blockId);
 
         uint24 tid = getTransitionId(_state, blk, slot, _parentHash);
-        if (tid == 0) revert L1_TRANSITION_NOT_FOUND();
+        require(tid != 0, L1_TRANSITION_NOT_FOUND());
 
         return _state.transitions[slot][tid];
     }
@@ -176,10 +177,10 @@
     {
         if (_state.transitions[_slot][1].key == _parentHash) {
             tid_ = 1;
-            if (tid_ >= _blk.nextTransitionId) revert L1_UNEXPECTED_TRANSITION_ID();
+            require(tid_ < _blk.nextTransitionId, L1_UNEXPECTED_TRANSITION_ID());
         } else {
             tid_ = _state.transitionIds[_blk.blockId][_parentHash];
-            if (tid_ != 0 && tid_ >= _blk.nextTransitionId) revert L1_UNEXPECTED_TRANSITION_ID();
+            require(tid_ == 0 || tid_ < _blk.nextTransitionId, L1_UNEXPECTED_TRANSITION_ID());
         }
     }
 
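The second LibUtils change runs the rewrite in the other direction: a guard joined with `&&` becomes a single require whose condition is the De Morgan negation, since `!(tid != 0 && tid >= next)` is `tid == 0 || tid < next`. A tiny sketch of that equivalence; the free function below is illustrative and not part of LibUtils:

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.27;

// Accepts an uninitialized id (0) or any id strictly below nextTransitionId,
// mirroring the rewritten guard in getTransitionId above.
function isValidTransitionId(uint24 tid, uint24 nextTransitionId) pure returns (bool) {
    return tid == 0 || tid < nextTransitionId;
}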
6 changes: 3 additions & 3 deletions packages/protocol/contracts/L1/libs/LibVerifying.sol
@@ -53,14 +53,14 @@
         local.slot = local.blockId % _config.blockRingBufferSize;
 
         TaikoData.BlockV2 storage blk = _state.blocks[local.slot];
-        if (blk.blockId != local.blockId) revert L1_BLOCK_MISMATCH();
+        require(blk.blockId == local.blockId, L1_BLOCK_MISMATCH());
 
         local.lastVerifiedTransitionId = blk.verifiedTransitionId;
         local.tid = local.lastVerifiedTransitionId;
 
         // The following scenario should never occur but is included as a
         // precaution.
-        if (local.tid == 0) revert L1_TRANSITION_ID_ZERO();
+        require(local.tid != 0, L1_TRANSITION_ID_ZERO());
 
         // The `blockHash` variable represents the most recently trusted
         // blockHash on L2.
@@ -80,7 +80,7 @@
             local.slot = local.blockId % _config.blockRingBufferSize;
 
             blk = _state.blocks[local.slot];
-            if (blk.blockId != local.blockId) revert L1_BLOCK_MISMATCH();
+            require(blk.blockId == local.blockId, L1_BLOCK_MISMATCH());
 
             local.tid = LibUtils.getTransitionId(_state, blk, local.slot, local.blockHash);
             // When `tid` is 0, it indicates that there is no proven
Diffs for the remaining four changed files were not loaded on this page.
