diff --git a/hive_integration/nodocker/consensus/consensus_sim.nim b/hive_integration/nodocker/consensus/consensus_sim.nim
index 3afcf52a43..8315936c50 100644
--- a/hive_integration/nodocker/consensus/consensus_sim.nim
+++ b/hive_integration/nodocker/consensus/consensus_sim.nim
@@ -27,7 +27,7 @@ proc processChainData(cd: ChainData, taskPool: Taskpool): TestStatus =
     cd.params
   )
-  let c = newForkedChain(com, com.genesisHeader)
+  let c = ForkedChainRef.init(com)
 
   for bytes in cd.blocksRlp:
     # ignore return value here
diff --git a/hive_integration/nodocker/engine/node.nim b/hive_integration/nodocker/engine/node.nim
index 82bc4b8216..2109ca05df 100644
--- a/hive_integration/nodocker/engine/node.nim
+++ b/hive_integration/nodocker/engine/node.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2024 Status Research & Development GmbH
+# Copyright (c) 2024-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -42,8 +42,6 @@ proc processBlock(
   ## implementations (but can be safely removed, as well.)
   ## variant of `processBlock()` where the `header` argument is explicitly set.
   template header: Header = blk.header
-  var dbTx = vmState.com.db.ctx.txFrameBegin()
-  defer: dbTx.dispose()
 
   let com = vmState.com
   if com.daoForkSupport and
@@ -61,61 +59,55 @@ proc processBlock(
       vmState.ledger.addBalance(withdrawal.address, withdrawal.weiAmount)
 
   if header.ommersHash != EMPTY_UNCLE_HASH:
-    discard com.db.persistUncles(blk.uncles)
+    discard vmState.ledger.txFrame.persistUncles(blk.uncles)
 
   # EIP-3675: no reward for miner in POA/POS
-  if com.proofOfStake(header):
+  if com.proofOfStake(header, vmState.ledger.txFrame):
     vmState.calculateReward(header, blk.uncles)
 
   vmState.mutateLedger:
     let clearEmptyAccount = com.isSpuriousOrLater(header.number)
     db.persist(clearEmptyAccount)
 
-  dbTx.commit()
+  vmState.ledger.txFrame.commit()
 
   ok()
 
-proc getVmState(c: ChainRef, header: Header):
-    Result[BaseVMState, void] =
-  let vmState = BaseVMState()
-  if not vmState.init(header, c.com, storeSlotHash = false):
-    debug "Cannot initialise VmState",
-      number = header.number
-    return err()
-
+proc getVmState(c: ChainRef, header: Header, txFrame: CoreDbTxRef):
+    Result[BaseVMState, string] =
+  let
+    parent = ?txFrame.getBlockHeader(header.parentHash)
+    vmState = BaseVMState()
+  vmState.init(parent, header, c.com, txFrame, storeSlotHash = false)
   return ok(vmState)
 
 # A stripped down version of persistBlocks without validation
 # intended to accept invalid blocks
 proc setBlock*(c: ChainRef; blk: Block): Result[void, string] =
   template header: Header = blk.header
-  let dbTx = c.db.ctx.txFrameBegin()
-  defer: dbTx.dispose()
+  let txFrame = c.db.ctx.txFrameBegin(nil)
+  defer: txFrame.dispose()
 
   # Needed for figuring out whether KVT cleanup is due (see at the end)
   let
-    vmState = c.getVmState(header).valueOr:
-      return err("no vmstate")
+    vmState = ? c.getVmState(header, txFrame)
 
   ? vmState.processBlock(blk)
 
-  ? c.db.persistHeaderAndSetHead(header, c.com.startOfHistory)
+  ? txFrame.persistHeaderAndSetHead(header, c.com.startOfHistory)
 
-  c.db.persistTransactions(header.number, header.txRoot, blk.transactions)
-  c.db.persistReceipts(header.receiptsRoot, vmState.receipts)
+  txFrame.persistTransactions(header.number, header.txRoot, blk.transactions)
+  txFrame.persistReceipts(header.receiptsRoot, vmState.receipts)
   if blk.withdrawals.isSome:
-    c.db.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get)
+    txFrame.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get)
 
   # update currentBlock *after* we persist it
   # so the RPC returns consistent results
   # between eth_blockNumber and eth_syncing
   c.com.syncCurrent = header.number
 
-  dbTx.commit()
+  txFrame.commit()
 
-  # The `c.db.persistent()` call is ignored by the legacy DB which
-  # automatically saves persistently when reaching the zero level transaction.
-  #
   # For the `Aristo` database, this code position is only reached if
   # the parent state of the first block (as registered in `headers[0]`) was
   # the canonical state before updating. So this state will be saved with
diff --git a/hive_integration/nodocker/pyspec/test_env.nim b/hive_integration/nodocker/pyspec/test_env.nim
index 4567158ffc..fec93d5a62 100644
--- a/hive_integration/nodocker/pyspec/test_env.nim
+++ b/hive_integration/nodocker/pyspec/test_env.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -37,9 +37,9 @@ proc genesisHeader(node: JsonNode): Header =
 proc initializeDb(memDB: CoreDbRef, node: JsonNode): Hash32 =
   let
     genesisHeader = node.genesisHeader
-    ledger = LedgerRef.init(memDB)
+    ledger = LedgerRef.init(memDB.baseTxFrame())
 
-  memDB.persistHeaderAndSetHead(genesisHeader).expect("persistHeader no error")
+  ledger.txFrame.persistHeaderAndSetHead(genesisHeader).expect("persistHeader no error")
   setupLedger(node["pre"], ledger)
   ledger.persist()
   doAssert ledger.getStateRoot == genesisHeader.stateRoot
diff --git a/nimbus/beacon/api_handler/api_exchangeconf.nim b/nimbus/beacon/api_handler/api_exchangeconf.nim
index 0b54bc50ab..a179db778c 100644
--- a/nimbus/beacon/api_handler/api_exchangeconf.nim
+++ b/nimbus/beacon/api_handler/api_exchangeconf.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 #  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -43,7 +43,7 @@ proc exchangeConf*(ben: BeaconEngineRef,
     terminalBlockHash = conf.terminalBlockHash
 
   if terminalBlockHash != default(Hash32):
-    let headerHash = db.getBlockHash(terminalBlockNumber).valueOr:
+    let headerHash = db.baseTxFrame().getBlockHash(terminalBlockNumber).valueOr:
       raise newException(ValueError, "cannot get terminal block hash, number $1, msg: $2" %
         [$terminalBlockNumber, error])
 
@@ -51,7 +51,7 @@
       raise newException(ValueError, "invalid terminal block hash, got $1 want $2" %
         [$terminalBlockHash, $headerHash])
 
-    let header = db.getBlockHeader(headerHash).valueOr:
+    let header = db.baseTxFrame().getBlockHeader(headerHash).valueOr:
       raise newException(ValueError, "cannot get terminal block header, hash $1, msg: $2" %
         [$terminalBlockHash, error])
diff --git a/nimbus/beacon/api_handler/api_forkchoice.nim
b/nimbus/beacon/api_handler/api_forkchoice.nim index 05e654f6fa..5ec7a73748 100644 --- a/nimbus/beacon/api_handler/api_forkchoice.nim +++ b/nimbus/beacon/api_handler/api_forkchoice.nim @@ -76,7 +76,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, ForkchoiceUpdatedResponse = let com = ben.com - db = com.db + txFrame = ben.chain.latestTxFrame() chain = ben.chain blockHash = update.headBlockHash @@ -127,8 +127,8 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, let blockNumber = header.number if header.difficulty > 0.u256 or blockNumber == 0'u64: let - td = db.getScore(blockHash) - ptd = db.getScore(header.parentHash) + td = txFrame.getScore(blockHash) + ptd = txFrame.getScore(header.parentHash) ttd = com.ttd.get(high(UInt256)) if td.isNone or (blockNumber > 0'u64 and ptd.isNone): @@ -162,13 +162,14 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, # If the beacon client also advertised a finalized block, mark the local # chain final and completely in PoS mode. + let baseTxFrame = ben.chain.baseTxFrame let finalizedBlockHash = update.finalizedBlockHash if finalizedBlockHash != default(Hash32): if not ben.chain.isCanonical(finalizedBlockHash): warn "Final block not in canonical chain", hash=finalizedBlockHash.short raise invalidForkChoiceState("finalized block not canonical") - db.finalizedHeaderHash(finalizedBlockHash) + baseTxFrame.finalizedHeaderHash(finalizedBlockHash) let safeBlockHash = update.safeBlockHash if safeBlockHash != default(Hash32): @@ -176,7 +177,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, warn "Safe block not in canonical chain", hash=safeBlockHash.short raise invalidForkChoiceState("safe head not canonical") - db.safeHeaderHash(safeBlockHash) + baseTxFrame.safeHeaderHash(safeBlockHash) chain.forkChoice(blockHash, finalizedBlockHash).isOkOr: return invalidFCU(error, chain, header) diff --git a/nimbus/beacon/api_handler/api_newpayload.nim b/nimbus/beacon/api_handler/api_newpayload.nim index 215d57494e..80d4a22a5c 100644 --- a/nimbus/beacon/api_handler/api_newpayload.nim +++ b/nimbus/beacon/api_handler/api_newpayload.nim @@ -137,7 +137,7 @@ proc newPayload*(ben: BeaconEngineRef, let com = ben.com - db = com.db + txFrame = ben.chain.latestTxFrame() timestamp = ethTime payload.timestamp version = payload.version requestsHash = calcRequestsHash(executionRequests) @@ -185,9 +185,9 @@ proc newPayload*(ben: BeaconEngineRef, let ttd = com.ttd.get(high(UInt256)) if version == Version.V1: - let ptd = db.getScore(header.parentHash).valueOr: + let ptd = txFrame.getScore(header.parentHash).valueOr: 0.u256 - let gptd = db.getScore(parent.parentHash) + let gptd = txFrame.getScore(parent.parentHash) if ptd < ttd: warn "Ignoring pre-merge payload", number = header.number, hash = blockHash.short, ptd, ttd @@ -216,7 +216,7 @@ proc newPayload*(ben: BeaconEngineRef, warn "State not available, ignoring new payload", hash = blockHash, number = header.number - let blockHash = latestValidHash(db, parent, ttd) + let blockHash = latestValidHash(txFrame, parent, ttd) return acceptedStatus(blockHash) trace "Inserting block without sethead", @@ -229,7 +229,7 @@ proc newPayload*(ben: BeaconEngineRef, parent = header.parentHash.short, error = vres.error() ben.setInvalidAncestor(header, blockHash) - let blockHash = latestValidHash(db, parent, ttd) + let blockHash = latestValidHash(txFrame, parent, ttd) return invalidStatus(blockHash, vres.error()) ben.txPool.removeNewBlockTxs(blk, Opt.some(blockHash)) diff --git a/nimbus/beacon/api_handler/api_utils.nim b/nimbus/beacon/api_handler/api_utils.nim index 
5c1d2cef66..d9d20937c4 100644 --- a/nimbus/beacon/api_handler/api_utils.nim +++ b/nimbus/beacon/api_handler/api_utils.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) # * MIT license ([LICENSE-MIT](LICENSE-MIT)) @@ -172,12 +172,13 @@ proc tooLargeRequest*(msg: string): ref InvalidRequest = msg: msg ) -proc latestValidHash*(db: CoreDbRef, +proc latestValidHash*(txFrame: CoreDbTxRef, parent: Header, ttd: DifficultyInt): Hash32 = if parent.isGenesis: return default(Hash32) - let ptd = db.getScore(parent.parentHash).valueOr(0.u256) + # TODO shouldn't this be in forkedchainref? + let ptd = txFrame.getScore(parent.parentHash).valueOr(0.u256) if ptd >= ttd: parent.blockHash else: @@ -192,6 +193,6 @@ proc invalidFCU*(validationError: string, return invalidFCU(validationError) let blockHash = - latestValidHash(chain.db, parent, chain.com.ttd.get(high(UInt256))) + latestValidHash(chain.latestTxFrame, parent, chain.com.ttd.get(high(UInt256))) invalidFCU(validationError, blockHash) diff --git a/nimbus/common/chain_config_hash.nim b/nimbus/common/chain_config_hash.nim index e78da647d8..ed2252f687 100644 --- a/nimbus/common/chain_config_hash.nim +++ b/nimbus/common/chain_config_hash.nim @@ -66,11 +66,6 @@ func update[T: ref](ctx: var sha256, val: T) = for f in fields(val[]): ctx.update(f) -func update(ctx: var sha256, list: openArray[Opt[BlobSchedule]]) = - mixin update - for val in list: - ctx.update(val) - # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim index 7c730b32a9..4bcb4064fb 100644 --- a/nimbus/common/common.nim +++ b/nimbus/common/common.nim @@ -131,10 +131,10 @@ func daoCheck(conf: ChainConfig) = conf.daoForkBlock = conf.homesteadBlock proc initializeDb(com: CommonRef) = - let kvt = com.db.ctx.getKvt() - proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool = - kvt.hasKeyRc(key).expect "valid bool" - if canonicalHeadHashKey().toOpenArray notin kvt: + let txFrame = com.db.baseTxFrame() + proc contains(txFrame: CoreDbTxRef; key: openArray[byte]): bool = + txFrame.hasKeyRc(key).expect "valid bool" + if canonicalHeadHashKey().toOpenArray notin txFrame: info "Writing genesis to DB", blockHash = com.genesisHeader.rlpHash, stateRoot = com.genesisHeader.stateRoot, @@ -144,23 +144,23 @@ proc initializeDb(com: CommonRef) = nonce = com.genesisHeader.nonce doAssert(com.genesisHeader.number == 0.BlockNumber, "can't commit genesis block with number > 0") - com.db.persistHeaderAndSetHead(com.genesisHeader, + txFrame.persistHeaderAndSetHead(com.genesisHeader, startOfHistory=com.genesisHeader.parentHash). 
expect("can persist genesis header") - doAssert(canonicalHeadHashKey().toOpenArray in kvt) + doAssert(canonicalHeadHashKey().toOpenArray in txFrame) # The database must at least contain the base and head pointers - the base # is implicitly considered finalized let - baseNum = com.db.getSavedStateBlockNumber() - base = com.db.getBlockHeader(baseNum).valueOr: + baseNum = txFrame.getSavedStateBlockNumber() + base = txFrame.getBlockHeader(baseNum).valueOr: fatal "Cannot load base block header", baseNum, err = error quit 1 - finalized = com.db.finalizedHeader().valueOr: + finalized = txFrame.finalizedHeader().valueOr: debug "No finalized block stored in database, reverting to base" base - head = com.db.getCanonicalHead().valueOr: + head = txFrame.getCanonicalHead().valueOr: fatal "Cannot load canonical block header", err = error quit 1 @@ -201,10 +201,12 @@ proc init(com : CommonRef, time: Opt.some(genesis.timestamp) ) fork = toHardFork(com.forkTransitionTable, forkDeterminer) + txFrame = db.baseTxFrame() # Must not overwrite the global state on the single state DB - com.genesisHeader = db.getBlockHeader(0.BlockNumber).valueOr: - toGenesisHeader(genesis, fork, com.db) + + com.genesisHeader = txFrame.getBlockHeader(0.BlockNumber).valueOr: + toGenesisHeader(genesis, fork, txFrame) com.setForkId(com.genesisHeader) @@ -213,13 +215,13 @@ proc init(com : CommonRef, com.initializeDb() -proc isBlockAfterTtd(com: CommonRef, header: Header): bool = +proc isBlockAfterTtd(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool = if com.config.terminalTotalDifficulty.isNone: return false let ttd = com.config.terminalTotalDifficulty.get() - ptd = com.db.getScore(header.parentHash).valueOr: + ptd = txFrame.getScore(header.parentHash).valueOr: return false td = ptd + header.difficulty ptd >= ttd and td >= ttd @@ -331,7 +333,7 @@ func isCancunOrLater*(com: CommonRef, t: EthTime): bool = func isPragueOrLater*(com: CommonRef, t: EthTime): bool = com.config.pragueTime.isSome and t >= com.config.pragueTime.get -proc proofOfStake*(com: CommonRef, header: Header): bool = +proc proofOfStake*(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool = if com.config.posBlock.isSome: # see comments of posBlock in common/hardforks.nim header.number >= com.config.posBlock.get @@ -339,7 +341,7 @@ proc proofOfStake*(com: CommonRef, header: Header): bool = header.number >= com.config.mergeNetsplitBlock.get else: # This costly check is only executed from test suite - com.isBlockAfterTtd(header) + com.isBlockAfterTtd(header, txFrame) func depositContractAddress*(com: CommonRef): Address = com.config.depositContractAddress.get(default(Address)) diff --git a/nimbus/common/genesis.nim b/nimbus/common/genesis.nim index 22299c5a2a..b34bb00b1b 100644 --- a/nimbus/common/genesis.nim +++ b/nimbus/common/genesis.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -25,7 +25,7 @@ import proc toGenesisHeader*( g: Genesis; - db: CoreDbRef; + db: CoreDbTxRef; fork: HardFork; ): Header = ## Initialise block chain DB accounts derived from the `genesis.alloc` table @@ -86,16 +86,16 @@ proc toGenesisHeader*( proc toGenesisHeader*( genesis: Genesis; fork: HardFork; - db = CoreDbRef(nil)): Header = + db = CoreDbTxRef(nil)): Header = ## Generate the genesis block header from the `genesis` and 
`config` ## argument value. let - db = if db.isNil: AristoDbMemory.newCoreDbRef() else: db + db = if db.isNil: AristoDbMemory.newCoreDbRef().ctx.txFrameBegin(nil) else: db toGenesisHeader(genesis, db, fork) proc toGenesisHeader*( params: NetworkParams; - db = CoreDbRef(nil) + db = CoreDbTxRef(nil) ): Header = ## Generate the genesis block header from the `genesis` and `config` ## argument value. diff --git a/nimbus/core/chain/forked_chain.nim b/nimbus/core/chain/forked_chain.nim index 45d08c59d6..6b1f53d09f 100644 --- a/nimbus/core/chain/forked_chain.nim +++ b/nimbus/core/chain/forked_chain.nim @@ -19,7 +19,7 @@ import ../../evm/state, ../validate, ../executor/process_block, - ./forked_chain/[chain_desc, chain_kvt] + ./forked_chain/[chain_desc, chain_kvt, chain_branch] logScope: topics = "forked chain" @@ -34,44 +34,22 @@ export const BaseDistance = 128 -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -template shouldNotKeyError(info: string, body: untyped) = - try: - body - except KeyError as exc: - raiseAssert info & ": name=" & $exc.name & " msg=" & exc.msg - -proc deleteLineage(c: ForkedChainRef; top: Hash32) = - ## Starting at argument `top`, delete all entries from `c.blocks[]` along - ## the ancestor chain. - ## - var parent = top - while true: - c.blocks.withValue(parent, val): - let w = parent - parent = val.blk.header.parentHash - c.blocks.del(w) - continue - break - # ------------------------------------------------------------------------------ # Private functions # ------------------------------------------------------------------------------ proc processBlock(c: ForkedChainRef, parent: Header, + txFrame: CoreDbTxRef, blk: Block): Result[seq[Receipt], string] = template header(): Header = blk.header let vmState = BaseVMState() - vmState.init(parent, header, c.com) + vmState.init(parent, header, c.com, txFrame) if c.extraValidation: - ?c.com.validateHeaderAndKinship(blk, vmState.parent) + ?c.com.validateHeaderAndKinship(blk, vmState.parent, txFrame) ?vmState.processBlock( blk, @@ -83,7 +61,7 @@ proc processBlock(c: ForkedChainRef, # We still need to write header to database # because validateUncles still need it let blockHash = header.blockHash() - ?c.db.persistHeader( + ?txFrame.persistHeader( blockHash, header, c.com.startOfHistory) @@ -95,263 +73,145 @@ proc processBlock(c: ForkedChainRef, ok(move(vmState.receipts)) -func updateCursorHeads(c: ForkedChainRef, - cursorHash: Hash32, - header: Header) = - # Example of cursorHeads and cursor - # - # -- A1 - A2 - A3 -- D5 - D6 - # / / - # base - B1 - B2 - B3 - B4 - # \ - # --- C3 - C4 - # - # A3, B4, C4, and D6, are in cursorHeads - # Any one of them with blockHash == cursorHash - # is the active chain with cursor pointing to the - # latest block of that chain. - - for i in 0.. 
update head - c.blocks[c.cursorHash] = BlockDesc( - blk: blk, - receipts: move(receipts)) - c.updateCursorHeads(c.cursorHash, header) + txFrame.persistTransactions(header.number, header.txRoot, blk.transactions) + txFrame.persistReceipts(header.receiptsRoot, receipts) + discard txFrame.persistUncles(blk.uncles) + if blk.withdrawals.isSome: + txFrame.persistWithdrawals( + header.withdrawalsRoot.expect("WithdrawalsRoot should be verified before"), + blk.withdrawals.get) proc validateBlock(c: ForkedChainRef, - parent: Header, - blk: Block, - updateCursor: bool = true): Result[void, string] = - let dbTx = c.db.ctx.txFrameBegin() - defer: - dbTx.dispose() - - var res = c.processBlock(parent, blk) + parent: BlockPos, + blk: Block): Result[void, string] = + let blkHash = blk.header.blockHash + + if c.hashToBlock.hasKey(blkHash): + # Block exists, just return + return ok() + + let + parentFrame = parent.txFrame + txFrame = parentFrame.txFrameBegin + + # TODO shortLog-equivalent for eth types + debug "Validating block", + blkHash, blk = ( + parentHash: blk.header.parentHash, + coinbase: blk.header.coinbase, + stateRoot: blk.header.stateRoot, + transactionsRoot: blk.header.transactionsRoot, + receiptsRoot: blk.header.receiptsRoot, + number: blk.header.number, + gasLimit: blk.header.gasLimit, + gasUsed: blk.header.gasUsed, + nonce: blk.header.nonce, + baseFeePerGas: blk.header.baseFeePerGas, + withdrawalsRoot: blk.header.withdrawalsRoot, + blobGasUsed: blk.header.blobGasUsed, + excessBlobGas: blk.header.excessBlobGas, + parentBeaconBlockRoot: blk.header.parentBeaconBlockRoot, + requestsHash: blk.header.requestsHash, + ) + + var res = c.processBlock(parent.header, txFrame, blk) if res.isErr: - dbTx.rollback() + txFrame.rollback() return err(res.error) - dbTx.commit() - if updateCursor: - c.updateCursor(blk, move(res.value)) + c.writeBaggage(blk, txFrame, res.value) + c.updateBranch(parent, blk, blkHash, txFrame, move(res.value)) - let blkHash = blk.header.blockHash for i, tx in blk.transactions: c.txRecords[rlpHash(tx)] = (blkHash, uint64(i)) ok() -proc replaySegment*(c: ForkedChainRef, target: Hash32) = - # Replay from base+1 to target block - var - prevHash = target - chain = newSeq[Block]() - - shouldNotKeyError "replaySegment(target)": - while prevHash != c.baseHash: - chain.add c.blocks[prevHash].blk - prevHash = chain[^1].header.parentHash - - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - c.cursorHeader = c.baseHeader - for i in countdown(chain.high, chain.low): - c.validateBlock(c.cursorHeader, chain[i], - updateCursor = false).expect("have been validated before") - c.cursorHeader = chain[i].header - c.cursorHash = target - -proc replaySegment(c: ForkedChainRef, - target: Hash32, - parent: Header, - parentHash: Hash32) = - # Replay from parent+1 to target block - # with assumption last state is at parent - var - prevHash = target - chain = newSeq[Block]() - - shouldNotKeyError "replaySegment(target,parent)": - while prevHash != parentHash: - chain.add c.blocks[prevHash].blk - prevHash = chain[^1].header.parentHash - - c.cursorHeader = parent - for i in countdown(chain.high, chain.low): - c.validateBlock(c.cursorHeader, chain[i], - updateCursor = false).expect("have been validated before") - c.cursorHeader = chain[i].header - c.cursorHash = target - -proc writeBaggage(c: ForkedChainRef, target: Hash32) = - # Write baggage from base+1 to target block - template header(): Header = - blk.blk.header - - shouldNotKeyError "writeBaggage": - var prevHash = target - var count = 0'u64 
- while prevHash != c.baseHash: - let blk = c.blocks[prevHash] - c.db.persistTransactions(header.number, header.txRoot, blk.blk.transactions) - c.db.persistReceipts(header.receiptsRoot, blk.receipts) - discard c.db.persistUncles(blk.blk.uncles) - if blk.blk.withdrawals.isSome: - c.db.persistWithdrawals( - header.withdrawalsRoot.expect("WithdrawalsRoot should be verified before"), - blk.blk.withdrawals.get) - for tx in blk.blk.transactions: - c.txRecords.del(rlpHash(tx)) - prevHash = header.parentHash - count.inc - - # Log only if more than one block persisted - # This is to avoid log spamming, during normal operation - # of the client following the chain - # When multiple blocks are persisted together, it's mainly - # during `beacon sync` or `nrpc sync` - if count > 1: - notice "Finalized blocks persisted", - numberOfBlocks = count, - baseNumber = c.baseHeader.number, - baseHash = c.baseHash.short - else: - debug "Finalized blocks persisted", - numberOfBlocks = count, - taget = target.short, - baseNumber = c.baseHeader.number, - baseHash = c.baseHash.short - -func updateBase(c: ForkedChainRef, pvarc: PivotArc) = - ## Remove obsolete chains, example: - ## - ## A1 - A2 - A3 D5 - D6 - ## / / - ## base - B1 - B2 - [B3] - B4 - B5 - ## \ \ - ## C2 - C3 E4 - E5 - ## - ## where `B1..B5` is the `pvarc.cursor` arc and `[B5]` is the `pvarc.pv`. - # - ## The `base` will be moved to position `[B3]`. Both chains `A` and `C` - ## will be removed but not so for `D` and `E`, and `pivot` arc `B` will - ## be curtailed below `B4`. +func findHeadPos(c: ForkedChainRef, hash: Hash32): Result[BlockPos, string] = + ## Find the `BlockPos` that contains the block relative to the + ## argument `hash`. ## - var newCursorHeads: seq[CursorDesc] # Will become new `c.cursorHeads` - for ch in c.cursorHeads: - if pvarc.pvNumber < ch.forkJunction: - # On the example, this would be any of chain `D` or `E`. - newCursorHeads.add ch - - elif ch.hash == pvarc.cursor.hash: - # On the example, this would be chain `B`. - newCursorHeads.add CursorDesc( - hash: ch.hash, - forkJunction: pvarc.pvNumber + 1) - - else: - # On the example, this would be either chain `A` or `B`. - c.deleteLineage ch.hash + c.hashToBlock.withValue(hash, val) do: + return ok(val[]) + do: + return err("Block hash is not part of any active chain") - # Cleanup in-memory blocks starting from newBase backward - # while blocks from newBase+1 to canonicalCursor not deleted - # e.g. B4 onward - c.deleteLineage pvarc.pvHash +func findFinalizedPos( + c: ForkedChainRef; + itHash: Hash32; + head: BlockPos, + ): Result[BlockPos, string] = + ## Find header for argument `itHash` on argument `head` ancestor chain. + ## - # Implied deletion of chain heads (if any) - c.cursorHeads.swap newCursorHeads + # OK, new base stays on the argument head branch. + # :: + # - B3 - B4 - B5 - B6 + # / ^ ^ + # A1 - A2 - A3 | | + # head CCH + # + # A1, A2, B3, B4, B5: valid + # A3, B6: invalid - c.baseHeader = pvarc.pvHeader - c.baseHash = pvarc.pvHash + # Find `itHash` on the ancestor lineage of `head` + c.hashToBlock.withValue(itHash, loc): + if loc[].number > head.number: + return err("Invalid finalizedHash: block is newer than head block") -func findCursorArc(c: ForkedChainRef, hash: Hash32): Result[PivotArc, string] = - ## Find the `cursor` arc that contains the block relative to the - ## argument `hash`. 
- ## - if hash == c.baseHash: - # The cursorHash here should not be used for next step - # because it not point to any active chain - return ok PivotArc( - pvHash: c.baseHash, - pvHeader: c.baseHeader, - cursor: CursorDesc( - forkJunction: c.baseHeader.number, - hash: c.baseHash)) - - for ch in c.cursorHeads: - var top = ch.hash - while true: - c.blocks.withValue(top, val): - if ch.forkJunction <= val.blk.header.number: - if top == hash: - return ok PivotArc( - pvHash: hash, - pvHeader: val.blk.header, - cursor: ch) - if ch.forkJunction < val.blk.header.number: - top = val.blk.header.parentHash - continue - break + var + branch = head.branch + prevBranch = BranchRef(nil) - err("Block hash is not part of any active chain") + while not branch.isNil: + if branch == loc[].branch: + if prevBranch.isNil.not and + loc[].number >= prevBranch.tailNumber: + break # invalid + return ok(loc[]) -func findHeader( - c: ForkedChainRef; - itHash: Hash32; - headHash: Hash32; - ): Result[Header, string] = - ## Find header for argument `itHash` on argument `headHash` ancestor chain. - ## - if itHash == c.baseHash: - return ok(c.baseHeader) - - # Find `pvHash` on the ancestor lineage of `headHash` - var prevHash = headHash - while true: - c.blocks.withValue(prevHash, val): - if prevHash == itHash: - return ok(val.blk.header) - prevHash = val.blk.header.parentHash - continue - break + prevBranch = branch + branch = branch.parent - err("Block not in argument head ancestor lineage") + err("Invalid finalizedHash: block not in argument head ancestor lineage") func calculateNewBase( c: ForkedChainRef; - finalized: BlockNumber; - pvarc: PivotArc; - ): PivotArc = - ## It is required that the `finalized` argument is on the `pvarc` arc, i.e. - ## it ranges beween `pvarc.cursor.forkJunction` and - ## `c.blocks[pvarc.cursor.head].number`. + finalized: BlockPos; + head: BlockPos; + ): BlockPos = + ## It is required that the `finalized` argument is on the `head` chain, i.e. + ## it ranges beween `c.baseBranch.tailNumber` and + ## `head.branch.headNumber`. ## - ## The function returns a cursor arc containing a new base position. It is + ## The function returns a BlockPos containing a new base position. It is ## calculated as follows. ## - ## Starting at the argument `pvarc.pvHead` searching backwards, the new base + ## Starting at the argument `head.branch` searching backwards, the new base ## is the position of the block with number `finalized`. ## ## Before searching backwards, the `finalized` argument might be adjusted @@ -360,107 +220,243 @@ func calculateNewBase( ## # It's important to have base at least `baseDistance` behind head # so we can answer state queries about history that deep. - let target = min(finalized, - max(pvarc.pvNumber, c.baseDistance) - c.baseDistance) - - # Can only increase base block number. - if target <= c.baseHeader.number: - return PivotArc( - pvHash: c.baseHash, - pvHeader: c.baseHeader, - cursor: CursorDesc( - forkJunction: c.baseHeader.number, - hash: c.baseHash)) - - var prevHash = pvarc.pvHash - while true: - c.blocks.withValue(prevHash, val): - if target == val.blk.header.number: - if pvarc.cursor.forkJunction <= target: - # OK, new base stays on the argument pivot arc. - # :: - # B1 - B2 - B3 - B4 - # / ^ ^ ^ - # base - A1 - A2 - A3 | | | - # | pv CCH - # | - # target - # - return PivotArc( - pvHash: prevHash, - pvHeader: val.blk.header, - cursor: pvarc.cursor) - else: - # The new base (aka target) falls out of the argument pivot branch, - # ending up somewhere on a parent branch. 
- # :: - # B1 - B2 - B3 - B4 - # / ^ ^ - # base - A1 - A2 - A3 | | - # ^ pv CCH - # | - # target - # - return c.findCursorArc(prevHash).expect "valid cursor arc" - prevHash = val.blk.header.parentHash + let target = min(finalized.number, + max(head.number, c.baseDistance) - c.baseDistance) + + # Do not update base. + if target <= c.baseBranch.tailNumber: + return BlockPos(branch: c.baseBranch) + + if target >= head.branch.tailNumber: + # OK, new base stays on the argument head branch. + # :: + # - B3 - B4 - B5 - B6 + # / ^ ^ ^ + # base - A1 - A2 - A3 | | | + # | head CCH + # | + # target + # + return BlockPos( + branch: head.branch, + index : int(target - head.branch.tailNumber) + ) + + # The new base (aka target) falls out of the argument head branch, + # ending up somewhere on a parent branch. + # :: + # - B3 - B4 - B5 - B6 + # / ^ ^ + # base - A1 - A2 - A3 | | + # ^ head CCH + # | + # target + # + # base will not move to A3 onward for this iteration + var branch = head.branch.parent + while not branch.isNil: + if target >= branch.tailNumber: + return BlockPos( + branch: branch, + index : int(target - branch.tailNumber) + ) + branch = branch.parent + + doAssert(false, "Unreachable code, finalized block outside canonical chain") + +proc removeBlockFromCache(c: ForkedChainRef, bd: BlockDesc, commit = false) = + c.hashToBlock.del(bd.hash) + for tx in bd.blk.transactions: + c.txRecords.del(rlpHash(tx)) + if commit: + if bd.txFrame != c.baseTxFrame: + bd.txFrame.commit() + else: + bd.txFrame.dispose() + +proc updateHead(c: ForkedChainRef, head: BlockPos) = + ## Update head if the new head is different from current head. + ## All branches with block number greater than head will be removed too. + + # Update global syncHighest + c.com.syncHighest = head.branch.headNumber + c.activeBranch = head.branch + + # Pruning if necessary + # :: + # - B5 - B6 - B7 - B8 + # / + # A1 - A2 - A3 - [A4] - A5 - A6 + # \ \ + # - C3 - C4 - D6 - D7 + # + # A4 is head + # 'D' and 'A5' onward will be removed + # 'C' and 'B' will stay + + let headNumber = head.number + var i = 0 + while i < c.branches.len: + let branch = c.branches[i] + + # Any branches with block number greater than head+1 should be removed. + if branch.tailNumber > headNumber + 1: + for i in countdown(branch.blocks.len-1, 0): + c.removeBlockFromCache(branch.blocks[i]) + c.branches.del(i) + # no need to increment i when we delete from c.branches. continue - break - doAssert(false, "Unreachable code, finalized block outside cursor arc") + inc i + + # Maybe the current active chain is longer than canonical chain, + # trim the branch. + for i in countdown(head.branch.len-1, head.index+1): + c.removeBlockFromCache(head.branch.blocks[i]) + + head.branch.blocks.setLen(head.index+1) + c.baseTxFrame.setHead(head.branch.headHeader, + head.branch.headHash).expect("OK") + +proc updateFinalized(c: ForkedChainRef, finalized: BlockPos) = + # Pruning + # :: + # - B5 - B6 - B7 - B8 + # / + # A1 - A2 - A3 - [A4] - A5 - A6 + # \ \ + # - C3 - C4 - D6 - D7 + # + # A4 is finalized + # 'B', 'D', and A5 onward will stay + # 'C' will be removed + + func sameLineage(brc: BranchRef, line: BranchRef): bool = + var branch = line + while not branch.isNil: + if branch == brc: + return true + branch = branch.parent + + let finalizedNumber = finalized.number + var i = 0 + while i < c.branches.len: + let branch = c.branches[i] + + # Any branches with tail block number less or equal + # than finalized should be removed. 
+ if not branch.sameLineage(finalized.branch) and branch.tailNumber <= finalizedNumber: + for i in countdown(branch.blocks.len-1, 0): + c.removeBlockFromCache(branch.blocks[i]) + c.branches.del(i) + # no need to increment i when we delete from c.branches. + continue -func trimCursorArc(c: ForkedChainRef, pvarc: PivotArc) = - ## Curb argument `pvarc.cursor` head so that it ends up at `pvarc.pv`. - ## - # Maybe the current active chain is longer than canonical chain - shouldNotKeyError "trimCanonicalChain": - var prevHash = pvarc.cursor.hash - while prevHash != c.baseHash: - let header = c.blocks[prevHash].blk.header - if header.number > pvarc.pvNumber: - c.blocks.del(prevHash) - else: - break - prevHash = header.parentHash + inc i - if c.cursorHeads.len == 0: - return - - # Update cursorHeads if indeed we trim - for i in 0..= tailNumber: + c.removeBlockFromCache(branch.blocks[number - tailNumber], commit = true) + inc count - # update global syncHighest - c.com.syncHighest = pvarc.pvNumber + if number == 0: + # Don't go below genesis + break + dec number -proc updateHeadIfNecessary(c: ForkedChainRef, pvarc: PivotArc) = - # update head if the new head is different - # from current head or current chain - if c.cursorHash != pvarc.cursor.hash: - if not c.stagingTx.isNil: - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - c.replaySegment(pvarc.pvHash) + proc commitBase(c: ForkedChainRef, bd: BlockDesc) = + if bd.txFrame != c.baseTxFrame: + bd.txFrame.commit() - c.trimCursorArc(pvarc) - if c.cursorHash != pvarc.pvHash: - c.cursorHeader = pvarc.pvHeader - c.cursorHash = pvarc.pvHash + let + # Cache to prevent crash after we shift + # the blocks + newBaseHash = newBase.hash - if c.stagingTx.isNil: - # setHead below don't go straight to db - c.stagingTx = c.db.ctx.txFrameBegin() + var + branch = newBase.branch + number = newBase.number - 1 + count = 0 + + let nextIndex = int(newBase.number - branch.tailNumber) + + # Commit base block but don't remove from FC + c.commitBase(branch.blocks[nextIndex]) + + commitBlocks(number, branch) + + # Update base if it indeed changed + if nextIndex > 0: + # Only remove blocks with number lower than newBase.number + var blocks = newSeqOfCap[BlockDesc](branch.len-nextIndex) + for i in nextIndex.. 1: + notice "Finalized blocks persisted", + numberOfBlocks = count, + baseNumber = c.baseBranch.tailNumber, + baseHash = c.baseBranch.tailHash.short + else: + debug "Finalized blocks persisted", + numberOfBlocks = count, + target = newBaseHash.short, + baseNumber = c.baseBranch.tailNumber, + baseHash = c.baseBranch.tailHash.short + + # Update base txFrame + if c.baseBranch.blocks[0].txFrame != c.baseTxFrame: + c.baseBranch.blocks[0].txFrame = c.baseTxFrame + if c.baseBranch.len > 1: + c.baseBranch.blocks[1].txFrame.reparent(c.baseTxFrame) # ------------------------------------------------------------------------------ # Public functions @@ -485,62 +481,39 @@ proc init*( ## `persistentBlocks()` used for `Era1` or `Era` import. 
## let - base = com.db.getSavedStateBlockNumber - baseHash = com.db.getBlockHash(base).expect("baseHash exists") - baseHeader = com.db.getBlockHeader(baseHash).expect("base header exists") + baseTxFrame = com.db.baseTxFrame() + base = baseTxFrame.getSavedStateBlockNumber + baseHash = baseTxFrame.getBlockHash(base).expect("baseHash exists") + baseHeader = baseTxFrame.getBlockHeader(baseHash).expect("base header exists") + baseBranch = branch(baseHeader, baseHash, baseTxFrame) # update global syncStart com.syncStart = baseHeader.number T(com: com, - db: com.db, - baseHeader: baseHeader, - cursorHash: baseHash, - baseHash: baseHash, - cursorHeader: baseHeader, + baseBranch: baseBranch, + activeBranch: baseBranch, + branches: @[baseBranch], + hashToBlock: {baseHash: baseBranch.lastBlockPos}.toTable, + baseTxFrame: baseTxFrame, extraValidation: extraValidation, baseDistance: baseDistance) -proc newForkedChain*(com: CommonRef, - baseHeader: Header, - baseDistance: uint64 = BaseDistance, - extraValidation: bool = true): ForkedChainRef = - ## This constructor allows to set up the base state which might be needed - ## for some particular test or other applications. Otherwise consider - ## `init()`. - let baseHash = baseHeader.blockHash - let chain = ForkedChainRef( - com: com, - db : com.db, - baseHeader : baseHeader, - cursorHash : baseHash, - baseHash : baseHash, - cursorHeader: baseHeader, - extraValidation: extraValidation, - baseDistance: baseDistance) - - # update global syncStart - com.syncStart = baseHeader.number - chain - proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] = - # Try to import block to canonical or side chain. - # return error if the block is invalid - if c.stagingTx.isNil: - c.stagingTx = c.db.ctx.txFrameBegin() - + ## Try to import block to canonical or side chain. + ## return error if the block is invalid template header(): Header = blk.header - if header.parentHash == c.cursorHash: - return c.validateBlock(c.cursorHeader, blk) + c.hashToBlock.withValue(header.parentHash, bd) do: + # TODO: If engine API keep importing blocks + # but not finalized it, e.g. current chain length > StagedBlocksThreshold + # We need to persist some of the in-memory stuff + # to a "staging area" or disk-backed memory but it must not afect `base`. + # `base` is the point of no return, we only update it on finality. - if header.parentHash == c.baseHash: - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - return c.validateBlock(c.baseHeader, blk) - - if header.parentHash notin c.blocks: + ?c.validateBlock(bd[], blk) + do: # If it's parent is an invalid block # there is no hope the descendant is valid debug "Parent block not found", @@ -548,269 +521,203 @@ proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] = parentHash = header.parentHash.short return err("Block is not part of valid chain") - # TODO: If engine API keep importing blocks - # but not finalized it, e.g. current chain length > StagedBlocksThreshold - # We need to persist some of the in-memory stuff - # to a "staging area" or disk-backed memory but it must not afect `base`. - # `base` is the point of no return, we only update it on finality. 
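
A minimal, self-contained sketch of the frame layering that the new `importBlock` relies on — the toy `Frame` type and its `put`/`get` stand in for `CoreDbTxRef`; the names are illustrative, not the real API. Every block gets its own frame begun on its parent's frame, so sibling branches never observe each other's writes, a bad block is discarded with a single frame, and nothing reaches `base` before finality:

import std/tables

type Frame = ref object
  parent: Frame                   # nil for the base frame
  writes: Table[string, string]   # this frame's own key/value deltas

proc txFrameBegin(parent: Frame): Frame =
  Frame(parent: parent)

proc put(f: Frame; k, v: string) =
  f.writes[k] = v

proc get(f: Frame; k: string): string =
  # A read walks the frame stack towards base; the newest delta wins.
  var cur = f
  while not cur.isNil:
    if k in cur.writes:
      return cur.writes[k]
    cur = cur.parent
  ""

when isMainModule:
  let base = txFrameBegin(nil)
  base.put("acct", "nonce=1")
  # Two competing children of the same parent, each isolated in its frame.
  let frameA = txFrameBegin(base)
  frameA.put("acct", "nonce=2")
  let frameB = txFrameBegin(base)
  doAssert frameA.get("acct") == "nonce=2"
  doAssert frameB.get("acct") == "nonce=1"   # sees base, not frameA
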
- - c.replaySegment(header.parentHash) - c.validateBlock(c.cursorHeader, blk) + ok() proc forkChoice*(c: ForkedChainRef, headHash: Hash32, finalizedHash: Hash32): Result[void, string] = - if headHash == c.cursorHash and finalizedHash == static(default(Hash32)): + if headHash == c.activeBranch.headHash and finalizedHash == zeroHash32: # Do nothing if the new head already our current head - # and there is no request to new finality + # and there is no request to new finality. return ok() - # Find the unique cursor arc where `headHash` is a member of. - let pvarc = ?c.findCursorArc(headHash) - - if finalizedHash == static(default(Hash32)): - # skip newBase calculation and skip chain finalization - # if finalizedHash is zero - c.updateHeadIfNecessary(pvarc) - return ok() - - # Finalized block must be parent or on the new canonical chain which is - # represented by `pvarc`. - let finalizedHeader = ?c.findHeader(finalizedHash, pvarc.pvHash) + let + # Find the unique branch where `headHash` is a member of. + head = ?c.findHeadPos(headHash) + # Finalized block must be parent or on the new canonical chain which is + # represented by `head`. + finalized = ?c.findFinalizedPos(finalizedHash, head) - let newBase = c.calculateNewBase(finalizedHeader.number, pvarc) + # Head maybe moved backward or moved to other branch. + c.updateHead(head) - if newBase.pvHash == c.baseHash: - # The base is not updated but the cursor maybe need update - c.updateHeadIfNecessary(pvarc) + if finalizedHash == zeroHash32: + # skip updateBase and updateFinalized if finalizedHash is zero. return ok() - # At this point cursorHeader.number > baseHeader.number - if newBase.pvHash == c.cursorHash: - # Paranoid check, guaranteed by `newBase.hash == c.cursorHash` - doAssert(not c.stagingTx.isNil) - - # CL decide to move backward and then forward? - if c.cursorHeader.number < pvarc.pvNumber: - c.replaySegment(pvarc.pvHash, c.cursorHeader, c.cursorHash) - - # Current segment is canonical chain - c.writeBaggage(newBase.pvHash) - c.setHead(pvarc) - - c.stagingTx.commit() - c.stagingTx = nil - - # Move base to newBase - c.updateBase(newBase) - - # Save and record the block number before the last saved block state. - c.db.persistent(newBase.pvNumber).isOkOr: - return err("Failed to save state: " & $$error) + c.updateFinalized(finalized) + let newBase = c.calculateNewBase(finalized, head) + if newBase.hash == c.baseBranch.tailHash: + # The base is not updated, return. return ok() - # At this point finalizedHeader.number is <= headHeader.number - # and possibly switched to other chain beside the one with cursor - doAssert(finalizedHeader.number <= pvarc.pvNumber) - doAssert(newBase.pvNumber <= finalizedHeader.number) - - # Write segment from base+1 to newBase into database - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - - if newBase.pvNumber > c.baseHeader.number: - c.replaySegment(newBase.pvHash) - c.writeBaggage(newBase.pvHash) - c.stagingTx.commit() - c.stagingTx = nil - # Update base forward to newBase - c.updateBase(newBase) - c.db.persistent(newBase.pvNumber).isOkOr: - return err("Failed to save state: " & $$error) - - if c.stagingTx.isNil: - # replaySegment or setHead below don't - # go straight to db - c.stagingTx = c.db.ctx.txFrameBegin() + # Cache the base block number, updateBase might + # alter the BlockPos.index + let newBaseNumber = newBase.number - # Move chain state forward to current head - if newBase.pvNumber < pvarc.pvNumber: - c.replaySegment(pvarc.pvHash) + # At this point head.number >= base.number. 
+ # At this point finalized.number is <= head.number, + # and possibly switched to other chain beside the one with head. + doAssert(finalized.number <= head.number) + doAssert(newBaseNumber <= finalized.number) + c.updateBase(newBase) - c.setHead(pvarc) - - # Move cursor to current head - c.trimCursorArc(pvarc) - if c.cursorHash != pvarc.pvHash: - c.cursorHeader = pvarc.pvHeader - c.cursorHash = pvarc.pvHash + # Save and record the block number before the last saved block state. + if newBaseNumber > 0: + c.com.db.persistent(newBaseNumber).isOkOr: + return err("Failed to save state: " & $$error) ok() func haveBlockAndState*(c: ForkedChainRef, blockHash: Hash32): bool = - if c.blocks.hasKey(blockHash): - return true - if c.baseHash == blockHash: - return true - false + ## Blocks still in memory with it's txFrame + c.hashToBlock.hasKey(blockHash) proc haveBlockLocally*(c: ForkedChainRef, blockHash: Hash32): bool = - if c.blocks.hasKey(blockHash): - return true - if c.baseHash == blockHash: + if c.hashToBlock.hasKey(blockHash): return true - c.db.headerExists(blockHash) + c.baseTxFrame.headerExists(blockHash) + +func txFrame*(c: ForkedChainRef, blockHash: Hash32): CoreDbTxRef = + if blockHash == c.baseBranch.tailHash: + return c.baseTxFrame + + c.hashToBlock.withValue(blockHash, loc) do: + return loc[].txFrame + + c.baseTxFrame + +func txFrame*(c: ForkedChainRef, header: Header): CoreDbTxRef = + c.txFrame(header.blockHash()) -func stateReady*(c: ForkedChainRef, header: Header): bool = - let blockHash = header.blockHash - blockHash == c.cursorHash +func latestTxFrame*(c: ForkedChainRef): CoreDbTxRef = + c.activeBranch.headTxFrame func com*(c: ForkedChainRef): CommonRef = c.com func db*(c: ForkedChainRef): CoreDbRef = - c.db + c.com.db func latestHeader*(c: ForkedChainRef): Header = - c.cursorHeader + c.activeBranch.headHeader func latestNumber*(c: ForkedChainRef): BlockNumber = - c.cursorHeader.number + c.activeBranch.headNumber func latestHash*(c: ForkedChainRef): Hash32 = - c.cursorHash + c.activeBranch.headHash func baseNumber*(c: ForkedChainRef): BlockNumber = - c.baseHeader.number + c.baseBranch.tailNumber func baseHash*(c: ForkedChainRef): Hash32 = - c.baseHash + c.baseBranch.tailHash func txRecords*(c: ForkedChainRef, txHash: Hash32): (Hash32, uint64) = c.txRecords.getOrDefault(txHash, (Hash32.default, 0'u64)) func isInMemory*(c: ForkedChainRef, blockHash: Hash32): bool = - c.blocks.hasKey(blockHash) + c.hashToBlock.hasKey(blockHash) func memoryBlock*(c: ForkedChainRef, blockHash: Hash32): BlockDesc = - c.blocks.getOrDefault(blockHash) + c.hashToBlock.withValue(blockHash, loc): + return loc.branch.blocks[loc.index] + # Return default(BlockDesc) func memoryTransaction*(c: ForkedChainRef, txHash: Hash32): Opt[(Transaction, BlockNumber)] = let (blockHash, index) = c.txRecords.getOrDefault(txHash, (Hash32.default, 0'u64)) - c.blocks.withValue(blockHash, val) do: - return Opt.some( (val.blk.transactions[index], val.blk.header.number) ) + c.hashToBlock.withValue(blockHash, loc) do: + return Opt.some( (loc[].tx(index), loc[].number) ) return Opt.none((Transaction, BlockNumber)) proc latestBlock*(c: ForkedChainRef): Block = - c.blocks.withValue(c.cursorHash, val) do: - return val.blk - c.db.getEthBlock(c.cursorHash).expect("cursorBlock exists") + if c.activeBranch.headNumber == c.baseBranch.tailNumber: + # It's a base block + return c.baseTxFrame.getEthBlock(c.activeBranch.headHash).expect("cursorBlock exists") + c.activeBranch.blocks[^1].blk proc headerByNumber*(c: ForkedChainRef, number: 
BlockNumber): Result[Header, string] = - if number > c.cursorHeader.number: + if number > c.activeBranch.headNumber: return err("Requested block number not exists: " & $number) - if number == c.cursorHeader.number: - return ok(c.cursorHeader) + if number < c.baseBranch.tailNumber: + return c.baseTxFrame.getBlockHeader(number) - if number == c.baseHeader.number: - return ok(c.baseHeader) + var branch = c.activeBranch + while not branch.isNil: + if number >= branch.tailNumber: + return ok(branch.blocks[number - branch.tailNumber].blk.header) + branch = branch.parent - if number < c.baseHeader.number: - return c.db.getBlockHeader(number) - - shouldNotKeyError "headerByNumber": - var prevHash = c.cursorHeader.parentHash - while prevHash != c.baseHash: - let header = c.blocks[prevHash].blk.header - if header.number == number: - return ok(header) - prevHash = header.parentHash - - doAssert(false, "headerByNumber: Unreachable code") + err("Header not found, number = " & $number) proc headerByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Header, string] = - c.blocks.withValue(blockHash, val) do: - return ok(val.blk.header) - do: - if c.baseHash == blockHash: - return ok(c.baseHeader) - return c.db.getBlockHeader(blockHash) + c.hashToBlock.withValue(blockHash, loc): + return ok(loc[].header) + c.baseTxFrame.getBlockHeader(blockHash) proc blockByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Block, string] = # used by getPayloadBodiesByHash # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#specification-3 # 4. Client software MAY NOT respond to requests for finalized blocks by hash. - c.blocks.withValue(blockHash, val) do: - return ok(val.blk) - do: - return c.db.getEthBlock(blockHash) + c.hashToBlock.withValue(blockHash, loc): + return ok(loc[].blk) + c.baseTxFrame.getEthBlock(blockHash) proc blockByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Block, string] = - if number > c.cursorHeader.number: + if number > c.activeBranch.headNumber: return err("Requested block number not exists: " & $number) - if number < c.baseHeader.number: - return c.db.getEthBlock(number) + if number <= c.baseBranch.tailNumber: + return c.baseTxFrame.getEthBlock(number) - if number == c.baseHeader.number: - return c.db.getEthBlock(c.baseHash) + var branch = c.activeBranch + while not branch.isNil: + if number >= branch.tailNumber: + return ok(branch.blocks[number - branch.tailNumber].blk) + branch = branch.parent - shouldNotKeyError "blockByNumber": - var prevHash = c.cursorHash - while prevHash != c.baseHash: - c.blocks.withValue(prevHash, item): - if item.blk.header.number == number: - return ok(item.blk) - prevHash = item.blk.header.parentHash - return err("Block not found, number = " & $number) + err("Block not found, number = " & $number) func blockFromBaseTo*(c: ForkedChainRef, number: BlockNumber): seq[Block] = # return block in reverse order - shouldNotKeyError "blockFromBaseTo": - var prevHash = c.cursorHash - while prevHash != c.baseHash: - c.blocks.withValue(prevHash, item): - if item.blk.header.number <= number: - result.add item.blk - prevHash = item.blk.header.parentHash + var branch = c.activeBranch + while not branch.isNil: + for i in countdown(branch.len-1, 0): + result.add(branch.blocks[i].blk) + branch = branch.parent func isCanonical*(c: ForkedChainRef, blockHash: Hash32): bool = - if blockHash == c.baseHash: - return true - - shouldNotKeyError "isCanonical": - var prevHash = c.cursorHash - while prevHash != c.baseHash: - 
c.blocks.withValue(prevHash, item): - if blockHash == prevHash: - return true - prevHash = item.blk.header.parentHash + c.hashToBlock.withValue(blockHash, loc): + var branch = c.activeBranch + while not branch.isNil: + if loc.branch == branch: + return true + branch = branch.parent proc isCanonicalAncestor*(c: ForkedChainRef, blockNumber: BlockNumber, blockHash: Hash32): bool = - if blockNumber >= c.cursorHeader.number: + if blockNumber >= c.activeBranch.headNumber: return false - if blockHash == c.cursorHash: + if blockHash == c.activeBranch.headHash: return false - if c.baseHeader.number < c.cursorHeader.number: + if c.baseBranch.tailNumber < c.activeBranch.headNumber: # The current canonical chain in memory is headed by - # cursorHeader - shouldNotKeyError "isCanonicalAncestor": - var prevHash = c.cursorHeader.parentHash - while prevHash != c.baseHash: - var header = c.blocks[prevHash].blk.header - if prevHash == blockHash and blockNumber == header.number: - return true - prevHash = header.parentHash + # activeBranch.header + var branch = c.activeBranch + while not branch.isNil: + if branch.hasHashAndNumber(blockHash, blockNumber): + return true + branch = branch.parent # canonical chain in database should have a marker # and the marker is block number - let canonHash = c.db.getBlockHash(blockNumber).valueOr: + let canonHash = c.baseTxFrame.getBlockHash(blockNumber).valueOr: return false canonHash == blockHash @@ -818,14 +725,15 @@ iterator txHashInRange*(c: ForkedChainRef, fromHash: Hash32, toHash: Hash32): Ha ## toHash should be ancestor of fromHash ## exclude base from iteration, new block produced by txpool ## should not reach base + let baseHash = c.baseBranch.tailHash var prevHash = fromHash - while prevHash != c.baseHash: - c.blocks.withValue(prevHash, item) do: + while prevHash != baseHash: + c.hashToBlock.withValue(prevHash, loc) do: if toHash == prevHash: break - for tx in item.blk.transactions: + for tx in loc[].transactions: let txHash = rlpHash(tx) yield txHash - prevHash = item.blk.header.parentHash + prevHash = loc[].parentHash do: break diff --git a/nimbus/core/chain/forked_chain/chain_branch.nim b/nimbus/core/chain/forked_chain/chain_branch.nim new file mode 100644 index 0000000000..e1d18571f8 --- /dev/null +++ b/nimbus/core/chain/forked_chain/chain_branch.nim @@ -0,0 +1,138 @@ +# Nimbus +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. 
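
The new module below centres on two small types: `BranchRef`, a run of consecutive blocks linked to the branch it forked from, and `BlockPos`, a (branch, index) pair resolved through the `hashToBlock` table. A simplified model of the lineage rule enforced by `findFinalizedPos` above — toy types and names, not the actual `chain_branch` API:

import std/tables

type
  Branch = ref object
    tailNumber: uint64     # number of the first block on this branch
    hashes: seq[string]    # stand-in for the stored BlockDesc entries
    parent: Branch         # nil for the base branch

  BlockPos = object
    branch: Branch
    index: int

func number(pos: BlockPos): uint64 =
  pos.branch.tailNumber + uint64(pos.index)

func onLineage(head, candidate: BlockPos): bool =
  ## `candidate` is an ancestor of `head` iff its branch occurs on
  ## `head`'s parent chain *below* the fork point of the child branch
  ## we arrived from.
  var branch = head.branch
  var prev: Branch = nil
  while not branch.isNil:
    if branch == candidate.branch:
      return prev.isNil or candidate.number < prev.tailNumber
    prev = branch
    branch = branch.parent
  false

when isMainModule:
  var hashToBlock: Table[string, BlockPos]
  let base = Branch(tailNumber: 0, hashes: @["G", "A1", "A2"])
  let side = Branch(tailNumber: 2, hashes: @["B2", "B3"], parent: base)
  for i, h in base.hashes: hashToBlock[h] = BlockPos(branch: base, index: i)
  for i, h in side.hashes: hashToBlock[h] = BlockPos(branch: side, index: i)
  let head = hashToBlock["B3"]
  doAssert onLineage(head, hashToBlock["A1"])     # below the fork point
  doAssert onLineage(head, hashToBlock["B2"])     # same branch
  doAssert not onLineage(head, hashToBlock["A2"]) # displaced by B2
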
+ +import + eth/common/blocks, + eth/common/receipts, + ../../../db/core_db + +type + BlockDesc* = object + blk* : Block + txFrame* : CoreDbTxRef + receipts*: seq[Receipt] + hash* : Hash32 + + BlockPos* = object + branch*: BranchRef + index* : int + + BranchRef* = ref object + blocks*: seq[BlockDesc] + parent*: BranchRef + # If parent.isNil: it is a base branch + +func tailNumber*(brc: BranchRef): BlockNumber = + brc.blocks[0].blk.header.number + +func headNumber*(brc: BranchRef): BlockNumber = + brc.blocks[^1].blk.header.number + +func tailHash*(brc: BranchRef): Hash32 = + brc.blocks[0].hash + +func headHash*(brc: BranchRef): Hash32 = + brc.blocks[^1].hash + +func len*(brc: BranchRef): int = + brc.blocks.len + +func headTxFrame*(brc: BranchRef): CoreDbTxRef = + brc.blocks[^1].txFrame + +func tailHeader*(brc: BranchRef): Header = + brc.blocks[0].blk.header + +func headHeader*(brc: BranchRef): Header = + brc.blocks[^1].blk.header + +func append*(brc: BranchRef, blk: BlockDesc) = + brc.blocks.add(blk) + +func lastBlockPos*(brc: BranchRef): BlockPos = + BlockPos( + branch: brc, + index : brc.len - 1, + ) + +func `==`*(a, b: BranchRef): bool = + a.headHash == b.headHash + +func hasHashAndNumber*(brc: BranchRef, hash: Hash32, number: BlockNumber): bool = + for i in 0.. 0 @@ -125,9 +126,8 @@ proc validateUncles(com: CommonRef; header: Header; uncleSet.incl uncleHash let - chainDB = com.db - recentAncestorHashes = ?chainDB.getAncestorsHashes(MAX_UNCLE_DEPTH + 1, header) - recentUncleHashes = ?chainDB.getUncleHashes(recentAncestorHashes) + recentAncestorHashes = ?txFrame.getAncestorsHashes(MAX_UNCLE_DEPTH + 1, header) + recentUncleHashes = ?txFrame.getUncleHashes(recentAncestorHashes) blockHash = header.blockHash for uncle in uncles: @@ -154,13 +154,13 @@ proc validateUncles(com: CommonRef; header: Header; return err("uncle block number larger than current block number") # check uncle against own parent - let parent = ?chainDB.getBlockHeader(uncle.parentHash) + let parent = ?txFrame.getBlockHeader(uncle.parentHash) if uncle.timestamp <= parent.timestamp: return err("Uncle's parent must me older") - let uncleParent = ?chainDB.getBlockHeader(uncle.parentHash) + let uncleParent = ?txFrame.getBlockHeader(uncle.parentHash) ? com.validateHeader( - Block.init(uncle, BlockBody()), uncleParent) + Block.init(uncle, BlockBody()), uncleParent, txFrame) ok() @@ -361,6 +361,7 @@ proc validateHeaderAndKinship*( com: CommonRef; blk: Block; parent: Header; + txFrame: CoreDbTxRef ): Result[void, string] {.gcsafe, raises: [].} = template header: Header = blk.header @@ -370,13 +371,13 @@ proc validateHeaderAndKinship*( return err("Header.extraData larger than 32 bytes") return ok() - ? com.validateHeader(blk, parent) + ? com.validateHeader(blk, parent, txFrame) if blk.uncles.len > MAX_UNCLES: return err("Number of uncles exceed limit.") - if not com.proofOfStake(header): - ? com.validateUncles(header, blk.uncles) + if not com.proofOfStake(header, txFrame): + ? 
com.validateUncles(header, txFrame, blk.uncles) ok() diff --git a/nimbus/db/aristo/aristo_api.nim b/nimbus/db/aristo/aristo_api.nim index d81c835716..d63ae2a693 100644 --- a/nimbus/db/aristo/aristo_api.nim +++ b/nimbus/db/aristo/aristo_api.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -49,7 +49,7 @@ type ## was any. AristoApiDeleteAccountRecordFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] {.noRaise.} @@ -58,7 +58,7 @@ type ## as well. AristoApiDeleteStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] @@ -71,7 +71,7 @@ type ## case only the function will return `true`. AristoApiDeleteStorageTreeFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] {.noRaise.} @@ -79,7 +79,7 @@ type ## associated to the account argument `accPath`. AristoApiFetchLastSavedStateFn* = - proc(db: AristoDbRef + proc(db: AristoTxRef ): Result[SavedState,AristoError] {.noRaise.} ## The function returns the state of the last saved state. This is a @@ -87,20 +87,20 @@ type ## (may be interpreted as block number.) AristoApiFetchAccountRecordFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[AristoAccount,AristoError] {.noRaise.} ## Fetch an account record from the database indexed by `accPath`. AristoApiFetchStateRootFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; ): Result[Hash32,AristoError] {.noRaise.} ## Fetch the Merkle hash of the account root. AristoApiFetchStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] @@ -109,7 +109,7 @@ type ## record from the database indexed by `stoPath`. AristoApiFetchStorageRootFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[Hash32,AristoError] {.noRaise.} @@ -130,7 +130,7 @@ type ## This distructor may be used on already *destructed* descriptors. AristoApiForgetFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; ): Result[void,AristoError] {.noRaise.} ## Destruct the non centre argument `db` descriptor (see comments on @@ -140,14 +140,14 @@ type ## also# comments on `fork()`.) AristoApiHashifyFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; ): Result[void,(VertexID,AristoError)] {.noRaise.} ## Add keys to the `Patricia Trie` so that it becomes a `Merkle ## Patricia Tree`. AristoApiHasPathAccountFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] {.noRaise.} @@ -155,7 +155,7 @@ type ## exists on the database. AristoApiHasPathStorageFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] @@ -164,29 +164,15 @@ type ## data record indexed by `stoPath` exists on the database. AristoApiHasStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] {.noRaise.} ## For a storage tree related to account `accPath`, query whether there ## is a non-empty data storage area at all. - AristoApiIsTopFn* = - proc(tx: AristoTxRef; - ): bool - {.noRaise.} - ## Getter, returns `true` if the argument `tx` referes to the current - ## top level transaction. 
- - AristoApiTxFrameLevelFn* = - proc(db: AristoDbRef; - ): int - {.noRaise.} - ## Getter, non-negative nesting level (i.e. number of pending - ## transactions) - AristoApiMergeAccountRecordFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; accRec: AristoAccount; ): Result[bool,AristoError] @@ -199,7 +185,7 @@ type ## `false` otherwise. AristoApiMergeStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; stoData: UInt256; @@ -210,7 +196,7 @@ type ## and `stoPath` is the slot path of the corresponding storage area. AristoApiPartAccountTwig* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] {.noRaise.} @@ -222,7 +208,7 @@ type ## Errors will only be returned for invalid paths. AristoApiPartStorageTwig* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] @@ -285,24 +271,23 @@ type ## is returned if there was any. AristoApiTxFrameBeginFn* = - proc(db: AristoDbRef; + proc(db: AristoDbRef; parent: AristoTxRef ): Result[AristoTxRef,AristoError] {.noRaise.} ## Starts a new transaction. ## ## Example: ## :: - ## proc doSomething(db: AristoDbRef) = + ## proc doSomething(db: AristoTxRef) = ## let tx = db.begin ## defer: tx.rollback() ## ... continue using db ... ## tx.commit() - AristoApiTxFrameTopFn* = + AristoApiBaseTxFrameFn* = proc(db: AristoDbRef; - ): Result[AristoTxRef,AristoError] + ): AristoTxRef {.noRaise.} - ## Getter, returns top level transaction if there is any. AristoApiRef* = ref AristoApiObj AristoApiObj* = object of RootObj @@ -325,9 +310,6 @@ type hasPathStorage*: AristoApiHasPathStorageFn hasStorageData*: AristoApiHasStorageDataFn - isTop*: AristoApiIsTopFn - txFrameLevel*: AristoApiTxFrameLevelFn - mergeAccountRecord*: AristoApiMergeAccountRecordFn mergeStorageData*: AristoApiMergeStorageDataFn @@ -340,7 +322,7 @@ type persist*: AristoApiPersistFn rollback*: AristoApiRollbackFn txFrameBegin*: AristoApiTxFrameBeginFn - txFrameTop*: AristoApiTxFrameTopFn + baseTxFrame*: AristoApiBaseTxFrameFn AristoApiProfNames* = enum @@ -365,9 +347,6 @@ type AristoApiProfHasPathStorageFn = "hasPathStorage" AristoApiProfHasStorageDataFn = "hasStorageData" - AristoApiProfIsTopFn = "isTop" - AristoApiProfLevelFn = "level" - AristoApiProfMergeAccountRecordFn = "mergeAccountRecord" AristoApiProfMergeStorageDataFn = "mergeStorageData" @@ -380,7 +359,7 @@ type AristoApiProfPersistFn = "persist" AristoApiProfRollbackFn = "rollback" AristoApiProfTxFrameBeginFn = "txFrameBegin" - AristoApiProfTxFrameTopFn = "txFrameTop" + AristoApiProfBaseTxFrameFn = "baseTxFrame" AristoApiProfBeGetVtxFn = "be/getVtx" AristoApiProfBeGetKeyFn = "be/getKey" @@ -449,9 +428,6 @@ func init*(api: var AristoApiObj) = api.hasPathStorage = hasPathStorage api.hasStorageData = hasStorageData - api.isTop = isTop - api.txFrameLevel = txFrameLevel - api.mergeAccountRecord = mergeAccountRecord api.mergeStorageData = mergeStorageData @@ -464,7 +440,8 @@ func init*(api: var AristoApiObj) = api.persist = persist api.rollback = rollback api.txFrameBegin = txFrameBegin - api.txFrameTop = txFrameTop + api.baseTxFrame = baseTxFrame + when AutoValidateApiHooks: api.validate @@ -490,7 +467,7 @@ func init*( ## This constructor creates a profiling API descriptor to be derived from ## an initialised `api` argument descriptor. 
For profiling the DB backend, ## the field `.be` of the result descriptor must be assigned to the - ## `.backend` field of the `AristoDbRef` descriptor. + ## `.backend` field of the `AristoTxRef` descriptor. ## ## The argument descriptors `api` and `be` will not be modified and can be ## used to restore the previous set up. @@ -511,92 +488,82 @@ func init*( result = api.commit(a) profApi.deleteAccountRecord = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfDeleteAccountRecordFn.profileRunner: result = api.deleteAccountRecord(a, b) profApi.deleteStorageData = - proc(a: AristoDbRef; b: Hash32, c: Hash32): auto = + proc(a: AristoTxRef; b: Hash32, c: Hash32): auto = AristoApiProfDeleteStorageDataFn.profileRunner: result = api.deleteStorageData(a, b, c) profApi.deleteStorageTree = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfDeleteStorageTreeFn.profileRunner: result = api.deleteStorageTree(a, b) profApi.fetchLastSavedState = - proc(a: AristoDbRef): auto = + proc(a: AristoTxRef): auto = AristoApiProfFetchLastSavedStateFn.profileRunner: result = api.fetchLastSavedState(a) profApi.fetchAccountRecord = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfFetchAccountRecordFn.profileRunner: result = api.fetchAccountRecord(a, b) profApi.fetchStateRoot = - proc(a: AristoDbRef; b: bool): auto = + proc(a: AristoTxRef; b: bool): auto = AristoApiProfFetchStateRootFn.profileRunner: result = api.fetchStateRoot(a, b) profApi.fetchStorageData = - proc(a: AristoDbRef; b, stoPath: Hash32): auto = + proc(a: AristoTxRef; b, stoPath: Hash32): auto = AristoApiProfFetchStorageDataFn.profileRunner: result = api.fetchStorageData(a, b, stoPath) profApi.fetchStorageRoot = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfFetchStorageRootFn.profileRunner: result = api.fetchStorageRoot(a, b) profApi.finish = - proc(a: AristoDbRef; b = false) = + proc(a: AristoTxRef; b = false) = AristoApiProfFinishFn.profileRunner: api.finish(a, b) profApi.hasPathAccount = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfHasPathAccountFn.profileRunner: result = api.hasPathAccount(a, b) profApi.hasPathStorage = - proc(a: AristoDbRef; b, c: Hash32): auto = + proc(a: AristoTxRef; b, c: Hash32): auto = AristoApiProfHasPathStorageFn.profileRunner: result = api.hasPathStorage(a, b, c) profApi.hasStorageData = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfHasStorageDataFn.profileRunner: result = api.hasStorageData(a, b) - profApi.isTop = - proc(a: AristoTxRef): auto = - AristoApiProfIsTopFn.profileRunner: - result = api.isTop(a) - - profApi.level = - proc(a: AristoDbRef): auto = - AristoApiProfLevelFn.profileRunner: - result = api.level(a) - profApi.mergeAccountRecord = - proc(a: AristoDbRef; b: Hash32; c: AristoAccount): auto = + proc(a: AristoTxRef; b: Hash32; c: AristoAccount): auto = AristoApiProfMergeAccountRecordFn.profileRunner: result = api.mergeAccountRecord(a, b, c) profApi.mergeStorageData = - proc(a: AristoDbRef; b, c: Hash32, d: UInt256): auto = + proc(a: AristoTxRef; b, c: Hash32, d: UInt256): auto = AristoApiProfMergeStorageDataFn.profileRunner: result = api.mergeStorageData(a, b, c, d) profApi.partAccountTwig = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfPartAccountTwigFn.profileRunner:
result = api.partAccountTwig(a, b) profApi.partStorageTwig = - proc(a: AristoDbRef; b: Hash32; c: Hash32): auto = + proc(a: AristoTxRef; b: Hash32; c: Hash32): auto = AristoApiProfPartStorageTwigFn.profileRunner: result = api.partStorageTwig(a, b, c) @@ -616,7 +583,7 @@ func init*( result = api.pathAsBlob(a) profApi.persist = - proc(a: AristoDbRef; b = 0u64): auto = + proc(a: AristoTxRef; b = 0u64): auto = AristoApiProfPersistFn.profileRunner: result = api.persist(a, b) @@ -626,14 +593,14 @@ func init*( result = api.rollback(a) profApi.txFrameBegin = - proc(a: AristoDbRef): auto = + proc(a: AristoTxRef): auto = AristoApiProfTxFrameBeginFn.profileRunner: result = api.txFrameBegin(a) - profApi.txFrameTop = - proc(a: AristoDbRef): auto = - AristoApiProfTxFrameTopFn.profileRunner: - result = api.txFrameTop(a) + profApi.baseTxFrame = + proc(a: AristoTxRef): auto = + AristoApiProfBaseTxFrameFn.profileRunner: + result = api.baseTxFrame(a) let beDup = be.dup() if beDup.isNil: diff --git a/nimbus/db/aristo/aristo_blobify.nim b/nimbus/db/aristo/aristo_blobify.nim index 7bc68c50af..917e949afa 100644 --- a/nimbus/db/aristo/aristo_blobify.nim +++ b/nimbus/db/aristo/aristo_blobify.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -13,6 +13,7 @@ import results, stew/[arrayops, endians2], + eth/common/accounts, ./aristo_desc export aristo_desc, results diff --git a/nimbus/db/aristo/aristo_check.nim b/nimbus/db/aristo/aristo_check.nim index 31f69f9679..63dc58a5c4 100644 --- a/nimbus/db/aristo/aristo_check.nim +++ b/nimbus/db/aristo/aristo_check.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -25,7 +25,7 @@ import # ------------------------------------------------------------------------------ proc checkTop*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer proofMode = false; # Has proof nodes ): Result[void,(VertexID,AristoError)] = ## Verify that the cache structure is correct as it would be after `merge()` @@ -76,18 +76,18 @@ proc checkBE*( proc check*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database relax = false; # Check existing hashes only cache = true; # Also verify against top layer cache proofMode = false; # Has proof nodes ): Result[void,(VertexID,AristoError)] = ## Shortcut for running `checkTop()` followed by `checkBE()` ? db.checkTop(proofMode = proofMode) - ? db.checkBE() + # ? 
db.checkBE() ok() proc check*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Account key ): Result[void,AristoError] = ## Check accounts tree path `accPath` against portal proof generation and @@ -98,7 +98,7 @@ proc check*( db.checkTwig(accPath) proc check*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Account key stoPath: Hash32; # Storage key ): Result[void,AristoError] = diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim index 8e830fb469..3e2e414fca 100644 --- a/nimbus/db/aristo/aristo_check/check_be.nim +++ b/nimbus/db/aristo/aristo_check/check_be.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -51,7 +51,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef]( for (rvid,key) in T.walkKeyBe db: if topVidBe.vid < rvid.vid: topVidBe = rvid - let _ = db.getVtxBE(rvid).valueOr: + let _ = db.getVtxBe(rvid).valueOr: return err((rvid.vid,CheckBeVtxMissing)) # Compare calculated `vTop` against database state @@ -76,25 +76,25 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef]( block: var topVidCache: RootedVertexID = (VertexID(0), VertexID(0)) - # Check structural table - for (rvid,vtx) in db.layersWalkVtx: - if vtx.isValid and topVidCache.vid < rvid.vid: - topVidCache = rvid - let (key, _) = db.layersGetKey(rvid).valueOr: (VOID_HASH_KEY, 0) - if not vtx.isValid: - # Some vertex is to be deleted, the key must be empty - if key.isValid: - return err((rvid.vid,CheckBeCacheKeyNonEmpty)) + # # Check structural table + # for (rvid,vtx) in db.layersWalkVtx: + # if vtx.isValid and topVidCache.vid < rvid.vid: + # topVidCache = rvid + # let (key, _) = db.layersGetKey(rvid).valueOr: (VOID_HASH_KEY, 0) + # if not vtx.isValid: + # # Some vertex is to be deleted, the key must be empty + # if key.isValid: + # return err((rvid.vid,CheckBeCacheKeyNonEmpty)) - # Check key table - var list: seq[RootedVertexID] - for (rvid,key) in db.layersWalkKey: - if key.isValid and topVidCache.vid < rvid.vid: - topVidCache = rvid - list.add rvid - let vtx = db.getVtx rvid - if db.layersGetVtx(rvid).isErr and not vtx.isValid: - return err((rvid.vid,CheckBeCacheKeyDangling)) + # # Check key table + # var list: seq[RootedVertexID] + # for (rvid,key) in db.layersWalkKey: + # if key.isValid and topVidCache.vid < rvid.vid: + # topVidCache = rvid + # list.add rvid + # let vtx = db.getVtx rvid + # if db.layersGetVtx(rvid).isErr and not vtx.isValid: + # return err((rvid.vid,CheckBeCacheKeyDangling)) # Check vTop # TODO diff --git a/nimbus/db/aristo/aristo_check/check_top.nim b/nimbus/db/aristo/aristo_check/check_top.nim index 331db8a02a..86b4dff0ae 100644 --- a/nimbus/db/aristo/aristo_check/check_top.nim +++ b/nimbus/db/aristo/aristo_check/check_top.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -21,7 +21,7 @@ import # ------------------------------------------------------------------------------ proc checkTopStrict*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top 
layer ): Result[void,(VertexID,AristoError)] = # No need to specify zero keys if implied by a leaf path with valid target # vertex ID (i.e. not deleted). @@ -55,7 +55,7 @@ proc checkTopStrict*( proc checkTopProofMode*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer ): Result[void,(VertexID,AristoError)] = for (rvid,key) in db.layersWalkKey: if key.isValid: # Otherwise to be deleted @@ -69,13 +69,13 @@ proc checkTopProofMode*( proc checkTopCommon*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer ): Result[void,(VertexID,AristoError)] = # Some `kMap[]` entries may be void indicating backend deletion let kMapCount = db.layersWalkKey.toSeq.mapIt(it[1]).filterIt(it.isValid).len kMapNilCount = db.layersWalkKey.toSeq.len - kMapCount - vTop = db.vTop + vTop = db.layer.vTop var topVid = VertexID(0) stoRoots: HashSet[VertexID] diff --git a/nimbus/db/aristo/aristo_check/check_twig.nim b/nimbus/db/aristo/aristo_check/check_twig.nim index 664b9ec33a..0615c4a22a 100644 --- a/nimbus/db/aristo/aristo_check/check_twig.nim +++ b/nimbus/db/aristo/aristo_check/check_twig.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -20,7 +20,7 @@ import # ------------------------------------------------------------------------------ proc checkTwig*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Data path ): Result[void,AristoError] = let @@ -31,7 +31,7 @@ proc checkTwig*( ok() proc checkTwig*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Account key stoPath: Hash32; # Storage key ): Result[void,AristoError] = diff --git a/nimbus/db/aristo/aristo_compute.nim b/nimbus/db/aristo/aristo_compute.nim index ecb4c12c99..f5f8dcd9e8 100644 --- a/nimbus/db/aristo/aristo_compute.nim +++ b/nimbus/db/aristo/aristo_compute.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -63,7 +63,7 @@ func leave(batch: var WriteBatch, nibble: uint8) = batch.depth -= 1 proc putKeyAtLevel( - db: AristoDbRef, + db: AristoTxRef, rvid: RootedVertexID, vtx: VertexRef, key: HashKey, @@ -76,10 +76,10 @@ proc putKeyAtLevel( ## corresponding hash!)
if level == -2: - ?batch.putVtx(db, rvid, vtx, key) + ?batch.putVtx(db.db, rvid, vtx, key) if batch.count mod batchSize == 0: - ?batch.flush(db) + ?batch.flush(db.db) if batch.count mod (batchSize * 100) == 0: info "Writing computeKey cache", keys = batch.count, accounts = batch.progress @@ -121,10 +121,10 @@ template encodeExt(w: var RlpWriter, pfx: NibblesBuf, branchKey: HashKey): HashK w.finish().digestTo(HashKey) proc getKey( - db: AristoDbRef, rvid: RootedVertexID, skipLayers: static bool + db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool ): Result[((HashKey, VertexRef), int), AristoError] = ok when skipLayers: - (?db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2) + (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2) else: ?db.getKeyRc(rvid, {}) @@ -140,7 +140,7 @@ template childVid(v: VertexRef): VertexID = v.startVid proc computeKeyImpl( - db: AristoDbRef, + db: AristoTxRef, rvid: RootedVertexID, batch: var WriteBatch, vtx: VertexRef, @@ -277,11 +277,11 @@ proc computeKeyImpl( ok (key, level) proc computeKeyImpl( - db: AristoDbRef, rvid: RootedVertexID, skipLayers: static bool + db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool ): Result[HashKey, AristoError] = let (keyvtx, level) = when skipLayers: - (?db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2) + (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2) else: ?db.getKeyRc(rvid, {}) @@ -291,7 +291,7 @@ proc computeKeyImpl( var batch: WriteBatch let res = computeKeyImpl(db, rvid, batch, keyvtx[1], level, skipLayers = skipLayers) if res.isOk: - ?batch.flush(db) + ?batch.flush(db.db) if batch.count > 0: if batch.count >= batchSize * 100: @@ -302,7 +302,7 @@ proc computeKeyImpl( ok (?res)[0] proc computeKey*( - db: AristoDbRef, # Database, top layer + db: AristoTxRef, # Database, top layer rvid: RootedVertexID, # Vertex to convert ): Result[HashKey, AristoError] = ## Compute the key for an arbitrary vertex ID. If successful, the length of @@ -312,7 +312,7 @@ proc computeKey*( ## 32 byte value. 
computeKeyImpl(db, rvid, skipLayers = false) -proc computeKeys*(db: AristoDbRef, root: VertexID): Result[void, AristoError] = +proc computeKeys*(db: AristoTxRef, root: VertexID): Result[void, AristoError] = ## Ensure that key cache is topped up with the latest state root discard db.computeKeyImpl((root, root), skipLayers = true) diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim index bec16559f5..769a37e473 100644 --- a/nimbus/db/aristo/aristo_debug.nim +++ b/nimbus/db/aristo/aristo_debug.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -24,8 +24,8 @@ import # Private functions # ------------------------------------------------------------------------------ -func orDefault(db: AristoDbRef): AristoDbRef = - if db.isNil: AristoDbRef(top: LayerRef.init()) else: db +func orDefault(db: AristoTxRef): AristoTxRef = + if db.isNil: AristoTxRef(layer: LayerRef()) else: db # -------------------------- @@ -80,17 +80,17 @@ func stripZeros(a: string; toExp = false): string = # --------------------- func ppKeyOk( - db: AristoDbRef; + db: AristoTxRef; key: HashKey; rvid: RootedVertexID; ): string = if key.isValid and rvid.isValid: - let rv = db.xMap.getOrVoid key + let rv = db.db.xMap.getOrVoid key if rv.isValid: if rvid != rv: result = "(!)" return - db.xMap[key] = rvid + db.db.xMap[key] = rvid func ppVid(vid: VertexID; pfx = true): string = if pfx: @@ -130,7 +130,7 @@ func ppVidList(vLst: openArray[VertexID]): string = result &= vLst[^100 .. ^1].mapIt(it.ppVid).join(",") result &= "]" -proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string = +proc ppKey(key: HashKey; db: AristoTxRef; pfx = true): string = if pfx: result = "£" if key.to(Hash32) == default(Hash32): @@ -139,7 +139,7 @@ proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string = result &= "ø" else: # Reverse lookup - let rvid = db.xMap.getOrVoid key + let rvid = db.db.xMap.getOrVoid key if rvid.isValid: result &= rvid.ppVid(pfx=false) let vtx = db.getVtx rvid @@ -153,7 +153,7 @@ proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string = let tag = if key.len < 32: "[#" & $key.len & "]" else: "" result &= @(key.data).toHex.squeeze(hex=true,ignLen=true) & tag -func ppLeafTie(lty: LeafTie, db: AristoDbRef): string = +func ppLeafTie(lty: LeafTie, db: AristoTxRef): string = let pfx = lty.path.to(NibblesBuf) "@" & lty.root.ppVid(pfx=false) & ":" & ($pfx).squeeze(hex=true,ignLen=(pfx.len==64)) @@ -165,7 +165,7 @@ func ppPathPfx(pfx: NibblesBuf): string = func ppNibble(n: int8): string = if n < 0: "ø" elif n < 10: $n else: n.toHexLsb -proc ppEthAccount(a: Account, db: AristoDbRef): string = +proc ppEthAccount(a: Account, db: AristoTxRef): string = result = "(" result &= ($a.nonce).stripZeros(toExp=true) & "," result &= ($a.balance).stripZeros(toExp=true) & "," @@ -178,14 +178,14 @@ func ppAriAccount(a: AristoAccount): string = result &= ($a.balance).stripZeros(toExp=true) & "," result &= a.codeHash.ppCodeHash & ")" -func ppPayload(p: LeafPayload, db: AristoDbRef): string = +func ppPayload(p: LeafPayload, db: AristoTxRef): string = case p.pType: of AccountData: result = "(" & p.account.ppAriAccount() & "," & p.stoID.ppVid & ")" of StoData: result = ($p.stoData).squeeze -func ppVtx(nd: VertexRef, db: AristoDbRef, rvid: RootedVertexID): string = +func 
ppVtx(nd: VertexRef, db: AristoTxRef, rvid: RootedVertexID): string = if not nd.isValid: result = "ø" else: @@ -210,7 +210,7 @@ func ppVtx(nd: VertexRef, db: AristoDbRef, rvid: RootedVertexID): string = proc ppNode( nd: NodeRef; - db: AristoDbRef; + db: AristoTxRef; rvid = default(RootedVertexID); ): string = if not nd.isValid: @@ -253,7 +253,7 @@ proc ppNode( func ppXTab[T: VertexRef|NodeRef]( tab: Table[RootedVertexID,T]; - db: AristoDbRef; + db: AristoTxRef; indent = 4; ): string = proc ppT(v: T; r: RootedVertexID): string = @@ -268,7 +268,7 @@ func ppXTab[T: VertexRef|NodeRef]( proc ppXMap*( - db: AristoDbRef; + db: AristoTxRef; kMap: Table[RootedVertexID,HashKey]; indent: int; ): string = @@ -289,7 +289,7 @@ proc ppXMap*( if key == VOID_HASH_KEY: 0 else: - db.xMap[key] = w + db.db.xMap[key] = w let vtx = db.getVtx(w) if not vtx.isValid: 1 @@ -360,7 +360,7 @@ proc ppXMap*( proc ppBalancer( fl: LayerRef; - db: AristoDbRef; + db: AristoTxRef; indent: int; ): string = ## Walk over filter tables @@ -385,7 +385,7 @@ proc ppBalancer( result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey(db) & ")" result &= "}" -proc ppBe[T](be: T; db: AristoDbRef; limit: int; indent: int): string = +proc ppBe[T](be: T; db: AristoTxRef; limit: int; indent: int): string = ## Walk over backend tables let pfx = indent.toPfx @@ -435,7 +435,7 @@ proc ppBe[T](be: T; db: AristoDbRef; limit: int; indent: int): string = proc ppLayer( layer: LayerRef; - db: AristoDbRef; + db: AristoTxRef; vTopOk: bool; sTabOk: bool; kMapOk: bool; @@ -495,19 +495,19 @@ func pp*(w: Hash32; codeHashOk: bool): string = func pp*(n: NibblesBuf): string = n.ppPathPfx() -proc pp*(w: HashKey; db = AristoDbRef(nil)): string = +proc pp*(w: HashKey; db = AristoTxRef(nil)): string = w.ppKey(db.orDefault) -proc pp*(w: Hash32; db = AristoDbRef(nil)): string = +proc pp*(w: Hash32; db = AristoTxRef(nil)): string = w.to(HashKey).ppKey(db.orDefault) -proc pp*(w: openArray[HashKey]; db = AristoDbRef(nil)): string = +proc pp*(w: openArray[HashKey]; db = AristoTxRef(nil)): string = "[" & @w.mapIt(it.ppKey(db.orDefault)).join(",") & "]" -func pp*(lty: LeafTie, db = AristoDbRef(nil)): string = +func pp*(lty: LeafTie, db = AristoTxRef(nil)): string = lty.ppLeafTie(db.orDefault) -proc pp*(a: Account, db = AristoDbRef(nil)): string = +proc pp*(a: Account, db = AristoTxRef(nil)): string = a.ppEthAccount(db.orDefault) func pp*(vid: VertexID): string = @@ -519,13 +519,13 @@ func pp*(rvid: RootedVertexID): string = func pp*(vLst: openArray[VertexID]): string = vLst.ppVidList -func pp*(p: LeafPayload, db = AristoDbRef(nil)): string = +func pp*(p: LeafPayload, db = AristoTxRef(nil)): string = p.ppPayload(db.orDefault) -func pp*(nd: VertexRef, db = AristoDbRef(nil)): string = +func pp*(nd: VertexRef, db = AristoTxRef(nil)): string = nd.ppVtx(db.orDefault, default(RootedVertexID)) -proc pp*(nd: NodeRef, db = AristoDbRef(nil)): string = +proc pp*(nd: NodeRef, db = AristoTxRef(nil)): string = nd.ppNode(db.orDefault, default(RootedVertexID)) func pp*(e: (VertexID,AristoError)): string = @@ -542,26 +542,26 @@ func pp*[T](rc: Result[T,(VertexID,AristoError)]): string = func pp*( sTab: Table[RootedVertexID,VertexRef]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = sTab.ppXTab(db.orDefault) -proc pp*(leg: Leg; root: VertexID; db = AristoDbRef(nil)): string = +proc pp*(leg: Leg; root: VertexID; db = AristoTxRef(nil)): string = let db = db.orDefault() result = "(" & leg.wp.vid.ppVid & "," block: let key = db.layersGetKeyOrVoid (root, leg.wp.vid) if not 
key.isValid: result &= "ø" - elif (root, leg.wp.vid) != db.xMap.getOrVoid key: + elif (root, leg.wp.vid) != db.db.xMap.getOrVoid key: result &= key.ppKey(db) result &= "," if 0 <= leg.nibble: result &= $leg.nibble.ppNibble result &= "," & leg.wp.vtx.pp(db) & ")" -proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string = +proc pp*(hike: Hike; db = AristoTxRef(nil); indent = 4): string = let db = db.orDefault() pfx = indent.toPfx(1) @@ -577,7 +577,7 @@ proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string = func pp*[T: NodeRef|VertexRef|HashKey]( q: seq[(HashKey,T)]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = let db = db.orDefault @@ -591,7 +591,7 @@ func pp*[T: NodeRef|VertexRef|HashKey]( func pp*[T: NodeRef|VertexRef|HashKey]( t: Table[HashKey,T]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = ## Sort hash keys by associated vertex ID where possible @@ -611,7 +611,7 @@ func pp*[T: NodeRef|VertexRef|HashKey]( proc pp*[T: HashKey]( t: Table[T,RootedVertexID]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = ## Sort by second tab item vertex ID @@ -638,14 +638,14 @@ proc pp*[T: HashKey]( func pp*[T: HashKey]( t: TableRef[HashKey,T]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = pp(t[],db,indent) proc pp*( kMap: Table[RootedVertexID,HashKey]; - db: AristoDbRef; + db: AristoTxRef; indent = 4; ): string = db.ppXMap(kMap, indent) @@ -653,18 +653,18 @@ proc pp*( # --------------------- func pp*(tx: AristoTxRef): string = - result = "(uid=" & $tx.txUid & ",level=" & $tx.level + result = "(" & repr(pointer(addr(tx[]))) if not tx.parent.isNil: - result &= ", par=" & $tx.parent.txUid + result &= ", par=" & pp(tx.parent) result &= ")" -func pp*(wp: VidVtxPair; db: AristoDbRef): string = +func pp*(wp: VidVtxPair; db: AristoTxRef): string = "(" & wp.vid.pp & "," & wp.vtx.pp(db) & ")" proc pp*( layer: LayerRef; - db: AristoDbRef; + db: AristoTxRef; indent = 4; sTabOk = true, kMapOk = true, @@ -675,11 +675,11 @@ proc pp*( proc pp*( be: BackendRef; - db: AristoDbRef; + db: AristoTxRef; limit = 100; indent = 4; ): string = - result = db.balancer.ppBalancer(db, indent+1) & indent.toPfx + result = db.layer.ppBalancer(db, indent+1) & indent.toPfx case be.kind: of BackendMemory: result &= be.MemBackendRef.ppBe(db, limit, indent+1) @@ -689,7 +689,7 @@ proc pp*( result &= "" proc pp*( - db: AristoDbRef; + db: AristoTxRef; indent = 4; backendOk = false; balancerOk = true; @@ -699,29 +699,29 @@ proc pp*( sTabOk = true; limit = 100; ): string = - if topOk: - result = db.layersCc.ppLayer( - db, sTabOk=sTabOk, kMapOk=kMapOk, vTopOk=true, indent=indent) - let stackOnlyOk = stackOk and not (topOk or balancerOk or backendOk) - if not stackOnlyOk: - result &= indent.toPfx(1) & "level=" & $db.stack.len - if (stackOk and 0 < db.stack.len) or stackOnlyOk: - let layers = @[db.top] & db.stack.reversed - var lStr = "" - for n,w in layers: - let - m = layers.len - n - 1 - l = db.layersCc m - a = w.kMap.values.toSeq.filterIt(not it.isValid).len - c = l.kMap.values.toSeq.filterIt(not it.isValid).len - result &= "(" & $(w.kMap.len - a) & "," & $a & ")" - lStr &= " " & $m & "=(" & $(l.kMap.len - c) & "," & $c & ")" - result &= " =>" & lStr - if backendOk: - result &= indent.toPfx & db.backend.pp(db, limit=limit, indent) - elif balancerOk: - result &= indent.toPfx & db.balancer.ppBalancer(db, indent+1) - + # if topOk: + # result = db.layersCc.ppLayer( + # db, sTabOk=sTabOk, kMapOk=kMapOk, vTopOk=true,
indent=indent) + # let stackOnlyOk = stackOk and not (topOk or balancerOk or backendOk) + # if not stackOnlyOk: + # result &= indent.toPfx(1) & "level=" & $db.stack.len + # if (stackOk and 0 < db.stack.len) or stackOnlyOk: + # let layers = @[db.top] & db.stack.reversed + # var lStr = "" + # for n,w in layers: + # let + # m = layers.len - n - 1 + # l = db.layersCc m + # a = w.kMap.values.toSeq.filterIt(not it.isValid).len + # c = l.kMap.values.toSeq.filterIt(not it.isValid).len + # result &= "(" & $(w.kMap.len - a) & "," & $a & ")" + # lStr &= " " & $m & "=(" & $(l.kMap.len - c) & "," & $c & ")" + # result &= " =>" & lStr + # if backendOk: + # result &= indent.toPfx & db.backend.pp(db, limit=limit, indent) + # elif balancerOk: + # result &= indent.toPfx & db.balancer.ppBalancer(db, indent+1) + discard #TODO # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_delete.nim b/nimbus/db/aristo/aristo_delete.nim index 3223e4ed63..17aa49fcc5 100644 --- a/nimbus/db/aristo/aristo_delete.nim +++ b/nimbus/db/aristo/aristo_delete.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -46,7 +46,7 @@ proc branchStillNeeded(vtx: VertexRef, removed: int8): Result[int8,void] = # ------------------------------------------------------------------------------ proc deleteImpl( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer hike: Hike; # Fully expanded path ): Result[VertexRef,AristoError] = ## Removes the last node in the hike and returns the updated leaf in case @@ -126,7 +126,7 @@ proc deleteImpl( # ------------------------------------------------------------------------------ proc deleteAccountRecord*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] = ## Delete the account leaf entry addressed by the argument `path`. If this @@ -156,7 +156,7 @@ proc deleteAccountRecord*( ok() proc deleteGenericData*( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; path: openArray[byte]; ): Result[bool,AristoError] = @@ -187,7 +187,7 @@ proc deleteGenericData*( ok(not db.getVtx((root, root)).isValid) proc deleteGenericTree*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer root: VertexID; # Root vertex ): Result[void,AristoError] = ## Variant of `deleteGenericData()` for purging the whole MPT sub-tree. 
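With the delete functions above now operating on an `AristoTxRef`, a deletion inside a frame cannot erase anything from the backend directly; as `layersResVtx` later in this patch suggests, it is recorded as an empty entry in the frame's own layer, which then shadows the backend value. A toy sketch of that tombstone idea (assumed semantics; `Option[string]` stands in for the nullable `VertexRef`):

  import std/[tables, options]

  type
    Layer = Table[int, Option[string]]  # some(v) = write, none = tombstone

  proc layersPut(layer: var Layer, key: int, val: string) =
    layer[key] = some(val)

  proc layersRes(layer: var Layer, key: int) =
    ## Analogue of a layer-level delete: record an empty entry.
    layer[key] = none(string)

  proc lookup(layer: Layer, backend: Table[int, string], key: int): Option[string] =
    if key in layer:
      return layer[key]                 # a tombstone hides the backend value
    if key in backend:
      return some(backend[key])
    none(string)

  when isMainModule:
    var layer: Layer
    let backend = {7: "stored"}.toTable
    assert lookup(layer, backend, 7).get == "stored"
    layersRes(layer, 7)                 # "delete" inside the frame
    assert lookup(layer, backend, 7).isNone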
@@ -203,7 +203,7 @@ proc deleteGenericTree*( db.delSubTreeImpl root proc deleteStorageData*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; # Implies storage data tree stoPath: Hash32; ): Result[bool,AristoError] = @@ -266,7 +266,7 @@ proc deleteStorageData*( ok(true) proc deleteStorageTree*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer accPath: Hash32; # Implies storage data tree ): Result[void,AristoError] = ## Variant of `deleteStorageData()` for purging the whole storage tree diff --git a/nimbus/db/aristo/aristo_delete/delete_subtree.nim b/nimbus/db/aristo/aristo_delete/delete_subtree.nim index 659a6b9ed2..2cc5eac28d 100644 --- a/nimbus/db/aristo/aristo_delete/delete_subtree.nim +++ b/nimbus/db/aristo/aristo_delete/delete_subtree.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -19,7 +19,7 @@ import # ------------------------------------------------------------------------------ proc delSubTreeNow( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; ): Result[void,AristoError] = ## Delete sub-tree now @@ -38,7 +38,7 @@ proc delSubTreeNow( proc delStoTreeNow( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer rvid: RootedVertexID; # Root vertex accPath: Hash32; # Accounts cache designator stoPath: NibblesBuf; # Current storage path @@ -70,14 +70,14 @@ proc delStoTreeNow( # ------------------------------------------------------------------------------ proc delSubTreeImpl*( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; ): Result[void,AristoError] = db.delSubTreeNow (root,root) proc delStoTreeImpl*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer rvid: RootedVertexID; # Root vertex accPath: Hash32; ): Result[void,AristoError] = diff --git a/nimbus/db/aristo/aristo_delta.nim b/nimbus/db/aristo/aristo_delta.nim index 3e066c42bd..d023e13039 100644 --- a/nimbus/db/aristo/aristo_delta.nim +++ b/nimbus/db/aristo/aristo_delta.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -31,7 +31,7 @@ proc deltaPersistent*( db: AristoDbRef; # Database nxtFid = 0u64; # Next filter ID (if any) ): Result[void,AristoError] = - ## Resolve (i.e. move) the balancer into the physical backend database. + ## Resolve (i.e. move) txRef into the physical backend database. ## ## This needs write permission on the backend DB for the descriptor argument ## `db` (see the function `aristo_desc.isCentre()`.) If the argument flag @@ -47,7 +47,7 @@ proc deltaPersistent*( return err(FilBackendMissing) # Blind or missing filter - if db.balancer.isNil: + if db.txRef.isNil: # Add a blind storage frame. This will do no harm if `Aristo` runs # standalone. Yet it is needed if a `Kvt` is tied to `Aristo` and has # triggered a save cycle already which is to be completed here. @@ -60,30 +60,34 @@ proc deltaPersistent*( return ok() let lSst = SavedState( - key: EMPTY_ROOT_HASH, # placeholder for more + key: emptyRoot, # placeholder for more serial: nxtFid) # Store structural single trie entries let writeBatch = ?
be.putBegFn() - for rvid, vtx in db.balancer.sTab: - db.balancer.kMap.withValue(rvid, key) do: + for rvid, vtx in db.txRef.layer.sTab: + db.txRef.layer.kMap.withValue(rvid, key) do: be.putVtxFn(writeBatch, rvid, vtx, key[]) do: be.putVtxFn(writeBatch, rvid, vtx, default(HashKey)) - be.putTuvFn(writeBatch, db.balancer.vTop) + be.putTuvFn(writeBatch, db.txRef.layer.vTop) be.putLstFn(writeBatch, lSst) ? be.putEndFn writeBatch # Finalise write batch # Copy back updated payloads - for accPath, vtx in db.balancer.accLeaves: + for accPath, vtx in db.txRef.layer.accLeaves: db.accLeaves.put(accPath, vtx) - for mixPath, vtx in db.balancer.stoLeaves: + for mixPath, vtx in db.txRef.layer.stoLeaves: db.stoLeaves.put(mixPath, vtx) - # Done with balancer, all saved to backend - db.balancer = LayerRef(nil) + # Done with txRef, all saved to backend + db.txRef.layer.cTop = db.txRef.layer.vTop + db.txRef.layer.sTab.clear() + db.txRef.layer.kMap.clear() + db.txRef.layer.accLeaves.clear() + db.txRef.layer.stoLeaves.clear() ok() diff --git a/nimbus/db/aristo/aristo_delta/delta_merge.nim b/nimbus/db/aristo/aristo_delta/delta_merge.nim deleted file mode 100644 index 7e86f7140c..0000000000 --- a/nimbus/db/aristo/aristo_delta/delta_merge.nim +++ /dev/null @@ -1,44 +0,0 @@ -# nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -import - ".."/[aristo_desc, aristo_layers] - -# ------------------------------------------------------------------------------ -# Public functions -# ------------------------------------------------------------------------------ - -proc deltaMerge*( - upper: LayerRef; # Think of `top`, `nil` is ok - lower: LayerRef; # Think of `balancer`, `nil` is ok - ): LayerRef = - ## Merge argument `upper` into the `lower` filter instance. - ## - ## Note that the namimg `upper` and `lower` indicate that the filters are - ## stacked and the database access is `upper -> lower -> backend`. - ## - if lower.isNil: - # Degenerate case: `upper` is void - upper - - elif upper.isNil: - # Degenerate case: `lower` is void - lower - - else: - # Can modify `lower` which is the prefered action mode but applies only - # in cases where the `lower` argument is not shared. 
- lower.vTop = upper.vTop - layersMergeOnto(upper, lower[]) - lower - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_desc.nim b/nimbus/db/aristo/aristo_desc.nim index fdb738986b..b65ef39171 100644 --- a/nimbus/db/aristo/aristo_desc.nim +++ b/nimbus/db/aristo/aristo_desc.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -43,18 +43,13 @@ type ## Transaction descriptor db*: AristoDbRef ## Database descriptor parent*: AristoTxRef ## Previous transaction - txUid*: uint ## Unique ID among transactions - level*: int ## Stack index for this transaction + layer*: LayerRef AristoDbRef* = ref object ## Three tier database object supporting distributed instances. - top*: LayerRef ## Database working layer, mutable - stack*: seq[LayerRef] ## Stashed immutable parent layers - balancer*: LayerRef ## Balance out concurrent backend access backend*: BackendRef ## Backend database (may well be `nil`) - txRef*: AristoTxRef ## Latest active transaction - txUidGen*: uint ## Tx-relative unique number generator + txRef*: AristoTxRef ## Bottom-most in-memory frame accLeaves*: LruCache[Hash32, VertexRef] ## Account path to payload cache - accounts are frequently accessed by @@ -128,7 +123,7 @@ func isValid*(layer: LayerRef): bool = layer != LayerRef(nil) func isValid*(root: Hash32): bool = - root != EMPTY_ROOT_HASH + root != emptyRoot func isValid*(key: HashKey): bool = assert key.len != 32 or key.to(Hash32).isValid @@ -156,25 +151,31 @@ func hash*(db: AristoDbRef): Hash = # Public helpers # ------------------------------------------------------------------------------ -iterator rstack*(db: AristoDbRef): LayerRef = +iterator rstack*(tx: AristoTxRef): (LayerRef, int) = # Stack in reverse order - for i in 0.. 
<db.stack.len: - yield db.stack[db.stack.len - i - 1] - -proc deltaAtLevel*(db: AristoDbRef, level: int): LayerRef = - if level == 0: - db.top - elif level > 0: - doAssert level <= db.stack.len - db.stack[^level] - elif level == -1: - doAssert db.balancer != nil - db.balancer - elif level == -2: + var tx = tx + + var i = 0 + while tx != nil: + let level = if tx.parent == nil: -1 else: i + yield (tx.layer, level) + tx = tx.parent + +proc deltaAtLevel*(db: AristoTxRef, level: int): LayerRef = + if level == -2: nil + elif level == -1: + db.db.txRef.layer else: - raiseAssert "Unknown level " & $level + var + frame = db + level = level + + while level > 0: + frame = frame.parent + level -= 1 + frame.layer # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/aristo/aristo_desc/desc_error.nim b/nimbus/db/aristo/aristo_desc/desc_error.nim index 0e752bed23..cb2f1ba57d 100644 --- a/nimbus/db/aristo/aristo_desc/desc_error.nim +++ b/nimbus/db/aristo/aristo_desc/desc_error.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -228,16 +228,7 @@ type # Transaction wrappers - TxAccRootMissing - TxArgStaleTx - TxArgsUseless TxBackendNotWritable - TxFrameLevelTooDeep - TxFrameLevelUseless - TxNoPendingTx - TxNotFound - TxNotTopTx - TxPendingTx TxStackGarbled # End diff --git a/nimbus/db/aristo/aristo_desc/desc_structural.nim b/nimbus/db/aristo/aristo_desc/desc_structural.nim index 309feaf647..9801b92f56 100644 --- a/nimbus/db/aristo/aristo_desc/desc_structural.nim +++ b/nimbus/db/aristo/aristo_desc/desc_structural.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -94,8 +94,8 @@ type key*: Hash32 ## Some state hash (if any) serial*: uint64 ## Generic identifier from application - LayerRef* = ref LayerObj - LayerObj* = object + LayerRef* = ref Layer + Layer* = object ## Delta layers are stacked implying a tables hierarchy. Table entries on ## a higher level take precedence over lower layer table entries.
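The `Layer` doc comment here (continued just below) states the lookup rule for stacked delta layers: an entry on a higher layer takes precedence over the same key further down. A small self-contained sketch of exactly that precedence walk (toy `Table` layers rather than the real `Layer` object):

  import std/[tables, options]

  proc layersGet(layers: seq[Table[string, int]], key: string): Option[int] =
    ## layers[^1] is the top (most recent) layer; the first hit wins.
    for i in countdown(layers.high, 0):
      if key in layers[i]:
        return some(layers[i][key])
    none(int)

  when isMainModule:
    let layers = @[
      {"a": 1, "b": 2}.toTable,  # bottom layer
      {"b": 20}.toTable,         # top layer: shadows "b"
    ]
    assert layersGet(layers, "a").get == 1
    assert layersGet(layers, "b").get == 20
    assert layersGet(layers, "c").isNone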
So an ## existing key-value table entry of a layer on top supersedes same key @@ -125,7 +125,7 @@ type accLeaves*: Table[Hash32, VertexRef] ## Account path -> VertexRef stoLeaves*: Table[Hash32, VertexRef] ## Storage path -> VertexRef - txUid*: uint ## Transaction identifier if positive + cTop*: VertexID ## Last committed vertex ID GetVtxFlag* = enum PeekCache @@ -150,10 +150,6 @@ func setUsed*(vtx: VertexRef, nibble: uint8, used: static bool): VertexID = vtx.used and (not (1'u16 shl nibble)) vtx.bVid(nibble) -func init*(T: type LayerRef): T = - ## Constructor, returns empty layer - T() - func hash*(node: NodeRef): Hash = ## Table/KeyedQueue/HashSet mixin cast[pointer](node).hash diff --git a/nimbus/db/aristo/aristo_fetch.nim b/nimbus/db/aristo/aristo_fetch.nim index ecca2c34be..2346bfd327 100644 --- a/nimbus/db/aristo/aristo_fetch.nim +++ b/nimbus/db/aristo/aristo_fetch.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -24,7 +24,7 @@ import # ------------------------------------------------------------------------------ proc retrieveLeaf( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; path: Hash32; ): Result[VertexRef,AristoError] = @@ -39,22 +39,22 @@ proc retrieveLeaf( return err(FetchPathNotFound) -proc cachedAccLeaf*(db: AristoDbRef; accPath: Hash32): Opt[VertexRef] = +proc cachedAccLeaf*(db: AristoTxRef; accPath: Hash32): Opt[VertexRef] = # Return vertex from layers or cache, `nil` if it's known to not exist and # none otherwise db.layersGetAccLeaf(accPath) or - db.accLeaves.get(accPath) or + db.db.accLeaves.get(accPath) or Opt.none(VertexRef) -proc cachedStoLeaf*(db: AristoDbRef; mixPath: Hash32): Opt[VertexRef] = +proc cachedStoLeaf*(db: AristoTxRef; mixPath: Hash32): Opt[VertexRef] = # Return vertex from layers or cache, `nil` if it's known to not exist and # none otherwise db.layersGetStoLeaf(mixPath) or - db.stoLeaves.get(mixPath) or + db.db.stoLeaves.get(mixPath) or Opt.none(VertexRef) proc retrieveAccountLeaf( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[VertexRef,AristoError] = if (let leafVtx = db.cachedAccLeaf(accPath); leafVtx.isSome()): @@ -67,27 +67,27 @@ proc retrieveAccountLeaf( let leafVtx = db.retrieveLeaf(VertexID(1), accPath).valueOr: if error == FetchPathNotFound: - db.accLeaves.put(accPath, nil) + db.db.accLeaves.put(accPath, nil) return err(error) - db.accLeaves.put(accPath, leafVtx) + db.db.accLeaves.put(accPath, leafVtx) ok leafVtx proc retrieveMerkleHash( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; ): Result[Hash32,AristoError] = let key = db.computeKey((root, root)).valueOr: if error in [GetVtxNotFound, GetKeyNotFound]: - return ok(EMPTY_ROOT_HASH) + return ok(emptyRoot) return err(error) ok key.to(Hash32) proc hasAccountPayload( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] = let error = db.retrieveAccountLeaf(accPath).errorOr: @@ -98,7 +98,7 @@ proc hasAccountPayload( err(error) proc fetchStorageIdImpl( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; enaStoRootMissing = false; ): Result[VertexID,AristoError] = @@ -119,7 +119,7 @@ proc fetchStorageIdImpl( # ------------------------------------------------------------------------------ proc fetchAccountHike*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: 
Hash32; # Implies a storage ID (if any) accHike: var Hike ): Result[void,AristoError] = @@ -142,7 +142,7 @@ proc fetchAccountHike*( ok() proc fetchStorageID*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[VertexID,AristoError] = ## Public helper function for retrieving a storage (vertex) ID for a given account. This @@ -152,7 +152,7 @@ proc fetchStorageID*( db.fetchStorageIdImpl(accPath, enaStoRootMissing=true) proc retrieveStoragePayload( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] = @@ -167,15 +167,15 @@ proc retrieveStoragePayload( # it must have been in the database let leafVtx = db.retrieveLeaf(? db.fetchStorageIdImpl(accPath), stoPath).valueOr: if error == FetchPathNotFound: - db.stoLeaves.put(mixPath, nil) + db.db.stoLeaves.put(mixPath, nil) return err(error) - db.stoLeaves.put(mixPath, leafVtx) + db.db.stoLeaves.put(mixPath, leafVtx) ok leafVtx.lData.stoData proc hasStoragePayload( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] = @@ -191,15 +191,16 @@ proc hasStoragePayload( # ------------------------------------------------------------------------------ proc fetchLastSavedState*( - db: AristoDbRef; + db: AristoTxRef; ): Result[SavedState,AristoError] = - ## Wrapper around `getLstUbe()`. The function returns the state of the last + ## Wrapper around `getLstBe()`. The function returns the state of the last ## saved state. This is a Merkle hash tag for vertex with ID 1 and a bespoke ## `uint64` identifier (may be interpreted as block number.) - db.getLstUbe() + # TODO store in frame!! + db.db.getLstBe() proc fetchAccountRecord*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[AristoAccount,AristoError] = ## Fetch an account record from the database indexed by `accPath`. @@ -210,13 +211,13 @@ proc fetchAccountRecord*( ok leafVtx.lData.account proc fetchStateRoot*( - db: AristoDbRef; + db: AristoTxRef; ): Result[Hash32,AristoError] = ## Fetch the Merkle hash of the account root. db.retrieveMerkleHash(VertexID(1)) proc hasPathAccount*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] = ## For an account record indexed by `accPath` query whether this record exists @@ -225,7 +226,7 @@ proc hasPathAccount*( db.hasAccountPayload(accPath) proc fetchStorageData*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] = @@ -235,18 +236,18 @@ proc fetchStorageData*( db.retrieveStoragePayload(accPath, stoPath) proc fetchStorageRoot*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[Hash32,AristoError] = ## Fetch the Merkle hash of the storage root related to `accPath`. 
let stoID = db.fetchStorageIdImpl(accPath).valueOr: if error == FetchPathNotFound: - return ok(EMPTY_ROOT_HASH) # no sub-tree + return ok(emptyRoot) # no sub-tree return err(error) db.retrieveMerkleHash(stoID) proc hasPathStorage*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] = @@ -256,7 +257,7 @@ proc hasPathStorage*( db.hasStoragePayload(accPath, stoPath) proc hasStorageData*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] = ## For a storage tree related to account `accPath`, query whether there diff --git a/nimbus/db/aristo/aristo_get.nim b/nimbus/db/aristo/aristo_get.nim index d3e83be377..f1e810c947 100644 --- a/nimbus/db/aristo/aristo_get.nim +++ b/nimbus/db/aristo/aristo_get.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -21,16 +21,16 @@ import # Public functions # ------------------------------------------------------------------------------ -proc getTuvUbe*( +proc getTuvBe*( db: AristoDbRef; ): Result[VertexID,AristoError] = - ## Get the ID generator state from the unfiltered backend if available. + ## Get the ID generator state from the backend if available. let be = db.backend if not be.isNil: return be.getTuvFn() err(GetTuvNotFound) -proc getLstUbe*( +proc getLstBe*( db: AristoDbRef; ): Result[SavedState,AristoError] = ## Get the last saved state @@ -39,23 +39,23 @@ proc getLstUbe*( return be.getLstFn() err(GetLstNotFound) -proc getVtxUbe*( +proc getVtxBe*( db: AristoDbRef; rvid: RootedVertexID; flags: set[GetVtxFlag] = {}; ): Result[VertexRef,AristoError] = - ## Get the vertex from the unfiltered backend if available. + ## Get the vertex from the backend if available. let be = db.backend if not be.isNil: return be.getVtxFn(rvid, flags) err GetVtxNotFound -proc getKeyUbe*( +proc getKeyBe*( db: AristoDbRef; rvid: RootedVertexID; flags: set[GetVtxFlag]; ): Result[(HashKey, VertexRef),AristoError] = - ## Get the Merkle hash/key from the unfiltered backend if available. + ## Get the Merkle hash/key from the backend if available. let be = db.backend if not be.isNil: return be.getKeyFn(rvid, flags) @@ -63,47 +63,8 @@ # ------------------ -proc getTuvBE*( - db: AristoDbRef; - ): Result[VertexID,AristoError] = - ## Get the ID generator state the `backend` layer if available. - if not db.balancer.isNil: - return ok(db.balancer.vTop) - db.getTuvUbe() - -proc getVtxBE*( - db: AristoDbRef; - rvid: RootedVertexID; - flags: set[GetVtxFlag] = {}; - ): Result[(VertexRef, int),AristoError] = - ## Get the vertex from the (filtered) backend if available. - if not db.balancer.isNil: - db.balancer.sTab.withValue(rvid, w): - if w[].isValid: - return ok (w[], -1) - return err(GetVtxNotFound) - ok (? db.getVtxUbe(rvid, flags), -2) - -proc getKeyBE*( - db: AristoDbRef; - rvid: RootedVertexID; - flags: set[GetVtxFlag]; - ): Result[((HashKey, VertexRef), int),AristoError] = - ## Get the merkle hash/key from the (filtered) backend if available.
- if not db.balancer.isNil: - db.balancer.kMap.withValue(rvid, w): - if w[].isValid: - return ok(((w[], nil), -1)) - db.balancer.sTab.withValue(rvid, s): - if s[].isValid: - return ok(((VOID_HASH_KEY, s[]), -1)) - return err(GetKeyNotFound) - ok ((?db.getKeyUbe(rvid, flags)), -2) - -# ------------------ - proc getVtxRc*( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; flags: set[GetVtxFlag] = {}; ): Result[(VertexRef, int),AristoError] = @@ -120,16 +81,16 @@ proc getVtxRc*( else: return err(GetVtxNotFound) - db.getVtxBE(rvid, flags) + ok (?db.db.getVtxBe(rvid, flags), -2) -proc getVtx*(db: AristoDbRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef = +proc getVtx*(db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef = ## Cascaded attempt to fetch a vertex from the cache layers or the backend. ## The function returns `nil` on error or failure. ## db.getVtxRc(rvid).valueOr((VertexRef(nil), 0))[0] proc getKeyRc*( - db: AristoDbRef; rvid: RootedVertexID, flags: set[GetVtxFlag]): Result[((HashKey, VertexRef), int),AristoError] = + db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag]): Result[((HashKey, VertexRef), int),AristoError] = ## Cascaded attempt to fetch a Merkle hash from the cache layers or the ## backend. This function will never return a `VOID_HASH_KEY` but rather ## some `GetKeyNotFound` or `GetKeyUpdateNeeded` error. @@ -154,9 +115,9 @@ proc getKeyRc*( # The vertex is to be deleted. So is the value key. return err(GetKeyNotFound) - db.getKeyBE(rvid, flags) + ok (?db.db.getKeyBe(rvid, flags), -2) -proc getKey*(db: AristoDbRef; rvid: RootedVertexID): HashKey = +proc getKey*(db: AristoTxRef; rvid: RootedVertexID): HashKey = ## Cascaded attempt to fetch a vertex from the cache layers or the backend. ## The function returns `nil` on error or failure. ## diff --git a/nimbus/db/aristo/aristo_hike.nim b/nimbus/db/aristo/aristo_hike.nim index f627b158e4..2e6f9e12b8 100644 --- a/nimbus/db/aristo/aristo_hike.nim +++ b/nimbus/db/aristo/aristo_hike.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -62,7 +62,7 @@ func legsTo*(hike: Hike; numLegs: int; T: type NibblesBuf): T = # -------- proc step*( - path: NibblesBuf, rvid: RootedVertexID, db: AristoDbRef + path: NibblesBuf, rvid: RootedVertexID, db: AristoTxRef ): Result[(VertexRef, NibblesBuf, VertexID), AristoError] = # Fetch next vertex let (vtx, _) = db.getVtxRc(rvid).valueOr: @@ -102,7 +102,7 @@ proc step*( iterator stepUp*( path: NibblesBuf; # Partial path root: VertexID; # Start vertex - db: AristoDbRef; # Database + db: AristoTxRef; # Database ): Result[VertexRef, AristoError] = ## For the argument `path`, iterate over the longest possible path in the ## argument database `db`.
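`stepUp` promises to walk the trie along the given path, stopping at the longest prefix that actually exists. The following toy sketch mirrors that contract with a character-keyed trie (illustrative types only, not the real `VertexRef`/`NibblesBuf` machinery):

  import std/tables

  type
    Vtx = ref object
      label: string
      children: Table[char, Vtx]

  iterator stepUp(path: string, root: Vtx): Vtx =
    ## Yield every vertex on the longest existing prefix of `path`.
    var cur = root
    yield cur
    for ch in path:
      if ch notin cur.children:
        break
      cur = cur.children[ch]
      yield cur

  when isMainModule:
    let leaf = Vtx(label: "leaf")
    let mid = Vtx(label: "mid", children: {'b': leaf}.toTable)
    let root = Vtx(label: "root", children: {'a': mid}.toTable)
    var seen: seq[string]
    for v in stepUp("abz", root):  # 'z' has no child: walk stops at "leaf"
      seen.add v.label
    assert seen == @["root", "mid", "leaf"]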
@@ -124,7 +124,7 @@ iterator stepUp*( proc hikeUp*( path: NibblesBuf; # Partial path root: VertexID; # Start vertex - db: AristoDbRef; # Database + db: AristoTxRef; # Database leaf: Opt[VertexRef]; hike: var Hike; ): Result[void,(VertexID,AristoError)] = @@ -171,7 +171,7 @@ proc hikeUp*( proc hikeUp*( lty: LeafTie; - db: AristoDbRef; + db: AristoTxRef; leaf: Opt[VertexRef]; hike: var Hike ): Result[void,(VertexID,AristoError)] = @@ -181,7 +181,7 @@ proc hikeUp*( proc hikeUp*( path: openArray[byte]; root: VertexID; - db: AristoDbRef; + db: AristoTxRef; leaf: Opt[VertexRef]; hike: var Hike ): Result[void,(VertexID,AristoError)] = @@ -191,7 +191,7 @@ proc hikeUp*( proc hikeUp*( path: Hash32; root: VertexID; - db: AristoDbRef; + db: AristoTxRef; leaf: Opt[VertexRef]; hike: var Hike ): Result[void,(VertexID,AristoError)] = diff --git a/nimbus/db/aristo/aristo_init/memory_only.nim b/nimbus/db/aristo/aristo_init/memory_only.nim index 6bff63a391..3a61f7cc39 100644 --- a/nimbus/db/aristo/aristo_init/memory_only.nim +++ b/nimbus/db/aristo/aristo_init/memory_only.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -53,11 +53,15 @@ proc init*( ): T = ## Memory backend constructor. ## - when B is VoidBackendRef: - AristoDbRef(top: LayerRef.init()) - elif B is MemBackendRef: - AristoDbRef(top: LayerRef.init(), backend: memoryBackend()) + let db = + when B is VoidBackendRef: + AristoDbRef(txRef: AristoTxRef(layer: LayerRef())) + + elif B is MemBackendRef: + AristoDbRef(txRef: AristoTxRef(layer: LayerRef()), backend: memoryBackend()) + db.txRef.db = db + db proc init*( T: type AristoDbRef; # Target type diff --git a/nimbus/db/aristo/aristo_init/persistent.nim b/nimbus/db/aristo/aristo_init/persistent.nim index 7934b4425e..5c4af5948d 100644 --- a/nimbus/db/aristo/aristo_init/persistent.nim +++ b/nimbus/db/aristo/aristo_init/persistent.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -51,12 +51,16 @@ proc newAristoRdbDbRef( be.closeFn(eradicate = false) return err(rc.error) rc.value - ok((AristoDbRef( - top: LayerRef(vTop: vTop), + db = (AristoDbRef( + txRef: AristoTxRef(layer: LayerRef(vTop: vTop, cTop: vTop)), backend: be, accLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE), stoLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE), - ), oCfs)) + ), oCfs) + + db[0].txRef.db = db[0] # TODO evaluate if this cyclic ref is worth the convenience + + ok(db) # ------------------------------------------------------------------------------ # Public database constructors, destructor diff --git a/nimbus/db/aristo/aristo_layers.nim b/nimbus/db/aristo/aristo_layers.nim index 742caf3eb2..8ac2a9896d 100644 --- a/nimbus/db/aristo/aristo_layers.nim +++ b/nimbus/db/aristo/aristo_layers.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -11,101 +11,52 @@ {.push raises: [].}
import - std/[enumerate, sequtils, sets, tables], + std/[sets, tables], eth/common/hashes, results, - ./aristo_desc - -# ------------------------------------------------------------------------------ -# Private functions -# ------------------------------------------------------------------------------ - -func dup(sTab: Table[RootedVertexID,VertexRef]): Table[RootedVertexID,VertexRef] = - ## Explicit dup for `VertexRef` values - for (k,v) in sTab.pairs: - result[k] = v.dup - -# ------------------------------------------------------------------------------ -# Public getters: lazy value lookup for read only versions -# ------------------------------------------------------------------------------ - -func vTop*(db: AristoDbRef): VertexID = - db.top.vTop - -# ------------------------------------------------------------------------------ -# Public getters/helpers -# ------------------------------------------------------------------------------ - -func nLayersVtx*(db: AristoDbRef): int = - ## Number of vertex ID/vertex entries on the cache layers. This is an upper - ## bound for the number of effective vertex ID mappings held on the cache - ## layers as there might be duplicate entries for the same vertex ID on - ## different layers. - ## - db.stack.mapIt(it.sTab.len).foldl(a + b, db.top.sTab.len) - -func nLayersKey*(db: AristoDbRef): int = - ## Number of vertex ID/key entries on the cache layers. This is an upper - ## bound for the number of effective vertex ID mappingss held on the cache - ## layers as there might be duplicate entries for the same vertex ID on - ## different layers. - ## - db.stack.mapIt(it.kMap.len).foldl(a + b, db.top.kMap.len) + ./aristo_desc, + ../../utils/mergeutils # ------------------------------------------------------------------------------ # Public functions: getter variants # ------------------------------------------------------------------------------ -func layersGetVtx*(db: AristoDbRef; rvid: RootedVertexID): Opt[(VertexRef, int)] = +func layersGetVtx*(db: AristoTxRef; rvid: RootedVertexID): Opt[(VertexRef, int)] = ## Find a vertex on the cache layers. An `ok()` result might contain a ## `nil` vertex if it is stored on the cache that way. ## - db.top.sTab.withValue(rvid, item): - return Opt.some((item[], 0)) - - for i, w in enumerate(db.rstack): + for w, level in db.rstack: w.sTab.withValue(rvid, item): - return Opt.some((item[], i + 1)) + return Opt.some((item[], level)) Opt.none((VertexRef, int)) -func layersGetKey*(db: AristoDbRef; rvid: RootedVertexID): Opt[(HashKey, int)] = +func layersGetKey*(db: AristoTxRef; rvid: RootedVertexID): Opt[(HashKey, int)] = ## Find a hash key on the cache layers. An `ok()` result might contain a void ## hash key if it is stored on the cache that way. 
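The rewritten getters above lean on an `rstack` iterator that replaces the old `top`-then-`stack` probing: it walks from the current frame back towards the base, tagging each layer with its depth. The real definition lives in `aristo_desc`; a plausible minimal version, reusing the abbreviated types from the sketch above:

iterator rstack(tx: SketchTx): (SketchLayer, int) =
  # Newest frame first, so later writes shadow earlier ones; `level`
  # counts hops from the current frame towards the base.
  var
    cur = tx
    level = 0
  while cur != nil:
    yield (cur.layer, level)
    cur = cur.parent
    inc level
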
  ##
-  db.top.kMap.withValue(rvid, item):
-    return Opt.some((item[], 0))
-  if rvid in db.top.sTab:
-    return Opt.some((VOID_HASH_KEY, 0))
-
-  for i, w in enumerate(db.rstack):
+  for w, level in db.rstack:
     w.kMap.withValue(rvid, item):
-      return ok((item[], i + 1))
+      return ok((item[], level))
     if rvid in w.sTab:
-      return Opt.some((VOID_HASH_KEY, i + 1))
+      return Opt.some((VOID_HASH_KEY, level))

   Opt.none((HashKey, int))

-func layersGetKeyOrVoid*(db: AristoDbRef; rvid: RootedVertexID): HashKey =
+func layersGetKeyOrVoid*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
   ## Simplified version of `layersGetKey()`
   (db.layersGetKey(rvid).valueOr (VOID_HASH_KEY, 0))[0]

-func layersGetAccLeaf*(db: AristoDbRef; accPath: Hash32): Opt[VertexRef] =
-  db.top.accLeaves.withValue(accPath, item):
-    return Opt.some(item[])
-
-  for w in db.rstack:
+func layersGetAccLeaf*(db: AristoTxRef; accPath: Hash32): Opt[VertexRef] =
+  for w, _ in db.rstack:
     w.accLeaves.withValue(accPath, item):
       return Opt.some(item[])

   Opt.none(VertexRef)

-func layersGetStoLeaf*(db: AristoDbRef; mixPath: Hash32): Opt[VertexRef] =
-  db.top.stoLeaves.withValue(mixPath, item):
-    return Opt.some(item[])
-
-  for w in db.rstack:
+func layersGetStoLeaf*(db: AristoTxRef; mixPath: Hash32): Opt[VertexRef] =
+  for w, _ in db.rstack:
     w.stoLeaves.withValue(mixPath, item):
       return Opt.some(item[])

@@ -116,48 +67,47 @@ func layersGetStoLeaf*(db: AristoDbRef; mixPath: Hash32): Opt[VertexRef] =
 # ------------------------------------------------------------------------------

 func layersPutVtx*(
-    db: AristoDbRef;
+    db: AristoTxRef;
     rvid: RootedVertexID;
     vtx: VertexRef;
       ) =
   ## Store a (potentially empty) vertex on the top layer
-  db.top.sTab[rvid] = vtx
-  db.top.kMap.del(rvid)
+  db.layer.sTab[rvid] = vtx
+  db.layer.kMap.del(rvid)

 func layersResVtx*(
-    db: AristoDbRef;
+    db: AristoTxRef;
     rvid: RootedVertexID;
       ) =
   ## Shortcut for `db.layersPutVtx(vid, VertexRef(nil))`. It is sort of the
   ## equivalent of a delete function.
   db.layersPutVtx(rvid, VertexRef(nil))

-
 func layersPutKey*(
-    db: AristoDbRef;
+    db: AristoTxRef;
     rvid: RootedVertexID;
     vtx: VertexRef,
     key: HashKey;
       ) =
   ## Store a (potentially void) hash key on the top layer
-  db.top.sTab[rvid] = vtx
-  db.top.kMap[rvid] = key
+  db.layer.sTab[rvid] = vtx
+  db.layer.kMap[rvid] = key

-func layersResKey*(db: AristoDbRef; rvid: RootedVertexID, vtx: VertexRef) =
+func layersResKey*(db: AristoTxRef; rvid: RootedVertexID, vtx: VertexRef) =
   ## Shortcut for `db.layersPutKey(vid, VOID_HASH_KEY)`. It is sort of the
   ## equivalent of a delete function.
db.layersPutVtx(rvid, vtx) -func layersResKeys*(db: AristoDbRef; hike: Hike) = +func layersResKeys*(db: AristoTxRef; hike: Hike) = ## Reset all cached keys along the given hike for i in 1..hike.legs.len: db.layersResKey((hike.root, hike.legs[^i].wp.vid), hike.legs[^i].wp.vtx) -func layersPutAccLeaf*(db: AristoDbRef; accPath: Hash32; leafVtx: VertexRef) = - db.top.accLeaves[accPath] = leafVtx +func layersPutAccLeaf*(db: AristoTxRef; accPath: Hash32; leafVtx: VertexRef) = + db.layer.accLeaves[accPath] = leafVtx -func layersPutStoLeaf*(db: AristoDbRef; mixPath: Hash32; leafVtx: VertexRef) = - db.top.stoLeaves[mixPath] = leafVtx +func layersPutStoLeaf*(db: AristoTxRef; mixPath: Hash32; leafVtx: VertexRef) = + db.layer.stoLeaves[mixPath] = leafVtx # ------------------------------------------------------------------------------ # Public functions @@ -165,64 +115,60 @@ func layersPutStoLeaf*(db: AristoDbRef; mixPath: Hash32; leafVtx: VertexRef) = func isEmpty*(ly: LayerRef): bool = ## Returns `true` if the layer does not contain any changes, i.e. all the - ## tables are empty. The field `txUid` is ignored, here. + ## tables are empty. ly.sTab.len == 0 and ly.kMap.len == 0 and ly.accLeaves.len == 0 and ly.stoLeaves.len == 0 - -func layersMergeOnto*(src: LayerRef; trg: var LayerObj) = - ## Merges the argument `src` into the argument `trg` and returns `trg`. For - ## the result layer, the `txUid` value set to `0`. - ## - trg.txUid = 0 - - for (vid,vtx) in src.sTab.pairs: - trg.sTab[vid] = vtx - trg.kMap.del vid - for (vid,key) in src.kMap.pairs: - trg.kMap[vid] = key +proc mergeAndReset*(trg, src: var Layer) = + ## Merges the argument `src` into the argument `trg` and clears `src`. trg.vTop = src.vTop - for (accPath,leafVtx) in src.accLeaves.pairs: - trg.accLeaves[accPath] = leafVtx - for (mixPath,leafVtx) in src.stoLeaves.pairs: - trg.stoLeaves[mixPath] = leafVtx - -func layersCc*(db: AristoDbRef; level = high(int)): LayerRef = - ## Provide a collapsed copy of layers up to a particular transaction level. - ## If the `level` argument is too large, the maximum transaction level is - ## returned. For the result layer, the `txUid` value set to `0`. - ## - let layers = if db.stack.len <= level: db.stack & @[db.top] - else: db.stack[0 .. level] - - # Set up initial layer (bottom layer) - result = LayerRef( - sTab: layers[0].sTab.dup, # explicit dup for ref values - kMap: layers[0].kMap, - vTop: layers[^1].vTop, - accLeaves: layers[0].accLeaves, - stoLeaves: layers[0].stoLeaves) - - # Consecutively merge other layers on top - for n in 1 ..< layers.len: - for (vid,vtx) in layers[n].sTab.pairs: - result.sTab[vid] = vtx - result.kMap.del vid - for (vid,key) in layers[n].kMap.pairs: - result.kMap[vid] = key - for (accPath,vtx) in layers[n].accLeaves.pairs: - result.accLeaves[accPath] = vtx - for (mixPath,vtx) in layers[n].stoLeaves.pairs: - result.stoLeaves[mixPath] = vtx + + if trg.kMap.len > 0: + # Invalidate cached keys in the lower layer + for vid in src.sTab.keys: + trg.kMap.del vid + + mergeAndReset(trg.sTab, src.sTab) + mergeAndReset(trg.kMap, src.kMap) + mergeAndReset(trg.accLeaves, src.accLeaves) + mergeAndReset(trg.stoLeaves, src.stoLeaves) + +# func layersCc*(db: AristoDbRef; level = high(int)): LayerRef = +# ## Provide a collapsed copy of layers up to a particular transaction level. +# ## If the `level` argument is too large, the maximum transaction level is +# ## returned. +# ## +# let layers = if db.stack.len <= level: db.stack & @[db.top] +# else: db.stack[0 .. 
level]
+
+#   # Set up initial layer (bottom layer)
+#   result = LayerRef(
+#     sTab: layers[0].sTab.dup,          # explicit dup for ref values
+#     kMap: layers[0].kMap,
+#     vTop: layers[^1].vTop,
+#     accLeaves: layers[0].accLeaves,
+#     stoLeaves: layers[0].stoLeaves)
+
+#   # Consecutively merge other layers on top
+#   for n in 1 ..< layers.len:
+#     for (vid,vtx) in layers[n].sTab.pairs:
+#       result.sTab[vid] = vtx
+#       result.kMap.del vid
+#     for (vid,key) in layers[n].kMap.pairs:
+#       result.kMap[vid] = key
+#     for (accPath,vtx) in layers[n].accLeaves.pairs:
+#       result.accLeaves[accPath] = vtx
+#     for (mixPath,vtx) in layers[n].stoLeaves.pairs:
+#       result.stoLeaves[mixPath] = vtx

 # ------------------------------------------------------------------------------
 # Public iterators
 # ------------------------------------------------------------------------------

 iterator layersWalkVtx*(
-    db: AristoDbRef;
+    db: AristoTxRef;
     seen: var HashSet[VertexID];
       ): tuple[rvid: RootedVertexID, vtx: VertexRef] =
   ## Walk over all `(VertexID,VertexRef)` pairs on the cache layers. Note that
@@ -232,18 +178,14 @@ iterator layersWalkVtx*(
   ## the one with a zero vertex which are otherwise skipped by the iterator.
   ## The `seen` argument must not be modified while the iterator is active.
   ##
-  for (rvid,vtx) in db.top.sTab.pairs:
-    yield (rvid,vtx)
-    seen.incl rvid.vid
-
-  for w in db.rstack:
+  for w, _ in db.rstack:
     for (rvid,vtx) in w.sTab.pairs:
       if rvid.vid notin seen:
         yield (rvid,vtx)
         seen.incl rvid.vid

 iterator layersWalkVtx*(
-    db: AristoDbRef;
+    db: AristoTxRef;
       ): tuple[rvid: RootedVertexID, vtx: VertexRef] =
   ## Variant of `layersWalkVtx()`.
   var seen: HashSet[VertexID]
@@ -252,16 +194,12 @@ iterator layersWalkVtx*(

 iterator layersWalkKey*(
-    db: AristoDbRef;
+    db: AristoTxRef;
       ): tuple[rvid: RootedVertexID, key: HashKey] =
   ## Walk over all `(VertexID,HashKey)` pairs on the cache layers. Note that
   ## entries are unsorted.
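Usage of the walk iterators is unchanged apart from the receiver type; shadowing still works the same way, with a `nil` vertex in a newer frame hiding an older entry. For instance, counting the live vertices visible from one frame; a sketch against the signatures above, with `countLiveVtx` being a hypothetical helper:

proc countLiveVtx(tx: AristoTxRef): int =
  # `layersWalkVtx` yields each vertex ID at most once, newest layer
  # first; a nil vertex records a deletion staged in that frame.
  for _, vtx in tx.layersWalkVtx():
    if not vtx.isNil:
      inc result
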
var seen: HashSet[VertexID] - for (rvid,key) in db.top.kMap.pairs: - yield (rvid,key) - seen.incl rvid.vid - - for w in db.rstack: + for w, _ in db.rstack: for (rvid,key) in w.kMap.pairs: if rvid.vid notin seen: yield (rvid,key) diff --git a/nimbus/db/aristo/aristo_merge.nim b/nimbus/db/aristo/aristo_merge.nim index bafc22c6fe..84fa597bdd 100644 --- a/nimbus/db/aristo/aristo_merge.nim +++ b/nimbus/db/aristo/aristo_merge.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -32,14 +32,14 @@ import proc layersPutLeaf( - db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload + db: AristoTxRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload ): VertexRef = let vtx = VertexRef(vType: Leaf, pfx: path, lData: payload) db.layersPutVtx(rvid, vtx) vtx proc mergePayloadImpl( - db: AristoDbRef, # Database, top layer + db: AristoTxRef, # Database, top layer root: VertexID, # MPT state root path: Hash32, # Leaf item to add to the database leaf: Opt[VertexRef], @@ -171,7 +171,7 @@ proc mergePayloadImpl( # ------------------------------------------------------------------------------ proc mergeAccountRecord*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer accPath: Hash32; # Even nibbled byte path accRec: AristoAccount; # Account data ): Result[bool,AristoError] = @@ -201,7 +201,7 @@ proc mergeAccountRecord*( ok true proc mergeStorageData*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer accPath: Hash32; # Needed for accounts payload stoPath: Hash32; # Storage data path (aka key) stoData: UInt256; # Storage data payload value diff --git a/nimbus/db/aristo/aristo_nearby.nim b/nimbus/db/aristo/aristo_nearby.nim index 323ead85cc..71d232b086 100644 --- a/nimbus/db/aristo/aristo_nearby.nim +++ b/nimbus/db/aristo/aristo_nearby.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -85,7 +85,7 @@ proc toLeafTiePayload(hike: Hike): (LeafTie,LeafPayload) = proc complete( hike: Hike; # Partially expanded chain of vertices vid: VertexID; # Start ID - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer hikeLenMax: static[int]; # Beware of loops (if any) doLeast: static[bool]; # Direction: *least* or *most* ): Result[Hike,(VertexID,AristoError)] = @@ -124,7 +124,7 @@ proc complete( proc zeroAdjust( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer doLeast: static[bool]; # Direction: *least* or *most* ): Result[Hike,(VertexID,AristoError)] = ## Adjust empty argument path to the first vertex entry to the right. 
Ths @@ -142,7 +142,7 @@ proc zeroAdjust( else: w.branchNibbleMax n - proc toHike(pfx: NibblesBuf, root: VertexID, db: AristoDbRef): Hike = + proc toHike(pfx: NibblesBuf, root: VertexID, db: AristoTxRef): Hike = when doLeast: discard pfx.pathPfxPad(0).hikeUp(root, db, Opt.none(VertexRef), result) else: @@ -197,7 +197,7 @@ proc zeroAdjust( proc finalise( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer moveRight: static[bool]; # Direction of next vertex ): Result[Hike,(VertexID,AristoError)] = ## Handle some pathological cases after main processing failed @@ -251,7 +251,7 @@ proc finalise( proc nearbyNext( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer hikeLenMax: static[int]; # Beware of loops (if any) moveRight: static[bool]; # Direction of next vertex ): Result[Hike,(VertexID,AristoError)] = @@ -343,7 +343,7 @@ proc nearbyNext( proc nearbyNextLeafTie( lty: LeafTie; # Some `Patricia Trie` path - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer hikeLenMax: static[int]; # Beware of loops (if any) moveRight:static[bool]; # Direction of next vertex ): Result[PathID,(VertexID,AristoError)] = @@ -368,7 +368,7 @@ proc nearbyNextLeafTie( proc right*( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[Hike,(VertexID,AristoError)] = ## Extends the maximally extended argument vertices `hike` to the right (i.e. ## with non-decreasing path value). This function does not backtrack if @@ -383,7 +383,7 @@ proc right*( proc right*( lty: LeafTie; # Some `Patricia Trie` path - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[LeafTie,(VertexID,AristoError)] = ## Variant of `nearbyRight()` working with a `LeafTie` argument instead ## of a `Hike`. @@ -392,7 +392,7 @@ proc right*( path: ? lty.nearbyNextLeafTie(db, 64, moveRight=true)) iterator rightPairs*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer start = low(LeafTie); # Before or at first value ): (LeafTie,LeafPayload) = ## Traverse the sub-trie implied by the argument `start` with increasing @@ -432,7 +432,7 @@ iterator rightPairs*( # End while iterator rightPairsAccount*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer start = low(PathID); # Before or at first value ): (PathID,AristoAccount) = ## Variant of `rightPairs()` for accounts tree @@ -440,7 +440,7 @@ iterator rightPairsAccount*( yield (lty.path, pyl.account) iterator rightPairsStorage*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer accPath: Hash32; # Account the storage data belong to start = low(PathID); # Before or at first value ): (PathID,UInt256) = @@ -456,7 +456,7 @@ iterator rightPairsStorage*( proc left*( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[Hike,(VertexID,AristoError)] = ## Similar to `nearbyRight()`. ## @@ -466,7 +466,7 @@ proc left*( proc left*( lty: LeafTie; # Some `Patricia Trie` path - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[LeafTie,(VertexID,AristoError)] = ## Similar to `nearbyRight()` for `LeafTie` argument instead of a `Hike`. ok LeafTie( @@ -474,7 +474,7 @@ proc left*( path: ? 
lty.nearbyNextLeafTie(db, 64, moveRight=false)) iterator leftPairs*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer start = high(LeafTie); # Before or at first value ): (LeafTie,LeafPayload) = ## Traverse the sub-trie implied by the argument `start` with decreasing @@ -523,7 +523,7 @@ iterator leftPairs*( proc rightMissing*( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[bool,AristoError] = ## Returns `true` if the maximally extended argument vertex `hike` is the ## right most on the hexary trie database. It verifies that there is no more diff --git a/nimbus/db/aristo/aristo_part.nim b/nimbus/db/aristo/aristo_part.nim index c9ebf8a9e3..28dda229c0 100644 --- a/nimbus/db/aristo/aristo_part.nim +++ b/nimbus/db/aristo/aristo_part.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -67,7 +67,7 @@ iterator vkPairs*(ps: PartStateRef): (RootedVertexID, HashKey) = # ------------------------------------------------------------------------------ proc partTwig( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; path: NibblesBuf; ): Result[(seq[seq[byte]],bool), AristoError] = @@ -88,13 +88,13 @@ proc partTwig( err(rc.error) proc partAccountTwig*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] = db.partTwig(VertexID(1), NibblesBuf.fromBytes accPath.data) proc partStorageTwig*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] = @@ -113,9 +113,7 @@ proc partUntwigPath*( root: Hash32; path: Hash32; ): Result[Opt[seq[byte]],AristoError] = - ## Verify the chain of rlp-encoded nodes and return the payload. If a - ## `Opt.none()` result is returned then the `path` argument does provably - ## not exist relative to `chain`. + ## Variant of `partUntwigGeneric()`. 
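The twig helpers above now hang off a transaction frame as well, so proofs are generated against one frame's view of the trie. A sketch of producing an account proof under the signatures in this hunk (helper name hypothetical):

proc accountProofSketch(tx: AristoTxRef, accPath: Hash32): bool =
  let (chain, exists) = tx.partAccountTwig(accPath).valueOr:
    return false                    # e.g. missing root or dangling link
  # `chain` holds the rlp-encoded nodes from the state root towards the
  # leaf; `exists` says whether the account is actually present.
  echo "proof nodes: ", chain.len, ", account exists: ", exists
  exists
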
try: let nibbles = NibblesBuf.fromBytes path.data diff --git a/nimbus/db/aristo/aristo_part/part_chain_rlp.nim b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim index c0b70571d2..1520280609 100644 --- a/nimbus/db/aristo/aristo_part/part_chain_rlp.nim +++ b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -30,7 +30,7 @@ const # ------------------------------------------------------------------------------ proc chainRlpNodes*( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; path: NibblesBuf, chain: var seq[seq[byte]]; diff --git a/nimbus/db/aristo/aristo_part/part_debug.nim b/nimbus/db/aristo/aristo_part/part_debug.nim index 852b149447..f0cf7e2157 100644 --- a/nimbus/db/aristo/aristo_part/part_debug.nim +++ b/nimbus/db/aristo/aristo_part/part_debug.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -76,7 +76,7 @@ proc pp*[T: PrfNode|PrfExtension]( t0: Table[RootedVertexID,(HashKey,T)] t1: Table[HashKey,T] for (key,val) in t.pairs: - ps.db.xMap.withValue(key,rv): + ps.db.db.xMap.withValue(key,rv): t0[rv[]] = (key,val) do: t1[key] = val diff --git a/nimbus/db/aristo/aristo_part/part_desc.nim b/nimbus/db/aristo/aristo_part/part_desc.nim index bdd2d4395d..441fba849e 100644 --- a/nimbus/db/aristo/aristo_part/part_desc.nim +++ b/nimbus/db/aristo/aristo_part/part_desc.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -17,7 +17,7 @@ import type PartStateRef* = ref object of RootRef - db*: AristoDbRef + db*: AristoTxRef core*: Table[VertexID,HashSet[HashKey]] # Existing vertices pureExt*: Table[HashKey,PrfExtension] # On-demand node (usually hidden) byKey*: Table[HashKey,RootedVertexID] # All keys, instead of `kMap[]` @@ -69,7 +69,7 @@ type # Public helpers # ------------------------------------------------------------------------------ -proc init*(T: type PartStateRef; db: AristoDbRef): T = +proc init*(T: type PartStateRef; db: AristoTxRef): T = ## Constructor for a partial database. 
T(db: db) diff --git a/nimbus/db/aristo/aristo_serialise.nim b/nimbus/db/aristo/aristo_serialise.nim index 0092b7ea55..7a2e8632c1 100644 --- a/nimbus/db/aristo/aristo_serialise.nim +++ b/nimbus/db/aristo/aristo_serialise.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -134,7 +134,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T = wr.finish().digestTo(HashKey) proc serialise*( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; pyl: LeafPayload; ): Result[seq[byte],(VertexID,AristoError)] = diff --git a/nimbus/db/aristo/aristo_tx/tx_frame.nim b/nimbus/db/aristo/aristo_tx/tx_frame.nim index 17610db02e..1bb26521b0 100644 --- a/nimbus/db/aristo/aristo_tx/tx_frame.nim +++ b/nimbus/db/aristo/aristo_tx/tx_frame.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -17,52 +17,11 @@ import results, ".."/[aristo_desc, aristo_layers] -func isTop*(tx: AristoTxRef): bool - -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -func getDbDescFromTopTx(tx: AristoTxRef): Result[AristoDbRef,AristoError] = - if not tx.isTop(): - return err(TxNotTopTx) - let db = tx.db - if tx.level != db.stack.len: - return err(TxStackGarbled) - ok db - -proc getTxUid(db: AristoDbRef): uint = - if db.txUidGen == high(uint): - db.txUidGen = 0 - db.txUidGen.inc - db.txUidGen - -# ------------------------------------------------------------------------------ -# Public functions, getters -# ------------------------------------------------------------------------------ - -func txFrameTop*(db: AristoDbRef): Result[AristoTxRef,AristoError] = - ## Getter, returns top level transaction if there is any. - if db.txRef.isNil: - err(TxNoPendingTx) - else: - ok(db.txRef) - -func isTop*(tx: AristoTxRef): bool = - ## Getter, returns `true` if the argument `tx` referes to the current top - ## level transaction. - tx.db.txRef == tx and tx.db.top.txUid == tx.txUid - -func txFrameLevel*(db: AristoDbRef): int = - ## Getter, non-negative nesting level (i.e. number of pending transactions) - if not db.txRef.isNil: - result = db.txRef.level - # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ -proc txFrameBegin*(db: AristoDbRef): Result[AristoTxRef,AristoError] = +proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): Result[AristoTxRef,AristoError] = ## Starts a new transaction. ## ## Example: @@ -73,23 +32,23 @@ proc txFrameBegin*(db: AristoDbRef): Result[AristoTxRef,AristoError] = ## ... continue using db ... 
  ##     tx.commit()
   ##
-  if db.txFrameLevel != db.stack.len:
-    return err(TxStackGarbled)
-  let vTop = db.top.vTop
-  db.stack.add db.top
-  db.top = LayerRef(
-    vTop: vTop,
-    txUid: db.getTxUid)
+  let parent = if parent == nil:
+    db.txRef
+  else:
+    parent

-  db.txRef = AristoTxRef(
-    db:     db,
-    txUid:  db.top.txUid,
-    parent: db.txRef,
-    level:  db.stack.len)
+  let
+    vTop = parent.layer.vTop
+    layer = LayerRef(vTop: vTop, cTop: vTop)

-  ok db.txRef
+  ok AristoTxRef(
+    db: db,
+    parent: parent,
+    layer: layer)

+proc baseTxFrame*(db: AristoDbRef): AristoTxRef =
+  db.txRef

 proc rollback*(
     tx: AristoTxRef;                  # Top transaction on database
       ): Result[void,AristoError] =
   ## Given a *top level* handle, this function discards all database operations
   ## performed for this transaction. The previous transaction is returned if
   ## there was any.
-  ##
-  let db = ? tx.getDbDescFromTopTx()
+  # TODO Everyone using this txref should repoint their parent field

-  # Roll back to previous layer.
-  db.top = db.stack[^1]
-  db.stack.setLen(db.stack.len-1)
+  let vTop = tx.layer[].cTop
+  tx.layer[] = Layer(vTop: vTop, cTop: vTop)

-  db.txRef = db.txRef.parent
   ok()

 proc commit*(
     tx: AristoTxRef;                  # Top transaction on database
       ): Result[void,AristoError] =
-  ## Given a *top level* handle, this function accepts all database operations
-  ## performed through this handle and merges it to the previous layer. The
-  ## previous transaction is returned if there was any.
+  ## This function pushes all changes done in this frame to its parent.
   ##
-  let db = ? tx.getDbDescFromTopTx()
-
-  # Pop layer from stack and merge database top layer onto it
-  let merged = db.stack.pop()
-  if not merged.isEmpty():
-    # No need to update top if we popped an empty layer
-    if not db.top.isEmpty():
-      # Only call `layersMergeOnto()` if layer is empty
-      db.top.layersMergeOnto merged[]
-
-    # Install `merged` stack top layer and update stack
-    db.top = merged
-
-  db.txRef = tx.parent
-  if 0 < db.stack.len:
-    db.txRef.txUid = db.getTxUid
-    db.top.txUid = db.txRef.txUid
+  # TODO Everyone using this txref should repoint their parent field
+  doAssert tx.parent != nil, "should not commit the base tx"
+
+  # A rollback after commit should reset to the new vTop!
+  tx.layer[].cTop = tx.layer[].vTop
+
+  mergeAndReset(tx.parent.layer[], tx.layer[])

   ok()

@@ -147,11 +91,11 @@ proc collapse*(
   ##   if db.txFrameTop.isErr: break
   ##   tx = db.txFrameTop.value
   ##
-  let db = ? tx.getDbDescFromTopTx()
+  # let db = ? tx.getDbDescFromTopTx()

-  db.top.txUid = 0
-  db.stack.setLen(0)
-  db.txRef = AristoTxRef(nil)
+  # db.top.txUid = 0
+  # db.stack.setLen(0)
+  # db.txRef = AristoTxRef(nil)

   ok()

 # ------------------------------------------------------------------------------
@@ -160,32 +104,33 @@ iterator walk*(tx: AristoTxRef): (int,AristoTxRef,LayerRef,AristoError) =
   ## Walk down the transaction stack chain.
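Taken together, `txFrameBegin`, `commit` and `rollback` replace the old top/stack bookkeeping with an explicit tree of frames: `commit` folds a frame's layer into its parent via `mergeAndReset`, while `rollback` resets the layer to its `cTop` checkpoint. A sketch of the intended lifecycle (helper name and `keep` flag hypothetical):

proc frameLifecycleSketch(db: AristoDbRef, keep: bool) =
  let base = db.baseTxFrame()
  let tx = db.txFrameBegin(base).expect("new frame")
  # ... stage writes through `tx` here ...
  if keep:
    discard tx.commit()     # fold tx.layer into base.layer via mergeAndReset
  else:
    discard tx.rollback()   # reset the layer to its cTop checkpoint
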
- let db = tx.db - var tx = tx - - block body: - # Start at top layer if tx refers to that - if tx.level == db.stack.len: - if tx.txUid != db.top.txUid: - yield (-1,tx,db.top,TxStackGarbled) - break body - - # Yield the top level - yield (0,tx,db.top,AristoError(0)) - - # Walk down the transaction stack - for level in (tx.level-1).countdown(1): - tx = tx.parent - if tx.isNil or tx.level != level: - yield (-1,tx,LayerRef(nil),TxStackGarbled) - break body - - var layer = db.stack[level] - if tx.txUid != layer.txUid: - yield (-1,tx,layer,TxStackGarbled) - break body - - yield (db.stack.len-level,tx,layer,AristoError(0)) + discard + #let db = tx.db + # var tx = tx + + # block body: + # # Start at top layer if tx refers to that + # if tx.level == db.stack.len: + # if tx.txUid != db.top.txUid: + # yield (-1,tx,db.top,TxStackGarbled) + # break body + + # # Yield the top level + # yield (0,tx,db.top,AristoError(0)) + + # # Walk down the transaction stack + # for level in (tx.level-1).countdown(1): + # tx = tx.parent + # if tx.isNil or tx.level != level: + # yield (-1,tx,LayerRef(nil),TxStackGarbled) + # break body + + # var layer = db.stack[level] + # if tx.txUid != layer.txUid: + # yield (-1,tx,layer,TxStackGarbled) + # break body + + # yield (db.stack.len-level,tx,layer,AristoError(0)) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/aristo/aristo_tx/tx_stow.nim b/nimbus/db/aristo/aristo_tx/tx_stow.nim index b141e2bda2..e3b4a2bdd0 100644 --- a/nimbus/db/aristo/aristo_tx/tx_stow.nim +++ b/nimbus/db/aristo/aristo_tx/tx_stow.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -15,8 +15,7 @@ import results, - ../aristo_delta/delta_merge, - ".."/[aristo_desc, aristo_delta, aristo_layers] + ../[aristo_desc, aristo_delta] # ------------------------------------------------------------------------------ # Private functions @@ -25,10 +24,6 @@ import proc txPersistOk*( db: AristoDbRef; # Database ): Result[void,AristoError] = - if not db.txRef.isNil: - return err(TxPendingTx) - if 0 < db.stack.len: - return err(TxStackGarbled) if not db.deltaPersistentOk(): return err(TxBackendNotWritable) ok() @@ -45,17 +40,7 @@ proc txPersist*( ## ? db.txPersistOk() - if not db.top.isEmpty(): - # Note that `deltaMerge()` will return the `db.top` argument if the - # `db.balancer` is `nil`. Also, the `db.balancer` is read-only. In the - # case that there are no forked peers one can ignore that restriction as - # no balancer is shared. - db.balancer = deltaMerge(db.top, db.balancer) - - # New empty top layer - db.top = LayerRef(vTop: db.balancer.vTop) - - # Merge/move `balancer` into persistent tables (unless missing) + # Merge/move `txRef` into persistent tables (unless missing) ? 
db.deltaPersistent nxtSid ok() diff --git a/nimbus/db/aristo/aristo_utils.nim b/nimbus/db/aristo/aristo_utils.nim index 912175aa91..92a7a188fb 100644 --- a/nimbus/db/aristo/aristo_utils.nim +++ b/nimbus/db/aristo/aristo_utils.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -24,7 +24,7 @@ import proc toNode*( vtx: VertexRef; # Vertex to convert root: VertexID; # Sub-tree root the `vtx` belongs to - db: AristoDbRef; # Database + db: AristoTxRef; # Database ): Result[NodeRef,seq[VertexID]] = ## Convert argument the vertex `vtx` to a node type. Missing Merkle hash ## keys are searched for on the argument database `db`. diff --git a/nimbus/db/aristo/aristo_vid.nim b/nimbus/db/aristo/aristo_vid.nim index e8afb5691a..9cf92de900 100644 --- a/nimbus/db/aristo/aristo_vid.nim +++ b/nimbus/db/aristo/aristo_vid.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -20,14 +20,14 @@ import # Public functions # ------------------------------------------------------------------------------ -proc vidFetch*(db: AristoDbRef, n = 1): VertexID = +proc vidFetch*(db: AristoTxRef, n = 1): VertexID = ## Fetch next vertex ID. ## - if db.top.vTop == 0: - db.top.vTop = VertexID(LEAST_FREE_VID) - var ret = db.top.vTop + if db.layer.vTop == 0: + db.layer.vTop = VertexID(LEAST_FREE_VID) + var ret = db.layer.vTop ret.inc - db.top.vTop.inc(n) + db.layer.vTop.inc(n) ret # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_walk.nim b/nimbus/db/aristo/aristo_walk.nim deleted file mode 100644 index 217f916bc4..0000000000 --- a/nimbus/db/aristo/aristo_walk.nim +++ /dev/null @@ -1,26 +0,0 @@ -# Nimbus -# Copyright (c) 2023 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or -# distributed except according to those terms. - -## Backend DB traversal for Aristo DB -## ================================== -## -## This module provides iterators for the memory based backend or the -## backend-less database. Do import `aristo_walk/persistent` for the -## persistent backend though avoiding to unnecessarily link to the persistent -## backend library (e.g. `rocksdb`) when a memory only database is used. 
-## -{.push raises: [].} - -import - ./aristo_walk/memory_only -export - memory_only - -# End diff --git a/nimbus/db/aristo/aristo_walk/persistent.nim b/nimbus/db/aristo/aristo_walk/persistent.nim index 5c6d79fcca..d4a20bcd47 100644 --- a/nimbus/db/aristo/aristo_walk/persistent.nim +++ b/nimbus/db/aristo/aristo_walk/persistent.nim @@ -1,6 +1,6 @@ # Nimbus - Types, data structures and shared utilities used in network sync # -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -36,7 +36,7 @@ iterator walkVtxBe*[T: RdbBackendRef]( db: AristoDbRef; kinds = {Branch, Leaf}; ): tuple[rvid: RootedVertexID, vtx: VertexRef] = - ## Iterate over filtered RocksDB backend vertices. This function depends on + ## Iterate over RocksDB backend vertices. This function depends on ## the particular backend type name which must match the backend descriptor. for (rvid,vtx) in walkVtxBeImpl[T](db, kinds): yield (rvid,vtx) diff --git a/nimbus/db/aristo/aristo_walk/walk_private.nim b/nimbus/db/aristo/aristo_walk/walk_private.nim index 6933da8077..68b6304011 100644 --- a/nimbus/db/aristo/aristo_walk/walk_private.nim +++ b/nimbus/db/aristo/aristo_walk/walk_private.nim @@ -1,6 +1,6 @@ # Nimbus - Types, data structures and shared utilities used in network sync # -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -24,14 +24,14 @@ iterator walkVtxBeImpl*[T]( ): tuple[rvid: RootedVertexID, vtx: VertexRef] = ## Generic iterator when T is VoidBackendRef: - let filter = if db.balancer.isNil: LayerRef() else: db.balancer + let filter = if db.txRef.isNil: LayerRef() else: db.txRef.layer else: mixin walkVtx let filter = LayerRef() - if not db.balancer.isNil: - filter.sTab = db.balancer.sTab # copy table + if not db.txRef.isNil: + filter.sTab = db.txRef.layer.sTab # copy table for (rvid,vtx) in db.backend.T.walkVtx(kinds): if filter.sTab.hasKey rvid: @@ -55,14 +55,14 @@ iterator walkKeyBeImpl*[T]( ): tuple[rvid: RootedVertexID, key: HashKey] = ## Generic iterator when T is VoidBackendRef: - let filter = if db.balancer.isNil: LayerRef() else: db.balancer + let filter = if db.txRef.isNil: LayerRef() else: db.txRef.layer else: mixin walkKey let filter = LayerRef() - if not db.balancer.isNil: - filter.kMap = db.balancer.kMap # copy table + if not db.txRef.isNil: + filter.kMap = db.txRef.layer.kMap # copy table for (rvid,key) in db.backend.T.walkKey: if filter.kMap.hasKey rvid: diff --git a/nimbus/db/core_db/backend/aristo_db.nim b/nimbus/db/core_db/backend/aristo_db.nim index 7caa55b361..a9edf81073 100644 --- a/nimbus/db/core_db/backend/aristo_db.nim +++ b/nimbus/db/core_db/backend/aristo_db.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -12,9 +12,9 @@ import ../../aristo as use_ari, - ../../aristo/[aristo_init/memory_only, aristo_walk], + ../../aristo/[aristo_init/memory_only, aristo_walk/memory_only], ../../kvt as use_kvt, - 
../../kvt/[kvt_init/memory_only, kvt_walk], + ../../kvt/[kvt_init/memory_only, kvt_walk/memory_only], ../base/[base_config, base_desc, base_helpers] # ------------------------------------------------------------------------------ diff --git a/nimbus/db/core_db/backend/aristo_rocksdb.nim b/nimbus/db/core_db/backend/aristo_rocksdb.nim index dd70c92114..6719c8a702 100644 --- a/nimbus/db/core_db/backend/aristo_rocksdb.nim +++ b/nimbus/db/core_db/backend/aristo_rocksdb.nim @@ -171,7 +171,7 @@ proc newAristoRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef = if opts.rdbKeyCacheSize > 0: # Make sure key cache isn't empty - adb.computeKeys(VertexID(1)).isOkOr: + adb.txRef.computeKeys(VertexID(1)).isOkOr: fatal "Cannot compute root keys", msg=error quit(QuitFailure) diff --git a/nimbus/db/core_db/backend/aristo_trace.nim b/nimbus/db/core_db/backend/aristo_trace.nim index ad3bb9a954..1794168bbe 100644 --- a/nimbus/db/core_db/backend/aristo_trace.nim +++ b/nimbus/db/core_db/backend/aristo_trace.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -349,7 +349,7 @@ proc kvtTraceRecorder(tr: TraceRecorderRef) = # Update production api tracerApi.get = - proc(kvt: KvtDbRef; key: openArray[byte]): Result[seq[byte],KvtError] = + proc(kvt: KvtTxRef; key: openArray[byte]): Result[seq[byte],KvtError] = const info = KvtApiProfGetFn when CoreDbNoisyCaptJournal: @@ -368,7 +368,7 @@ proc kvtTraceRecorder(tr: TraceRecorderRef) = ok(data) tracerApi.del = - proc(kvt: KvtDbRef; key: openArray[byte]): Result[void,KvtError] = + proc(kvt: KvtTxRef; key: openArray[byte]): Result[void,KvtError] = const info = KvtApiProfDelFn when CoreDbNoisyCaptJournal: @@ -402,7 +402,7 @@ proc kvtTraceRecorder(tr: TraceRecorderRef) = ok() tracerApi.put = - proc(kvt: KvtDbRef; key, data: openArray[byte]): Result[void,KvtError] = + proc(kvt: KvtTxRef; key, data: openArray[byte]): Result[void,KvtError] = const info = KvtApiProfPutFn when CoreDbNoisyCaptJournal: @@ -445,7 +445,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = tr.db.ariApi = tracerApi tracerApi.fetchAccountRecord = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; ): Result[AristoAccount,AristoError] = const info = AristoApiProfFetchAccountRecordFn @@ -467,7 +467,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok accRec tracerApi.fetchStateRoot = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; ): Result[Hash32,AristoError] = const info = AristoApiProfFetchStateRootFn @@ -488,7 +488,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok state tracerApi.fetchStorageData = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] = @@ -511,7 +511,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok stoData tracerApi.fetchStorageRoot = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; ): Result[Hash32,AristoError] = const info = AristoApiProfFetchStorageRootFn @@ -533,7 +533,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok state tracerApi.deleteAccountRecord = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] = const info = AristoApiProfDeleteAccountRecordFn @@ -569,7 +569,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok() tracerApi.deleteStorageData = - proc(mpt: AristoDbRef; + 
proc(mpt: AristoTxRef;
           accPath: Hash32;
           stoPath: Hash32;
          ): Result[bool,AristoError] =
@@ -605,7 +605,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) =
         ok emptyTrie

     tracerApi.deleteStorageTree =
-      proc(mpt: AristoDbRef;
+      proc(mpt: AristoTxRef;
           accPath: Hash32;
          ): Result[void,AristoError] =
         const info = AristoApiProfDeleteStorageTreeFn
@@ -628,7 +628,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) =
         ok()

     tracerApi.mergeAccountRecord =
-      proc(mpt: AristoDbRef;
+      proc(mpt: AristoTxRef;
           accPath: Hash32;
           accRec: AristoAccount;
          ): Result[bool,AristoError] =
@@ -661,7 +661,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) =
         ok updated

     tracerApi.mergeStorageData =
-      proc(mpt: AristoDbRef;
+      proc(mpt: AristoTxRef;
           accPath: Hash32;
           stoPath: Hash32;
           stoData: UInt256;
diff --git a/nimbus/db/core_db/base.nim b/nimbus/db/core_db/base.nim
index 8c91b9aedf..d9e5ac6f94 100644
--- a/nimbus/db/core_db/base.nim
+++ b/nimbus/db/core_db/base.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -68,6 +68,18 @@ proc ctx*(db: CoreDbRef): CoreDbCtxRef =
   ##
   db.defCtx

+proc baseTxFrame*(db: CoreDbRef): CoreDbTxRef =
+  ## The base tx frame is a staging area for reading and writing "almost"
+  ## directly from/to the database without using any pending frames - when a
+  ## transaction created using `txFrameBegin` is committed, it ultimately ends
+  ## up in the base tx frame before being persisted to the database with a
+  ## persist call.
+
+  CoreDbTxRef(
+    ctx: db.ctx,
+    aTx: db.ctx.parent.ariApi.call(baseTxFrame, db.ctx.mpt),
+    kTx: db.ctx.parent.kvtApi.call(baseTxFrame, db.ctx.kvt))
+
 # ------------------------------------------------------------------------------
 # Public base descriptor methods
 # ------------------------------------------------------------------------------
@@ -102,17 +114,14 @@ proc persistent*(
   ##
   db.setTrackNewApi BasePersistentFn
   block body:
-    block:
-      let rc = CoreDbKvtRef(db.ctx).call(persist, db.ctx.kvt)
-      if rc.isOk or rc.error == TxPersistDelayed:
-        # The latter clause is OK: Piggybacking on `Aristo` backend
-        discard
-      elif CoreDbKvtRef(db.ctx).call(txFrameLevel, db.ctx.kvt) != 0:
-        result = err(rc.error.toError($api, TxPending))
-        break body
-      else:
-        result = err(rc.error.toError $api)
-        break body
+    let rc = CoreDbKvtRef(db.ctx).call(persist, db.ctx.kvt)
+    if rc.isOk or rc.error == TxPersistDelayed:
+      # The latter clause is OK: Piggybacking on `Aristo` backend
+      discard
+    else:
+      result = err(rc.error.toError $api)
+      break body
+
     # Having reached here `Aristo` must not fail as both `Kvt` and `Aristo`
     # are kept in sync. So if there is a legit fail condition it must be
     # caught in the previous clause.
@@ -121,13 +130,13 @@ proc persistent*(
     result = ok()
   db.ifTrackNewApi: debug logTxt, api, elapsed, blockNumber, result

-proc stateBlockNumber*(db: CoreDbRef): BlockNumber =
-  ## Rhis function returns the block number stored with the latest `persist()`
+proc stateBlockNumber*(db: CoreDbTxRef): BlockNumber =
+  ## This function returns the block number stored with the latest `persist()`
   ## directive.
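The same frame-first pattern now shows up at the `CoreDb` layer: queries that used to take the database handle take a frame, and the base frame is the near-direct view of what is on disk. A sketch of reading the latest persisted block number through the signatures above (helper name hypothetical):

proc latestPersistedBlock(db: CoreDbRef): BlockNumber =
  # The base frame bypasses any pending frames, so this reflects the
  # state written by the most recent persist() call.
  db.baseTxFrame().stateBlockNumber()
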
## db.setTrackNewApi BaseStateBlockNumberFn result = block: - let rc = CoreDbAccRef(db.ctx).call(fetchLastSavedState, db.ctx.mpt) + let rc = db.ctx.parent.ariApi.call(fetchLastSavedState, db.aTx) if rc.isOk: rc.value.serial.BlockNumber else: @@ -169,11 +178,11 @@ proc getKvt*(ctx: CoreDbCtxRef): CoreDbKvtRef = # ----------- KVT --------------- -proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = +proc get*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[seq[byte]] = ## This function always returns a non-empty `seq[byte]` or an error code. kvt.setTrackNewApi KvtGetFn result = block: - let rc = kvt.call(get, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(get, kvt.kTx, key) if rc.isOk: ok(rc.value) elif rc.error == GetNotFound: @@ -182,13 +191,13 @@ proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc getOrEmpty*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = +proc getOrEmpty*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[seq[byte]] = ## Variant of `get()` returning an empty `seq[byte]` if the key is not found ## on the database. ## kvt.setTrackNewApi KvtGetOrEmptyFn result = block: - let rc = kvt.call(get, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(get, kvt.kTx, key) if rc.isOk: ok(rc.value) elif rc.error == GetNotFound: @@ -197,11 +206,11 @@ proc getOrEmpty*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc len*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[int] = +proc len*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[int] = ## This function returns the size of the value associated with `key`. kvt.setTrackNewApi KvtLenFn result = block: - let rc = kvt.call(len, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(len, kvt.kTx, key) if rc.isOk: ok(rc.value) elif rc.error == GetNotFound: @@ -210,10 +219,10 @@ proc len*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[int] = err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[void] = +proc del*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[void] = kvt.setTrackNewApi KvtDelFn result = block: - let rc = kvt.call(del, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(del, kvt.kTx, key) if rc.isOk: ok() else: @@ -221,13 +230,13 @@ proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[void] = kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result proc put*( - kvt: CoreDbKvtRef; + kvt: CoreDbTxRef; key: openArray[byte]; val: openArray[byte]; ): CoreDbRc[void] = kvt.setTrackNewApi KvtPutFn result = block: - let rc = kvt.call(put, kvt.kvt, key, val) + let rc = kvt.ctx.parent.kvtApi.call(put, kvt.kTx, key, val) if rc.isOk: ok() else: @@ -235,21 +244,21 @@ proc put*( kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result -proc hasKeyRc*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[bool] = +proc hasKeyRc*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[bool] = ## For the argument `key` return `true` if `get()` returned a value on ## that argument, `false` if it returned `GetNotFound`, and an error ## otherwise. 
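With the KVT accessors re-homed from `CoreDbKvtRef` onto `CoreDbTxRef`, raw key-value traffic is frame-scoped too. A minimal round-trip sketch using the calls from this hunk (helper name hypothetical):

proc kvtRoundTrip(txFrame: CoreDbTxRef): bool =
  const key = [byte 1, 2, 3]
  txFrame.put(key, [byte 42]).isOkOr:
    return false
  # Reads observe writes staged in the same frame (or a parent frame).
  let val = txFrame.getOrEmpty(key).valueOr:
    return false
  val == @[byte 42]
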
## kvt.setTrackNewApi KvtHasKeyRcFn result = block: - let rc = kvt.call(hasKeyRc, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(hasKeyRc, kvt.kTx, key) if rc.isOk: ok(rc.value) else: err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc hasKey*(kvt: CoreDbKvtRef; key: openArray[byte]): bool = +proc hasKey*(kvt: CoreDbTxRef; key: openArray[byte]): bool = ## Simplified version of `hasKeyRc` where `false` is returned instead of ## an error. ## @@ -257,7 +266,7 @@ proc hasKey*(kvt: CoreDbKvtRef; key: openArray[byte]): bool = ## `Tables`. ## kvt.setTrackNewApi KvtHasKeyFn - result = kvt.call(hasKeyRc, kvt.kvt, key).valueOr: false + result = kvt.ctx.parent.kvtApi.call(hasKeyRc, kvt.kTx, key).valueOr: false kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result # ------------------------------------------------------------------------------ @@ -274,7 +283,7 @@ proc getAccounts*(ctx: CoreDbCtxRef): CoreDbAccRef = # ----------- accounts --------------- proc proof*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[(seq[seq[byte]],bool)] = ## On the accounts MPT, collect the nodes along the `accPath` interpreted as @@ -285,7 +294,7 @@ proc proof*( ## acc.setTrackNewApi AccProofFn result = block: - let rc = acc.call(partAccountTwig, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(partAccountTwig, acc.aTx, accPath) if rc.isOk: ok(rc.value) else: @@ -293,7 +302,7 @@ proc proof*( acc.ifTrackNewApi: debug logTxt, api, elapsed, result proc fetch*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[CoreDbAccount] = ## Fetch the account data record for the particular account indexed by @@ -301,7 +310,7 @@ proc fetch*( ## acc.setTrackNewApi AccFetchFn result = block: - let rc = acc.call(fetchAccountRecord, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(fetchAccountRecord, acc.aTx, accPath) if rc.isOk: ok(rc.value) elif rc.error == FetchPathNotFound: @@ -311,7 +320,7 @@ proc fetch*( acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result proc delete*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[void] = ## Delete the particular account indexed by the key `accPath`. 
This @@ -319,7 +328,7 @@ proc delete*( ## acc.setTrackNewApi AccDeleteFn result = block: - let rc = acc.call(deleteAccountRecord, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(deleteAccountRecord, acc.aTx, accPath) if rc.isOk: ok() elif rc.error == DelPathNotFound: @@ -331,7 +340,7 @@ proc delete*( debug logTxt, api, elapsed, accPath=($$accPath), result proc clearStorage*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[void] = ## Delete all data slots from the storage area associated with the @@ -339,7 +348,7 @@ proc clearStorage*( ## acc.setTrackNewApi AccClearStorageFn result = block: - let rc = acc.call(deleteStorageTree, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(deleteStorageTree, acc.aTx, accPath) if rc.isOk or rc.error in {DelStoRootMissing,DelStoAccMissing}: ok() else: @@ -348,7 +357,7 @@ proc clearStorage*( debug logTxt, api, elapsed, accPath=($$accPath), result proc merge*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; accRec: CoreDbAccount; ): CoreDbRc[void] = @@ -357,7 +366,7 @@ proc merge*( ## acc.setTrackNewApi AccMergeFn result = block: - let rc = acc.call(mergeAccountRecord, acc.mpt, accPath, accRec) + let rc = acc.ctx.parent.ariApi.call(mergeAccountRecord, acc.aTx, accPath, accRec) if rc.isOk: ok() else: @@ -366,14 +375,14 @@ proc merge*( debug logTxt, api, elapsed, accPath=($$accPath), result proc hasPath*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[bool] = ## Would be named `contains` if it returned `bool` rather than `Result[]`. ## acc.setTrackNewApi AccHasPathFn result = block: - let rc = acc.call(hasPathAccount, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(hasPathAccount, acc.aTx, accPath) if rc.isOk: ok(rc.value) else: @@ -381,12 +390,12 @@ proc hasPath*( acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result -proc getStateRoot*(acc: CoreDbAccRef): CoreDbRc[Hash32] = +proc getStateRoot*(acc: CoreDbTxRef): CoreDbRc[Hash32] = ## This function retrieves the Merkle state hash of the accounts ## column (if available.) acc.setTrackNewApi AccStateFn result = block: - let rc = acc.call(fetchStateRoot, acc.mpt) + let rc = acc.ctx.parent.ariApi.call(fetchStateRoot, acc.aTx) if rc.isOk: ok(rc.value) else: @@ -396,7 +405,7 @@ proc getStateRoot*(acc: CoreDbAccRef): CoreDbRc[Hash32] = # ------------ storage --------------- proc slotProof*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[(seq[seq[byte]],bool)] = @@ -412,7 +421,7 @@ proc slotProof*( ## acc.setTrackNewApi AccSlotProofFn result = block: - let rc = acc.call(partStorageTwig, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(partStorageTwig, acc.aTx, accPath, stoPath) if rc.isOk: ok(rc.value) else: @@ -420,14 +429,14 @@ proc slotProof*( acc.ifTrackNewApi: debug logTxt, api, elapsed, result proc slotFetch*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[UInt256] = ## Like `fetch()` but with cascaded index `(accPath,slot)`. acc.setTrackNewApi AccSlotFetchFn result = block: - let rc = acc.call(fetchStorageData, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(fetchStorageData, acc.aTx, accPath, stoPath) if rc.isOk: ok(rc.value) elif rc.error == FetchPathNotFound: @@ -439,14 +448,14 @@ proc slotFetch*( stoPath=($$stoPath), result proc slotDelete*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[void] = ## Like `delete()` but with cascaded index `(accPath,slot)`. 
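Storage-slot access follows the same receiver change, with the cascaded `(accPath, stoPath)` index untouched. A sketch of a slot write-then-read under the new signatures in these hunks (helper name hypothetical):

proc slotRoundTrip(txFrame: CoreDbTxRef, accPath, stoPath: Hash32) =
  txFrame.slotMerge(accPath, stoPath, 1.u256).expect("slot stored")
  let stoData = txFrame.slotFetch(accPath, stoPath).expect("slot read back")
  doAssert stoData == 1.u256
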
acc.setTrackNewApi AccSlotDeleteFn result = block: - let rc = acc.call(deleteStorageData, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(deleteStorageData, acc.aTx, accPath, stoPath) if rc.isOk or rc.error == DelStoRootMissing: # The second `if` clause is insane but legit: A storage column was # announced for an account but no data have been added, yet. @@ -460,14 +469,14 @@ proc slotDelete*( stoPath=($$stoPath), result proc slotHasPath*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[bool] = ## Like `hasPath()` but with cascaded index `(accPath,slot)`. acc.setTrackNewApi AccSlotHasPathFn result = block: - let rc = acc.call(hasPathStorage, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(hasPathStorage, acc.aTx, accPath, stoPath) if rc.isOk: ok(rc.value) else: @@ -477,7 +486,7 @@ proc slotHasPath*( stoPath=($$stoPath), result proc slotMerge*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; stoData: UInt256; @@ -485,7 +494,7 @@ proc slotMerge*( ## Like `merge()` but with cascaded index `(accPath,slot)`. acc.setTrackNewApi AccSlotMergeFn result = block: - let rc = acc.call(mergeStorageData, acc.mpt, accPath, stoPath, stoData) + let rc = acc.ctx.parent.ariApi.call(mergeStorageData, acc.aTx, accPath, stoPath, stoData) if rc.isOk: ok() else: @@ -495,7 +504,7 @@ proc slotMerge*( stoPath=($$stoPath), stoData, result proc slotStorageRoot*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[Hash32] = ## This function retrieves the Merkle state hash of the storage data @@ -504,7 +513,7 @@ proc slotStorageRoot*( ## acc.setTrackNewApi AccSlotStorageRootFn result = block: - let rc = acc.call(fetchStorageRoot, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(fetchStorageRoot, acc.aTx, accPath) if rc.isOk: ok(rc.value) else: @@ -513,7 +522,7 @@ proc slotStorageRoot*( debug logTxt, api, elapsed, accPath=($$accPath), result proc slotStorageEmpty*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[bool] = ## This function returns `true` if the storage data column is empty or @@ -521,7 +530,7 @@ proc slotStorageEmpty*( ## acc.setTrackNewApi AccSlotStorageEmptyFn result = block: - let rc = acc.call(hasStorageData, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(hasStorageData, acc.aTx, accPath) if rc.isOk: ok(not rc.value) else: @@ -530,13 +539,13 @@ proc slotStorageEmpty*( debug logTxt, api, elapsed, accPath=($$accPath), result proc slotStorageEmptyOrVoid*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): bool = ## Convenience wrapper, returns `true` where `slotStorageEmpty()` would fail. acc.setTrackNewApi AccSlotStorageEmptyOrVoidFn result = block: - let rc = acc.call(hasStorageData, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(hasStorageData, acc.aTx, accPath) if rc.isOk: not rc.value else: @@ -547,7 +556,7 @@ proc slotStorageEmptyOrVoid*( # ------------- other ---------------- proc recast*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; accRec: CoreDbAccount; ): CoreDbRc[Account] = @@ -556,7 +565,7 @@ proc recast*( ## hash (see `slotStorageRoot()` above) is currently unavailable. 
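`recast()` is the bridge between the internal account record, which deliberately omits the storage root, and the wire-level `Account`. Roughly, under the signatures above (helper name hypothetical):

proc toWireAccount(txFrame: CoreDbTxRef, accPath: Hash32): Account =
  let accRec = txFrame.fetch(accPath).expect("account exists")
  # recast() re-attaches the Merkle storage root that the compact
  # internal record leaves out; it fails when that root is unavailable.
  txFrame.recast(accPath, accRec).expect("storage root available")
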
## acc.setTrackNewApi AccRecastFn - let rc = acc.call(fetchStorageRoot, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(fetchStorageRoot, acc.aTx, accPath) result = block: if rc.isOk: ok Account( @@ -574,21 +583,14 @@ proc recast*( # Public transaction related methods # ------------------------------------------------------------------------------ -proc txFrameLevel*(db: CoreDbRef): int = - ## Retrieve transaction level (zero if there is no pending transaction). - ## - db.setTrackNewApi BaseLevelFn - result = CoreDbAccRef(db.ctx).call(txFrameLevel, db.ctx.mpt) - db.ifTrackNewApi: debug logTxt, api, elapsed, result - -proc txFrameBegin*(ctx: CoreDbCtxRef): CoreDbTxRef = +proc txFrameBegin*(ctx: CoreDbCtxRef, parent: CoreDbTxRef): CoreDbTxRef = ## Constructor ## ctx.setTrackNewApi BaseNewTxFn let - kTx = CoreDbKvtRef(ctx).call(txFrameBegin, ctx.kvt).valueOr: + kTx = CoreDbKvtRef(ctx).call(txFrameBegin, ctx.kvt, if parent != nil: parent.kTx else: nil).valueOr: raiseAssert $api & ": " & $error - aTx = CoreDbAccRef(ctx).call(txFrameBegin, ctx.mpt).valueOr: + aTx = CoreDbAccRef(ctx).call(txFrameBegin, ctx.mpt, if parent != nil: parent.aTx else: nil).valueOr: raiseAssert $api & ": " & $error result = ctx.bless CoreDbTxRef(kTx: kTx, aTx: aTx) ctx.ifTrackNewApi: @@ -616,14 +618,21 @@ proc rollback*(tx: CoreDbTxRef) = proc dispose*(tx: CoreDbTxRef) = tx.setTrackNewApi TxDisposeFn: let prvLevel {.used.} = CoreDbAccRef(tx.ctx).call(level, tx.aTx) - if CoreDbAccRef(tx.ctx).call(isTop, tx.aTx): - CoreDbAccRef(tx.ctx).call(rollback, tx.aTx).isOkOr: - raiseAssert $api & ": " & $error - if CoreDbKvtRef(tx.ctx).call(isTop, tx.kTx): - CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr: - raiseAssert $api & ": " & $error + # if CoreDbAccRef(tx.ctx).call(isTop, tx.aTx): + CoreDbAccRef(tx.ctx).call(rollback, tx.aTx).isOkOr: + raiseAssert $api & ": " & $error + # if CoreDbKvtRef(tx.ctx).call(isTop, tx.kTx): + CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr: + raiseAssert $api & ": " & $error tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel +func reparent*(tx: CoreDbTxRef, parent: CoreDbTxRef) = + tx.aTx.parent = parent.aTx + tx.kTx.parent = parent.kTx + +proc txFrameBegin*(tx: CoreDbTxRef): CoreDbTxRef = + tx.ctx.txFrameBegin(tx) + # ------------------------------------------------------------------------------ # Public tracer methods # ------------------------------------------------------------------------------ diff --git a/nimbus/db/core_db/base/api_tracking.nim b/nimbus/db/core_db/base/api_tracking.nim index 5ebac1c885..954ba537d7 100644 --- a/nimbus/db/core_db/base/api_tracking.nim +++ b/nimbus/db/core_db/base/api_tracking.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -76,7 +76,6 @@ type TxCommitFn = "commit" TxDisposeFn = "dispose" - TxFrameLevelFn = "level" TxRollbackFn = "rollback" TxSaveDisposeFn = "safeDispose" diff --git a/nimbus/db/core_db/base/base_helpers.nim b/nimbus/db/core_db/base/base_helpers.nim index 01b12e8e62..8b43889e91 100644 --- a/nimbus/db/core_db/base/base_helpers.nim +++ b/nimbus/db/core_db/base/base_helpers.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, 
([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -53,6 +53,9 @@ proc bless*(ctx: CoreDbCtxRef; dsc: CoreDbTxRef): auto = template kvt*(dsc: CoreDbKvtRef): KvtDbRef = CoreDbCtxRef(dsc).kvt +template kvt*(tx: CoreDbTxRef): KvtDbRef = + tx.ctx.kvt + template ctx*(kvt: CoreDbKvtRef): CoreDbCtxRef = CoreDbCtxRef(kvt) diff --git a/nimbus/db/core_db/base_iterators.nim b/nimbus/db/core_db/base_iterators.nim index e896f15b3c..b791f50cf6 100644 --- a/nimbus/db/core_db/base_iterators.nim +++ b/nimbus/db/core_db/base_iterators.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -11,11 +11,9 @@ {.push raises: [].} import - std/typetraits, stint, eth/common/hashes, ../aristo as use_ari, - ../kvt as use_kvt, ./base/[api_tracking, base_config, base_desc] export stint, hashes @@ -34,50 +32,22 @@ when CoreDbEnableApiTracking: const logTxt = "API" -template dbType(dsc: CoreDbKvtRef | CoreDbAccRef): CoreDbType = - dsc.distinctBase.parent.dbType - # --------------- -template call(api: KvtApiRef; fn: untyped; args: varargs[untyped]): untyped = - when CoreDbEnableApiJumpTable: - api.fn(args) - else: - fn(args) - -template call(kvt: CoreDbKvtRef; fn: untyped; args: varargs[untyped]): untyped = - kvt.distinctBase.parent.kvtApi.call(fn, args) - -# --------------- - -template mpt(dsc: CoreDbAccRef): AristoDbRef = - dsc.distinctBase.mpt - template call(api: AristoApiRef; fn: untyped; args: varargs[untyped]): untyped = when CoreDbEnableApiJumpTable: api.fn(args) else: fn(args) -template call( - acc: CoreDbAccRef; - fn: untyped; - args: varargs[untyped]; - ): untyped = - acc.distinctBase.parent.ariApi.call(fn, args) - # ------------------------------------------------------------------------------ # Public iterators # ------------------------------------------------------------------------------ -iterator slotPairs*(acc: CoreDbAccRef; accPath: Hash32): (seq[byte], UInt256) = +iterator slotPairs*(acc: CoreDbTxRef; accPath: Hash32): (seq[byte], UInt256) = acc.setTrackNewApi AccSlotPairsIt - case acc.dbType: - of AristoDbMemory, AristoDbRocks, AristoDbVoid: - for (path,data) in acc.mpt.rightPairsStorage accPath: - yield (acc.call(pathAsBlob, path), data) - of Ooops: - raiseAssert: "Unsupported database type: " & $acc.dbType + for (path,data) in acc.aTx.rightPairsStorage accPath: + yield (acc.ctx.parent.ariApi.call(pathAsBlob, path), data) acc.ifTrackNewApi: debug logTxt, api, elapsed diff --git a/nimbus/db/core_db/core_apps.nim b/nimbus/db/core_db/core_apps.nim index 869a31e922..abf6fe76a6 100644 --- a/nimbus/db/core_db/core_apps.nim +++ b/nimbus/db/core_db/core_apps.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -36,27 +36,27 @@ type # ------------------------------------------------------------------------------ proc getBlockHeader*( - db: CoreDbRef; + db: CoreDbTxRef; n: BlockNumber; ): Result[Header, string] proc getBlockHeader*( - db: CoreDbRef, + db: CoreDbTxRef, blockHash: Hash32; ): Result[Header, string] proc getBlockHash*( - db: CoreDbRef; + db: CoreDbTxRef; n: BlockNumber; ): Result[Hash32, 
string] proc addBlockNumberToHashLookup*( - db: CoreDbRef; + db: CoreDbTxRef; blockNumber: BlockNumber; blockHash: Hash32; ) -proc getCanonicalHeaderHash*(db: CoreDbRef): Result[Hash32, string] +proc getCanonicalHeaderHash*(db: CoreDbTxRef): Result[Hash32, string] # ------------------------------------------------------------------------------ # Private helpers @@ -73,17 +73,16 @@ template wrapRlpException(info: static[string]; code: untyped) = # ------------------------------------------------------------------------------ iterator getBlockTransactionData*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32; ): seq[byte] = block body: if txRoot == EMPTY_ROOT_HASH: break body - let kvt = db.ctx.getKvt() for idx in 0'u16..blockHash - discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray) + discard db.del(blockNumberToHashKey(blockNum).toOpenArray) # delete blockHash->header, stateRoot->blockNum - discard kvt.del(genericHashKey(blockHash).toOpenArray) + discard db.del(genericHashKey(blockHash).toOpenArray) true proc getTransactionByIndex*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32; txIndex: uint16; ): Result[Transaction, string] = const info = "getTransaction()" - let kvt = db.ctx.getKvt() let key = hashIndexKey(txRoot, txIndex) - let txData = kvt.getOrEmpty(key).valueOr: + let txData = db.getOrEmpty(key).valueOr: return err($$error) if txData.len == 0: return err("tx data is empty for root=" & $txRoot & " and index=" & $txIndex) @@ -323,17 +316,16 @@ proc getTransactionByIndex*( return ok(rlp.decode(txData, Transaction)) proc getTransactionCount*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32; ): int = const info = "getTransactionCount()" - let kvt = db.ctx.getKvt() var txCount = 0'u16 while true: let key = hashIndexKey(txRoot, txCount) - let yes = kvt.hasKeyRc(key).valueOr: + let yes = db.hasKeyRc(key).valueOr: warn info, txRoot, key, error=($$error) return 0 if yes: @@ -344,7 +336,7 @@ proc getTransactionCount*( doAssert(false, "unreachable") proc getUnclesCount*( - db: CoreDbRef; + db: CoreDbTxRef; ommersHash: Hash32; ): Result[int, string] = const info = "getUnclesCount()" @@ -354,14 +346,14 @@ proc getUnclesCount*( wrapRlpException info: let encodedUncles = block: let key = genericHashKey(ommersHash) - db.ctx.getKvt().get(key.toOpenArray).valueOr: + db.get(key.toOpenArray).valueOr: if error.error != KvtNotFound: warn info, ommersHash, error=($$error) return ok(0) return ok(rlpFromBytes(encodedUncles).listLen) proc getUncles*( - db: CoreDbRef; + db: CoreDbTxRef; ommersHash: Hash32; ): Result[seq[Header], string] = const info = "getUncles()" @@ -371,29 +363,28 @@ proc getUncles*( wrapRlpException info: let encodedUncles = block: let key = genericHashKey(ommersHash) - db.ctx.getKvt().get(key.toOpenArray).valueOr: + db.get(key.toOpenArray).valueOr: if error.error != KvtNotFound: warn info, ommersHash, error=($$error) return ok(default(seq[Header])) return ok(rlp.decode(encodedUncles, seq[Header])) proc persistWithdrawals*( - db: CoreDbRef; + db: CoreDbTxRef; withdrawalsRoot: Hash32; withdrawals: openArray[Withdrawal]; ) = const info = "persistWithdrawals()" if withdrawals.len == 0: return - let kvt = db.ctx.getKvt() for idx, wd in withdrawals: let key = hashIndexKey(withdrawalsRoot, idx.uint16) - kvt.put(key, rlp.encode(wd)).isOkOr: + db.put(key, rlp.encode(wd)).isOkOr: warn info, idx, error=($$error) return proc getWithdrawals*( - db: CoreDbRef; + db: CoreDbTxRef; withdrawalsRoot: Hash32 ): Result[seq[Withdrawal], string] = wrapRlpException "getWithdrawals": @@ -403,7 
+394,7 @@ proc getWithdrawals*( return ok(res) proc getTransactions*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32 ): Result[seq[Transaction], string] = wrapRlpException "getTransactions": @@ -413,7 +404,7 @@ proc getTransactions*( return ok(res) proc getBlockBody*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; ): Result[BlockBody, string] = wrapRlpException "getBlockBody": @@ -427,14 +418,14 @@ proc getBlockBody*( return ok(body) proc getBlockBody*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; ): Result[BlockBody, string] = let header = ?db.getBlockHeader(blockHash) db.getBlockBody(header) proc getEthBlock*( - db: CoreDbRef; + db: CoreDbTxRef; hash: Hash32; ): Result[EthBlock, string] = var @@ -443,7 +434,7 @@ proc getEthBlock*( ok(EthBlock.init(move(header), move(blockBody))) proc getEthBlock*( - db: CoreDbRef; + db: CoreDbTxRef; blockNumber: BlockNumber; ): Result[EthBlock, string] = var @@ -454,7 +445,7 @@ proc getEthBlock*( proc getUncleHashes*( - db: CoreDbRef; + db: CoreDbTxRef; blockHashes: openArray[Hash32]; ): Result[seq[Hash32], string] = var res: seq[Hash32] @@ -464,7 +455,7 @@ proc getUncleHashes*( ok(res) proc getUncleHashes*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; ): Result[seq[Hash32], string] = if header.ommersHash != EMPTY_UNCLE_HASH: @@ -473,59 +464,56 @@ proc getUncleHashes*( wrapRlpException "getUncleHashes": let key = genericHashKey(header.ommersHash) - encodedUncles = db.ctx.getKvt().get(key.toOpenArray).valueOr: + encodedUncles = db.get(key.toOpenArray).valueOr: if error.error != KvtNotFound: warn "getUncleHashes()", ommersHash=header.ommersHash, error=($$error) return ok(default(seq[Hash32])) return ok(rlp.decode(encodedUncles, seq[Header]).mapIt(it.rlpHash)) proc getTransactionKey*( - db: CoreDbRef; + db: CoreDbTxRef; transactionHash: Hash32; ): Result[TransactionKey, string] = wrapRlpException "getTransactionKey": let txKey = transactionHashToBlockKey(transactionHash) - tx = db.ctx.getKvt().get(txKey.toOpenArray).valueOr: + tx = db.get(txKey.toOpenArray).valueOr: if error.error != KvtNotFound: warn "getTransactionKey()", transactionHash, error=($$error) return ok(default(TransactionKey)) return ok(rlp.decode(tx, TransactionKey)) -proc headerExists*(db: CoreDbRef; blockHash: Hash32): bool = +proc headerExists*(db: CoreDbTxRef; blockHash: Hash32): bool = ## Returns True if the header with the given block hash is in our DB. 
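All of these read helpers now take a `CoreDbTxRef` instead of a `CoreDbRef`, which turns state visibility into a caller decision: pass the base frame to read persisted data, or an in-flight block frame to read a speculative branch. A toy version of the convention; `getHeaderRlp` and the `h:` key prefix are hypothetical stand-ins for the real helpers and key derivation:

import std/[options, tables]

type TxFrame = ref object
  kv: Table[string, string]
  parent: TxFrame

proc get(db: TxFrame, key: string): Option[string] =
  # A read resolves in the frame you pass, falling back to its ancestors.
  var f = db
  while f != nil:
    if key in f.kv: return some(f.kv[key])
    f = f.parent
  none(string)

proc getHeaderRlp(db: TxFrame, blockHash: string): Option[string] =
  db.get("h:" & blockHash)   # stand-in for the genericHashKey lookup

let base = TxFrame()
let branch = TxFrame(parent: base)
branch.kv["h:0xbeef"] = "rlp-bytes"          # header exists only on the branch
assert getHeaderRlp(base, "0xbeef").isNone   # the base frame cannot see it
assert getHeaderRlp(branch, "0xbeef").isSome # the branch frame can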
- db.ctx.getKvt().hasKeyRc(genericHashKey(blockHash).toOpenArray).valueOr: + db.hasKeyRc(genericHashKey(blockHash).toOpenArray).valueOr: if error.error != KvtNotFound: warn "headerExists()", blockHash, error=($$error) return false # => true/false proc setHead*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; ): Result[void, string] = let canonicalHeadHash = canonicalHeadHashKey() - db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr: + db.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr: return err($$error) ok() proc setHead*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; - writeHeader = false; + headerHash: Hash32; ): Result[void, string] = - var headerHash = rlpHash(header) - let kvt = db.ctx.getKvt() - if writeHeader: - kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr: - return err($$error) + db.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr: + return err($$error) let canonicalHeadHash = canonicalHeadHashKey() - kvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr: + db.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr: return err($$error) ok() proc persistReceipts*( - db: CoreDbRef; + db: CoreDbTxRef; receiptsRoot: Hash32; receipts: openArray[Receipt]; ) = @@ -533,14 +521,13 @@ proc persistReceipts*( if receipts.len == 0: return - let kvt = db.ctx.getKvt() for idx, rec in receipts: let key = hashIndexKey(receiptsRoot, idx.uint16) - kvt.put(key, rlp.encode(rec)).isOkOr: + db.put(key, rlp.encode(rec)).isOkOr: warn info, idx, error=($$error) proc getReceipts*( - db: CoreDbRef; + db: CoreDbTxRef; receiptsRoot: Hash32; ): Result[seq[Receipt], string] = wrapRlpException "getReceipts": @@ -550,21 +537,20 @@ proc getReceipts*( return ok(receipts) proc persistScore*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; score: UInt256 ): Result[void, string] = const info = "persistScore" let - kvt = db.ctx.getKvt() scoreKey = blockHashToScoreKey(blockHash) - kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr: + db.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr: return err(info & ": " & $$error) ok() proc persistHeader*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; header: Header; startOfHistory = GENESIS_PARENT_HASH; @@ -572,13 +558,12 @@ proc persistHeader*( const info = "persistHeader" let - kvt = db.ctx.getKvt() isStartOfHistory = header.parentHash == startOfHistory if not isStartOfHistory and not db.headerExists(header.parentHash): return err(info & ": parent header missing number " & $header.number) - kvt.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr: + db.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr: return err(info & ": " & $$error) let @@ -599,7 +584,7 @@ proc persistHeader*( ok() proc persistHeaderAndSetHead*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; header: Header; startOfHistory = GENESIS_PARENT_HASH; @@ -621,7 +606,7 @@ proc persistHeaderAndSetHead*( db.setHead(blockHash) proc persistHeaderAndSetHead*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; startOfHistory = GENESIS_PARENT_HASH; ): Result[void, string] = @@ -629,43 +614,43 @@ proc persistHeaderAndSetHead*( blockHash = header.blockHash db.persistHeaderAndSetHead(blockHash, header, startOfHistory) -proc persistUncles*(db: CoreDbRef, uncles: openArray[Header]): Hash32 = +proc persistUncles*(db: CoreDbTxRef, uncles: openArray[Header]): Hash32 = ## Persists the list of uncles to the database. 
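The reworked `setHead` above also changes shape: the `writeHeader` flag is gone, the caller supplies the already-computed `headerHash`, the header is always stored, and the canonical-head key is repointed on the same frame, so both writes share one commit/rollback fate. A compressed model of that sequence (key names are illustrative stand-ins for the real key helpers):

import std/tables

type TxFrame = ref object
  kv: Table[string, string]

const canonicalHeadKey = "canonicalHead"   # ~ canonicalHeadHashKey()

proc put(db: TxFrame, k, v: string) = db.kv[k] = v

proc setHead(db: TxFrame, headerRlp, headerHash: string) =
  db.put("h:" & headerHash, headerRlp)   # always persist the header itself
  db.put(canonicalHeadKey, headerHash)   # then repoint the canonical head

let f = TxFrame()
f.setHead("rlp-of-header", "0xfeed")
assert f.kv[canonicalHeadKey] == "0xfeed"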
## Returns the uncles hash. let enc = rlp.encode(uncles) result = keccak256(enc) - db.ctx.getKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr: + db.put(genericHashKey(result).toOpenArray, enc).isOkOr: warn "persistUncles()", unclesHash=result, error=($$error) return EMPTY_ROOT_HASH -proc safeHeaderHash*(db: CoreDbRef): Hash32 = +proc safeHeaderHash*(db: CoreDbTxRef): Hash32 = db.getHash(safeHashKey()).valueOr(default(Hash32)) -proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash32) = +proc safeHeaderHash*(db: CoreDbTxRef, headerHash: Hash32) = let safeHashKey = safeHashKey() - db.ctx.getKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: + db.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: warn "safeHeaderHash()", safeHashKey, error=($$error) return proc finalizedHeaderHash*( - db: CoreDbRef; + db: CoreDbTxRef; ): Hash32 = db.getHash(finalizedHashKey()).valueOr(default(Hash32)) -proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash32) = +proc finalizedHeaderHash*(db: CoreDbTxRef, headerHash: Hash32) = let finalizedHashKey = finalizedHashKey() - db.ctx.getKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: + db.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: warn "finalizedHeaderHash()", finalizedHashKey, error=($$error) return proc safeHeader*( - db: CoreDbRef; + db: CoreDbTxRef; ): Result[Header, string] = db.getBlockHeader(db.safeHeaderHash) proc finalizedHeader*( - db: CoreDbRef; + db: CoreDbTxRef; ): Result[Header, string] = db.getBlockHeader(db.finalizedHeaderHash) diff --git a/nimbus/db/kvt/kvt_api.nim b/nimbus/db/kvt/kvt_api.nim index 93dbe79ff9..05b8b93edd 100644 --- a/nimbus/db/kvt/kvt_api.nim +++ b/nimbus/db/kvt/kvt_api.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -41,26 +41,23 @@ type ## Borrowed from `aristo_profile` KvtApiCommitFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.} - KvtApiDelFn* = proc(db: KvtDbRef, + KvtApiDelFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[void,KvtError] {.noRaise.} KvtApiFinishFn* = proc(db: KvtDbRef, eradicate = false) {.noRaise.} KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.} - KvtApiGetFn* = proc(db: KvtDbRef, + KvtApiGetFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[seq[byte],KvtError] {.noRaise.} - KvtApiLenFn* = proc(db: KvtDbRef, + KvtApiLenFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[int,KvtError] {.noRaise.} - KvtApiHasKeyRcFn* = proc(db: KvtDbRef, + KvtApiHasKeyRcFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[bool,KvtError] {.noRaise.} - KvtApiIsTopFn* = proc(tx: KvtTxRef): bool {.noRaise.} - KvtApiTxFrameLevelFn* = proc(db: KvtDbRef): int {.noRaise.} - KvtApiPutFn* = proc(db: KvtDbRef, + KvtApiPutFn* = proc(db: KvtTxRef, key, data: openArray[byte]): Result[void,KvtError] {.noRaise.} KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.} KvtApiPersistFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.} KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.} - KvtApiTxFrameBeginFn* = proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.} - KvtApiTxFrameTopFn* = - proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.} + KvtApiTxFrameBeginFn* = proc(db: KvtDbRef, parent: KvtTxRef): Result[KvtTxRef,KvtError] 
{.noRaise.} + KvtApiBaseTxFrameFn* = proc(db: KvtDbRef): KvtTxRef {.noRaise.} KvtApiRef* = ref KvtApiObj KvtApiObj* = object of RootObj @@ -72,14 +69,12 @@ type get*: KvtApiGetFn len*: KvtApiLenFn hasKeyRc*: KvtApiHasKeyRcFn - isTop*: KvtApiIsTopFn - txFrameLevel*: KvtApiTxFrameLevelFn put*: KvtApiPutFn rollback*: KvtApiRollbackFn persist*: KvtApiPersistFn toKvtDbRef*: KvtApiToKvtDbRefFn txFrameBegin*: KvtApiTxFrameBeginFn - txFrameTop*: KvtApiTxFrameTopFn + baseTxFrame*: KvtApiBaseTxFrameFn KvtApiProfNames* = enum @@ -92,14 +87,12 @@ type KvtApiProfGetFn = "get" KvtApiProfLenFn = "len" KvtApiProfHasKeyRcFn = "hasKeyRc" - KvtApiProfIsTopFn = "isTop" - KvtApiProfLevelFn = "level" KvtApiProfPutFn = "put" KvtApiProfRollbackFn = "rollback" KvtApiProfPersistFn = "persist" KvtApiProfToKvtDbRefFn = "toKvtDbRef" KvtApiProfTxFrameBeginFn = "txFrameBegin" - KvtApiProfTxFrameTopFn = "txFrameTop" + KvtApiProfBaseTxFrameFn = "baseTxFrame" KvtApiProfBeGetKvpFn = "be/getKvp" KvtApiProfBeLenKvpFn = "be/lenKvp" @@ -149,14 +142,13 @@ func init*(api: var KvtApiObj) = api.get = get api.len = len api.hasKeyRc = hasKeyRc - api.isTop = isTop - api.txFrameLevel = txFrameLevel api.put = put api.rollback = rollback api.persist = persist api.toKvtDbRef = toKvtDbRef api.txFrameBegin = txFrameBegin - api.txFrameTop = txFrameTop + api.baseTxFrame = baseTxFrame + when AutoValidateApiHooks: api.validate @@ -226,16 +218,6 @@ func init*( KvtApiProfHasKeyRcFn.profileRunner: result = api.hasKeyRc(a, b) - profApi.isTop = - proc(a: KvtTxRef): auto = - KvtApiProfIsTopFn.profileRunner: - result = api.isTop(a) - - profApi.level = - proc(a: KvtDbRef): auto = - KvtApiProfLevelFn.profileRunner: - result = api.level(a) - profApi.put = proc(a: KvtDbRef; b, c: openArray[byte]): auto = KvtApiProfPutFn.profileRunner: @@ -261,11 +243,6 @@ func init*( KvtApiProfTxFrameBeginFn.profileRunner: result = api.txFrameBegin(a) - profApi.txFrameTop = - proc(a: KvtDbRef): auto = - KvtApiProfTxFrameTopFn.profileRunner: - result = api.txFrameTop(a) - let beDup = be.dup() if beDup.isNil: profApi.be = be diff --git a/nimbus/db/kvt/kvt_delta.nim b/nimbus/db/kvt/kvt_delta.nim index a5e084af3b..b9a4406db3 100644 --- a/nimbus/db/kvt/kvt_delta.nim +++ b/nimbus/db/kvt/kvt_delta.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -23,7 +23,7 @@ import # ------------------------------------------------------------------------------ proc deltaPersistentOk*(db: KvtDbRef): bool = - ## Check whether the balancer filter can be merged into the backend + ## Check whether txRef can be merged into the backend not db.backend.isNil @@ -45,17 +45,17 @@ proc deltaPersistent*( return err(FilBackendMissing) # Blind or missing filter - if db.balancer.isNil: + if db.txRef.isNil: return ok() # Store structural single trie entries let writeBatch = ? be.putBegFn() - for k,v in db.balancer.sTab: + for k,v in db.txRef.layer.sTab: be.putKvpFn(writeBatch, k, v) ? 
be.putEndFn writeBatch - # Done with balancer, all saved to backend - db.balancer = LayerRef(nil) + # Done with txRef, all saved to backend + db.txRef.layer.sTab.clear() ok() diff --git a/nimbus/db/kvt/kvt_delta/delta_merge.nim b/nimbus/db/kvt/kvt_delta/delta_merge.nim deleted file mode 100644 index 4cde2d39da..0000000000 --- a/nimbus/db/kvt/kvt_delta/delta_merge.nim +++ /dev/null @@ -1,51 +0,0 @@ -# nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -import - ../kvt_desc - -# ------------------------------------------------------------------------------ -# Private functions -# ------------------------------------------------------------------------------ - -proc layersMergeOnto(src: LayerRef; trg: var LayerObj) = - for (key,val) in src.sTab.pairs: - trg.sTab[key] = val - -# ------------------------------------------------------------------------------ -# Public functions -# ------------------------------------------------------------------------------ - -proc deltaMerge*( - upper: LayerRef; # Think of `top`, `nil` is ok - lower: LayerRef; # Think of `balancer`, `nil` is ok - ): LayerRef = - ## Merge argument `upper` into the `lower` filter instance. - ## - ## Note that the namimg `upper` and `lower` indicate that the filters are - ## stacked and the database access is `upper -> lower -> backend`. - ## - if lower.isNil: - # Degenerate case: `upper` is void - upper - - elif upper.isNil: - # Degenerate case: `lower` is void - lower - - else: - # Can modify `lower` which is the prefered action mode but applies only - # in cases where the `lower` argument is not shared. - layersMergeOnto(upper, lower[]) - lower - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/kvt/kvt_desc.nim b/nimbus/db/kvt/kvt_desc.nim index 0f007e20e8..ab33bf36d0 100644 --- a/nimbus/db/kvt/kvt_desc.nim +++ b/nimbus/db/kvt/kvt_desc.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -30,18 +30,15 @@ type ## Transaction descriptor db*: KvtDbRef ## Database descriptor parent*: KvtTxRef ## Previous transaction - txUid*: uint ## Unique ID among transactions - level*: int ## Stack index for this transaction + layer*: LayerRef KvtDbRef* = ref object of RootRef ## Three tier database object supporting distributed instances. 
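In the new layout, a `KvtDbRef` no longer juggles a mutable `top`, a `stack` of stashed parent layers and a `balancer`: the pending writes live in the base frame's layer, and `deltaPersistent` simply drains that layer's `sTab` into a backend write batch and clears it. A toy rendition of the flush, with all types as simplified stand-ins:

import std/tables

type
  Layer = ref object
    sTab: Table[string, string]   # structural single trie entries
  KvtTx = ref object
    layer: Layer                  # the base frame's pending writes
  Backend = ref object
    stored: Table[string, string]

proc deltaPersistent(txRef: KvtTx, be: Backend) =
  # Drain the write set into a backend batch, then clear it, mirroring
  # the sTab loop and the final `clear()` in the hunk above.
  for k, v in txRef.layer.sTab:
    be.stored[k] = v              # stands in for putKvpFn on a write batch
  txRef.layer.sTab.clear()

let tx = KvtTx(layer: Layer())
let be = Backend()
tx.layer.sTab["a"] = "1"
deltaPersistent(tx, be)
assert be.stored["a"] == "1" and tx.layer.sTab.len == 0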
- top*: LayerRef ## Database working layer, mutable - stack*: seq[LayerRef] ## Stashed immutable parent layers - balancer*: LayerRef ## Balance out concurrent backend access backend*: BackendRef ## Backend database (may well be `nil`) - txRef*: KvtTxRef ## Latest active transaction - txUidGen*: uint ## Tx-relative unique number generator + txRef*: KvtTxRef + ## Tx holding data scheduled to be written to disk during the next + ## `persist` call # Debugging data below, might go away in future xIdGen*: uint64 @@ -68,19 +65,15 @@ func isValid*(layer: LayerRef): bool = # Public functions, miscellaneous # ------------------------------------------------------------------------------ -# Hash set helper -func hash*(db: KvtDbRef): Hash = - ## Table/KeyedQueue/HashSet mixin - cast[pointer](db).hash +# Don't put in a hash! +func hash*(db: KvtDbRef): Hash {.error.} -# ------------------------------------------------------------------------------ -# Public functions, `dude` related -# ------------------------------------------------------------------------------ - -iterator rstack*(db: KvtDbRef): LayerRef = +iterator rstack*(tx: KvtTxRef): LayerRef = + var tx = tx # Stack in reverse order - for i in 0.. 0: - ac.ledger.slotMerge(acc.toAccountKey, slotKey, value).isOkOr: + ac.txFrame.slotMerge(acc.toAccountKey, slotKey, value).isOkOr: raiseAssert info & $$error # move the overlayStorage to originalStorage, related to EIP2200, EIP1283 acc.originalStorage[slot] = value else: - ac.ledger.slotDelete(acc.toAccountKey, slotKey).isOkOr: + ac.txFrame.slotDelete(acc.toAccountKey, slotKey).isOkOr: if error.error != StoNotFound: raiseAssert info & $$error discard @@ -332,7 +331,7 @@ proc persistStorage(acc: AccountRef, ac: LedgerRef) = # over.. let key = slotKey.data.slotHashToSlotKey - rc = ac.kvt.put(key.toOpenArray, blobify(slot).data) + rc = ac.txFrame.put(key.toOpenArray, blobify(slot).data) if rc.isErr: warn logTxt "persistStorage()", slot, error=($$rc.error) @@ -358,17 +357,16 @@ proc makeDirty(ac: LedgerRef, address: Address, cloneStorage = true): AccountRef # ------------------------------------------------------------------------------ # The LedgerRef is modeled after TrieDatabase for its transaction style -proc init*(x: typedesc[LedgerRef], db: CoreDbRef, storeSlotHash: bool): LedgerRef = +proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef, storeSlotHash: bool): LedgerRef = new result - result.ledger = db.ctx.getAccounts() - result.kvt = db.ctx.getKvt() + result.txFrame = db result.witnessCache = Table[Address, WitnessData]() result.storeSlotHash = storeSlotHash result.code = typeof(result.code).init(codeLruSize) result.slots = typeof(result.slots).init(slotsLruSize) discard result.beginSavepoint -proc init*(x: typedesc[LedgerRef], db: CoreDbRef): LedgerRef = +proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef): LedgerRef = init(x, db, false) proc getStateRoot*(ac: LedgerRef): Hash32 = @@ -376,7 +374,7 @@ proc getStateRoot*(ac: LedgerRef): Hash32 = doAssert(ac.savePoint.parentSavepoint.isNil) # make sure all cache already committed doAssert(ac.isDirty == false) - ac.ledger.getStateRoot().expect("working database") + ac.txFrame.getStateRoot().expect("working database") proc isTopLevelClean*(ac: LedgerRef): bool = ## Getter, returns `true` if all pending data have been committed.
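After this change a `LedgerRef` keeps exactly one handle: `txFrame` replaces both the old `ledger` (accounts trie) and `kvt` (raw key-value) members, so trie writes and byte-code/slot-hash writes land on the same frame and share its commit/rollback fate. Schematically, with illustrative types only:

import std/tables

type
  TxFrame = ref object
    accounts: Table[string, int]   # stands in for the accounts trie view
    kv: Table[string, string]      # stands in for the raw KVT view
  Ledger = ref object
    txFrame: TxFrame               # the single handle LedgerRef now keeps

proc init(T: type Ledger, db: TxFrame): T = T(txFrame: db)

let frame = TxFrame()
let ac = Ledger.init(frame)
ac.txFrame.accounts["alice"] = 100     # trie-side write
ac.txFrame.kv["code:alice"] = "0x60"   # kvt-side write, same frame
assert ac.txFrame.accounts["alice"] == 100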
@@ -464,7 +462,7 @@ proc getCode*(ac: LedgerRef, acc.code = if acc.statement.codeHash != EMPTY_CODE_HASH: ac.code.get(acc.statement.codeHash).valueOr: - var rc = ac.kvt.get(contractHashKey(acc.statement.codeHash).toOpenArray) + var rc = ac.txFrame.get(contractHashKey(acc.statement.codeHash).toOpenArray) if rc.isErr: warn logTxt "getCode()", codeHash=acc.statement.codeHash, error=($$rc.error) CodeBytesRef() @@ -494,7 +492,7 @@ proc getCodeSize*(ac: LedgerRef, address: Address): int = # cached and easily accessible in the database layer - this is to prevent # EXTCODESIZE calls from messing up the code cache and thus causing # recomputation of the jump destination table - var rc = ac.kvt.len(contractHashKey(acc.statement.codeHash).toOpenArray) + var rc = ac.txFrame.len(contractHashKey(acc.statement.codeHash).toOpenArray) return rc.valueOr: warn logTxt "getCodeSize()", codeHash=acc.statement.codeHash, error=($$rc.error) @@ -526,7 +524,7 @@ proc contractCollision*(ac: LedgerRef, address: Address): bool = return acc.statement.nonce != 0 or acc.statement.codeHash != EMPTY_CODE_HASH or - not ac.ledger.slotStorageEmptyOrVoid(acc.toAccountKey) + not ac.txFrame.slotStorageEmptyOrVoid(acc.toAccountKey) proc accountExists*(ac: LedgerRef, address: Address): bool = let acc = ac.getAccount(address, false) @@ -612,11 +610,11 @@ proc clearStorage*(ac: LedgerRef, address: Address) = let acc = ac.getAccount(address) acc.flags.incl {Alive, NewlyCreated} - let empty = ac.ledger.slotStorageEmpty(acc.toAccountKey).valueOr: return + let empty = ac.txFrame.slotStorageEmpty(acc.toAccountKey).valueOr: return if not empty: # need to clear the storage from the database first let acc = ac.makeDirty(address, cloneStorage = false) - ac.ledger.clearStorage(acc.toAccountKey).isOkOr: + ac.txFrame.clearStorage(acc.toAccountKey).isOkOr: raiseAssert info & $$error # update caches if acc.originalStorage.isNil.not: @@ -704,10 +702,10 @@ proc persist*(ac: LedgerRef, else: # This one is only necessary unless `persistStorage()` is run which needs # to `merge()` the latest statement as well. 
- ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr: + ac.txFrame.merge(acc.toAccountKey, acc.statement).isOkOr: raiseAssert info & $$error of Remove: - ac.ledger.delete(acc.toAccountKey).isOkOr: + ac.txFrame.delete(acc.toAccountKey).isOkOr: if error.error != AccNotFound: raiseAssert info & $$error ac.savePoint.cache.del eAddr @@ -744,14 +742,14 @@ iterator accounts*(ac: LedgerRef): Account = # make sure all savepoint already committed doAssert(ac.savePoint.parentSavepoint.isNil) for _, acc in ac.savePoint.cache: - yield ac.ledger.recast( + yield ac.txFrame.recast( acc.toAccountKey, acc.statement).value iterator pairs*(ac: LedgerRef): (Address, Account) = # make sure all savepoint already committed doAssert(ac.savePoint.parentSavepoint.isNil) for address, acc in ac.savePoint.cache: - yield (address, ac.ledger.recast( + yield (address, ac.txFrame.recast( acc.toAccountKey, acc.statement).value) iterator storage*( ac: LedgerRef; eAddr: Address; ): (UInt256, UInt256) = # beware that if the account is not persisted, # the storage root will not be updated - for (slotHash, value) in ac.ledger.slotPairs eAddr.toAccountKey: - let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray) + for (slotHash, value) in ac.txFrame.slotPairs eAddr.toAccountKey: + let rc = ac.txFrame.get(slotHashToSlotKey(slotHash).toOpenArray) if rc.isErr: warn logTxt "storage()", slotHash, error=($$rc.error) continue @@ -783,7 +781,7 @@ proc getStorageRoot*(ac: LedgerRef, address: Address): Hash32 = # the storage root will not be updated let acc = ac.getAccount(address, false) if acc.isNil: EMPTY_ROOT_HASH - else: ac.ledger.slotStorageRoot(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH + else: ac.txFrame.slotStorageRoot(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH proc update(wd: var WitnessData, acc: AccountRef) = # once the code is touched make sure it doesn't get reset back to false in another update @@ -877,13 +875,13 @@ proc getEthAccount*(ac: LedgerRef, address: Address): Account = return emptyEthAccount ## Convert to legacy object, will throw an assert if that fails - let rc = ac.ledger.recast(acc.toAccountKey, acc.statement) + let rc = ac.txFrame.recast(acc.toAccountKey, acc.statement) if rc.isErr: raiseAssert "getAccount(): cannot convert account: " & $$rc.error rc.value proc getAccountProof*(ac: LedgerRef, address: Address): seq[seq[byte]] = - let accProof = ac.ledger.proof(address.toAccountKey).valueOr: + let accProof = ac.txFrame.proof(address.toAccountKey).valueOr: raiseAssert "Failed to get account proof: " & $$error accProof[0] @@ -893,7 +891,7 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] let addressHash = address.toAccountKey - accountExists = ac.ledger.hasPath(addressHash).valueOr: + accountExists = ac.txFrame.hasPath(addressHash).valueOr: raiseAssert "Call to hasPath failed: " & $$error for slot in slots: @@ -904,7 +902,7 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] let slotKey = ac.slots.get(slot).valueOr: slot.toBytesBE.keccak256 - slotProof = ac.ledger.slotProof(addressHash, slotKey).valueOr: + slotProof = ac.txFrame.slotProof(addressHash, slotKey).valueOr: if error.aErr == FetchPathNotFound: storageProof.add(@[]) continue diff --git a/nimbus/evm/evm_errors.nim b/nimbus/evm/evm_errors.nim index fb32f286d5..72050fd165 100644 --- a/nimbus/evm/evm_errors.nim +++ b/nimbus/evm/evm_errors.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research &
Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -33,7 +33,6 @@ type InvalidJumpDest OutOfBounds InvalidInitCode - EvmHeaderNotFound EvmInvalidParam EvmErrorObj* = object diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim index f8b4bf8344..06634c1d7c 100644 --- a/nimbus/evm/state.nim +++ b/nimbus/evm/state.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -79,6 +79,7 @@ proc new*( parent: Header; ## parent header, account sync position blockCtx: BlockContext; com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false): T = ## Create a new `BaseVMState` descriptor from a parent block header. This @@ -90,7 +91,7 @@ proc new*( ## with the `parent` block header. new result result.init( - ac = LedgerRef.init(com.db, storeSlotHash), + ac = LedgerRef.init(txFrame, storeSlotHash), parent = parent, blockCtx = blockCtx, com = com, @@ -99,7 +100,6 @@ proc reinit*(self: BaseVMState; ## Object descriptor parent: Header; ## parent header, account sync pos. blockCtx: BlockContext; - linear: bool ): bool = ## Re-initialise state descriptor. The `LedgerRef` database is ## re-initialised only if its `getStateRoot()` does not point to `parent.stateRoot`, @@ -116,9 +116,7 @@ proc reinit*(self: BaseVMState; ## Object descriptor let tracer = self.tracer com = self.com - db = com.db - ac = if linear or self.ledger.getStateRoot() == parent.stateRoot: self.ledger - else: LedgerRef.init(db, self.ledger.storeSlotHash) + ac = self.ledger flags = self.flags self.init( ac = ac, @@ -132,7 +130,6 @@ proc reinit*(self: BaseVMState; ## Object descriptor parent: Header; ## parent header, account sync pos. header: Header; ## header with tx environment data fields - linear: bool ): bool = ## Variant of `reinit()`. The `parent` argument is used to sync the accounts ## cache and the `header` is used as a container to pass the `timestamp`, @@ -143,7 +140,6 @@ proc reinit*(self: BaseVMState; ## Object descriptor self.reinit( parent = parent, blockCtx = blockCtx(header), - linear = linear ) proc init*( @@ -151,6 +147,7 @@ proc init*( parent: Header; ## parent header, account sync position header: Header; ## header with tx environment data fields com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false) = ## Variant of `new()` constructor above for in-place initialisation. The @@ -161,7 +158,7 @@ proc init*( ## It requires the `header` argument properly initialised so that for PoA ## networks, the miner address is retrievable via `ecRecover()`.
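With the header-only `BaseVMState.new` removed further below in this file, every caller must now resolve the parent header itself and decide which frame the ledger writes into; construction can no longer fail on a dangling parent hash because the lookup happens before the constructor runs. A schematic of the new call shape, where all types are simplified stand-ins:

import std/[options, tables]

type
  Header = object
    number: uint64
    parentHash: string
  TxFrame = ref object
    headers: Table[string, Header]
  VmState = ref object
    parent, header: Header
    txFrame: TxFrame

proc getBlockHeader(db: TxFrame, hash: string): Option[Header] =
  if hash in db.headers: some(db.headers[hash]) else: none(Header)

proc newVmState(parent, header: Header, txFrame: TxFrame): VmState =
  VmState(parent: parent, header: header, txFrame: txFrame)

let frame = TxFrame()
frame.headers["0xp"] = Header(number: 9, parentHash: "0xg")
let header = Header(number: 10, parentHash: "0xp")
# The caller resolves the parent first; only then is the state constructed.
let parent = frame.getBlockHeader(header.parentHash).get()
let vmState = newVmState(parent, header, frame)
assert vmState.parent.number + 1 == vmState.header.number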
self.init( - ac = LedgerRef.init(com.db, storeSlotHash), + ac = LedgerRef.init(txFrame, storeSlotHash), parent = parent, blockCtx = blockCtx(header), com = com, @@ -172,6 +169,7 @@ proc new*( parent: Header; ## parent header, account sync position header: Header; ## header with tx environment data fields com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false): T = ## This is a variant of the `new()` constructor above where the `parent` @@ -185,46 +183,10 @@ proc new*( parent = parent, header = header, com = com, + txFrame = txFrame, tracer = tracer, storeSlotHash = storeSlotHash) -proc new*( - T: type BaseVMState; - header: Header; ## header with tx environment data fields - com: CommonRef; ## block chain config - tracer: TracerRef = nil, - storeSlotHash = false): EvmResult[T] = - ## This is a variant of the `new()` constructor above where the field - ## `header.parentHash`, is used to fetch the `parent` Header to be - ## used in the `new()` variant, above. - let parent = com.db.getBlockHeader(header.parentHash).valueOr: - return err(evmErr(EvmHeaderNotFound)) - - ok(BaseVMState.new( - parent = parent, - header = header, - com = com, - tracer = tracer, - storeSlotHash = storeSlotHash)) - -proc init*( - vmState: BaseVMState; - header: Header; ## header with tx environment data fields - com: CommonRef; ## block chain config - tracer: TracerRef = nil, - storeSlotHash = false): bool = - ## Variant of `new()` which does not throw an exception on a dangling - ## `Header` parent hash reference. - let parent = com.db.getBlockHeader(header.parentHash).valueOr: - return false - vmState.init( - parent = parent, - header = header, - com = com, - tracer = tracer, - storeSlotHash = storeSlotHash) - return true - func coinbase*(vmState: BaseVMState): Address = vmState.blockCtx.coinbase @@ -238,7 +200,7 @@ proc proofOfStake*(vmState: BaseVMState): bool = number: vmState.blockNumber, parentHash: vmState.blockCtx.parentHash, difficulty: vmState.blockCtx.difficulty, - )) + ), vmState.ledger.txFrame) proc difficultyOrPrevRandao*(vmState: BaseVMState): UInt256 = if vmState.proofOfStake(): @@ -252,7 +214,7 @@ func baseFeePerGas*(vmState: BaseVMState): UInt256 = method getAncestorHash*( vmState: BaseVMState, blockNumber: BlockNumber): Hash32 {.gcsafe, base.} = - let db = vmState.com.db + let db = vmState.ledger.txFrame let blockHash = db.getBlockHash(blockNumber).valueOr: return default(Hash32) blockHash diff --git a/nimbus/graphql/ethapi.nim b/nimbus/graphql/ethapi.nim index 9a337a327e..fa5671edcc 100644 --- a/nimbus/graphql/ethapi.nim +++ b/nimbus/graphql/ethapi.nim @@ -1,5 +1,5 @@ # nim-graphql -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) # * MIT license ([LICENSE-MIT](LICENSE-MIT)) @@ -152,7 +152,7 @@ proc getLedger(com: CommonRef, header: Header): LedgerRef {.deprecated: "Ledger ## Retrieves the account db from canonical head ## we don't use accounts_cache here because these are read-only operations # TODO the ledger initialized here refers to the base, not the given header! - LedgerRef.init(com.db) + LedgerRef.init(com.db.ctx.txFrameBegin(nil)) # TODO use frame from forkedchain!
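The stop-gap above spins up a fresh frame with `txFrameBegin(nil)` purely to satisfy `LedgerRef.init`; since nothing ever commits it, any writes die with the frame. A sketch of that disposable-frame idiom, with simplified types where `dispose` just drops the overlay:

import std/tables

type TxFrame = ref object
  kv: Table[string, string]
  parent: TxFrame

proc txFrameBegin(parent: TxFrame): TxFrame = TxFrame(parent: parent)
proc dispose(tx: TxFrame) = tx.kv.clear()   # rollback: forget this overlay

let scratch = txFrameBegin(nil)   # frame with no parent, as in getLedger
scratch.kv["probe"] = "value"     # reads/writes stay private to this frame
scratch.dispose()
assert scratch.kv.len == 0        # nothing ever reached a shared state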
proc getBlockByNumber(ctx: GraphqlContextRef, number: Node): RespResult = try: @@ -181,7 +181,8 @@ proc getLatestBlock(ctx: GraphqlContextRef): RespResult = ok(headerNode(ctx, header)) proc getTxCount(ctx: GraphqlContextRef, txRoot: Hash32): RespResult = - let txCount = ctx.chainDB.getTransactionCount(txRoot) + # TODO forkedchain! + let txCount = ctx.chainDB.baseTxFrame().getTransactionCount(txRoot) ok(resp(txCount)) proc longNode(val: uint64 | int64): RespResult = @@ -234,17 +235,17 @@ proc resp(data: openArray[byte]): RespResult = ok(resp("0x" & data.toHex)) proc getTotalDifficulty(ctx: GraphqlContextRef, blockHash: Hash32): RespResult = - let score = getScore(ctx.chainDB, blockHash).valueOr: + let score = getScore(ctx.chainDB.baseTxFrame(), blockHash).valueOr: return err("can't get total difficulty") bigIntNode(score) proc getOmmerCount(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = - let ommers = ?ctx.chainDB.getUnclesCount(ommersHash) + let ommers = ?ctx.chainDB.baseTxFrame().getUnclesCount(ommersHash) ok(resp(ommers)) proc getOmmers(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = - let uncles = ?ctx.chainDB.getUncles(ommersHash) + let uncles = ?ctx.chainDB.baseTxFrame().getUncles(ommersHash) when false: # EIP 1767 says no ommers == null # but hive test case want empty array [] @@ -256,7 +257,7 @@ proc getOmmers(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = ok(list) proc getOmmerAt(ctx: GraphqlContextRef, ommersHash: Hash32, index: int): RespResult = - let uncles = ?ctx.chainDB.getUncles(ommersHash) + let uncles = ?ctx.chainDB.baseTxFrame().getUncles(ommersHash) if uncles.len == 0: return ok(respNull()) if index < 0 or index >= uncles.len: @@ -264,20 +265,20 @@ proc getOmmerAt(ctx: GraphqlContextRef, ommersHash: Hash32, index: int): RespRes ok(headerNode(ctx, uncles[index])) proc getTxs(ctx: GraphqlContextRef, header: Header): RespResult = - let txCount = getTransactionCount(ctx.chainDB, header.txRoot) + let txCount = getTransactionCount(ctx.chainDB.baseTxFrame(), header.txRoot) if txCount == 0: return ok(respNull()) var list = respList() var index = 0'u64 - let txList = ?ctx.chainDB.getTransactions(header.txRoot) + let txList = ?ctx.chainDB.baseTxFrame().getTransactions(header.txRoot) for tx in txList: list.add txNode(ctx, tx, index, header.number, header.baseFeePerGas) inc index index = 0'u64 var prevUsed = 0.GasInt - let receiptList = ?ctx.chainDB.getReceipts(header.receiptsRoot) + let receiptList = ?ctx.chainDB.baseTxFrame().getReceipts(header.receiptsRoot) for r in receiptList: let tx = TxNode(list.sons[index]) tx.receipt = r @@ -291,20 +292,20 @@ proc getWithdrawals(ctx: GraphqlContextRef, header: Header): RespResult = if header.withdrawalsRoot.isNone: return ok(respNull()) - let wds = ?ctx.chainDB.getWithdrawals(header.withdrawalsRoot.get) + let wds = ?ctx.chainDB.baseTxFrame().getWithdrawals(header.withdrawalsRoot.get) var list = respList() for wd in wds: list.add wdNode(ctx, wd) ok(list) proc getTxAt(ctx: GraphqlContextRef, header: Header, index: uint64): RespResult = - let tx = ctx.chainDB.getTransactionByIndex(header.txRoot, index.uint16).valueOr: + let tx = ctx.chainDB.baseTxFrame().getTransactionByIndex(header.txRoot, index.uint16).valueOr: return ok(respNull()) let txn = txNode(ctx, tx, index, header.number, header.baseFeePerGas) var i = 0'u64 var prevUsed = 0.GasInt - let receiptList = ?ctx.chainDB.getReceipts(header.receiptsRoot) + let receiptList = ?ctx.chainDB.baseTxFrame().getReceipts(header.receiptsRoot) for r in receiptList: if i 
== index: let tx = TxNode(txn) tx.receipt = r @@ -316,8 +317,8 @@ proc getTxByHash(ctx: GraphqlContextRef, hash: Hash32): RespResult = let - txKey = ?ctx.chainDB.getTransactionKey(hash) - header = ?ctx.chainDB.getBlockHeader(txKey.blockNumber) + txKey = ?ctx.chainDB.baseTxFrame().getTransactionKey(hash) + header = ?ctx.chainDB.baseTxFrame().getBlockHeader(txKey.blockNumber) getTxAt(ctx, header, txKey.index) proc accountNode(ctx: GraphqlContextRef, header: Header, address: Address): RespResult = @@ -1031,7 +1032,10 @@ proc toTxArgs(n: Node): TransactionArgs {.gcsafe, raises: [ValueError].} = proc makeCall(ctx: GraphqlContextRef, args: TransactionArgs, header: Header): RespResult = - let res = rpcCallEvm(args, header, ctx.com).valueOr: + let + headerHash = header.blockHash + txFrame = ctx.chain.txFrame(headerHash) + res = rpcCallEvm(args, header, headerHash, ctx.com, txFrame).valueOr: return err("Failed to call rpcCallEvm") var map = respMap(ctx.ids[ethCallResult]) map["data"] = resp("0x" & res.output.toHex) @@ -1055,10 +1059,13 @@ proc blockEstimateGas(ud: RootRef, params: Args, parent: Node): RespResult {.api let h = HeaderNode(parent) let param = params[0].val try: - let args = toTxArgs(param) + let + args = toTxArgs(param) + headerHash = h.header.blockHash + txFrame = ctx.chain.txFrame(headerHash) # TODO: DEFAULT_RPC_GAS_CAP should be configurable {.cast(noSideEffect).}: - let gasUsed = rpcEstimateGas(args, h.header, ctx.com, DEFAULT_RPC_GAS_CAP).valueOr: + let gasUsed = rpcEstimateGas(args, h.header, headerHash, ctx.com, txFrame, DEFAULT_RPC_GAS_CAP).valueOr: return err("Failed to call rpcEstimateGas") longNode(gasUsed) except CatchableError as em: diff --git a/nimbus/nimbus_execution_client.nim b/nimbus/nimbus_execution_client.nim index 83a7db92b4..86d57cedc7 100644 --- a/nimbus/nimbus_execution_client.nim +++ b/nimbus/nimbus_execution_client.nim @@ -156,14 +156,14 @@ proc setupMetrics(nimbus: NimbusNode, conf: NimbusConf) = waitFor nimbus.metricsServer.start() proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef; conf: NimbusConf) = - proc writeDataDirId(kvt: CoreDbKvtRef, calculatedId: Hash32) = + proc writeDataDirId(kvt: CoreDbTxRef, calculatedId: Hash32) = info "Writing data dir ID", ID=calculatedId kvt.put(dataDirIdKey().toOpenArray, calculatedId.data).isOkOr: fatal "Cannot write data dir ID", ID=calculatedId quit(QuitFailure) let - kvt = db.ctx.getKvt() + kvt = db.baseTxFrame() calculatedId = calcHash(conf.networkId, conf.networkParams) dataDirIdBytes = kvt.get(dataDirIdKey().toOpenArray).valueOr: # an empty database diff --git a/nimbus/nimbus_import.nim b/nimbus/nimbus_import.nim index b9771de2df..66e7468f17 100644 --- a/nimbus/nimbus_import.nim +++ b/nimbus/nimbus_import.nim @@ -97,7 +97,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) = setControlCHook(controlCHandler) let - start = com.db.getSavedStateBlockNumber() + 1 + start = com.db.baseTxFrame().getSavedStateBlockNumber() + 1 chain = com.newChain() (cfg, genesis_validators_root, lastEra1Block, firstSlotAfterMerge) = getMetadata(conf.networkId) diff --git a/nimbus/rpc/rpc_utils.nim b/nimbus/rpc/rpc_utils.nim index 176bb3c864..2418856915 100644 --- a/nimbus/rpc/rpc_utils.nim +++ b/nimbus/rpc/rpc_utils.nim @@ -242,6 +242,7 @@ proc populateReceipt*(receipt: Receipt, gasUsed: GasInt, tx: Transaction, proc createAccessList*(header: Header, com: CommonRef, + chain: ForkedChainRef, args: TransactionArgs): AccessListResult = template handleError(msg:
string) = @@ -256,8 +257,10 @@ proc createAccessList*(header: Header, args.gas = Opt.some(Quantity DEFAULT_RPC_GAS_CAP) let - vmState = BaseVMState.new(header, com).valueOr: - handleError("failed to create vmstate: " & $error.code) + txFrame = chain.txFrame(header.blockHash) + parent = txFrame.getBlockHeader(header.parentHash).valueOr: + handleError(error) + vmState = BaseVMState.new(parent, header, com, txFrame) fork = com.toEVMFork(forkDeterminationInfo(header.number, header.timestamp)) sender = args.sender # TODO: nonce should be retrieved from txPool @@ -283,12 +286,15 @@ proc createAccessList*(header: Header, # Apply the transaction with the access list tracer let + txFrame = txFrame.ctx.txFrameBegin(txFrame) tracer = AccessListTracer.new(accessList, sender, to, precompiles) - vmState = BaseVMState.new(header, com, tracer).valueOr: - handleError("failed to create vmstate: " & $error.code) - res = rpcCallEvm(args, header, com, vmState).valueOr: + vmState = BaseVMState.new(parent, header, com, txFrame, tracer) + res = rpcCallEvm(args, header, vmState).valueOr: + txFrame.dispose() handleError("failed to call evm: " & $error.code) + txFrame.dispose() + if res.isError: handleError("failed to apply transaction: " & res.error) @@ -298,4 +304,4 @@ proc createAccessList*(header: Header, gasUsed: Quantity res.gasUsed, ) - prevTracer = tracer \ No newline at end of file + prevTracer = tracer diff --git a/nimbus/rpc/server_api.nim b/nimbus/rpc/server_api.nim index 464ef100f7..4df9047a69 100644 --- a/nimbus/rpc/server_api.nim +++ b/nimbus/rpc/server_api.nim @@ -48,8 +48,9 @@ func newServerAPI*(txPool: TxPoolRef): ServerAPIRef = ServerAPIRef(txPool: txPool) proc getTotalDifficulty*(api: ServerAPIRef, blockHash: Hash32): UInt256 = - let totalDifficulty = api.com.db.getScore(blockHash).valueOr: - return api.com.db.headTotalDifficulty() + # TODO forkedchain! + let totalDifficulty = api.com.db.baseTxFrame().getScore(blockHash).valueOr: + return api.com.db.baseTxFrame().headTotalDifficulty() return totalDifficulty proc getProof*( @@ -105,11 +106,13 @@ proc headerFromTag(api: ServerAPIRef, blockTag: Opt[BlockTag]): Result[Header, s api.headerFromTag(blockId) proc ledgerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[LedgerRef, string] = - let header = ?api.headerFromTag(blockTag) - if not api.chain.stateReady(header): - api.chain.replaySegment(header.blockHash) + # TODO avoid loading full header if hash is given + let + header = ?api.headerFromTag(blockTag) + txFrame = api.chain.txFrame(header) - ok(LedgerRef.init(api.com.db)) + # TODO maybe use a new frame derived from txFrame, to protect against abuse? 
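The TODO above asks for exactly the pattern `createAccessList` already uses earlier in this hunk group: derive a child frame from the block's frame, run against it, and dispose it no matter the outcome, so RPC work can never leak writes into the shared frame. Schematically, with stand-in types (and note that the real code must also dispose on the error path, as the patch does):

import std/tables

type TxFrame = ref object
  kv: Table[string, string]
  parent: TxFrame

proc txFrameBegin(parent: TxFrame): TxFrame = TxFrame(parent: parent)
proc dispose(tx: TxFrame) = tx.kv.clear()

proc get(tx: TxFrame, k: string): string =
  var f = tx
  while f != nil:
    if k in f.kv: return f.kv[k]
    f = f.parent
  ""

let blockFrame = txFrameBegin(nil)
blockFrame.kv["balance:alice"] = "100"

let sandbox = txFrameBegin(blockFrame)   # child derived from the block frame
sandbox.kv["balance:alice"] = "0"        # EVM side effects land here only
assert sandbox.get("balance:alice") == "0"
sandbox.dispose()                        # always disposed, success or error
assert blockFrame.get("balance:alice") == "100"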
+ ok(LedgerRef.init(txFrame)) proc blockFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[Block, string] = if blockTag.kind == bidAlias: @@ -227,9 +230,9 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = let blk = api.chain.memoryBlock(header.blockHash) (blk.receipts, blk.blk.transactions) else: - let rcs = chain.db.getReceipts(header.receiptsRoot).valueOr: + let rcs = chain.baseTxFrame.getReceipts(header.receiptsRoot).valueOr: return Opt.some(newSeq[FilterLog](0)) - let txs = chain.db.getTransactions(header.txRoot).valueOr: + let txs = chain.baseTxFrame.getTransactions(header.txRoot).valueOr: return Opt.some(newSeq[FilterLog](0)) (rcs, txs) # Note: this will hit assertion error if number of block transactions @@ -332,7 +335,9 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = let header = api.headerFromTag(blockTag).valueOr: raise newException(ValueError, "Block not found") - res = rpcCallEvm(args, header, api.com).valueOr: + headerHash = header.blockHash + txFrame = api.chain.txFrame(headerHash) + res = rpcCallEvm(args, header, headerHash, api.com, txFrame).valueOr: raise newException(ValueError, "rpcCallEvm error: " & $error.code) res.output @@ -351,17 +356,17 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = if blockhash == zeroHash32: # Receipt in database - let txDetails = api.chain.db.getTransactionKey(data).valueOr: + let txDetails = api.chain.baseTxFrame.getTransactionKey(data).valueOr: raise newException(ValueError, "TransactionKey not found") if txDetails.index < 0: return nil let header = api.chain.headerByNumber(txDetails.blockNumber).valueOr: raise newException(ValueError, "Block not found") - let tx = api.chain.db.getTransactionByIndex( + let tx = api.chain.baseTxFrame.getTransactionByIndex( header.txRoot, uint16(txDetails.index)).valueOr: return nil - let receipts = api.chain.db.getReceipts(header.receiptsRoot).valueOr: + let receipts = api.chain.baseTxFrame.getReceipts(header.receiptsRoot).valueOr: return nil for receipt in receipts: let gasUsed = receipt.cumulativeGasUsed - prevGasUsed @@ -396,8 +401,10 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = let header = api.headerFromTag(blockId("latest")).valueOr: raise newException(ValueError, "Block not found") + headerHash = header.blockHash + txFrame = api.chain.txFrame(headerHash) #TODO: change 0 to configurable gas cap - gasUsed = rpcEstimateGas(args, header, api.chain.com, DEFAULT_RPC_GAS_CAP).valueOr: + gasUsed = rpcEstimateGas(args, header, headerHash, api.com, txFrame, DEFAULT_RPC_GAS_CAP).valueOr: raise newException(ValueError, "rpcEstimateGas error: " & $error.code) Quantity(gasUsed) @@ -564,11 +571,11 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = break blockOne return populateTransactionObject(tx, Opt.some(blockHash), Opt.some(number), Opt.some(txid)) - let txDetails = api.chain.db.getTransactionKey(txHash).valueOr: + let txDetails = api.chain.baseTxFrame.getTransactionKey(txHash).valueOr: return nil - let header = api.chain.db.getBlockHeader(txDetails.blockNumber).valueOr: + let header = api.chain.baseTxFrame.getBlockHeader(txDetails.blockNumber).valueOr: return nil - let tx = api.chain.db.getTransactionByIndex(header.txRoot, uint16(txDetails.index)).valueOr: + let tx = api.chain.baseTxFrame.getTransactionByIndex(header.txRoot, uint16(txDetails.index)).valueOr: return nil return populateTransactionObject( tx, @@ -653,11 +660,11 @@ proc
setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = receipts = blkdesc.receipts txs = blkdesc.blk.transactions else: - let receiptList = api.chain.db.getReceipts(header.receiptsRoot).valueOr: + let receiptList = api.chain.baseTxFrame.getReceipts(header.receiptsRoot).valueOr: return Opt.none(seq[ReceiptObject]) for receipt in receiptList: receipts.add receipt - txs = api.chain.db.getTransactions(header.txRoot).valueOr: + txs = api.chain.baseTxFrame.getTransactions(header.txRoot).valueOr: return Opt.none(seq[ReceiptObject]) try: @@ -677,7 +684,7 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = try: let header = api.headerFromTag(quantityTag).valueOr: raise newException(ValueError, "Block not found") - return createAccessList(header, api.com, args) + return createAccessList(header, api.com, api.chain, args) except CatchableError as exc: return AccessListResult(error: Opt.some("createAccessList error: " & exc.msg)) diff --git a/nimbus/sync/beacon/worker/db.nim b/nimbus/sync/beacon/worker/db.nim index c165b1421e..5e75acfa7a 100644 --- a/nimbus/sync/beacon/worker/db.nim +++ b/nimbus/sync/beacon/worker/db.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at # https://opensource.org/licenses/MIT). @@ -26,10 +26,6 @@ let # Private helpers # ------------------------------------------------------------------------------ -template kvtNotAvailable(info: static[string]): string = - info & ": kvt table not available (locked by FC module)" - - proc fetchSyncStateLayout(ctx: BeaconCtxRef): Opt[SyncStateLayout] = let data = ctx.pool.chain.fcKvtGet(LhcStateKey.toOpenArray).valueOr: return err() @@ -47,24 +43,21 @@ proc deleteStaleHeadersAndState( ) = ## Delete stale headers and state let c = ctx.pool.chain - if not c.fcKvtAvailable(): - trace kvtNotAvailable(info) - return var bn = upTo while 0 < bn and c.fcKvtHasKey(beaconHeaderKey(bn).toOpenArray): - discard c.fcKvtDel(beaconHeaderKey(bn).toOpenArray) + c.fcKvtDel(beaconHeaderKey(bn).toOpenArray) bn.dec # Occasionally persist the deleted headers (so that the internal DB cache # does not grow extra large.) This will succeed if this function is called # early enough after restart when there is no database transaction pending. if (upTo - bn) mod 8192 == 0: - discard c.fcKvtPersistent() + c.fcKvtPersistent() # Delete persistent state record, there will be no use of it anymore - discard c.fcKvtDel(LhcStateKey.toOpenArray) - discard c.fcKvtPersistent() + c.fcKvtDel(LhcStateKey.toOpenArray) + c.fcKvtPersistent() if bn < upTo: debug info & ": deleted stale sync headers", iv=BnRange.new(bn+1,upTo) @@ -76,11 +69,8 @@ proc deleteStaleHeadersAndState( proc dbStoreSyncStateLayout*(ctx: BeaconCtxRef; info: static[string]) = ## Save chain layout to persistent db let c = ctx.pool.chain - if c.fcKvtAvailable(): - discard c.fcKvtPut(LhcStateKey.toOpenArray, rlp.encode(ctx.layout)) - discard c.fcKvtPersistent() - else: - trace kvtNotAvailable(info) + c.fcKvtPut(LhcStateKey.toOpenArray, rlp.encode(ctx.layout)) + c.fcKvtPersistent() proc dbLoadSyncStateLayout*(ctx: BeaconCtxRef; info: static[string]): bool = ## Restore chain layout from persistent db.
It returns `true` if a previous @@ -162,14 +152,9 @@ proc dbHeadersStash*( let c = ctx.pool.chain last = first + revBlobs.len.uint64 - 1 - if not c.fcKvtAvailable(): - # Need to cache it because FCU has blocked writing through to disk. - for n,data in revBlobs: - ctx.stash[last - n.uint64] = data - else: - for n,data in revBlobs: - let key = beaconHeaderKey(last - n.uint64) - discard c.fcKvtPut(key.toOpenArray, data) + for n,data in revBlobs: + let key = beaconHeaderKey(last - n.uint64) + c.fcKvtPut(key.toOpenArray, data) proc dbHeaderPeek*(ctx: BeaconCtxRef; num: BlockNumber): Opt[Header] = ## Retrieve some stashed header. @@ -199,7 +184,7 @@ proc dbHeaderUnstash*(ctx: BeaconCtxRef; bn: BlockNumber) = ctx.stash.withValue(bn, _): ctx.stash.del bn return - discard ctx.pool.chain.fcKvtDel(beaconHeaderKey(bn).toOpenArray) + ctx.pool.chain.fcKvtDel(beaconHeaderKey(bn).toOpenArray) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/sync/beacon/worker/update/metrics.nim b/nimbus/sync/beacon/worker/update/metrics.nim index bae084a675..c605e1786c 100644 --- a/nimbus/sync/beacon/worker/update/metrics.nim +++ b/nimbus/sync/beacon/worker/update/metrics.nim @@ -23,7 +23,7 @@ declareGauge nec_base, "" & declareGauge nec_execution_head, "" & "Block number of latest imported blocks" - + declareGauge nec_sync_coupler, "" & "Max block number for header chain starting at genesis" diff --git a/nimbus/sync/handlers/eth.nim b/nimbus/sync/handlers/eth.nim index 476d63792c..4314573f91 100644 --- a/nimbus/sync/handlers/eth.nim +++ b/nimbus/sync/handlers/eth.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -61,7 +61,8 @@ proc successorHeader(db: CoreDbRef, skip = 0'u): Opt[Header] = let offset = 1 + skip.BlockNumber if h.number <= (not 0.BlockNumber) - offset: - let header = db.getBlockHeader(h.number + offset).valueOr: + # TODO why is this using base db? + let header = db.baseTxFrame().getBlockHeader(h.number + offset).valueOr: return Opt.none(Header) return Opt.some(header) Opt.none(Header) @@ -71,7 +72,8 @@ proc ancestorHeader(db: CoreDbRef, skip = 0'u): Opt[Header] = let offset = 1 + skip.BlockNumber if h.number >= offset: - let header = db.getBlockHeader(h.number - offset).valueOr: + # TODO why is this using base db? 
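# (Annotation: `baseTxFrame()` reads only the persisted base of the
# database. Under ForkedChainRef, blocks that were imported but not yet
# persisted live in per-fork in-memory frames, so a lookup through the
# base frame can miss them. That appears to be the question the TODO
# above is raising: the handler gains a frame that is always available,
# at the cost of fork awareness.)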
+ let header = db.baseTxFrame().getBlockHeader(h.number - offset).valueOr: return Opt.none(Header) return Opt.some(header) Opt.none(Header) @@ -79,10 +81,10 @@ proc ancestorHeader(db: CoreDbRef, proc blockHeader(db: CoreDbRef, b: BlockHashOrNumber): Opt[Header] = let header = if b.isHash: - db.getBlockHeader(b.hash).valueOr: + db.baseTxFrame().getBlockHeader(b.hash).valueOr: return Opt.none(Header) else: - db.getBlockHeader(b.number).valueOr: + db.baseTxFrame().getBlockHeader(b.number).valueOr: return Opt.none(Header) Opt.some(header) @@ -305,7 +307,8 @@ method getStatus*(ctx: EthWireRef): Result[EthState, string] forkId = com.forkId(bestBlock.number, bestBlock.timestamp) return ok(EthState( - totalDifficulty: db.headTotalDifficulty, + # TODO forkedChain + totalDifficulty: db.baseTxFrame().headTotalDifficulty, genesisHash: com.genesisHash, bestBlockHash: bestBlock.blockHash, forkId: ChainForkId( @@ -321,11 +324,12 @@ method getReceipts*(ctx: EthWireRef, let db = ctx.db var list: seq[seq[Receipt]] for blockHash in hashes: - let header = db.getBlockHeader(blockHash).valueOr: + # TODO forkedChain + let header = db.baseTxFrame().getBlockHeader(blockHash).valueOr: list.add @[] trace "handlers.getReceipts: blockHeader not found", blockHash continue - let receiptList = ?db.getReceipts(header.receiptsRoot) + let receiptList = ?db.baseTxFrame().getReceipts(header.receiptsRoot) list.add receiptList return ok(list) @@ -356,7 +360,8 @@ method getBlockBodies*(ctx: EthWireRef, let db = ctx.db var list: seq[BlockBody] for blockHash in hashes: - let body = db.getBlockBody(blockHash).valueOr: + # TODO forkedChain + let body = db.baseTxFrame().getBlockBody(blockHash).valueOr: list.add BlockBody() trace "handlers.getBlockBodies: blockBody not found", blockHash continue diff --git a/nimbus/tracer.nim b/nimbus/tracer.nim index ef5adb983b..e022901460 100644 --- a/nimbus/tracer.nim +++ b/nimbus/tracer.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -72,7 +72,7 @@ proc init( com: CommonRef; topHeader: Header; ): T = - let header = com.db.getBlockHeader(topHeader.parentHash).expect("top header parent exists") + let header = com.db.baseTxFrame().getBlockHeader(topHeader.parentHash).expect("top header parent exists") T.init(com, header.stateRoot) proc activate(cc: CaptCtxRef): CaptCtxRef {.discardable.} = @@ -111,7 +111,7 @@ proc toJson(receipt: Receipt): JsonNode = result["status"] = %receipt.status proc dumpReceiptsImpl( - chainDB: CoreDbRef; + chainDB: CoreDbTxRef; header: Header; ): JsonNode = result = newJArray() @@ -168,7 +168,10 @@ proc traceTransactionImpl( let tracerInst = newLegacyTracer(tracerFlags) cc = activate CaptCtxRef.init(com, header) - vmState = BaseVMState.new(header, com, storeSlotHash = true).valueOr: return newJNull() + txFrame = com.db.baseTxFrame() + parent = txFrame.getBlockHeader(header.parentHash).valueOr: + return newJNull() + vmState = BaseVMState.new(parent, header, com, txFrame, storeSlotHash = true) ledger = vmState.ledger defer: cc.release() @@ -197,14 +200,12 @@ proc traceTransactionImpl( before.captureAccount(ledger, miner, minerName) ledger.persist() stateDiff["beforeRoot"] = %(ledger.getStateRoot().toHex) - discard com.db.ctx.getAccounts.getStateRoot() # lazy hashing! 
stateCtx = CaptCtxRef.init(com, ledger.getStateRoot()) let rc = vmState.processTransaction(tx, sender, header) gasUsed = if rc.isOk: rc.value else: 0 if idx.uint64 == txIndex: - discard com.db.ctx.getAccounts.getStateRoot() # lazy hashing! after.captureAccount(ledger, sender, senderName) after.captureAccount(ledger, recipient, recipientName) after.captureAccount(ledger, miner, minerName) @@ -216,7 +217,7 @@ proc traceTransactionImpl( # internal transactions: let cx = activate stateCtx - ldgBefore = LedgerRef.init(com.db, storeSlotHash = true) + ldgBefore = LedgerRef.init(com.db.baseTxFrame(), storeSlotHash = true) defer: cx.release() for idx, acc in tracedAccountsPairs(tracerInst): @@ -249,8 +250,10 @@ proc dumpBlockStateImpl( # only need a stack dump when scanning for internal transaction address captureFlags = {DisableMemory, DisableStorage, EnableAccount} tracerInst = newLegacyTracer(captureFlags) - vmState = BaseVMState.new(header, com, tracerInst, storeSlotHash = true).valueOr: + txFrame = com.db.baseTxFrame() + parent = txFrame.getBlockHeader(header.parentHash).valueOr: return newJNull() + vmState = BaseVMState.new(parent, header, com, txFrame, tracerInst, storeSlotHash = true) miner = vmState.coinbase() defer: cc.release() @@ -258,7 +261,7 @@ proc dumpBlockStateImpl( var before = newJArray() after = newJArray() - stateBefore = LedgerRef.init(com.db, storeSlotHash = true) + stateBefore = LedgerRef.init(com.db.baseTxFrame(), storeSlotHash = true) for idx, tx in blk.transactions: let sender = tx.recoverSender().expect("valid signature") @@ -313,8 +316,10 @@ proc traceBlockImpl( cc = activate CaptCtxRef.init(com, header) tracerInst = newLegacyTracer(tracerFlags) # Tracer needs a database where the reverse slot hash table has been set up - vmState = BaseVMState.new(header, com, tracerInst, storeSlotHash = true).valueOr: + txFrame = com.db.baseTxFrame() + parent = txFrame.getBlockHeader(header.parentHash).valueOr: return newJNull() + vmState = BaseVMState.new(parent, header, com, txFrame, tracerInst, storeSlotHash = true) defer: cc.release() @@ -369,7 +374,7 @@ proc dumpMemoryDB*(node: JsonNode, cpt: CoreDbCaptRef) = n[k.toHex(false)] = %v node["state"] = n -proc dumpReceipts*(chainDB: CoreDbRef, header: Header): JsonNode = +proc dumpReceipts*(chainDB: CoreDbTxRef, header: Header): JsonNode = chainDB.dumpReceiptsImpl header proc traceTransaction*( diff --git a/nimbus/transaction/call_evm.nim b/nimbus/transaction/call_evm.nim index 418811df7c..5598b2cebf 100644 --- a/nimbus/transaction/call_evm.nim +++ b/nimbus/transaction/call_evm.nim @@ -27,45 +27,50 @@ export proc rpcCallEvm*(args: TransactionArgs, header: Header, - com: CommonRef): EvmResult[CallResult] = + headerHash: Hash32, + com: CommonRef, + parentFrame: CoreDbTxRef): EvmResult[CallResult] = const globalGasCap = 0 # TODO: globalGasCap should configurable by user let topHeader = Header( - parentHash: header.blockHash, + parentHash: headerHash, timestamp: EthTime.now(), gasLimit: 0.GasInt, ## ??? baseFeePerGas: Opt.none UInt256, ## ??? ) - let vmState = ? BaseVMState.new(topHeader, com) - let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas) - var dbTx = com.db.ctx.txFrameBegin() - defer: dbTx.dispose() # always dispose state changes + let txFrame = parentFrame.ctx.txFrameBegin(parentFrame) + defer: txFrame.dispose() # always dispose state changes + + let vmState = BaseVMState.new(header, topHeader, com, txFrame) + let params = ? 
toCallParams(vmState, args, globalGasCap, header.baseFeePerGas) ok(runComputation(params, CallResult)) proc rpcCallEvm*(args: TransactionArgs, header: Header, - com: CommonRef, vmState: BaseVMState): EvmResult[CallResult] = const globalGasCap = 0 # TODO: globalGasCap should configurable by user let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas) - - var dbTx = com.db.ctx.txFrameBegin() - defer: dbTx.dispose() # always dispose state changes - ok(runComputation(params, CallResult)) proc rpcEstimateGas*(args: TransactionArgs, header: Header, - com: CommonRef, gasCap: GasInt): EvmResult[GasInt] = + headerHash: Hash32, + com: CommonRef, + parentFrame: CoreDbTxRef, + gasCap: GasInt): EvmResult[GasInt] = # Binary search the gas requirement, as it may be higher than the amount used let topHeader = Header( - parentHash: header.blockHash, + parentHash: headerHash, timestamp: EthTime.now(), gasLimit: 0.GasInt, ## ??? baseFeePerGas: Opt.none UInt256, ## ??? ) - let vmState = ? BaseVMState.new(topHeader, com) + + let txFrame = parentFrame.ctx.txFrameBegin(parentFrame) + defer: txFrame.dispose() # always dispose state changes + + let vmState = BaseVMState.new(header, topHeader, com, txFrame) let fork = vmState.fork let txGas = GasInt gasFees[fork][GasTransaction] # txGas always 21000, use constants? var params = ? toCallParams(vmState, args, gasCap, header.baseFeePerGas) @@ -75,8 +80,6 @@ proc rpcEstimateGas*(args: TransactionArgs, hi : GasInt = GasInt args.gas.get(0.Quantity) cap: GasInt - var dbTx = com.db.ctx.txFrameBegin() - defer: dbTx.dispose() # always dispose state changes # Determine the highest gas limit can be used during the estimation. if hi < txGas: diff --git a/nimbus/utils/debug.nim b/nimbus/utils/debug.nim index f3b96ea46d..44de0b4ad5 100644 --- a/nimbus/utils/debug.nim +++ b/nimbus/utils/debug.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -105,7 +105,7 @@ proc debug*(vms: BaseVMState): string = proc `$`(x: ChainId): string = $int(x) -proc `$`(acl: AccessList): string = +proc `$`(acl: transactions.AccessList): string = if acl.len == 0: return "zero length" diff --git a/tests/macro_assembler.nim b/tests/macro_assembler.nim index b47ccc0129..390f6608dd 100644 --- a/tests/macro_assembler.nim +++ b/tests/macro_assembler.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -279,7 +279,7 @@ proc initVMEnv*(network: string): BaseVMState = gasLimit: 100_000 ) - BaseVMState.new(parent, header, com) + BaseVMState.new(parent, header, com, com.db.baseTxFrame()) proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: DebugCallResult): bool = let com = vmState.com @@ -326,7 +326,7 @@ proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: DebugCallR ledger.persist() let - al = com.db.ctx.getAccounts() + al = com.db.baseTxFrame() accPath = keccak256(codeAddress.data) for kv in boa.storage: diff --git a/tests/replay/undump_blocks_gz.nim b/tests/replay/undump_blocks_gz.nim index d300e51f77..495caf901a 100644 --- a/tests/replay/undump_blocks_gz.nim +++ 
b/tests/replay/undump_blocks_gz.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -43,7 +43,7 @@ proc dumpBlocksEndNl*: string = proc dumpBlocksListNl*(header: Header; body: BlockBody): string = dumpBlocksList(header, body) & "\n" -proc dumpBlocksBeginNl*(db: CoreDbRef; +proc dumpBlocksBeginNl*(db: CoreDbTxRef; headers: openArray[Header]): string = if headers[0].number == 1'u64: let @@ -57,7 +57,7 @@ proc dumpBlocksBeginNl*(db: CoreDbRef; result &= dumpBlocksBegin(headers) & "\n" -proc dumpBlocksNl*(db: CoreDbRef; headers: openArray[Header]; +proc dumpBlocksNl*(db: CoreDbTxRef; headers: openArray[Header]; bodies: openArray[BlockBody]): string = ## Add this below the line `transaction.commit()` in the function ## `p2p/chain/persist_blocks.persistBlocksImpl()`: diff --git a/tests/test_aristo/test_balancer.nim b/tests/test_aristo/test_balancer.nim deleted file mode 100644 index fffce5ab8d..0000000000 --- a/tests/test_aristo/test_balancer.nim +++ /dev/null @@ -1,307 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or -# distributed except according to those terms. - -## Aristo (aka Patricia) DB records distributed backend access test. -## - -import - eth/common, - results, - unittest2, - ../../nimbus/db/opts, - ../../nimbus/db/core_db/backend/aristo_rocksdb, - ../../nimbus/db/aristo/[ - aristo_check, - aristo_debug, - aristo_desc, - aristo_get, - aristo_persistent, - aristo_tx], - ../replay/xcheck, - ./test_helpers - -type - LeafQuartet = - array[0..3, seq[LeafTiePayload]] - - DbTriplet = - array[0..2, AristoDbRef] - -const - testRootVid = VertexID(2) - ## Need to reconfigure for the test, root ID 1 cannot be deleted as a trie - -# ------------------------------------------------------------------------------ -# Private debugging helpers -# ------------------------------------------------------------------------------ - -proc dump(pfx: string; dx: varargs[AristoDbRef]): string = - if 0 < dx.len: - result = "\n " - var - pfx = pfx - qfx = "" - if pfx.len == 0: - (pfx,qfx) = ("[","]") - elif 1 < dx.len: - pfx = pfx & "#" - for n in 0 ..< dx.len: - let n1 = n + 1 - result &= pfx - if 1 < dx.len: - result &= $n1 - result &= qfx & "\n " & dx[n].pp(backendOk=true) & "\n" - if n1 < dx.len: - result &= " ==========\n " - -proc dump(dx: varargs[AristoDbRef]): string {.used.} = - "".dump dx - -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -iterator quadripartite(td: openArray[ProofTrieData]): LeafQuartet = - ## ... - var collect: seq[seq[LeafTiePayload]] - - for w in td: - let lst = w.kvpLst.mapRootVid testRootVid - - if lst.len < 8: - if 2 < collect.len: - yield [collect[0], collect[1], collect[2], lst] - collect.setLen(0) - else: - collect.add lst - else: - if collect.len == 0: - let a = lst.len div 4 - yield [lst[0 ..< a], lst[a ..< 2*a], lst[2*a ..< 3*a], lst[3*a .. 
^1]] - else: - if collect.len == 1: - let a = lst.len div 3 - yield [collect[0], lst[0 ..< a], lst[a ..< 2*a], lst[a .. ^1]] - elif collect.len == 2: - let a = lst.len div 2 - yield [collect[0], collect[1], lst[0 ..< a], lst[a .. ^1]] - else: - yield [collect[0], collect[1], collect[2], lst] - collect.setLen(0) - -proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[AristoDbRef,AristoError] = - let db = block: - if 0 < rdbPath.len: - let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() - let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) - xCheckRc rc.error == 0: - result = err(rc.error) - rc.value()[0] - else: - AristoDbRef.init MemBackendRef - - # Set failed `xCheck()` error result - result = err(AristoError 1) - - # Fill backend - block: - let report = db.mergeList w[0] - if report.error != 0: - db.finish(eradicate=true) - xCheck report.error == 0 - let rc = db.persist() - xCheckRc rc.error == 0: - result = err(rc.error) - - let dx = db - -# ---------------------- - -proc cleanUp(dx: var AristoDbRef) = - if not dx.isNil: - dx.finish(eradicate=true) - dx.reset - -# ---------------------- - -proc isDbEq(a, b: LayerRef; db: AristoDbRef; noisy = true): bool = - ## Verify that argument filter `a` has the same effect on the - ## physical/unfiltered backend of `db` as argument filter `b`. - if a.isNil: - return b.isNil - if b.isNil: - return false - if unsafeAddr(a[]) != unsafeAddr(b[]): - if a.kMap.getOrVoid((testRootVid, testRootVid)) != - b.kMap.getOrVoid((testRootVid, testRootVid)) or - a.vTop != b.vTop: - return false - - # Void entries may differ unless on physical backend - var (aTab, bTab) = (a.sTab, b.sTab) - if aTab.len < bTab.len: - aTab.swap bTab - for (vid,aVtx) in aTab.pairs: - let bVtx = bTab.getOrVoid vid - bTab.del vid - - if aVtx != bVtx: - if aVtx.isValid and bVtx.isValid: - return false - # The valid one must match the backend data - let rc = db.getVtxUbe vid - if rc.isErr: - return false - let vtx = if aVtx.isValid: aVtx else: bVtx - if vtx != rc.value: - return false - - elif not vid.isValid and not bTab.hasKey vid: - let rc = db.getVtxUbe vid - if rc.isOk: - return false # Exists on backend but missing on `bTab[]` - elif rc.error != GetKeyNotFound: - return false # general error - - if 0 < bTab.len: - noisy.say "***", "not dbEq:", "bTabLen=", bTab.len - return false - - # Similar for `kMap[]` - var (aMap, bMap) = (a.kMap, b.kMap) - if aMap.len < bMap.len: - aMap.swap bMap - for (vid,aKey) in aMap.pairs: - let bKey = bMap.getOrVoid vid - bMap.del vid - - if aKey != bKey: - if aKey.isValid and bKey.isValid: - return false - # The valid one must match the backend data - let rc = db.getKeyUbe(vid, {}) - if rc.isErr: - return false - let key = if aKey.isValid: aKey else: bKey - if key != rc.value[0]: - return false - - elif not vid.isValid and not bMap.hasKey vid: - let rc = db.getKeyUbe(vid, {}) - if rc.isOk: - return false # Exists on backend but missing on `bMap[]` - elif rc.error != GetKeyNotFound: - return false # general error - - if 0 < bMap.len: - noisy.say "***", "not dbEq:", " bMapLen=", bMap.len - return false - - true - -# ---------------------- - -proc checkBeOk( - dx: AristoDbRef; - forceCache = false; - noisy = true; - ): bool = - ## .. 
- let rc = dx.checkBE() - xCheckRc rc.error == (0,0): - noisy.say "***", "db checkBE failed" - true - -# ------------------------------------------------------------------------------ -# Public test function -# ------------------------------------------------------------------------------ - -proc testBalancer*( - noisy: bool; - list: openArray[ProofTrieData]; - rdbPath: string; # Rocks DB storage directory - ): bool = - var n = 0 - for w in list.quadripartite: - n.inc - - # Resulting clause (11) filters from `aristo/README.md` example - # which will be used in the second part of the tests - var - c11Filter1 = LayerRef(nil) - c11Filter3 = LayerRef(nil) - - # Work through clauses (8)..(11) from `aristo/README.md` example - block: - - # Clause (8) from `aristo/README.md` example - var - dx = block: - let rc = dbTriplet(w, rdbPath) - xCheckRc rc.error == 0 - rc.value - db1 = dx - defer: - dx.cleanUp() - - when false: # or true: - noisy.say "*** testDistributedAccess (1)", "n=", n # , dx.dump - - # Clause (9) from `aristo/README.md` example - block: - let rc = db1.persist() - xCheckRc rc.error == 0 - xCheck db1.balancer == LayerRef(nil) - - # Check/verify backends - block: - let ok = dx.checkBeOk(noisy=noisy) - xCheck ok: - noisy.say "*** testDistributedAccess (4)", "n=", n - - # Capture filters from clause (11) - c11Filter1 = db1.balancer - - # Clean up - dx.cleanUp() - - # ---------- - - # Work through clauses (12)..(15) from `aristo/README.md` example - block: - var - dy = block: - let rc = dbTriplet(w, rdbPath) - xCheckRc rc.error == 0 - rc.value - db1 = dy - defer: - dy.cleanUp() - - # Clause (14) from `aristo/README.md` check - let c11Fil1_eq_db1RoFilter = c11Filter1.isDbEq(db1.balancer, db1, noisy) - xCheck c11Fil1_eq_db1RoFilter: - noisy.say "*** testDistributedAccess (7)", "n=", n, - "db1".dump(db1), - "" - - # Check/verify backends - block: - let ok = dy.checkBeOk(noisy=noisy) - xCheck ok - - when false: # or true: - noisy.say "*** testDistributedAccess (9)", "n=", n # , dy.dump - - true - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/tests/test_aristo/test_compute.nim b/tests/test_aristo/test_compute.nim index 239f0222ba..c91db8d0d0 100644 --- a/tests/test_aristo/test_compute.nim +++ b/tests/test_aristo/test_compute.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -79,19 +79,20 @@ suite "Aristo compute": test "Add and delete entries " & $n: let db = AristoDbRef.init VoidBackendRef + txFrame = db.txRef root = VertexID(1) for (k, v, r) in sample: checkpoint("k = " & k.toHex & ", v = " & $v) check: - db.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) + txFrame.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) # Check state against expected value - let w = db.computeKey((root, root)).expect("no errors") + let w = txFrame.computeKey((root, root)).expect("no errors") check r == w.to(Hash32) - let rc = db.check + let rc = txFrame.check check rc == typeof(rc).ok() # Reverse run deleting entries @@ -103,29 +104,30 @@ suite "Aristo compute": deletedKeys.incl k # Check state against expected value - let w = db.computeKey((root, root)).value.to(Hash32) + let w = txFrame.computeKey((root, 
root)).value.to(Hash32) check r == w check: - db.deleteAccountRecord(k).isOk + txFrame.deleteAccountRecord(k).isOk - let rc = db.check + let rc = txFrame.check check rc == typeof(rc).ok() test "Pre-computed key": # TODO use mainnet genesis in this test? let db = AristoDbRef.init MemBackendRef + txFrame = db.txRef root = VertexID(1) for (k, v, r) in samples[^1]: check: - db.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) + txFrame.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) check db.txPersist(1).isOk() - check db.computeKeys(root).isOk() + check txFrame.computeKeys(root).isOk() - let w = db.computeKey((root, root)).value.to(Hash32) + let w = txFrame.computeKey((root, root)).value.to(Hash32) check w == samples[^1][^1][2] diff --git a/tests/test_aristo/test_helpers.nim b/tests/test_aristo/test_helpers.nim index 7f73bb17cc..603939cb3f 100644 --- a/tests/test_aristo/test_helpers.nim +++ b/tests/test_aristo/test_helpers.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -49,7 +49,7 @@ func to(a: NodeKey; T: type PathID): T = proc pp*( w: ProofTrieData; rootID: VertexID; - db: AristoDbRef; + db: AristoTxRef; indent = 4; ): string = let @@ -65,13 +65,13 @@ proc pp*( result &= "])" proc pp*(w: ProofTrieData; indent = 4): string = - var db = AristoDbRef() + var db = AristoTxRef() w.pp(VertexID(1), db, indent) proc pp*( w: openArray[ProofTrieData]; rootID: VertexID; - db: AristoDbRef; + db: AristoTxRef; indent = 4): string = let pfx = indent.toPfx "[" & w.mapIt(it.pp(rootID, db, indent + 1)).join("," & pfx & " ") & "]" @@ -80,7 +80,7 @@ proc pp*(w: openArray[ProofTrieData]; indent = 4): string = let pfx = indent.toPfx "[" & w.mapIt(it.pp(indent + 1)).join("," & pfx & " ") & "]" -proc pp*(ltp: LeafTiePayload; db: AristoDbRef): string = +proc pp*(ltp: LeafTiePayload; db: AristoTxRef): string = "(" & ltp.leafTie.pp(db) & "," & ltp.payload.pp(db) & ")" # ---------- @@ -208,11 +208,6 @@ proc schedStow*( db: AristoDbRef; # Database ): Result[void,AristoError] = ## Context based scheduled persistent/non-persistent storage. - let - layersMeter = db.nLayersVtx() + db.nLayersKey() - filterMeter = if db.balancer.isNil: 0 - else: db.balancer.sTab.len + db.balancer.kMap.len - persistent = MaxFilterBulk < max(layersMeter, filterMeter) db.persist() # ------------------ diff --git a/tests/test_aristo/test_merge_proof.nim b/tests/test_aristo/test_merge_proof.nim index fe6ca338d8..163b5e558a 100644 --- a/tests/test_aristo/test_merge_proof.nim +++ b/tests/test_aristo/test_merge_proof.nim @@ -32,7 +32,7 @@ const proc innerCleanUp(ps: var PartStateRef) = if not ps.isNil: - ps.db.finish(eradicate=true) + ps.db.db.finish(eradicate=true) ps = PartStateRef(nil) # ----------------------- @@ -42,43 +42,43 @@ proc saveToBackend( noisy: bool; debugID: int; ): bool = - var db = tx.to(AristoDbRef) + # var db = tx.to(AristoDbRef) - # Verify context: nesting level must be 2 (i.e. two transactions) - xCheck tx.level == 2 + # # Verify context: nesting level must be 2 (i.e. 
two transactions) + # xCheck tx.level == 2 - # Commit and hashify the current layer - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and hashify the current layer + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckRc rc.error == 0 - tx = rc.value + # block: + # let rc = db.txFrameTop() + # xCheckRc rc.error == 0 + # tx = rc.value - # Verify context: nesting level must be 1 (i.e. one transaction) - xCheck tx.level == 1 + # # Verify context: nesting level must be 1 (i.e. one transaction) + # xCheck tx.level == 1 - block: - let rc = db.checkBE() - xCheckRc rc.error == (0,0) + # block: + # let rc = db.checkBE() + # xCheckRc rc.error == (0,0) - # Commit and save to backend - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and save to backend + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckErr rc.value.level < 0 # force error + # block: + # let rc = db.txFrameTop() + # xCheckErr rc.value.level < 0 # force error - block: - let rc = db.schedStow() - xCheckRc rc.error == 0 + # block: + # let rc = db.schedStow() + # xCheckRc rc.error == 0 - # Update layers to original level - tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value + # # Update layers to original level + # tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value true diff --git a/tests/test_aristo/test_portal_proof.nim b/tests/test_aristo/test_portal_proof.nim index 116006d1ba..e184926706 100644 --- a/tests/test_aristo/test_portal_proof.nim +++ b/tests/test_aristo/test_portal_proof.nim @@ -51,7 +51,7 @@ proc createPartDb(ps: PartStateRef; data: seq[seq[byte]]; info: static[string]) proc preLoadAristoDb(jKvp: JsonNode): PartStateRef = const info = "preLoadAristoDb" - let ps = PartStateRef.init AristoDbRef.init() + let ps = PartStateRef.init AristoDbRef.init().txRef # Collect rlp-encodede node blobs var proof: seq[seq[byte]] @@ -198,7 +198,7 @@ proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) {.dep let chain = proof.chain # Create another partial database from tree - let pq = PartStateRef.init AristoDbRef.init() + let pq = PartStateRef.init AristoDbRef.init().txRef pq.createPartDb(chain, info) # Create the same proof again which must result into the same as before @@ -227,7 +227,7 @@ proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) {.dep chain = @[ext] & tail # Create a third partial database from modified proof - let pq = PartStateRef.init AristoDbRef.init() + let pq = PartStateRef.init AristoDbRef.init().txRef pq.createPartDb(chain, info) # Re-create proof again diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim index 849f86ff3f..25ea6646f0 100644 --- a/tests/test_aristo/test_tx.nim +++ b/tests/test_aristo/test_tx.nim @@ -82,35 +82,35 @@ func rand(td: var PrngDesc; top: int): int = # ----------------------- -proc randomisedLeafs( - db: AristoDbRef; - ltys: HashSet[LeafTie]; - td: var PrngDesc; - ): Result[seq[(LeafTie,RootedVertexID)],(VertexID,AristoError)] = - var lvp: seq[(LeafTie,RootedVertexID)] - for lty in ltys: - var hike: Hike - ?lty.hikeUp(db, Opt.none(VertexRef), hike) - lvp.add (lty,(hike.root, hike.legs[^1].wp.vid)) - - var lvp2 = lvp.sorted( - cmp = proc(a,b: (LeafTie,RootedVertexID)): int = cmp(a[0],b[0])) - if 2 < lvp2.len: - for n in 0 ..< lvp2.len-1: - let r = n + td.rand(lvp2.len - n) - lvp2[n].swap lvp2[r] - ok lvp2 - -proc innerCleanUp(db: var AristoDbRef): bool 
{.discardable.} = - ## Defer action - if not db.isNil: - let rx = db.txFrameTop() - if rx.isOk: - let rc = rx.value.collapse(commit=false) - xCheckRc rc.error == 0 - db.finish(eradicate=true) - db = AristoDbRef(nil) - true +# proc randomisedLeafs( +# db: AristoTxRef; +# ltys: HashSet[LeafTie]; +# td: var PrngDesc; +# ): Result[seq[(LeafTie,RootedVertexID)],(VertexID,AristoError)] = +# var lvp: seq[(LeafTie,RootedVertexID)] +# for lty in ltys: +# var hike: Hike +# ?lty.hikeUp(db, Opt.none(VertexRef), hike) +# lvp.add (lty,(hike.root, hike.legs[^1].wp.vid)) + +# var lvp2 = lvp.sorted( +# cmp = proc(a,b: (LeafTie,RootedVertexID)): int = cmp(a[0],b[0])) +# if 2 < lvp2.len: +# for n in 0 ..< lvp2.len-1: +# let r = n + td.rand(lvp2.len - n) +# lvp2[n].swap lvp2[r] +# ok lvp2 + +# proc innerCleanUp(db: var AristoTxRef): bool {.discardable.} = +# ## Defer action +# if not db.isNil: +# let rx = db.txFrameTop() +# if rx.isOk: +# let rc = rx.value.collapse(commit=false) +# xCheckRc rc.error == 0 +# db.finish(eradicate=true) +# db = AristoDbRef(nil) +# true # -------------------------------- @@ -122,50 +122,50 @@ proc saveToBackend( ): bool = var db = tx.to(AristoDbRef) - # Verify context: nesting level must be 2 (i.e. two transactions) - xCheck tx.level == 2 + # # Verify context: nesting level must be 2 (i.e. two transactions) + # xCheck tx.level == 2 - block: - let rc = db.checkTop() - xCheckRc rc.error == (0,0) + # block: + # let rc = db.checkTop() + # xCheckRc rc.error == (0,0) - # Commit and hashify the current layer - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and hashify the current layer + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckRc rc.error == 0 - tx = rc.value + # block: + # let rc = db.txFrameTop() + # xCheckRc rc.error == 0 + # tx = rc.value - # Verify context: nesting level must be 1 (i.e. one transaction) - xCheck tx.level == 1 + # # Verify context: nesting level must be 1 (i.e. 
one transaction) + # xCheck tx.level == 1 - block: - let rc = db.checkBE() - xCheckRc rc.error == (0,0) + # block: + # let rc = db.checkBE() + # xCheckRc rc.error == (0,0) - # Commit and save to backend - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and save to backend + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckErr rc.value.level < 0 # force error + # block: + # let rc = db.txFrameTop() + # xCheckErr rc.value.level < 0 # force error - block: - let rc = db.schedStow() - xCheckRc rc.error == 0 + # block: + # let rc = db.schedStow() + # xCheckRc rc.error == 0 - block: - let rc = db.checkBE() - xCheckRc rc.error == (0,0): - noisy.say "***", "saveToBackend (8)", " debugID=", debugID + # block: + # let rc = db.checkBE() + # xCheckRc rc.error == (0,0): + # noisy.say "***", "saveToBackend (8)", " debugID=", debugID - # Update layers to original level - tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value + # # Update layers to original level + # tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value true @@ -179,26 +179,26 @@ proc fwdWalkVerify( ): bool = let nLeafs = leftOver.len - var - leftOver = leftOver - last = LeafTie() - n = 0 - for (key,_) in db.rightPairs low(LeafTie,root): - xCheck key in leftOver: - noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID - leftOver.excl key - last = key - n.inc - - # Verify stop condition - if last.root == VertexID(0): - last = low(LeafTie,root) - elif last != high(LeafTie,root): - last = last.next - let rc = last.right db - xCheck rc.isErr - xCheck rc.error[1] == NearbyBeyondRange - xCheck n == nLeafs + # var + # leftOver = leftOver + # last = LeafTie() + # n = 0 + # for (key,_) in db.rightPairs low(LeafTie,root): + # xCheck key in leftOver: + # noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID + # leftOver.excl key + # last = key + # n.inc + + # # Verify stop condition + # if last.root == VertexID(0): + # last = low(LeafTie,root) + # elif last != high(LeafTie,root): + # last = last.next + # let rc = last.right db + # xCheck rc.isErr + # xCheck rc.error[1] == NearbyBeyondRange + # xCheck n == nLeafs true @@ -211,26 +211,26 @@ proc revWalkVerify( ): bool = let nLeafs = leftOver.len - var - leftOver = leftOver - last = LeafTie() - n = 0 - for (key,_) in db.leftPairs high(LeafTie,root): - xCheck key in leftOver: - noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID - leftOver.excl key - last = key - n.inc - - # Verify stop condition - if last.root == VertexID(0): - last = high(LeafTie,root) - elif last != low(LeafTie,root): - last = last.prev - let rc = last.left db - xCheck rc.isErr - xCheck rc.error[1] == NearbyBeyondRange - xCheck n == nLeafs + # var + # leftOver = leftOver + # last = LeafTie() + # n = 0 + # for (key,_) in db.leftPairs high(LeafTie,root): + # xCheck key in leftOver: + # noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID + # leftOver.excl key + # last = key + # n.inc + + # # Verify stop condition + # if last.root == VertexID(0): + # last = high(LeafTie,root) + # elif last != low(LeafTie,root): + # last = last.prev + # let rc = last.left db + # xCheck rc.isErr + # xCheck rc.error[1] == NearbyBeyondRange + # xCheck n == nLeafs true diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index 3a2c78f57e..59230ca124 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status 
Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -58,8 +58,8 @@ proc parseEnv(node: JsonNode): TestEnv = result.network = node["network"].getStr result.pre = node["pre"] -proc rootExists(db: CoreDbRef; root: Hash32): bool = - let state = db.ctx.getAccounts().getStateRoot().valueOr: +proc rootExists(db: CoreDbTxRef; root: Hash32): bool = + let state = db.getStateRoot().valueOr: return false state == root @@ -67,22 +67,22 @@ proc executeCase(node: JsonNode): bool = let env = parseEnv(node) memDB = newCoreDbRef DefaultDbMemory - ledger = LedgerRef.init(memDB) + ledger = LedgerRef.init(memDB.baseTxFrame()) config = getChainConfig(env.network) com = CommonRef.new(memDB, nil, config) setupLedger(env.pre, ledger) ledger.persist() - com.db.persistHeaderAndSetHead(env.genesisHeader).isOkOr: + ledger.txFrame.persistHeaderAndSetHead(env.genesisHeader).isOkOr: debugEcho "Failed to put genesis header into database: ", error return false - var c = ForkedChainRef.init(com) + var c = ForkedChainRef.init(com) if c.latestHash != env.genesisHeader.blockHash: debugEcho "Genesis block hash in database is different with expected genesis block hash" return false - + var lastStateRoot = env.genesisHeader.stateRoot for blk in env.blocks: let res = c.importBlock(blk.blk) @@ -100,14 +100,14 @@ proc executeCase(node: JsonNode): bool = c.forkChoice(env.lastBlockHash, env.lastBlockHash).isOkOr: debugEcho error return false - + let headHash = c.latestHash if headHash != env.lastBlockHash: debugEcho "lastestBlockHash mismatch, get: ", headHash, " expect: ", env.lastBlockHash return false - if not memDB.rootExists(lastStateRoot): + if not c.txFrame(headHash).rootExists(lastStateRoot): debugEcho "Last stateRoot not exists" return false @@ -148,6 +148,6 @@ when isMainModule: if testStatusIMPL == FAILED: quit(QuitFailure) - executeFile("tests/fixtures/eth_tests/BlockchainTests/GeneralStateTests/stTransactionTest/ValueOverflowParis.json") + executeFile("tests/fixtures/eth_tests/BlockchainTests/ValidBlocks/bcWalletTest/walletReorganizeOwners.json") else: blockchainJsonMain() diff --git a/tests/test_coredb/test_chainsync.nim b/tests/test_coredb/test_chainsync.nim index b2ee27f438..511714c456 100644 --- a/tests/test_coredb/test_chainsync.nim +++ b/tests/test_coredb/test_chainsync.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -147,7 +147,7 @@ proc test_chainSync*( let sayBlocks = 900'u64 chain = com.newChain() - blockOnDb = com.db.getSavedStateBlockNumber() + blockOnDb = com.db.baseTxFrame().getSavedStateBlockNumber() lastBlock = max(1, numBlocks).BlockNumber noisy.initLogging com @@ -203,7 +203,7 @@ proc test_chainSync*( for w in files.undumpBlocks(least = start): let (fromBlock, toBlock) = (w[0].header.number, w[^1].header.number) if fromBlock == 0'u64: - xCheck w[0].header == com.db.getBlockHeader(0'u64).expect("block header exists") + xCheck w[0].header == com.db.baseTxFrame().getBlockHeader(0'u64).expect("block header exists") continue # Process groups of blocks ... 
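The changes above all repeat one migration pattern: code that used to read and write through `CoreDbRef` now goes through a `CoreDbTxRef` frame, obtained either from `baseTxFrame()` (the persisted base) or from `txFrameBegin(parent)` (a scratch layer stacked on a parent frame) and finished with `commit()` or `dispose()`. The toy sketch below illustrates the stacking semantics the diff relies on; the names (`TxFrame`, `put`, `get`) are made up for illustration and are not the real Aristo/CoreDb implementation.

import std/[tables, options]

type
  TxFrame = ref object
    parent: TxFrame                     # nil for the base (persisted) frame
    data: Table[string, string]         # writes recorded in this layer

proc txFrameBegin(parent: TxFrame): TxFrame =
  ## Open a scratch frame on top of `parent`, like `ctx.txFrameBegin(parent)`.
  TxFrame(parent: parent, data: initTable[string, string]())

proc put(f: TxFrame; k, v: string) =
  f.data[k] = v

proc get(f: TxFrame; k: string): Option[string] =
  ## Reads fall through the frame stack down to the base.
  var cur = f
  while cur != nil:
    if k in cur.data:
      return some(cur.data[k])
    cur = cur.parent
  none(string)

proc commit(f: TxFrame) =
  ## Fold this frame's writes into its parent; `dispose()` would drop them.
  doAssert f.parent != nil, "cannot commit the base frame"
  for k, v in f.data:
    f.parent.data[k] = v

when isMainModule:
  let base = TxFrame(data: initTable[string, string]())  # stands in for baseTxFrame()
  base.put("header:0", "genesis")

  let frame = txFrameBegin(base)        # per-block scratch frame
  frame.put("header:1", "block 1")

  doAssert frame.get("header:0").isSome # frame sees base data through the stack
  doAssert base.get("header:1").isNone  # base cannot see unpersisted writes

  frame.commit()                        # now the write reaches the base
  doAssert base.get("header:1").isSome

The same shape explains why the old paired `dbTx.commit()`/`dispose()` calls disappear from so many hunks: the frame, not the database handle, is now the unit of isolation.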
diff --git a/tests/test_evm_support.nim b/tests/test_evm_support.nim index 19bc792346..c36f63aa94 100644 --- a/tests/test_evm_support.nim +++ b/tests/test_evm_support.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) @@ -355,6 +355,7 @@ proc runTestOverflow() = header, header, com, + com.db.baseTxFrame() ) s.ledger.setCode(codeAddress, @data) diff --git a/tests/test_forked_chain.nim b/tests/test_forked_chain.nim index dd74d3c0e2..68e3c61364 100644 --- a/tests/test_forked_chain.nim +++ b/tests/test_forked_chain.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -42,7 +42,15 @@ proc newCom(env: TestEnv): CommonRef = env.conf.networkParams ) -proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: Block): Block = +proc newCom(env: TestEnv, db: CoreDbRef): CommonRef = + CommonRef.new( + db, + nil, + env.conf.networkId, + env.conf.networkParams + ) + +proc makeBlk(txFrame: CoreDbTxRef, number: BlockNumber, parentBlk: Block): Block = template parent(): Header = parentBlk.header @@ -55,7 +63,7 @@ proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: Block): Block = amount: 1, ) - let ledger = LedgerRef.init(com.db) + let ledger = LedgerRef.init(txFrame) for wd in wds: ledger.addBalance(wd.address, wd.weiAmount) @@ -85,446 +93,553 @@ proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: Block): Block = Block.init(header, body) -proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: Block, extraData: byte): Block = - var blk = com.makeBlk(number, parentBlk) +proc makeBlk(txFrame: CoreDbTxRef, number: BlockNumber, parentBlk: Block, extraData: byte): Block = + var blk = txFrame.makeBlk(number, parentBlk) blk.header.extraData = @[extraData] blk -proc headHash(c: CommonRef): Hash32 = - c.db.getCanonicalHead().expect("canonical head exists").blockHash +proc headHash(c: ForkedChainRef): Hash32 = + c.latestTxFrame.getCanonicalHead().expect("canonical head exists").blockHash func blockHash(x: Block): Hash32 = x.header.blockHash -proc wdWritten(com: CommonRef, blk: Block): int = +proc wdWritten(c: ForkedChainRef, blk: Block): int = if blk.header.withdrawalsRoot.isSome: - com.db.getWithdrawals(blk.header.withdrawalsRoot.get). + c.latestTxFrame.getWithdrawals(blk.header.withdrawalsRoot.get). 
expect("withdrawals exists").len else: 0 +template checkImportBlock(chain, blk) = + let res = chain.importBlock(blk) + check res.isOk + if res.isErr: + debugEcho "IMPORT BLOCK FAIL: ", res.error + debugEcho "Block Number: ", blk.header.number + +template checkImportBlockErr(chain, blk) = + let res = chain.importBlock(blk) + check res.isErr + if res.isOk: + debugEcho "IMPORT BLOCK SHOULD FAIL" + debugEcho "Block Number: ", blk.header.number + +template checkForkChoice(chain, a, b) = + let res = chain.forkChoice(a.blockHash, b.blockHash) + check res.isOk + if res.isErr: + debugEcho "FORK CHOICE FAIL: ", res.error + debugEcho "Block Number: ", a.header.number, " ", b.header.number + +template checkForkChoiceErr(chain, a, b) = + let res = chain.forkChoice(a.blockHash, b.blockHash) + check res.isErr + if res.isOk: + debugEcho "FORK CHOICE SHOULD FAIL" + debugEcho "Block Number: ", a.header.number, " ", b.header.number + +template checkPersisted(chain, blk) = + let res = chain.baseTxFrame.getBlockHeader(blk.blockHash) + check res.isOk + if res.isErr: + debugEcho "CHECK FINALIZED FAIL: ", res.error + debugEcho "Block Number: ", blk.header.number + proc forkedChainMain*() = - suite "ForkedChainRef tests": - var env = setupEnv() - let - cc = env.newCom - genesisHash = cc.genesisHeader.blockHash - genesis = Block.init(cc.genesisHeader, BlockBody()) - - let - blk1 = cc.makeBlk(1, genesis) - blk2 = cc.makeBlk(2, blk1) - blk3 = cc.makeBlk(3, blk2) - - dbTx = cc.db.ctx.txFrameBegin() - blk4 = cc.makeBlk(4, blk3) - blk5 = cc.makeBlk(5, blk4) - blk6 = cc.makeBlk(6, blk5) - blk7 = cc.makeBlk(7, blk6) - - dbTx.dispose() - - let - B4 = cc.makeBlk(4, blk3, 1.byte) - B5 = cc.makeBlk(5, B4) - B6 = cc.makeBlk(6, B5) - B7 = cc.makeBlk(7, B6) - - test "newBase == oldBase": - const info = "newBase == oldBase" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader) - check chain.importBlock(blk1).isOk - - # same header twice - check chain.importBlock(blk1).isOk - - check chain.importBlock(blk2).isOk - - check chain.importBlock(blk3).isOk - check chain.validate info & " (1)" - - # no parent - check chain.importBlock(blk5).isErr - - check com.headHash == genesisHash - check chain.latestHash == blk3.blockHash - check chain.validate info & " (2)" - - # finalized > head -> error - check chain.forkChoice(blk1.blockHash, blk3.blockHash).isErr - check chain.validate info & " (3)" - - # blk4 is not part of chain - check chain.forkChoice(blk4.blockHash, blk2.blockHash).isErr - - # finalized > head -> error - check chain.forkChoice(blk1.blockHash, blk2.blockHash).isErr - - # blk4 is not part of chain - check chain.forkChoice(blk2.blockHash, blk4.blockHash).isErr - - # finalized < head -> ok - check chain.forkChoice(blk2.blockHash, blk1.blockHash).isOk - check com.headHash == blk2.blockHash - check chain.latestHash == blk2.blockHash - check chain.validate info & " (7)" - - # finalized == head -> ok - check chain.forkChoice(blk2.blockHash, blk2.blockHash).isOk - check com.headHash == blk2.blockHash - check chain.latestHash == blk2.blockHash - check chain.validate info & " (8)" - - # no baggage written - check com.wdWritten(blk1) == 0 - check com.wdWritten(blk2) == 0 - check chain.validate info & " (9)" - - test "newBase == cursor": - const info = "newBase == cursor" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check 
chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(blk4).isOk - check chain.validate info & " (1)" - - # newbase == cursor - check chain.forkChoice(blk7.blockHash, blk6.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - - check com.wdWritten(blk7) == 0 - - # head - baseDistance must been finalized - check com.wdWritten(blk4) == 4 - # make sure aristo not wiped out baggage - check com.wdWritten(blk3) == 3 - check chain.validate info & " (9)" - - test "newBase between oldBase and cursor": - const info = "newBase between oldBase and cursor" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk6.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - - check com.wdWritten(blk6) == 0 - check com.wdWritten(blk7) == 0 - - # head - baseDistance must been finalized - check com.wdWritten(blk4) == 4 - # make sure aristo not wiped out baggage - check com.wdWritten(blk3) == 3 - check chain.validate info & " (9)" - - test "newBase == oldBase, fork and stay on that fork": - const info = "newBase == oldBase, fork .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.validate info & " (9)" - - test "newBase == cursor, fork and stay on that fork": - const info = "newBase == cursor, fork .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - - check chain.importBlock(B4).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B6.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.validate info & " (9)" - - test "newBase on shorter canonical arc, discard arc with oldBase": - const info = "newBase on shorter canonical .." 
- let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.baseNumber >= B4.header.number - check chain.cursorHeads.len == 1 - check chain.validate info & " (9)" - - test "newBase on curbed non-canonical arc": - const info = "newBase on curbed non-canonical .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 5) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.baseNumber > 0 - check chain.baseNumber < B4.header.number - check chain.cursorHeads.len == 2 - check chain.validate info & " (9)" - - test "newBase == oldBase, fork and return to old chain": - const info = "newBase == oldBase, fork .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - check chain.validate info & " (9)" - - test "newBase == cursor, fork and return to old chain": - const info = "newBase == cursor, fork .." 
- let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - - check chain.importBlock(blk4).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - check chain.validate info & " (9)" - - test "newBase on shorter canonical arc, discard arc with oldBase" & - " (ign dup block)": - const info = "newBase on shorter canonical .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - - check chain.importBlock(blk4).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.baseNumber >= B4.header.number - check chain.cursorHeads.len == 1 - check chain.validate info & " (9)" - - test "newBase on longer canonical arc, discard arc with oldBase": - const info = "newBase on longer canonical .." 
- let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - check chain.baseNumber > 0 - check chain.baseNumber < blk5.header.number - check chain.cursorHeads.len == 1 - check chain.validate info & " (9)" - - test "headerByNumber": - const info = "headerByNumber" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - # cursor - check chain.headerByNumber(8).isErr - check chain.headerByNumber(7).expect("OK").number == 7 - check chain.headerByNumber(7).expect("OK").blockHash == blk7.blockHash - - # from db - check chain.headerByNumber(3).expect("OK").number == 3 - check chain.headerByNumber(3).expect("OK").blockHash == blk3.blockHash - - # base - check chain.headerByNumber(4).expect("OK").number == 4 - check chain.headerByNumber(4).expect("OK").blockHash == blk4.blockHash - - # from cache - check chain.headerByNumber(5).expect("OK").number == 5 - check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash - check chain.validate info & " (9)" - - test "Import after Replay Segment": - const info = "Import after Replay Segment" - let com = env.newCom() - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.validate info & " (1)" - - chain.replaySegment(blk2.header.blockHash) - chain.replaySegment(blk5.header.blockHash) - check chain.validate info & " (2)" - - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - check chain.validate info & " (9)" + suite "ForkedChainRef tests": + var env = setupEnv() + let + cc = env.newCom + genesisHash = cc.genesisHeader.blockHash + genesis = Block.init(cc.genesisHeader, BlockBody()) + baseTxFrame = cc.db.baseTxFrame() + + let + blk1 = baseTxFrame.makeBlk(1, genesis) + blk2 = baseTxFrame.makeBlk(2, blk1) + blk3 = baseTxFrame.makeBlk(3, blk2) + + dbTx = baseTxFrame.txFrameBegin + blk4 = dbTx.makeBlk(4, blk3) + blk5 = dbTx.makeBlk(5, blk4) + blk6 = dbTx.makeBlk(6, blk5) + blk7 = dbTx.makeBlk(7, blk6) + + dbTx.dispose() + + let + B4 = baseTxFrame.makeBlk(4, blk3, 1.byte) + dbTx2 = baseTxFrame.txFrameBegin + B5 = dbTx2.makeBlk(5, B4) + B6 = dbTx2.makeBlk(6, B5) + B7 = dbTx2.makeBlk(7, B6) + + 
dbTx2.dispose()
+
+    let
+      C5 = baseTxFrame.makeBlk(5, blk4, 1.byte)
+      C6 = baseTxFrame.makeBlk(6, C5)
+      C7 = baseTxFrame.makeBlk(7, C6)
+
+    test "newBase == oldBase":
+      const info = "newBase == oldBase"
+      let com = env.newCom()
+
+      var chain = ForkedChainRef.init(com)
+
+      # same header twice
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk1)
+
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      check chain.validate info & " (1)"
+
+      # no parent
+      checkImportBlockErr(chain, blk5)
+
+      check chain.headHash == genesisHash
+      check chain.latestHash == blk3.blockHash
+      check chain.validate info & " (2)"
+
+      # finalized > head -> error
+      checkForkChoiceErr(chain, blk1, blk3)
+      check chain.validate info & " (3)"
+
+      # blk4 is not part of chain
+      checkForkChoiceErr(chain, blk4, blk2)
+
+      # finalized > head -> error
+      checkForkChoiceErr(chain, blk1, blk2)
+
+      # blk4 is not part of chain
+      checkForkChoiceErr(chain, blk2, blk4)
+
+      # finalized < head -> ok
+      checkForkChoice(chain, blk2, blk1)
+      check chain.headHash == blk2.blockHash
+      check chain.latestHash == blk2.blockHash
+      check chain.validate info & " (7)"
+
+      # finalized == head -> ok
+      checkForkChoice(chain, blk2, blk2)
+      check chain.headHash == blk2.blockHash
+      check chain.latestHash == blk2.blockHash
+      check chain.baseNumber == 0'u64
+      check chain.validate info & " (8)"
+
+      # baggage written
+      check chain.wdWritten(blk1) == 1
+      check chain.wdWritten(blk2) == 2
+      check chain.validate info & " (9)"
+
+    test "newBase on activeBranch":
+      const info = "newBase on activeBranch"
+      let com = env.newCom()
+
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+
+      checkImportBlock(chain, blk4)
+      check chain.validate info & " (1)"
+
+      # newbase == head
+      checkForkChoice(chain, blk7, blk6)
+      check chain.validate info & " (2)"
+
+      check chain.headHash == blk7.blockHash
+      check chain.latestHash == blk7.blockHash
+      check chain.baseBranch == chain.activeBranch
+
+      check chain.wdWritten(blk7) == 7
+
+      # head - baseDistance must be persisted
+      checkPersisted(chain, blk3)
+
+      # make sure aristo has not wiped out the baggage
+      check chain.wdWritten(blk3) == 3
+      check chain.validate info & " (9)"
+
+    test "newBase between oldBase and head":
+      const info = "newBase between oldBase and head"
+      let com = env.newCom()
+
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      check chain.validate info & " (1)"
+
+      checkForkChoice(chain, blk7, blk6)
+      check chain.validate info & " (2)"
+
+      check chain.headHash == blk7.blockHash
+      check chain.latestHash == blk7.blockHash
+      check chain.baseBranch == chain.activeBranch
+
+      check chain.wdWritten(blk6) == 6
+      check chain.wdWritten(blk7) == 7
+
+      # head - baseDistance must be persisted
+      checkPersisted(chain, blk3)
+
+      # make sure aristo has not wiped out the baggage
+      check chain.wdWritten(blk3) == 3
+      check chain.validate info & " (9)"
+
+    test "newBase == oldBase, fork and stay on that fork":
+      const info = "newBase == oldBase, fork .."
+ let com = env.newCom() + + var chain = ForkedChainRef.init(com) + checkImportBlock(chain, blk1) + checkImportBlock(chain, blk2) + checkImportBlock(chain, blk3) + checkImportBlock(chain, blk4) + checkImportBlock(chain, blk5) + checkImportBlock(chain, blk6) + checkImportBlock(chain, blk7) + + checkImportBlock(chain, B4) + checkImportBlock(chain, B5) + checkImportBlock(chain, B6) + checkImportBlock(chain, B7) + check chain.validate info & " (1)" + + checkForkChoice(chain, B7, B5) + + check chain.headHash == B7.blockHash + check chain.latestHash == B7.blockHash + check chain.baseNumber == 0'u64 + check chain.branches.len == 2 + + check chain.validate info & " (9)" + + test "newBase move forward, fork and stay on that fork": + const info = "newBase move forward, fork .." + let com = env.newCom() + + var chain = ForkedChainRef.init(com, baseDistance = 3) + checkImportBlock(chain, blk1) + checkImportBlock(chain, blk2) + checkImportBlock(chain, blk3) + checkImportBlock(chain, blk4) + checkImportBlock(chain, blk5) + checkImportBlock(chain, blk6) + checkImportBlock(chain, blk7) + + checkImportBlock(chain, B4) + checkImportBlock(chain, B5) + checkImportBlock(chain, B6) + checkImportBlock(chain, B7) + + checkImportBlock(chain, B4) + check chain.validate info & " (1)" + + checkForkChoice(chain, B6, B4) + check chain.validate info & " (2)" + + check chain.headHash == B6.blockHash + check chain.latestHash == B6.blockHash + check chain.baseNumber == 3'u64 + check chain.branches.len == 2 + check chain.validate info & " (9)" + + test "newBase on shorter canonical arc, remove oldBase branches": + const info = "newBase on shorter canonical, remove oldBase branches" + let com = env.newCom() + + var chain = ForkedChainRef.init(com, baseDistance = 3) + checkImportBlock(chain, blk1) + checkImportBlock(chain, blk2) + checkImportBlock(chain, blk3) + checkImportBlock(chain, blk4) + checkImportBlock(chain, blk5) + checkImportBlock(chain, blk6) + checkImportBlock(chain, blk7) + + checkImportBlock(chain, B4) + checkImportBlock(chain, B5) + checkImportBlock(chain, B6) + checkImportBlock(chain, B7) + check chain.validate info & " (1)" + + checkForkChoice(chain, B7, B6) + check chain.validate info & " (2)" + + check chain.headHash == B7.blockHash + check chain.latestHash == B7.blockHash + check chain.baseNumber == 4'u64 + check chain.branches.len == 1 + check chain.validate info & " (9)" + + test "newBase on curbed non-canonical arc": + const info = "newBase on curbed non-canonical .." + let com = env.newCom() + + var chain = ForkedChainRef.init(com, baseDistance = 5) + checkImportBlock(chain, blk1) + checkImportBlock(chain, blk2) + checkImportBlock(chain, blk3) + checkImportBlock(chain, blk4) + checkImportBlock(chain, blk5) + checkImportBlock(chain, blk6) + checkImportBlock(chain, blk7) + + checkImportBlock(chain, B4) + checkImportBlock(chain, B5) + checkImportBlock(chain, B6) + checkImportBlock(chain, B7) + check chain.validate info & " (1)" + + checkForkChoice(chain, B7, B5) + check chain.validate info & " (2)" + + check chain.headHash == B7.blockHash + check chain.latestHash == B7.blockHash + check chain.baseNumber > 0 + check chain.baseNumber < B4.header.number + check chain.branches.len == 2 + check chain.validate info & " (9)" + + test "newBase == oldBase, fork and return to old chain": + const info = "newBase == oldBase, fork .." 
+ let com = env.newCom() + + var chain = ForkedChainRef.init(com) + checkImportBlock(chain, blk1) + checkImportBlock(chain, blk2) + checkImportBlock(chain, blk3) + checkImportBlock(chain, blk4) + checkImportBlock(chain, blk5) + checkImportBlock(chain, blk6) + checkImportBlock(chain, blk7) + + checkImportBlock(chain, B4) + checkImportBlock(chain, B5) + checkImportBlock(chain, B6) + checkImportBlock(chain, B7) + check chain.validate info & " (1)" + + checkForkChoice(chain, blk7, blk5) + check chain.validate info & " (2)" + + check chain.headHash == blk7.blockHash + check chain.latestHash == blk7.blockHash + check chain.baseNumber == 0'u64 + check chain.validate info & " (9)" + + test "newBase on activeBranch, fork and return to old chain": + const info = "newBase on activeBranch, fork .." + let com = env.newCom() + + var chain = ForkedChainRef.init(com, baseDistance = 3) + checkImportBlock(chain, blk1) + checkImportBlock(chain, blk2) + checkImportBlock(chain, blk3) + checkImportBlock(chain, blk4) + checkImportBlock(chain, blk5) + checkImportBlock(chain, blk6) + checkImportBlock(chain, blk7) + + checkImportBlock(chain, B4) + checkImportBlock(chain, B5) + checkImportBlock(chain, B6) + checkImportBlock(chain, B7) + + checkImportBlock(chain, blk4) + check chain.validate info & " (1)" + + checkForkChoice(chain, blk7, blk5) + check chain.validate info & " (2)" + + check chain.headHash == blk7.blockHash + check chain.latestHash == blk7.blockHash + check chain.baseBranch == chain.activeBranch + check chain.validate info & " (9)" + + test "newBase on shorter canonical arc, discard arc with oldBase" & + " (ign dup block)": + const info = "newBase on shorter canonical .." + let com = env.newCom() + + var chain = ForkedChainRef.init(com, baseDistance = 3) + checkImportBlock(chain, blk1) + checkImportBlock(chain, blk2) + checkImportBlock(chain, blk3) + checkImportBlock(chain, blk4) + checkImportBlock(chain, blk5) + checkImportBlock(chain, blk6) + checkImportBlock(chain, blk7) + + checkImportBlock(chain, B4) + checkImportBlock(chain, B5) + checkImportBlock(chain, B6) + checkImportBlock(chain, B7) + + checkImportBlock(chain, blk4) + check chain.validate info & " (1)" + + checkForkChoice(chain, B7, B5) + check chain.validate info & " (2)" + + check chain.headHash == B7.blockHash + check chain.latestHash == B7.blockHash + check chain.baseNumber == 4'u64 + check chain.branches.len == 1 + check chain.validate info & " (9)" + + test "newBase on longer canonical arc, discard new branch": + const info = "newBase on longer canonical .." 
+      let com = env.newCom()
+
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      check chain.validate info & " (1)"
+
+      checkForkChoice(chain, blk7, blk5)
+      check chain.validate info & " (2)"
+
+      check chain.headHash == blk7.blockHash
+      check chain.latestHash == blk7.blockHash
+      check chain.baseNumber > 0
+      check chain.baseNumber < blk5.header.number
+      check chain.branches.len == 1
+      check chain.validate info & " (9)"
+
+    test "headerByNumber":
+      const info = "headerByNumber"
+      let com = env.newCom()
+
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      check chain.validate info & " (1)"
+
+      checkForkChoice(chain, blk7, blk5)
+      check chain.validate info & " (2)"
+
+      # head of activeBranch (8 is past the head)
+      check chain.headerByNumber(8).isErr
+      check chain.headerByNumber(7).expect("OK").number == 7
+      check chain.headerByNumber(7).expect("OK").blockHash == blk7.blockHash
+
+      # from db
+      check chain.headerByNumber(3).expect("OK").number == 3
+      check chain.headerByNumber(3).expect("OK").blockHash == blk3.blockHash
+
+      # base
+      check chain.headerByNumber(4).expect("OK").number == 4
+      check chain.headerByNumber(4).expect("OK").blockHash == blk4.blockHash
+
+      # from cache
+      check chain.headerByNumber(5).expect("OK").number == 5
+      check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash
+      check chain.validate info & " (9)"
+
+    test "3 branches, alternating imports":
+      const info = "3 branches, alternating imports"
+      let com = env.newCom()
+
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, blk4)
+
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, C5)
+
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, C6)
+
+      checkImportBlock(chain, B7)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, C7)
+      check chain.validate info & " (1)"
+
+      check chain.latestHash == C7.blockHash
+      check chain.latestNumber == 7'u64
+      check chain.branches.len == 3
+
+      checkForkChoice(chain, B7, blk3)
+      check chain.validate info & " (2)"
+      check chain.branches.len == 3
+
+      checkForkChoice(chain, B7, B6)
+      check chain.validate info & " (3)"
+      check chain.branches.len == 1
+
+    test "importing blocks with new CommonRef and FC instance, 3 blocks":
+      const info = "importing blocks with new CommonRef and FC instance, 3 blocks"
+      let com = env.newCom()
+
+      let chain = ForkedChainRef.init(com, baseDistance = 0)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkForkChoice(chain, blk3, blk3)
+      check chain.validate info & " (1)"
+
+      let cc = env.newCom(com.db)
+      let fc = ForkedChainRef.init(cc, baseDistance = 0)
+      check fc.headHash == blk3.blockHash
+      checkImportBlock(fc, blk4)
+      checkForkChoice(fc, blk4, blk4)
+      check fc.validate info & " (2)"
+
+    test "importing blocks with new CommonRef and FC instance, 1 block":
+      const info = "importing blocks with new CommonRef and FC instance, 1 block"
+      let com = env.newCom()
+
+      let chain = ForkedChainRef.init(com, baseDistance = 0)
+      checkImportBlock(chain, blk1)
+      checkForkChoice(chain, blk1, blk1)
+      check chain.validate info & " (1)"
+
+      let cc = env.newCom(com.db)
+      let fc = ForkedChainRef.init(cc, baseDistance = 0)
+      check fc.headHash == blk1.blockHash
+      checkImportBlock(fc, blk2)
+      checkForkChoice(fc, blk2, blk2)
+      check fc.validate info & " (2)"
+
 when isMainModule:
   forkedChainMain()
diff --git a/tests/test_forked_chain/chain_debug.nim b/tests/test_forked_chain/chain_debug.nim
index 4e47b03b9d..6ef5790e8f 100644
--- a/tests/test_forked_chain/chain_debug.nim
+++ b/tests/test_forked_chain/chain_debug.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2024 Status Research & Development GmbH
+# Copyright (c) 2024-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -13,12 +13,13 @@
 {.push raises: [].}
 
 import
-  std/[algorithm, sequtils, sets, strutils, tables],
+  std/[sequtils, tables],
   pkg/chronicles,
   pkg/stew/interval_set,
   ../../nimbus/common,
   ../../nimbus/sync/beacon/worker/helpers,
-  ../../nimbus/core/chain/forked_chain/chain_desc
+  ../../nimbus/core/chain/forked_chain/chain_desc,
+  ../../nimbus/core/chain/forked_chain/chain_branch
 
 logScope: topics = "forked-chain"
 
@@ -27,43 +28,20 @@ logScope: topics = "forked-chain"
 # ------------------------------------------------------------------------------
 
 func header(h: Hash32; c: ForkedChainRef): Header =
-  c.blocks.withValue(h, val):
-    return val.blk.header
-
-func cmp(c: ForkedChainRef; _: type CursorDesc): auto =
-  return func(x,y: CursorDesc): int =
-    result = cmp(x.forkJunction, y.forkJunction)
-    if result == 0:
-      result = cmp(x.hash.header(c).number, y.hash.header(c).number)
-
-func cmp(c: ForkedChainRef; _: type seq[Hash32]): auto =
-  return func(x,y: seq[Hash32]): int =
-    result = cmp(x[0].header(c).number, y[0].header(c).number)
-    if result == 0:
-      result = cmp(x[^1].header(c).number, y[^1].header(c).number)
-
-# ----------------
+  c.hashToBlock.withValue(h, loc):
+    return loc[].header
 
 func baseChains(c: ForkedChainRef): seq[seq[Hash32]] =
-  # find leafs
-  var leafs = c.blocks.pairs.toSeq.mapIt((it[0],it[1].blk.header)).toTable
-  for w in c.blocks.values:
-    leafs.del w.blk.header.parentHash
-  # Assemble separate chain per leaf
-  for (k,v) in leafs.pairs:
-    var
-      q = @[k]
-      w = v.parentHash
-    while true:
-      c.blocks.withValue(w, val):
-        q.add w
-        w = val.blk.header.parentHash
-      do:
-        break
-    result.add q.reversed
-
-func baseChainsSorted(c: ForkedChainRef): seq[seq[Hash32]] =
-  c.baseChains.sorted(c.cmp seq[Hash32])
+  for brc in c.branches:
+    var hs: seq[Hash32]
+    var branch = brc
+    while not branch.isNil:
+      var hss: seq[Hash32]
+      for blk in branch.blocks:
+        hss.add blk.hash
+      hs = hss.concat(hs)
+      branch = branch.parent
+    result.add move(hs)
 
 # ----------------
 
@@ -73,14 +51,6 @@ func cnStr(q: openArray[Hash32]; c: ForkedChainRef): string =
   if a != b:
     result &= "<<" & b.bnStr
 
-func ppImpl[T: Block|Header](q: openArray[T]): string =
-  func number(b: Block): BlockNumber = b.header.number
-  let bns = IntervalSetRef[BlockNumber,uint64].init()
-  for w in q:
-    discard bns.merge(w.number,w.number)
-  let (a,b) = (bns.total, 
q.len.uint64 - bns.total)
-  "{" & bns.increasing.toSeq.mapIt($it).join(",") & "}[#" & $a & "+" & $b & "]"
-
 # ------------------------------------------------------------------------------
 # Public pretty printers
 # ------------------------------------------------------------------------------
 
@@ -92,99 +62,58 @@
 func pp*(b: Block): string = b.bnStr
 func pp*(h: Hash32): string = h.short
 func pp*(d: BlockDesc): string = d.blk.header.pp
 func pp*(d: ptr BlockDesc): string = d[].pp
-
-func pp*(q: openArray[Block]): string = q.ppImpl
-func pp*(q: openArray[Header]): string = q.ppImpl
-
 func pp*(rc: Result[Header,string]): string =
   if rc.isOk: rc.value.pp else: "err(" & rc.error & ")"
 
-# --------------------
-
-func pp*(h: Hash32; c: ForkedChainRef): string =
-  c.blocks.withValue(h, val) do:
-    return val.blk.header.pp
-  if h == c.baseHash:
-    return c.baseHeader.pp
-  h.short
-
-func pp*(d: CursorDesc; c: ForkedChainRef): string =
-  let (a,b) = (d.forkJunction, d.hash.header(c).number)
-  result = a.bnStr
-  if a != b:
-    result &= ".." & (if b == 0: d.hash.pp else: b.pp)
-
-func pp*(d: PivotArc; c: ForkedChainRef): string =
-  "(" & d.pvHeader.pp & "," & d.cursor.pp(c) & ")"
-
-func pp*(q: openArray[CursorDesc]; c: ForkedChainRef): string =
-  "{" & q.sorted(c.cmp CursorDesc).mapIt(it.pp(c)).join(",") & "}"
-
-func pp*(c: ForkedChainRef): string =
-  "(" & c.baseHeader.pp &
-    ",{" & c.baseChainsSorted.mapIt(it.cnStr(c)).join(",") & "}" &
-    "," & c.cursorHeader.pp &
-    "," & c.cursorHeads.pp(c) &
-    "," & (if c.extraValidation: "t" else: "f") &
-    "," & $c.baseDistance &
-    ")"
-
 # ------------------------------------------------------------------------------
 # Public object validators
 # ------------------------------------------------------------------------------
+func headNumber(c: ForkedChainRef): BlockNumber =
+  c.activeBranch.headNumber
+
+func headHash(c: ForkedChainRef): Hash32 =
+  c.activeBranch.headHash
+
+func baseNumber(c: ForkedChainRef): BlockNumber =
+  c.baseBranch.tailNumber
+
+func baseHash(c: ForkedChainRef): Hash32 =
+  c.baseBranch.tailHash
 
 func validate*(c: ForkedChainRef): Result[void,string] =
-  if c.cursorHeader.number < c.baseHeader.number:
-    return err("cursor block number too low")
+  if c.headNumber < c.baseNumber:
+    return err("head block number too low")
 
   # Empty descriptor (mainly used with unit tests)
-  if c.cursorHash == c.baseHash and
-     c.blocks.len == 0 and
-     c.cursorHeads.len == 0:
+  if c.headHash == c.baseHash and
+     c.branches.len == 1 and
+     c.hashToBlock.len == 0:
     return ok()
 
-  # `cursorHeader` must be in the `c.blocks[]` table but `base` must not
-  if not c.blocks.hasKey(c.cursorHash):
-    return err("cursor must be in blocks[] table: " & c.cursorHeader.pp)
-  if c.blocks.hasKey(c.baseHash):
-    return err("base must not be in blocks[] table: " & c.baseHeader.pp)
+  # `head` and `base` must be in the `c.hashToBlock[]`
+  if not c.hashToBlock.hasKey(c.headHash):
+    return err("head must be in hashToBlock[] table: " & $c.headNumber)
+  if not c.hashToBlock.hasKey(c.baseHash):
+    return err("base must be in hashToBlock[] table: " & $c.baseNumber)
 
-  # Base chains must range inside `(base,cursor]`, rooted on `base`
-  var bcHeads: HashSet[Hash32]
+  # Base chains must range inside `(base,head]`, rooted on `base`
   for chain in c.baseChains:
-    if chain[0].header(c).parentHash != c.baseHash:
+    if chain[0] != c.baseHash:
       return err("unbased chain: " & chain.cnStr(c))
-    bcHeads.incl chain[^1]
 
-  # Cursor heads must refer to items of `c.blocks[]`
+  # All branch blocks must be registered in `c.hashToBlock[]`
-  for ch in c.cursorHeads:
-    if not c.blocks.hasKey(ch.hash):
-      return err("stray cursor head: " & ch.pp(c))
-
-    if ch.forkJunction <= c.baseHeader.number:
-      return err("cursor head junction too small: " & ch.pp(c))
-
-    # Get fork junction header
-    var h = ch.hash.header(c)
-    while ch.forkJunction < h.number:
-      c.blocks.withValue(h.parentHash, val):
-        h = val.blk.header
-      do:
-        return err("inconsistent/broken cursor chain " & ch.pp(c))
-
-    # Now: `cn.forkJunction == h.number`, check parent
-    if h.parentHash != c.baseHash and not c.blocks.hasKey(h.parentHash):
-      return err("unaligned junction of cursor chain " & ch.pp(c))
-
-    # Check cursor heads against assembled chain heads
-    if ch.hash notin bcHeads:
-      return err("stale or dup cursor chain " & ch.pp(c))
+  for brc in c.branches:
+    for bd in brc.blocks:
+      if not c.hashToBlock.hasKey(bd.hash):
+        return err("stray block: " & pp(bd))
 
-    bcHeads.excl ch.hash
+    if brc.tailNumber < c.baseNumber:
+      return err("branch junction too small: " & $brc.tailNumber)
 
-  # Each chain must have exactly one cursor head
-  if bcHeads.len != 0:
-    return err("missing cursor chain for head " & bcHeads.toSeq[0].pp(c))
+    let parent = brc.parent
+    if not parent.isNil:
+      if brc.tailNumber < parent.tailNumber:
+        return err("branch junction below parent branch: " & $brc.tailNumber)
 
   ok()
 
@@ -192,7 +121,7 @@ proc validate*(c: ForkedChainRef; info: static[string]): bool {.discardable.} =
   let rc = c.validate()
   if rc.isOk:
     return true
-  error info & ": invalid desc", error=rc.error, c=c.pp
+  error info & ": invalid desc", error=rc.error
 
 # ------------------------------------------------------------------------------
 # End
diff --git a/tests/test_generalstate_json.nim b/tests/test_generalstate_json.nim
index c49615c97a..dcd6c34dd5 100644
--- a/tests/test_generalstate_json.nim
+++ b/tests/test_generalstate_json.nim
@@ -98,6 +98,7 @@ proc testFixtureIndexes(ctx: var TestCtx, testStatusIMPL: var TestStatus) =
       parent = parent,
       header = ctx.header,
       com = com,
+      txFrame = com.db.baseTxFrame(),
       tracer = tracer,
       storeSlotHash = ctx.trace,
     )
diff --git a/tests/test_genesis.nim b/tests/test_genesis.nim
index 4a62d82e9f..a57409ec31 100644
--- a/tests/test_genesis.nim
+++ b/tests/test_genesis.nim
@@ -36,7 +36,7 @@ proc proofOfStake(params: NetworkParams): bool =
     networkId = params.config.chainId.NetworkId,
     params = params)
   let header = com.genesisHeader
-  com.proofOfStake(header)
+  com.proofOfStake(header, com.db.baseTxFrame())
 
 proc genesisTest() =
   suite "Genesis":
@@ -72,7 +72,7 @@ proc customGenesisTest() =
       let genesisHash = hash32"a28d8d73e087a01d09d8cb806f60863652f30b6b6dfa4e0157501ff07d422399"
       check com.genesisHeader.stateRoot == stateRoot
       check com.genesisHeader.blockHash == genesisHash
-      check com.proofOfStake(com.genesisHeader) == false
+      check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
 
     test "Devnet5.json (aka Kiln in all but chainId and TTD)":
       var cg: NetworkParams
@@ -82,7 +82,7 @@ proc customGenesisTest() =
       let genesisHash = hash32"51c7fe41be669f69c45c33a56982cbde405313342d9e2b00d7c91a7b284dd4f8"
       check com.genesisHeader.stateRoot == stateRoot
       check com.genesisHeader.blockHash == genesisHash
-      check com.proofOfStake(com.genesisHeader) == false
+      check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
 
     test "Mainnet shadow fork 1":
       var cg: NetworkParams
@@ -94,7 +94,7 @@ proc customGenesisTest() =
       check com.genesisHeader.stateRoot == stateRoot
       check com.genesisHeader.blockHash == genesisHash
       check com.ttd.get == ttd
-      check com.proofOfStake(com.genesisHeader) == false
+      check 
com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
 
     test "Geth shadow fork 1":
      # parse using geth format should produce the same result with nimbus format
      var cg: NetworkParams
@@ -107,7 +107,7 @@ proc customGenesisTest() =
      check com.genesisHeader.stateRoot == stateRoot
      check com.genesisHeader.blockHash == genesisHash
      check com.ttd.get == ttd
-      check com.proofOfStake(com.genesisHeader) == false
+      check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
 
      check cg.config.mergeNetsplitBlock.isSome
      check cg.config.mergeNetsplitBlock.get == 14660963.BlockNumber
diff --git a/tests/test_getproof_json.nim b/tests/test_getproof_json.nim
index ebaeb1e274..0c4ebb2c11 100644
--- a/tests/test_getproof_json.nim
+++ b/tests/test_getproof_json.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2024 Status Research & Development GmbH
+# Copyright (c) 2024-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -129,9 +129,9 @@ proc getProofJsonMain*() =
       let
         accounts = getGenesisAlloc("tests" / "customgenesis" / file)
         coreDb = newCoreDbRef(DefaultDbMemory)
-        ledger = LedgerRef.init(coreDb)
+        ledger = LedgerRef.init(coreDb.baseTxFrame())
         stateRootHash = setupLedger(accounts, ledger)
-        accountDb = LedgerRef.init(coreDb)
+        accountDb = LedgerRef.init(coreDb.baseTxFrame())
 
       checkProofsForExistingLeafs(accounts, accountDb, stateRootHash)
 
@@ -141,9 +141,9 @@ proc getProofJsonMain*() =
       let
         accounts = getGenesisAlloc("tests" / "customgenesis" / file)
         coreDb = newCoreDbRef(DefaultDbMemory)
-        ledger = LedgerRef.init(coreDb)
+        ledger = LedgerRef.init(coreDb.baseTxFrame())
         stateRootHash = setupLedger(accounts, ledger)
-        accountDb = LedgerRef.init(coreDb)
+        accountDb = LedgerRef.init(coreDb.baseTxFrame())
 
       checkProofsForMissingLeafs(accounts, accountDb, stateRootHash)
diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim
index eb21475161..fc8d77a149 100644
--- a/tests/test_ledger.nim
+++ b/tests/test_ledger.nim
@@ -102,7 +102,7 @@ proc initEnv(): TestEnv =
      conf.networkId,
      conf.networkParams
    )
-    chain = newForkedChain(com, com.genesisHeader)
+    chain = ForkedChainRef.init(com)
 
   TestEnv(
    com     : com,
@@ -146,8 +146,8 @@ proc importBlock(env: TestEnv; blk: Block) =
    raiseAssert "persistBlocks() failed at block #" &
      $blk.header.number & " msg: " & error
 
-proc getLedger(com: CommonRef): LedgerRef =
-  LedgerRef.init(com.db)
+proc getLedger(txFrame: CoreDbTxRef): LedgerRef =
+  LedgerRef.init(txFrame)
 
 func getRecipient(tx: Transaction): Address =
   tx.to.expect("transaction has no recipient")
@@ -213,8 +213,6 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false)
   let eAddr = env.txs[inx].getRecipient
 
   block:
-    let dbTx = env.xdb.ctx.txFrameBegin()
-
     block:
       let accTx = ledger.beginSavepoint
       ledger.modBalance(eAddr)
@@ -226,11 +224,7 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false)
       ledger.modBalance(eAddr)
       ledger.rollback(accTx)
 
-    dbTx.rollback()
-
   block:
-    let dbTx = env.xdb.ctx.txFrameBegin()
-
     block:
       let accTx = ledger.beginSavepoint
       ledger.modBalance(eAddr)
@@ -240,16 +234,11 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false)
 
     ledger.persist()
 
-    dbTx.commit()
-
-
 proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
-  ## Like `runTrial3()` but with four blocks and extra db transaction frames.
+  ## Like `runTrial3()` but with four blocks.
   let eAddr = env.txs[inx].getRecipient
 
   block:
-    let dbTx = env.xdb.ctx.txFrameBegin()
-
     block:
       let accTx = ledger.beginSavepoint
       ledger.modBalance(eAddr)
@@ -273,21 +262,13 @@ proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
       ledger.commit(accTx)
 
     ledger.persist()
 
-    # There must be no dbTx.rollback() here unless `ledger` is
-    # discarded and/or re-initialised.
-    dbTx.commit()
-
   block:
-    let dbTx = env.xdb.ctx.txFrameBegin()
-
     block:
       let accTx = ledger.beginSavepoint
       ledger.modBalance(eAddr)
      ledger.commit(accTx)
 
     ledger.persist()
 
-    dbTx.commit()
-
 # ------------------------------------------------------------------------------
 # Test Runner
 # ------------------------------------------------------------------------------
@@ -346,38 +327,38 @@ proc runLedgerTransactionTests(noisy = true) =
 
     test &"Run {env.txi.len} two-step trials with rollback":
      for n in env.txi:
-        let dbTx = env.xdb.ctx.txFrameBegin()
+        let dbTx = env.xdb.ctx.txFrameBegin(nil)
        defer: dbTx.dispose()
-        let ledger = env.com.getLedger()
+        let ledger = dbTx.getLedger()
        env.runTrial2ok(ledger, n)
 
     test &"Run {env.txi.len} three-step trials with rollback":
      for n in env.txi:
-        let dbTx = env.xdb.ctx.txFrameBegin()
+        let dbTx = env.xdb.ctx.txFrameBegin(nil)
        defer: dbTx.dispose()
-        let ledger = env.com.getLedger()
+        let ledger = dbTx.getLedger()
        env.runTrial3(ledger, n, rollback = true)
 
     test &"Run {env.txi.len} three-step trials with extra db frame rollback" &
        " throwing Exceptions":
      for n in env.txi:
-        let dbTx = env.xdb.ctx.txFrameBegin()
+        let dbTx = env.xdb.ctx.txFrameBegin(nil)
        defer: dbTx.dispose()
-        let ledger = env.com.getLedger()
+        let ledger = dbTx.getLedger()
        env.runTrial3Survive(ledger, n, noisy)
 
     test &"Run {env.txi.len} three-step trials without rollback":
      for n in env.txi:
-        let dbTx = env.xdb.ctx.txFrameBegin()
+        let dbTx = env.xdb.ctx.txFrameBegin(nil)
        defer: dbTx.dispose()
-        let ledger = env.com.getLedger()
+        let ledger = dbTx.getLedger()
        env.runTrial3(ledger, n, rollback = false)
 
     test &"Run {env.txi.len} four-step trials with rollback and db frames":
      for n in env.txi:
-        let dbTx = env.xdb.ctx.txFrameBegin()
+        let dbTx = env.xdb.ctx.txFrameBegin(nil)
        defer: dbTx.dispose()
-        let ledger = env.com.getLedger()
+        let ledger = dbTx.getLedger()
        env.runTrial4(ledger, n, rollback = true)
 
 proc runLedgerBasicOperationsTests() =
@@ -387,7 +368,7 @@ proc runLedgerBasicOperationsTests() =
    var
      memDB = newCoreDbRef DefaultDbMemory
-      ledger {.used.} = LedgerRef.init(memDB)
+      ledger {.used.} = LedgerRef.init(memDB.baseTxFrame())
      address {.used.} = address"0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6"
      code {.used.} = hexToSeqByte("0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6")
      stateRoot {.used.} : Hash32
@@ -444,7 +425,7 @@ proc runLedgerBasicOperationsTests() =
       check y.originalStorage.len == 3
 
     test "Ledger various operations":
-      var ac = LedgerRef.init(memDB)
+      var ac = LedgerRef.init(memDB.baseTxFrame())
       var addr1 = initAddr(1)
 
       check ac.isDeadAccount(addr1) == true
@@ -476,7 +457,7 @@ proc runLedgerBasicOperationsTests() =
       ac.persist()
       stateRoot = ac.getStateRoot()
 
-      var db = LedgerRef.init(memDB)
+      var db = LedgerRef.init(memDB.baseTxFrame())
       db.setBalance(addr1, 1100.u256)
       db.setNonce(addr1, 2)
       db.setCode(addr1, code)
       db.persist()
       check stateRoot == db.getStateRoot()
 
       # Ledger readonly operations using previous hash
-      var ac2 = LedgerRef.init(memDB)
+      var ac2 = LedgerRef.init(memDB.baseTxFrame())
       var addr2 = initAddr(2)
 
       check ac2.getCodeHash(addr2) == 
emptyAcc.codeHash @@ -504,14 +485,14 @@ proc runLedgerBasicOperationsTests() = check ac2.getStateRoot() == stateRoot test "Ledger code retrieval after persist called": - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) var addr2 = initAddr(2) ac.setCode(addr2, code) ac.persist() check ac.getCode(addr2) == code let key = contractHashKey(keccak256(code)) - val = memDB.ctx.getKvt().get(key.toOpenArray).valueOr: EmptyBlob + val = memDB.baseTxFrame().get(key.toOpenArray).valueOr: EmptyBlob check val == code test "accessList operations": @@ -537,7 +518,7 @@ proc runLedgerBasicOperationsTests() = proc accessList(ac: LedgerRef, address, slot: int) {.inline.} = ac.accessList(address.initAddr, slot.u256) - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) ac.accessList(0xaa) ac.accessList(0xbb, 0x01) @@ -579,7 +560,7 @@ proc runLedgerBasicOperationsTests() = check ac.verifySlots(0xdd, 0x04) test "transient storage operations": - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) proc tStore(ac: LedgerRef, address, slot, val: int) = ac.setTransientStorage(address.initAddr, slot.u256, val.u256) @@ -646,7 +627,7 @@ proc runLedgerBasicOperationsTests() = test "ledger contractCollision": # use previous hash - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) let addr2 = initAddr(2) check ac.contractCollision(addr2) == false @@ -667,7 +648,7 @@ proc runLedgerBasicOperationsTests() = check ac.contractCollision(addr4) == true test "Ledger storage iterator": - var ac = LedgerRef.init(memDB, storeSlotHash = true) + var ac = LedgerRef.init(memDB.baseTxFrame(), storeSlotHash = true) let addr2 = initAddr(2) ac.setStorage(addr2, 1.u256, 2.u256) ac.setStorage(addr2, 2.u256, 3.u256) diff --git a/tests/test_precompiles.nim b/tests/test_precompiles.nim index 84ef1c01f4..d0bc10d750 100644 --- a/tests/test_precompiles.nim +++ b/tests/test_precompiles.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) @@ -75,7 +75,8 @@ proc testFixture(fixtures: JsonNode, testStatusIMPL: var TestStatus) = vmState = BaseVMState.new( Header(number: 1'u64, stateRoot: emptyRlpHash), Header(), - com + com, + com.db.baseTxFrame() ) case toLowerAscii(label) diff --git a/tests/test_rpc.nim b/tests/test_rpc.nim index 7552387a51..e64786bb6a 100644 --- a/tests/test_rpc.nim +++ b/tests/test_rpc.nim @@ -93,10 +93,10 @@ proc verifySlotProof(trustedStorageRoot: Hash32, slot: StorageProof): MptProofVe key, value) -proc persistFixtureBlock(chainDB: CoreDbRef) = +proc persistFixtureBlock(chainDB: CoreDbTxRef) = let header = getBlockHeader4514995() # Manually inserting header to avoid any parent checks - discard chainDB.ctx.getKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header)) + discard chainDB.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header)) chainDB.addBlockNumberToHashLookup(header.number, header.blockHash) chainDB.persistTransactions(header.number, header.txRoot, getBlockBody4514995().transactions) chainDB.persistReceipts(header.receiptsRoot, getReceipts4514995()) @@ -226,6 +226,7 @@ proc generateBlock(env: var TestEnv) = com = env.com xp = env.txPool ctx = env.ctx + txFrame = 
com.db.baseTxFrame() acc = ctx.am.getAccount(signer).tryGet() tx1 = env.makeTx(acc.privateKey, zeroAddress, 1.u256, 30_000_000_000'u64) tx2 = env.makeTx(acc.privateKey, zeroAddress, 2.u256, 30_000_000_100'u64) @@ -254,7 +255,7 @@ proc generateBlock(env: var TestEnv) = xp.removeNewBlockTxs(blk) - com.db.persistFixtureBlock() + txFrame.persistFixtureBlock() env.txHash = tx1.rlpHash env.blockHash = blk.header.blockHash diff --git a/tests/test_tracer_json.nim b/tests/test_tracer_json.nim index cb46dfc7f6..f061b87b70 100644 --- a/tests/test_tracer_json.nim +++ b/tests/test_tracer_json.nim @@ -35,37 +35,37 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) = predRoot: Hash32 # from predecessor header txRoot: Hash32 # header with block number `num` rcptRoot: Hash32 # ditto - let - adb = cdb.ctx.mpt # `Aristo` db - kdb = cdb.ctx.kvt # `Kvt` db - ps = PartStateRef.init adb # Partial DB descriptor - - # Fill KVT and collect `proof` data - for (k,v) in jKvp.pairs: - let - key = hexToSeqByte(k) - val = hexToSeqByte(v.getStr()) - if key.len == 32: - doAssert key == val.keccak256.data - if val != @[0x80u8]: # Exclude empty item - proof.add val - else: - if key[0] == 0: - try: - # Pull our particular header fields (if possible) - let header = rlp.decode(val, Header) - if header.number == num: - txRoot = header.txRoot - rcptRoot = header.receiptsRoot - elif header.number == num-1: - predRoot = header.stateRoot - except RlpError: - discard - check kdb.put(key, val).isOk - - # Set up production MPT - ps.partPut(proof, AutomaticPayload).isOkOr: - raiseAssert info & ": partPut => " & $error + # let + # adb = cdb.ctx.mpt # `Aristo` db + # kdb = cdb.ctx.kvt # `Kvt` db + # ps = PartStateRef.init cdb.baseTxFrame # Partial DB descriptor + + # # Fill KVT and collect `proof` data + # for (k,v) in jKvp.pairs: + # let + # key = hexToSeqByte(k) + # val = hexToSeqByte(v.getStr()) + # if key.len == 32: + # doAssert key == val.keccak256.data + # if val != @[0x80u8]: # Exclude empty item + # proof.add val + # else: + # if key[0] == 0: + # try: + # # Pull our particular header fields (if possible) + # let header = rlp.decode(val, Header) + # if header.number == num: + # txRoot = header.txRoot + # rcptRoot = header.receiptsRoot + # elif header.number == num-1: + # predRoot = header.stateRoot + # except RlpError: + # discard + # check kdb.put(key, val).isOk + + # # Set up production MPT + # ps.partPut(proof, AutomaticPayload).isOkOr: + # raiseAssert info & ": partPut => " & $error # TODO code needs updating after removal of generic payloads # # Handle transaction sub-tree @@ -112,8 +112,8 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) = # for (rvid,key) in ps.vkPairs: # adb.layersPutKey(rvid, key) - ps.check().isOkOr: - raiseAssert info & ": check => " & $error + # ps.check().isOkOr: + # raiseAssert info & ": check => " & $error #echo ">>> preLoadAristoDb (9)", # "\n ps\n ", ps.pp(byKeyOk=false,byVidOk=false), @@ -141,7 +141,7 @@ proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: C # Some hack for `Aristo` using the `snap` protocol proof-loader memoryDB.preLoadAristoDb(state, blockNumber) - var blk = com.db.getEthBlock(blockNumber).expect("eth block exists") + var blk = com.db.baseTxFrame().getEthBlock(blockNumber).expect("eth block exists") let txTraces = traceTransactions(com, blk.header, blk.transactions) let stateDump = dumpBlockState(com, blk) diff --git a/tests/test_txpool.nim b/tests/test_txpool.nim index 2af6d692e1..ae41680851 100644 --- 
a/tests/test_txpool.nim +++ b/tests/test_txpool.nim @@ -455,7 +455,7 @@ proc txPoolMain*() = xp.checkImportBlock(bundle, 0) let - sdb = LedgerRef.init(com.db) + sdb = LedgerRef.init(chain.latestTxFrame) val = sdb.getStorage(recipient, slot) randao = Bytes32(val.toBytesBE) fee = sdb.getBalance(feeRecipient) @@ -493,7 +493,7 @@ proc txPoolMain*() = xp.checkImportBlock(bundle, 0) let - sdb = LedgerRef.init(com.db) + sdb = LedgerRef.init(chain.latestTxFrame) val = sdb.getStorage(recipient, slot) randao = Bytes32(val.toBytesBE) bal = sdb.getBalance(feeRecipient) @@ -526,7 +526,7 @@ proc txPoolMain*() = check com.syncCurrent == lastNumber + numBlocks let head = chain.headerByNumber(com.syncCurrent).expect("block header exists") - sdb = LedgerRef.init(com.db) + sdb = LedgerRef.init(chain.latestTxFrame) expected = u256(txPerblock * numBlocks) * amount balance = sdb.getBalance(recipient214) check balance == expected diff --git a/tools/evmstate/evmstate.nim b/tools/evmstate/evmstate.nim index f4f2c8a802..958e0767cf 100644 --- a/tools/evmstate/evmstate.nim +++ b/tools/evmstate/evmstate.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -117,6 +117,7 @@ proc runExecution(ctx: var StateContext, conf: StateConf, pre: JsonNode): StateR parent = ctx.parent, header = ctx.header, com = com, + txFrame = com.db.baseTxFrame(), tracer = tracer) var gasUsed: GasInt diff --git a/tools/evmstate/helpers.nim b/tools/evmstate/helpers.nim index 2d0c020fc2..c168568d3f 100644 --- a/tools/evmstate/helpers.nim +++ b/tools/evmstate/helpers.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -10,9 +10,7 @@ import std/[json, strutils], - eth/common/keys, - eth/common/headers, - eth/common/transactions, + eth/common/[base, keys, headers, transactions], stint, stew/byteutils, ../../nimbus/transaction, @@ -58,7 +56,7 @@ proc fromJson(T: type PrivateKey, n: JsonNode): PrivateKey = removePrefix(secretKey, "0x") PrivateKey.fromHex(secretKey).tryGet() -proc fromJson(T: type AccessList, n: JsonNode): AccessList = +proc fromJson(T: type transactions.AccessList, n: JsonNode): transactions.AccessList = if n.kind == JNull: return diff --git a/tools/t8n/transition.nim b/tools/t8n/transition.nim index 391a0002fc..03c20272cc 100644 --- a/tools/t8n/transition.nim +++ b/tools/t8n/transition.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -549,6 +549,7 @@ proc transitionAction*(ctx: var TransContext, conf: T8NConf) = parent = parent, header = header, com = com, + txFrame = com.db.baseTxFrame(), storeSlotHash = true )
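A minimal sketch (not part of the patch) of the API migration these test and tool
diffs repeat: ledgers, VM state and header/block lookups now bind to an explicit
`CoreDbTxRef` frame rather than the bare `CoreDbRef`. The module paths and the
`sketch` wrapper below are assumptions for illustration only; the calls themselves
(`baseTxFrame`, `txFrameBegin(nil)`, `LedgerRef.init`, `persist`, `commit`,
`dispose`, `proofOfStake(header, txFrame)`) all appear in the hunks above.

  import
    ../nimbus/db/core_db,   # paths assumed, mirroring the tests' relative imports
    ../nimbus/db/ledger

  proc sketch() =
    let memDB = newCoreDbRef DefaultDbMemory

    # Ledger construction: was `LedgerRef.init(memDB)`, now rooted in a frame.
    let ledger = LedgerRef.init(memDB.baseTxFrame())
    ledger.persist()

    # Child frames take an explicit parent; `nil` branches off the base frame.
    let txFrame = memDB.ctx.txFrameBegin(nil)
    defer: txFrame.dispose()

    # Reads and writes go through the frame (e.g. `getEthBlock`,
    # `persistTransactions`); changes become visible on commit:
    txFrame.commit()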