From f90166311a61abdd47e44b719cb18b6d75c0819a Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Tue, 1 Oct 2024 15:20:23 -0600 Subject: [PATCH 01/11] Add testing feature to mc-db dev dependency --- crates/client/block_import/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/client/block_import/Cargo.toml b/crates/client/block_import/Cargo.toml index f1ca6e95d..980a4eec7 100644 --- a/crates/client/block_import/Cargo.toml +++ b/crates/client/block_import/Cargo.toml @@ -38,3 +38,4 @@ starknet_api = { workspace = true } [dev-dependencies] tempfile = { workspace = true } rstest = { workspace = true } +mc-db = { workspace = true, features = ["testing"] } From 49e677dc4f28d288b56ccf89e626fad2837c489c Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Tue, 1 Oct 2024 15:32:52 -0600 Subject: [PATCH 02/11] Mention #294 in CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f6756451..08369491e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Next release +- fix:(tests): Add testing feature to mc-db dev dependency (#294) - feat(cli): launcher script and release workflows - fix: cleaned cli settings for sequencer, devnet and full - feat: move to karnot runner From c3c77767af7f3225424a20f65daf55712aeb47d5 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Tue, 1 Oct 2024 16:03:26 -0600 Subject: [PATCH 03/11] Fix changelog PR reference --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08369491e..73f34bf3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## Next release -- fix:(tests): Add testing feature to mc-db dev dependency (#294) +- fix:(tests): Add testing feature to mc-db dev dependency (#295) - feat(cli): launcher script and release workflows - fix: cleaned cli settings for sequencer, devnet and full - feat: move to karnot runner From 6be007713887b80e69e1aaa4085199a9af6dbce1 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Tue, 1 Oct 2024 17:03:31 -0600 Subject: [PATCH 04/11] Basic reorg detection and test case --- crates/client/block_import/src/types.rs | 7 ++ .../client/block_import/src/verify_apply.rs | 74 ++++++++++++++++++- 2 files changed, 78 insertions(+), 3 deletions(-) diff --git a/crates/client/block_import/src/types.rs b/crates/client/block_import/src/types.rs index e19ff2d25..f671eacc8 100644 --- a/crates/client/block_import/src/types.rs +++ b/crates/client/block_import/src/types.rs @@ -215,3 +215,10 @@ pub struct BlockImportResult { #[derive(Clone, Debug, Eq, PartialEq)] pub struct PendingBlockImportResult {} + +/// Output of a [`crate::reorg`] operation. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ReorgResult { + pub from: Felt, + pub to: Felt, +} diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index f21d28f9d..039209ac3 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -1,6 +1,6 @@ use crate::{ BlockImportError, BlockImportResult, BlockValidationContext, PendingBlockImportResult, PreValidatedBlock, - PreValidatedPendingBlock, RayonPool, UnverifiedHeader, ValidatedCommitments, + PreValidatedPendingBlock, RayonPool, ReorgResult, UnverifiedHeader, ValidatedCommitments, }; use itertools::Itertools; use mc_db::{MadaraBackend, MadaraStorageError}; @@ -64,8 +64,26 @@ pub fn verify_apply_inner( validation: BlockValidationContext, ) -> Result { // Check block number and block hash against db - let (block_number, parent_block_hash) = - check_parent_hash_and_num(backend, block.header.parent_block_hash, block.unverified_block_number, &validation)?; + let (block_number, parent_block_hash) = match check_parent_hash_and_num( + backend, + block.header.parent_block_hash, + block.unverified_block_number, + &validation, + ) { + Ok((block_number, parent_block_hash)) => (block_number, parent_block_hash), + Err(BlockImportError::ParentHash { got, expected }) => { + let reorg_result = reorg(expected, got)?; + log::warn!("Reorg from {} to {}", reorg_result.from, reorg_result.to); + // attempt check again + check_parent_hash_and_num( + backend, + block.header.parent_block_hash, + block.unverified_block_number, + &validation, + )? + } + Err(err) => return Err(err), + }; // Update contract and its storage tries let global_state_root = update_tries(backend, &block, &validation, block_number)?; @@ -309,6 +327,10 @@ fn block_hash( Ok((block_hash, header)) } +fn reorg(from: Felt, to: Felt) -> Result { + todo!("Should perform reorg here, from: {}, to: {}", from, to); +} + #[cfg(test)] mod verify_apply_tests { use super::*; @@ -690,6 +712,52 @@ mod verify_apply_tests { assert!(matches!(result.unwrap_err(), BlockImportError::LatestBlockN { .. })); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); } + + /// TODO: document + #[rstest] + #[tokio::test] + async fn test_verify_apply_reorg(setup_test_backend: Arc) { + let backend = setup_test_backend; + let mut header = create_dummy_header(); + header.block_number = 0; + let pending_block = finalized_block_zero(header); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + + // insert a block at height 1 that will be reorged away from + let mut header = create_dummy_header(); + header.parent_block_hash = felt!("0x12345"); + header.block_number = 1; + backend + .store_block( + MadaraMaybePendingBlock { + info: MadaraMaybePendingBlockInfo::NotPending(MadaraBlockInfo { + header: header.clone(), + block_hash: felt!("0x1a"), + // get tx hashes from receipts, they have been validated in pre_validate. 
+ tx_hashes: Default::default(), + }), + inner: Default::default(), + }, + finalized_state_diff_zero(), + vec![], + ) + .unwrap(); + + assert_eq!(backend.get_latest_block_n().unwrap(), Some(1)); + + let mut block = create_dummy_block(); + block.header.parent_block_hash = Some(felt!("0x12345")); + block.unverified_global_state_root = Some(felt!("0x0")); + block.unverified_block_hash = Some(felt!("0x1b")); + let validation = create_validation_context(false); + + let _result = verify_apply_inner(&backend, block, validation.clone()); + + let mabye_block_1_hash = backend.get_block_hash(&BlockId::Number(1)).unwrap(); + assert_eq!(mabye_block_1_hash, Some(felt!("0x1b"))); + + assert_eq!(backend.get_latest_block_n().unwrap(), Some(1)); + } } mod verify_apply_pending_tests { From 9b7c8978d34e216a7fe3e81d58539193fd10c9fc Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Wed, 2 Oct 2024 10:25:19 -0600 Subject: [PATCH 05/11] Incremental --- .../client/block_import/src/verify_apply.rs | 63 +++++++++++++++---- crates/client/db/src/lib.rs | 4 +- 2 files changed, 53 insertions(+), 14 deletions(-) diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index 039209ac3..da589bbf8 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -2,6 +2,7 @@ use crate::{ BlockImportError, BlockImportResult, BlockValidationContext, PendingBlockImportResult, PreValidatedBlock, PreValidatedPendingBlock, RayonPool, ReorgResult, UnverifiedHeader, ValidatedCommitments, }; +use bonsai_trie::id::BasicId; use itertools::Itertools; use mc_db::{MadaraBackend, MadaraStorageError}; use mp_block::{ @@ -71,9 +72,9 @@ pub fn verify_apply_inner( &validation, ) { Ok((block_number, parent_block_hash)) => (block_number, parent_block_hash), - Err(BlockImportError::ParentHash { got, expected }) => { - let reorg_result = reorg(expected, got)?; - log::warn!("Reorg from {} to {}", reorg_result.from, reorg_result.to); + Err(BlockImportError::ParentHash { got: _, expected: _ }) => { + let reorg_result = reorg(backend, &block)?; + log::warn!("Reorg from 0x{:x} to 0x{:x}", reorg_result.from, reorg_result.to); // attempt check again check_parent_hash_and_num( backend, @@ -178,6 +179,13 @@ fn check_parent_hash_and_num( (0, Felt::ZERO) }; + // TODO: this originally was checked after block number, but we need to check this first to detect a reorg. 
+ if let Some(parent_block_hash) = parent_block_hash { + if parent_block_hash != expected_parent_block_hash && !validation.ignore_block_order { + return Err(BlockImportError::ParentHash { expected: expected_parent_block_hash, got: parent_block_hash }); + } + } + let block_number = if let Some(block_n) = unverified_block_number { if block_n != expected_block_number && !validation.ignore_block_order { return Err(BlockImportError::LatestBlockN { expected: expected_block_number, got: block_n }); @@ -187,12 +195,6 @@ fn check_parent_hash_and_num( expected_block_number }; - if let Some(parent_block_hash) = parent_block_hash { - if parent_block_hash != expected_parent_block_hash && !validation.ignore_block_order { - return Err(BlockImportError::ParentHash { expected: expected_parent_block_hash, got: parent_block_hash }); - } - } - Ok((block_number, expected_parent_block_hash)) } @@ -327,8 +329,40 @@ fn block_hash( Ok((block_hash, header)) } -fn reorg(from: Felt, to: Felt) -> Result { - todo!("Should perform reorg here, from: {}, to: {}", from, to); +// TODO: document +fn reorg( + backend: &MadaraBackend, + block: &PreValidatedBlock, +) -> Result { + + // TODO: return proper error + let block_number = block.unverified_block_number.expect("Can't reorg without block number"); + let block_id = BasicId::new(block_number); + + // TODO: the error type returned from revert_to is slightly different from others, so + // make_db_error() doesn't work + + backend + .contract_trie() + .revert_to(block_id) + .map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting contract trie: {}", error))))?; + + backend + .contract_storage_trie() + .revert_to(block_id) + .map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting contract storage trie: {}", error))))?; + + backend + .class_trie() + .revert_to(block_id) + .map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting class trie: {}", error))))?; + + // TODO: proper results + Ok(ReorgResult { + from: Default::default(), + to: Default::default(), + }) + } #[cfg(test)] @@ -723,6 +757,9 @@ mod verify_apply_tests { let pending_block = finalized_block_zero(header); backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + // TODO: need to call update_tries, but this takes PreValidatedBlock, not MadaraMaybePendingBlocks... + // update_tries(&backend, ...) 
+ // insert a block at height 1 that will be reorged away from let mut header = create_dummy_header(); header.parent_block_hash = felt!("0x12345"); @@ -747,11 +784,13 @@ mod verify_apply_tests { let mut block = create_dummy_block(); block.header.parent_block_hash = Some(felt!("0x12345")); + block.unverified_block_number = Some(1); block.unverified_global_state_root = Some(felt!("0x0")); block.unverified_block_hash = Some(felt!("0x1b")); let validation = create_validation_context(false); - let _result = verify_apply_inner(&backend, block, validation.clone()); + let result = verify_apply_inner(&backend, block, validation.clone()); + assert!(result.is_ok(), "verify_apply_inner failed: {:?}", result.err()); let mabye_block_1_hash = backend.get_block_hash(&BlockId::Number(1)).unwrap(); assert_eq!(mabye_block_1_hash, Some(felt!("0x1b"))); diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index c76f0a7df..2e732afd7 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -481,8 +481,8 @@ impl MadaraBackend { let bonsai = BonsaiStorage::new( BonsaiDb::new(&self.db, map), BonsaiStorageConfig { - max_saved_trie_logs: Some(0), - max_saved_snapshots: Some(0), + max_saved_trie_logs: Some(128), + max_saved_snapshots: Some(128), snapshot_interval: u64::MAX, }, ) From 2ad9e90e78b9d70111ca59cbefb32c3b793695af Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Wed, 2 Oct 2024 15:24:55 -0600 Subject: [PATCH 06/11] Use verify_apply_inner to apply blocks in test --- .../client/block_import/src/verify_apply.rs | 63 +++++++------------ 1 file changed, 24 insertions(+), 39 deletions(-) diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index da589bbf8..2dce12f8d 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -752,49 +752,34 @@ mod verify_apply_tests { #[tokio::test] async fn test_verify_apply_reorg(setup_test_backend: Arc) { let backend = setup_test_backend; - let mut header = create_dummy_header(); - header.block_number = 0; - let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); - - // TODO: need to call update_tries, but this takes PreValidatedBlock, not MadaraMaybePendingBlocks... - // update_tries(&backend, ...) - - // insert a block at height 1 that will be reorged away from - let mut header = create_dummy_header(); - header.parent_block_hash = felt!("0x12345"); - header.block_number = 1; - backend - .store_block( - MadaraMaybePendingBlock { - info: MadaraMaybePendingBlockInfo::NotPending(MadaraBlockInfo { - header: header.clone(), - block_hash: felt!("0x1a"), - // get tx hashes from receipts, they have been validated in pre_validate. 
- tx_hashes: Default::default(), - }), - inner: Default::default(), - }, - finalized_state_diff_zero(), - vec![], - ) - .unwrap(); - - assert_eq!(backend.get_latest_block_n().unwrap(), Some(1)); - - let mut block = create_dummy_block(); - block.header.parent_block_hash = Some(felt!("0x12345")); - block.unverified_block_number = Some(1); - block.unverified_global_state_root = Some(felt!("0x0")); - block.unverified_block_hash = Some(felt!("0x1b")); let validation = create_validation_context(false); - let result = verify_apply_inner(&backend, block, validation.clone()); - assert!(result.is_ok(), "verify_apply_inner failed: {:?}", result.err()); + let mut block_0 = create_dummy_block(); + // block_0.unverified_block_hash = Some(felt!("0xf0")); + block_0.unverified_block_number = Some(0); + block_0.header.parent_block_hash = None; + block_0.unverified_global_state_root = Some(felt!("0x0")); + let block_0_import = verify_apply_inner(&backend, block_0, validation.clone()).expect("verify_apply_inner failed"); + println!("block_0 hash: {:x}", block_0_import.block_hash); + + // add a block that will be reorged away from + let mut block_1a = create_dummy_block(); + block_1a.unverified_block_number = Some(1); + block_1a.unverified_global_state_root = Some(felt!("0x0")); + block_1a.header.parent_block_hash = Some(block_0_import.block_hash); + let block_1a_import = verify_apply_inner(&backend, block_1a, validation.clone()).expect("verify_apply_inner failed"); + println!("block_1a hash: {:x}", block_1a_import.block_hash); + + // reorg from 1a to 1b + let mut block_1b = create_dummy_block(); + block_1b.unverified_block_number = Some(1); + block_1b.unverified_global_state_root = Some(felt!("0x0")); + block_1b.header.parent_block_hash = Some(block_0_import.block_hash); + let block_1b_import = verify_apply_inner(&backend, block_1b, validation.clone()).expect("verify_apply_inner failed"); + println!("block_1b hash: {:x}", block_1b_import.block_hash); let mabye_block_1_hash = backend.get_block_hash(&BlockId::Number(1)).unwrap(); - assert_eq!(mabye_block_1_hash, Some(felt!("0x1b"))); - + assert_eq!(mabye_block_1_hash, Some(block_1b_import.block_hash)); assert_eq!(backend.get_latest_block_n().unwrap(), Some(1)); } } From 1d5184d37233cebf145e82950ef36ac2defd22d8 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Wed, 2 Oct 2024 15:25:30 -0600 Subject: [PATCH 07/11] Hack to init BonsaiStorage with id_queue height --- crates/client/db/src/lib.rs | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index 2e732afd7..9587d1edb 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -478,14 +478,35 @@ impl MadaraBackend { &self, map: DatabaseKeyMapping, ) -> BonsaiStorage, H> { - let bonsai = BonsaiStorage::new( - BonsaiDb::new(&self.db, map), - BonsaiStorageConfig { - max_saved_trie_logs: Some(128), - max_saved_snapshots: Some(128), - snapshot_interval: u64::MAX, - }, - ) + // TODO: Bonsai doesn't load the its id_queue on new. Instead, it requires the caller + // to pass the highest value in. In our case, it should be equal to the latest block, + // so we pass that in if we can obtain it. + // Bonsai does, however, store the id_queue in its db, so I believe it should be able + // to load this... 
+ // TODO: unwrap + let block_n = self.get_latest_block_n().unwrap(); + let identifiers: Vec> = Default::default(); + let bonsai = if let Some(block_n) = block_n { + BonsaiStorage::new_from_transactional_state( + BonsaiDb::new(&self.db, map), + BonsaiStorageConfig { + max_saved_trie_logs: Some(128), + max_saved_snapshots: Some(128), + snapshot_interval: u64::MAX, + }, + BasicId::new(block_n), + identifiers, + ) + } else { + BonsaiStorage::new( + BonsaiDb::new(&self.db, map), + BonsaiStorageConfig { + max_saved_trie_logs: Some(128), + max_saved_snapshots: Some(128), + snapshot_interval: u64::MAX, + } + ) + } // TODO(bonsai-trie): change upstream to reflect that. .expect("New bonsai storage can never error"); From ea59b98156d360f3819160046147e367cee431e1 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Fri, 8 Nov 2024 18:01:06 -0700 Subject: [PATCH 08/11] WIP reverting block db --- .../client/block_import/src/verify_apply.rs | 3 ++ crates/client/db/src/block_db.rs | 53 +++++++++++++++++++ crates/client/db/src/storage_updates.rs | 4 ++ 3 files changed, 60 insertions(+) diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index c5789db50..efe04a7a7 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -367,6 +367,9 @@ fn reorg( .revert_to(block_id) .map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting class trie: {}", error))))?; + backend.revert_to(block_number.checked_sub(1).unwrap()) + .map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting block db: {}", error))))?; + // TODO: proper results Ok(ReorgResult { from: Default::default(), diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index 44d368140..b9ddcde14 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -281,6 +281,59 @@ impl MadaraBackend { Ok(()) } + pub(crate) fn block_db_revert(&self, revert_to: u64) -> Result<()> { + let mut tx = WriteBatchWithTransaction::default(); + + // TODO: this was grouped with the other columns in `fn block_db_store_block()`, we should + // probably clean it up if possible (or be sure some other mechanism will do so) + // + // if we have the block struct with the txns from this block, it should be trivial to handle + // + // let tx_hash_to_block_n = self.db.get_column(Column::TxHashToBlockN); + + let block_hash_to_block_n = self.db.get_column(Column::BlockHashToBlockN); + let block_n_to_block = self.db.get_column(Column::BlockNToBlockInfo); + let block_n_to_block_inner = self.db.get_column(Column::BlockNToBlockInner); + let block_n_to_state_diff = self.db.get_column(Column::BlockNToStateDiff); + let meta = self.db.get_column(Column::BlockStorageMeta); + + // clear pending + tx.delete_cf(&meta, ROW_PENDING_INFO); + tx.delete_cf(&meta, ROW_PENDING_INNER); + tx.delete_cf(&meta, ROW_PENDING_STATE_UPDATE); + + let latest_block_n = self.get_latest_block_n()?.unwrap(); // TODO: unwrap + println!("reverting to {} from {}", revert_to, latest_block_n); + for block_n in (revert_to + 1 .. 
latest_block_n + 1).rev() { + println!("reverting block {}", block_n); + + let block_n_encoded = bincode::serialize(&block_n)?; + + let res = self.db.get_cf(&block_n_to_block, &block_n_encoded)?; + let block_info: MadaraBlockInfo = bincode::deserialize(&res.unwrap())?; // TODO: unwrap + + let block_hash_encoded = bincode::serialize(&block_info.block_hash)?; + + println!(" - original block hash: {:x}", block_info.block_hash); + println!(" - block height/hash: {} => {:x?}", block_n, block_hash_encoded); + + tx.delete_cf(&block_n_to_block, &block_n_encoded); + tx.delete_cf(&block_hash_to_block_n, &block_hash_encoded); + tx.delete_cf(&block_n_to_block_inner, &block_n_encoded); + tx.delete_cf(&block_n_to_state_diff, &block_n_encoded); + + } + + // TODO: now that we cleared out all the blocks, we need to grab the next parent + // and set it up for e.g. the latest block + + let mut writeopts = WriteOptions::new(); + writeopts.disable_wal(true); + self.db.write_opt(tx, &writeopts)?; + + Ok(()) + } + // Convenience functions pub(crate) fn id_to_storage_type(&self, id: &BlockId) -> Result> { diff --git a/crates/client/db/src/storage_updates.rs b/crates/client/db/src/storage_updates.rs index f74ed0d4d..c4252140e 100644 --- a/crates/client/db/src/storage_updates.rs +++ b/crates/client/db/src/storage_updates.rs @@ -81,6 +81,10 @@ impl MadaraBackend { r1.and(r2).and(r3) } + pub fn revert_to(&self, revert_to: u64) -> Result<(), MadaraStorageError> { + self.block_db_revert(revert_to) + } + pub fn clear_pending_block(&self) -> Result<(), MadaraStorageError> { self.block_db_clear_pending()?; self.contract_db_clear_pending()?; From 76b1bb99c6e85191530fe473d68c1169233331f6 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Fri, 8 Nov 2024 18:21:35 -0700 Subject: [PATCH 09/11] Set ROW_SYNC_TIP at the end of reorg --- crates/client/db/src/block_db.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index b9ddcde14..d8b57dcab 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -323,9 +323,9 @@ impl MadaraBackend { tx.delete_cf(&block_n_to_state_diff, &block_n_encoded); } - - // TODO: now that we cleared out all the blocks, we need to grab the next parent - // and set it up for e.g. the latest block + + let latest_block_n_encoded = bincode::serialize(&revert_to)?; + tx.put_cf(&meta, ROW_SYNC_TIP, latest_block_n_encoded); let mut writeopts = WriteOptions::new(); writeopts.disable_wal(true); From e7b1253450e3d85d31be00b53a08a146c3b54f16 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Fri, 15 Nov 2024 15:20:39 -0700 Subject: [PATCH 10/11] Don't detect reorg during validate_apply_inner --- .../client/block_import/src/verify_apply.rs | 182 ++++++++++++++---- crates/client/db/src/block_db.rs | 16 +- 2 files changed, 154 insertions(+), 44 deletions(-) diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index efe04a7a7..95fb91e5c 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -82,6 +82,7 @@ pub fn verify_apply_inner( &validation, ) { Ok((block_number, parent_block_hash)) => (block_number, parent_block_hash), + /* Err(BlockImportError::ParentHash { got: _, expected: _ }) => { let reorg_result = reorg(backend, &block)?; log::warn!("Reorg from 0x{:x} to 0x{:x}", reorg_result.from, reorg_result.to); @@ -93,6 +94,7 @@ pub fn verify_apply_inner( &validation, )? 
} + */ Err(err) => return Err(err), }; @@ -190,6 +192,7 @@ fn check_parent_hash_and_num( }; // TODO: this originally was checked after block number, but we need to check this first to detect a reorg. + // TODO: this fn shouldn't be responsible for reorg detection anyway... if let Some(parent_block_hash) = parent_block_hash { if parent_block_hash != expected_parent_block_hash && !validation.ignore_block_order { return Err(BlockImportError::ParentHash { expected: expected_parent_block_hash, got: parent_block_hash }); @@ -370,6 +373,8 @@ fn reorg( backend.revert_to(block_number.checked_sub(1).unwrap()) .map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting block db: {}", error))))?; + // TODO: should reorg actually append new blocks? if not, it should be renamed to `revert_to` + // TODO: proper results Ok(ReorgResult { from: Default::default(), @@ -759,42 +764,6 @@ mod verify_apply_tests { assert!(matches!(result.unwrap_err(), BlockImportError::LatestBlockN { .. })); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); } - - /// TODO: document - #[rstest] - #[tokio::test] - async fn test_verify_apply_reorg(setup_test_backend: Arc) { - let backend = setup_test_backend; - let validation = create_validation_context(false); - - let mut block_0 = create_dummy_block(); - // block_0.unverified_block_hash = Some(felt!("0xf0")); - block_0.unverified_block_number = Some(0); - block_0.header.parent_block_hash = None; - block_0.unverified_global_state_root = Some(felt!("0x0")); - let block_0_import = verify_apply_inner(&backend, block_0, validation.clone()).expect("verify_apply_inner failed"); - println!("block_0 hash: {:x}", block_0_import.block_hash); - - // add a block that will be reorged away from - let mut block_1a = create_dummy_block(); - block_1a.unverified_block_number = Some(1); - block_1a.unverified_global_state_root = Some(felt!("0x0")); - block_1a.header.parent_block_hash = Some(block_0_import.block_hash); - let block_1a_import = verify_apply_inner(&backend, block_1a, validation.clone()).expect("verify_apply_inner failed"); - println!("block_1a hash: {:x}", block_1a_import.block_hash); - - // reorg from 1a to 1b - let mut block_1b = create_dummy_block(); - block_1b.unverified_block_number = Some(1); - block_1b.unverified_global_state_root = Some(felt!("0x0")); - block_1b.header.parent_block_hash = Some(block_0_import.block_hash); - let block_1b_import = verify_apply_inner(&backend, block_1b, validation.clone()).expect("verify_apply_inner failed"); - println!("block_1b hash: {:x}", block_1b_import.block_hash); - - let mabye_block_1_hash = backend.get_block_hash(&BlockId::Number(1)).unwrap(); - assert_eq!(mabye_block_1_hash, Some(block_1b_import.block_hash)); - assert_eq!(backend.get_latest_block_n().unwrap(), Some(1)); - } } mod verify_apply_pending_tests { @@ -901,4 +870,145 @@ mod verify_apply_tests { ); } } + + mod reorg_tests { + use super::*; + + // other ideas for testing: + // * basic tests + // * deeper reorgs + // * reorgs not from block 0 + // * multiple reorgs + // * reorg back-and-forth + // * show that we can build blocks (grow) on top of reorg + // * actually change some state in blocks, reorg, show that state changes + // * show that parent height / hash work properly after reorgs + // * verify various storage has been erased + // * test MPT + // * can include some of the basic test mentioned above + // * verify proofs after reorg + // * introduce patricia trie shape changes, verify proof + // * verify non-inclusion proofs + // * negative tests 
+ // * reject reorgs from finalized blocks + // * reject reorgs with no known parent + // * reject reorgs for wrong height + // * reject long distance reorgs (perhaps when L1 hasn't been processed recently?) + + /// TODO: document + #[rstest] + #[tokio::test] + async fn test_verify_apply_reorg(setup_test_backend: Arc) { + let backend = setup_test_backend; + let validation = create_validation_context(false); + + let mut block_0 = create_dummy_block(); + // block_0.unverified_block_hash = Some(felt!("0xf0")); + block_0.unverified_block_number = Some(0); + block_0.header.parent_block_hash = None; + block_0.unverified_global_state_root = Some(felt!("0x0")); + let block_0_import = verify_apply_inner(&backend, block_0, validation.clone()).expect("verify_apply_inner failed"); + println!("block_0 hash: {:x}", block_0_import.block_hash); + + // add a block that will be reorged away from + let mut block_1a = create_dummy_block(); + block_1a.unverified_block_number = Some(1); + block_1a.unverified_global_state_root = Some(felt!("0x0")); + block_1a.header.parent_block_hash = Some(block_0_import.block_hash); + let block_1a_import = verify_apply_inner(&backend, block_1a, validation.clone()).expect("verify_apply_inner failed"); + println!("block_1a hash: {:x}", block_1a_import.block_hash); + + // reorg from 1a to 1b + let mut block_1b = create_dummy_block(); + block_1b.unverified_block_number = Some(1); + block_1b.unverified_global_state_root = Some(felt!("0x0")); + block_1b.header.parent_block_hash = Some(block_0_import.block_hash); + let block_1b_import = verify_apply_inner(&backend, block_1b, validation.clone()).expect("verify_apply_inner failed"); + println!("block_1b hash: {:x}", block_1b_import.block_hash); + + let mabye_block_1_hash = backend.get_block_hash(&BlockId::Number(1)).unwrap(); + assert_eq!(mabye_block_1_hash, Some(block_1b_import.block_hash)); + assert_eq!(backend.get_latest_block_n().unwrap(), Some(1)); + } + + struct ReorgTestArgs { + /// original chain depth (including block 0) + original_chain_depth: u64, + /// the parent (in block height) whose child will be reorged away from + reorg_parent_height: u64, + /// number of blocks of the reorg (0 == no reorg) + num_reorg_blocks: u64, + } + + /// TODO: document + #[rstest] + #[case::reorg_from_genesis( + ReorgTestArgs{ + original_chain_depth: 2, + reorg_parent_height: 0, + num_reorg_blocks: 1, + }, + )] + #[case::success( + ReorgTestArgs{ + original_chain_depth: 6, + reorg_parent_height: 3, + num_reorg_blocks: 2, + }, + )] + #[tokio::test] + async fn test_reorg( + setup_test_backend: Arc, + #[case] args: ReorgTestArgs, + ) { + let backend = setup_test_backend; + let validation = create_validation_context(false); + + // appends an empty block to the given parent block, returning the new block's hash + let append_empty_block = |new_block_height: u64, parent_hash: Option| -> Felt { + println!("attempting to add block {}", new_block_height); + let mut block = create_dummy_block(); + block.unverified_block_number = Some(new_block_height); + block.unverified_global_state_root = Some(felt!("0x0")); + block.header.parent_block_hash = parent_hash; + let block_import = verify_apply_inner(&backend, block, validation.clone()).expect("verify_apply_inner failed"); + println!("added block {} (0x{:x})", new_block_height, block_import.block_hash); + + block_import.block_hash + }; + + // create the original chain + let mut parent_hash = None; + let mut reorg_parent_hash = None; + for i in 0..args.original_chain_depth { + let new_block_hash = 
append_empty_block(i, parent_hash); + if i == args.reorg_parent_height { + reorg_parent_hash = Some(new_block_hash); + } + parent_hash = Some(new_block_hash); + } + + assert!(reorg_parent_hash.is_some()); + println!("Reorging from parent {} ({:?})", args.reorg_parent_height, reorg_parent_hash); + + let mut reorg_block = create_dummy_block(); + reorg_block.unverified_block_number = Some(args.reorg_parent_height + 1); + reorg_block.unverified_global_state_root = Some(felt!("0x0")); + reorg_block.header.parent_block_hash = reorg_parent_hash; + let _ = reorg(&backend, &reorg_block).expect("reorg failed"); + let block_import = verify_apply_inner(&backend, reorg_block, validation.clone()).expect("verify_apply_inner failed on reorg block"); + + // reorg after given parent (start with 1 since we already added our reorg block) + for i in 1..args.num_reorg_blocks { + let block_height = args.reorg_parent_height + i; + let new_block_hash = append_empty_block(block_height, parent_hash); + parent_hash = Some(new_block_hash); + } + + let latest_block_n = backend.get_latest_block_n().expect("get_latest_block_n() failed").expect("latest_block_n is None"); + assert_eq!(args.reorg_parent_height + args.num_reorg_blocks, latest_block_n); + let latest_block_hash = backend.get_block_hash(&BlockId::Number(latest_block_n)).expect("get_block_hash failed after reorg"); + assert_eq!(latest_block_hash, parent_hash); + } + } } diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index d8b57dcab..f81c89f34 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -284,13 +284,7 @@ impl MadaraBackend { pub(crate) fn block_db_revert(&self, revert_to: u64) -> Result<()> { let mut tx = WriteBatchWithTransaction::default(); - // TODO: this was grouped with the other columns in `fn block_db_store_block()`, we should - // probably clean it up if possible (or be sure some other mechanism will do so) - // - // if we have the block struct with the txns from this block, it should be trivial to handle - // - // let tx_hash_to_block_n = self.db.get_column(Column::TxHashToBlockN); - + let tx_hash_to_block_n = self.db.get_column(Column::TxHashToBlockN); let block_hash_to_block_n = self.db.get_column(Column::BlockHashToBlockN); let block_n_to_block = self.db.get_column(Column::BlockNToBlockInfo); let block_n_to_block_inner = self.db.get_column(Column::BlockNToBlockInner); @@ -311,9 +305,15 @@ impl MadaraBackend { let res = self.db.get_cf(&block_n_to_block, &block_n_encoded)?; let block_info: MadaraBlockInfo = bincode::deserialize(&res.unwrap())?; // TODO: unwrap + + // clear all txns from this block + for txn_hash in block_info.tx_hashes { + let txn_hash_encoded = bincode::serialize(&txn_hash)?; + tx.delete_cf(&tx_hash_to_block_n, &txn_hash_encoded); + } let block_hash_encoded = bincode::serialize(&block_info.block_hash)?; - + println!(" - original block hash: {:x}", block_info.block_hash); println!(" - block height/hash: {} => {:x?}", block_n, block_hash_encoded); From a688ac6cea29b878f401609ea16570accc853e8b Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Fri, 22 Nov 2024 12:39:40 -0700 Subject: [PATCH 11/11] Remove old reorg test which expects reorg detection --- .../client/block_import/src/verify_apply.rs | 38 +------------------ 1 file changed, 1 insertion(+), 37 deletions(-) diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index 95fb91e5c..ec5875a5e 100644 --- a/crates/client/block_import/src/verify_apply.rs 
+++ b/crates/client/block_import/src/verify_apply.rs @@ -895,42 +895,6 @@ mod verify_apply_tests { // * reject reorgs for wrong height // * reject long distance reorgs (perhaps when L1 hasn't been processed recently?) - /// TODO: document - #[rstest] - #[tokio::test] - async fn test_verify_apply_reorg(setup_test_backend: Arc) { - let backend = setup_test_backend; - let validation = create_validation_context(false); - - let mut block_0 = create_dummy_block(); - // block_0.unverified_block_hash = Some(felt!("0xf0")); - block_0.unverified_block_number = Some(0); - block_0.header.parent_block_hash = None; - block_0.unverified_global_state_root = Some(felt!("0x0")); - let block_0_import = verify_apply_inner(&backend, block_0, validation.clone()).expect("verify_apply_inner failed"); - println!("block_0 hash: {:x}", block_0_import.block_hash); - - // add a block that will be reorged away from - let mut block_1a = create_dummy_block(); - block_1a.unverified_block_number = Some(1); - block_1a.unverified_global_state_root = Some(felt!("0x0")); - block_1a.header.parent_block_hash = Some(block_0_import.block_hash); - let block_1a_import = verify_apply_inner(&backend, block_1a, validation.clone()).expect("verify_apply_inner failed"); - println!("block_1a hash: {:x}", block_1a_import.block_hash); - - // reorg from 1a to 1b - let mut block_1b = create_dummy_block(); - block_1b.unverified_block_number = Some(1); - block_1b.unverified_global_state_root = Some(felt!("0x0")); - block_1b.header.parent_block_hash = Some(block_0_import.block_hash); - let block_1b_import = verify_apply_inner(&backend, block_1b, validation.clone()).expect("verify_apply_inner failed"); - println!("block_1b hash: {:x}", block_1b_import.block_hash); - - let mabye_block_1_hash = backend.get_block_hash(&BlockId::Number(1)).unwrap(); - assert_eq!(mabye_block_1_hash, Some(block_1b_import.block_hash)); - assert_eq!(backend.get_latest_block_n().unwrap(), Some(1)); - } - struct ReorgTestArgs { /// original chain depth (including block 0) original_chain_depth: u64, @@ -996,7 +960,7 @@ mod verify_apply_tests { reorg_block.unverified_global_state_root = Some(felt!("0x0")); reorg_block.header.parent_block_hash = reorg_parent_hash; let _ = reorg(&backend, &reorg_block).expect("reorg failed"); - let block_import = verify_apply_inner(&backend, reorg_block, validation.clone()).expect("verify_apply_inner failed on reorg block"); + let _block_import = verify_apply_inner(&backend, reorg_block, validation.clone()).expect("verify_apply_inner failed on reorg block"); // reorg after given parent (start with 1 since we already added our reorg block) for i in 1..args.num_reorg_blocks {