From 5d3b10f924f9ffecd997f3cf7af2a528efb38fab Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:31:34 -0600 Subject: [PATCH 01/65] Refactor pruning proof validation to many functions Co-authored-by: Ori Newman --- consensus/src/consensus/services.rs | 1 + consensus/src/processes/pruning_proof/mod.rs | 194 ++++++++++++++----- 2 files changed, 145 insertions(+), 50 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 4afb5938a..3db1e8d38 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -185,6 +185,7 @@ impl ConsensusServices { parents_manager.clone(), reachability_service.clone(), ghostdag_managers.clone(), + ghostdag_primary_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6324aa4ee..3dfed8660 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -26,7 +26,10 @@ use kaspa_consensus_core::{ BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; use kaspa_core::{debug, info, trace}; -use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; +use kaspa_database::{ + prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}, + utils::DbLifetime, +}; use kaspa_hashes::Hash; use kaspa_pow::calc_block_level; use kaspa_utils::{binary_heap::BinaryHeapExtensions, vec::VecExtensions}; @@ -41,7 +44,7 @@ use crate::{ services::reachability::{MTReachabilityService, ReachabilityService}, stores::{ depth::DbDepthStore, - ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -88,6 +91,16 @@ impl Clone for CachedPruningPointData { } } +struct TempProofContext { + headers_store: Arc<DbHeadersStore>, + ghostdag_stores: Vec<Arc<DbGhostdagStore>>, + relations_stores: Vec<DbRelationsStore>, + reachability_stores: Vec<Arc<RwLock<DbReachabilityStore>>>, + ghostdag_managers: + Vec<GhostdagManager<DbGhostdagStore, MTRelationsService<DbRelationsStore>, MTReachabilityService<DbReachabilityStore>, DbHeadersStore>>, + db_lifetime: DbLifetime, +} + pub struct PruningProofManager { db: Arc<DB>, @@ -96,6 +109,7 @@ pub struct PruningProofManager { reachability_relations_store: Arc<RwLock<DbRelationsStore>>, reachability_service: MTReachabilityService<DbReachabilityStore>, ghostdag_stores: Arc<Vec<Arc<DbGhostdagStore>>>, + ghostdag_primary_store: Arc<DbGhostdagStore>, relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>, pruning_point_store: Arc<RwLock<DbPruningStore>>, past_pruning_points_store: Arc<DbPastPruningPointsStore>, @@ -106,6 +120,7 @@ pub struct PruningProofManager { selected_chain_store: Arc<RwLock<DbSelectedChainStore>>, ghostdag_managers: Arc<Vec<DbGhostdagManager>>, + ghostdag_primary_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -130,6 +145,7 @@ impl PruningProofManager { parents_manager: DbParentsManager, reachability_service: MTReachabilityService<DbReachabilityStore>, ghostdag_managers: Arc<Vec<DbGhostdagManager>>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, max_block_level: BlockLevel, @@ -146,6 +162,7 @@ impl PruningProofManager { reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_primary_store: storage.ghostdag_primary_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -168,6 +185,7 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, + ghostdag_primary_manager: ghostdag_manager, is_consensus_exiting, } @@ -244,8 +262,12 @@ impl PruningProofManager { self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); let gd = if header.hash == self.genesis_hash { self.ghostdag_managers[level].genesis_ghostdag_data() - } else if level == 0 { - if let Some(gd) = trusted_gd_map.get(&header.hash) { + } else { + self.ghostdag_managers[level].ghostdag(&parents) + }; + + if level == 0 { + let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); @@ -258,18 +280,18 @@ impl PruningProofManager { mergeset_reds: calculated_gd.mergeset_reds.clone(), blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), } - } + }; + self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + } } } let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_managers[0].ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_primary_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -387,18 +409,16 @@ impl PruningProofManager { } } - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { - if proof.len() != self.max_block_level as usize + 1 { - return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); - } + fn init_validate_pruning_point_proof_stores_and_processes( + &self, + proof: &PruningPointProof, + ) -> PruningImportResult<TempProofContext> { if proof[0].is_empty() { return Err(PruningImportError::PruningProofNotEnoughHeaders); } let headers_estimate = self.estimate_proof_unique_size(proof); - let proof_pp_header = proof[0].last().expect("checked if empty"); - let proof_pp = proof_pp_header.hash; - let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -444,6 +464,23 @@ impl PruningProofManager { db.write(batch).unwrap(); } + Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, relations_stores, reachability_stores, ghostdag_managers }) + } + + fn populate_stores_for_validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + stores_and_processes: &mut TempProofContext, + ) -> PruningImportResult<Vec<Hash>> { + let headers_store = &stores_and_processes.headers_store; + let ghostdag_stores = &stores_and_processes.ghostdag_stores; + let mut relations_stores = stores_and_processes.relations_stores.clone(); + let reachability_stores = &stores_and_processes.reachability_stores; + let ghostdag_managers = &stores_and_processes.ghostdag_managers; + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp
= proof_pp_header.hash; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; for level in (0..=self.max_block_level).rev() { // Before processing this level, check if the process is exiting so we can end early @@ -533,45 +570,91 @@ impl PruningProofManager { selected_tip_by_level[level_idx] = selected_tip; } + Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) + } + + fn validate_proof_selected_tip( + &self, + proof_selected_tip: Hash, + level: BlockLevel, + proof_pp_level: BlockLevel, + proof_pp: Hash, + proof_pp_header: &Header, + ) -> PruningImportResult<()> { + // A proof selected tip of some level has to be the proof's suggested pruning point itself if its level + // is lower or equal to the pruning point level, or a parent of the pruning point on the relevant level + // otherwise. + if level <= proof_pp_level { + if proof_selected_tip != proof_pp { + return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); + } + } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { + return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); + } + + Ok(()) + } + + // find_proof_and_consensus_common_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the common ancestor as seen by the proof and by the + // current consensus. If no such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( + &self, + ghostdag_stores: &[Arc<DbGhostdagStore>], + proof_selected_tip: Hash, + level: BlockLevel, + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match self.ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; + } + } + + pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + if proof.len() != self.max_block_level as usize + 1 { + return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); + } + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(&proof)?; + let selected_tip_by_level = self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; + let ghostdag_stores = stores_and_processes.ghostdag_stores; + let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - for (level_idx, selected_tip) in selected_tip_by_level.into_iter().enumerate() { + for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; - let selected_tip = selected_tip.unwrap(); - if level <= proof_pp_level { - if selected_tip != proof_pp {
return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(selected_tip, level)); - } - } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip) { - return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(selected_tip, level)); - } + self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - let mut proof_current = selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - let common_ancestor_data = loop { - match self.ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap(); - } - }; - }; - - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = common_ancestor_data { + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &ghostdag_stores, + selected_tip, + level, + proof_selected_tip_gd, + ) { let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(&current_pp_header, level).iter().copied() { let parent_blue_work = self.ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { @@ -593,8 +676,19 @@ impl PruningProofManager { return Ok(()); } + // If we got here it means there's no level with shared blocks + // between the proof and the current consensus. In this case we + // consider the proof to be better if it has at least one level + // with 2*self.pruning_proof_m blue blocks where consensus doesn't.
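+ // For example, with pruning_proof_m = 1000 (an illustrative value): the check below
+ // accepts the proof at some level only if the proof's selected tip there has a blue
+ // score of at least 2000 while some parent of the current pruning point at that level
+ // has a blue score below 2000, i.e. the proof carries a full 2M window that the
+ // current consensus cannot match at that level.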
for level in (0..=self.max_block_level).rev() { let level_idx = level as usize; + + let proof_selected_tip = selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + match relations_read[level_idx].get_parents(current_pp).unwrap_option() { Some(parents) => { if parents @@ -614,7 +708,7 @@ impl PruningProofManager { drop(pruning_read); drop(relations_read); - drop(db_lifetime); + drop(stores_and_processes.db_lifetime); Err(PruningImportError::PruningProofNotEnoughHeaders) } @@ -816,7 +910,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_stores[0].get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -836,7 +930,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_managers[0].sort_blocks(anticone); + let mut anticone = self.ghostdag_primary_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -847,14 +941,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_stores[0].get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_primary_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_stores[0].get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_primary_store.get_data(hash).unwrap()).into(), }); } } @@ -862,7 +956,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = self.ghostdag_stores[0].get_data(hash).unwrap(); + let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -894,7 +988,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_stores[0].get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_primary_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); From 40f1cc9bbe24031f9aefaae0db9bef7e1897bbb2 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:32:26 -0600 Subject: [PATCH 02/65] Use blue score as work for higher levels Co-authored-by: Ori Newman --- consensus/src/consensus/services.rs | 1 + consensus/src/processes/ghostdag/protocol.rs | 24 +++++++++++++------- consensus/src/processes/pruning_proof/mod.rs | 1 + 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/consensus/src/consensus/services.rs 
b/consensus/src/consensus/services.rs index 3db1e8d38..b5617ea76 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -126,6 +126,7 @@ impl ConsensusServices { relations_services[level].clone(), storage.headers_store.clone(), reachability_service.clone(), + level != 0, ) }) .collect_vec(), diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 87beeb565..ac9ae41d7 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -5,6 +5,7 @@ use kaspa_consensus_core::{ BlockHashMap, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; +use kaspa_math::Uint192; use kaspa_utils::refs::Refs; use crate::{ @@ -29,6 +30,7 @@ pub struct GhostdagManager<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> { pub(super) reachability_service: U, + use_score_as_work: bool, } impl<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> GhostdagManager<T, S, U, V> { @@ -39,8 +41,9 @@ impl<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> GhostdagManager<T, S, U, V> reachability_service: U, + use_score_as_work: bool, ) -> Self { - Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store } + Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work } } pub fn genesis_ghostdag_data(&self) -> GhostdagData { @@ -115,14 +118,19 @@ impl Date: Mon, 3 Jun 2024 23:34:17 -0600 Subject: [PATCH 03/65] Remove pruning processor dependency on gd managers Co-authored-by: Ori Newman --- consensus/src/pipeline/pruning_processor/processor.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 8cded745a..bee46834a 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -2,7 +2,7 @@ use crate::{ consensus::{ - services::{ConsensusServices, DbGhostdagManager, DbPruningPointManager}, + services::{ConsensusServices, DbPruningPointManager}, storage::ConsensusStorage, }, model::{ @@ -69,7 +69,6 @@ pub struct PruningProcessor { // Managers and Services reachability_service: MTReachabilityService<DbReachabilityStore>, - ghostdag_managers: Arc<Vec<DbGhostdagManager>>, pruning_point_manager: DbPruningPointManager, pruning_proof_manager: Arc<PruningProofManager>, @@ -106,7 +105,6 @@ impl PruningProcessor { db, storage: storage.clone(), reachability_service: services.reachability_service.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), pruning_point_manager: services.pruning_point_manager.clone(), pruning_proof_manager: services.pruning_proof_manager.clone(), pruning_lock, From 1df5a22e2c5afb8b45a7cc59ae2579ac640cf238 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:35:17 -0600 Subject: [PATCH 04/65] Consistency renaming Co-authored-by: Ori Newman --- .../pipeline/body_processor/body_validation_in_context.rs | 2 +- consensus/src/pipeline/body_processor/processor.rs | 6 +++--- consensus/src/pipeline/header_processor/processor.rs | 7 ++++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 2425556d0..b437f1f13 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -25,7 +25,7 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc<Self>, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) =
self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; + let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_primary_store.get_data(block.hash()).unwrap())?; for tx in block.transactions.iter() { if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { return Err(RuleError::TxInContextFailed(tx.id(), e)); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 1ea674263..8b6d35e19 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -55,7 +55,7 @@ pub struct BlockBodyProcessor { // Stores pub(super) statuses_store: Arc<RwLock<DbStatusesStore>>, - pub(super) ghostdag_store: Arc<DbGhostdagStore>, + pub(super) ghostdag_primary_store: Arc<DbGhostdagStore>, pub(super) headers_store: Arc<DbHeadersStore>, pub(super) block_transactions_store: Arc<DbBlockTransactionsStore>, pub(super) body_tips_store: Arc<RwLock<DbTipsStore>>, @@ -92,7 +92,7 @@ impl BlockBodyProcessor { db: Arc<DB>, statuses_store: Arc<RwLock<DbStatusesStore>>, - ghostdag_store: Arc<DbGhostdagStore>, + ghostdag_primary_store: Arc<DbGhostdagStore>, headers_store: Arc<DbHeadersStore>, block_transactions_store: Arc<DbBlockTransactionsStore>, body_tips_store: Arc<RwLock<DbTipsStore>>, @@ -116,7 +116,7 @@ impl BlockBodyProcessor { db, statuses_store, reachability_service, - ghostdag_store, + ghostdag_primary_store, headers_store, block_transactions_store, body_tips_store, diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index d1b74aeb5..a90e67c50 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -356,13 +356,13 @@ impl HeaderProcessor { .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) }) .collect_vec(); - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); + let ghostdag_primary_data = &ghostdag_data[0]; let pp = ctx.pruning_point(); // Create a DB batch writer @@ -375,6 +375,7 @@ impl HeaderProcessor { for (level, datum) in ghostdag_data.iter().enumerate() { self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); } + if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); } @@ -395,8 +396,8 @@ impl HeaderProcessor { // time, and thus serializing this part will do no harm. However this should be benchmarked. The // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - let selected_parent = ghostdag_data[0].selected_parent; - let mut reachability_mergeset = ghostdag_data[0].unordered_mergeset_without_selected_parent(); + let selected_parent = ghostdag_primary_data.selected_parent; + let mut reachability_mergeset = ghostdag_primary_data.unordered_mergeset_without_selected_parent(); reachability::add_block(&mut staging, ctx.hash, selected_parent, &mut reachability_mergeset).unwrap(); // Non-append only stores need to use write locks.
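A minimal, self-contained sketch of the switch introduced in PATCH 02 above. The type and function names here are illustrative stand-ins rather than the crate's API; only the `use_score_as_work` flag and the `level != 0` wiring come from the patches themselves. The idea: at levels above zero the manager substitutes a block's blue score for its accumulated blue work, which is cheap while still giving a usable ordering for proof levels.

// Sketch only: `LevelWork` and `work_for` are hypothetical names.
type BlueWorkType = u128; // the crate uses Uint192; u128 keeps the sketch self-contained

struct LevelWork {
    use_score_as_work: bool, // true for every level above 0 (`level != 0` in services.rs)
}

impl LevelWork {
    // Returns the "work" recorded for a block: real accumulated PoW at level 0,
    // the block's blue score at higher levels where exact work is not needed.
    fn work_for(&self, blue_score: u64, accumulated_work: BlueWorkType) -> BlueWorkType {
        if self.use_score_as_work {
            blue_score.into()
        } else {
            accumulated_work
        }
    }
}

fn main() {
    let primary = LevelWork { use_score_as_work: false };
    let higher = LevelWork { use_score_as_work: true };
    assert_eq!(primary.work_for(7, 1_000), 1_000);
    assert_eq!(higher.work_for(7, 1_000), 7);
}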
From d12592c34f3055fd474c3ae8847c4c83c853aff1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:35:37 -0600 Subject: [PATCH 05/65] Update db version Co-authored-by: Ori Newman --- consensus/src/consensus/factory.rs | 2 +- database/src/registry.rs | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f34aa54f9 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { diff --git a/database/src/registry.rs b/database/src/registry.rs index 9e1b129d6..981af729d 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -36,10 +36,12 @@ pub enum DatabaseStorePrefixes { UtxoMultisets = 26, VirtualUtxoset = 27, VirtualState = 28, + GhostdagProof = 29, + GhostdagCompactProof = 30, // ---- Decomposed reachability stores ---- - ReachabilityTreeChildren = 30, - ReachabilityFutureCoveringSet = 31, + ReachabilityTreeChildren = 31, + ReachabilityFutureCoveringSet = 32, // ---- Metadata ---- MultiConsensusMetadata = 124, From 2bea765a2228f2c003589c89193ce66307087c62 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 22:44:21 -0600 Subject: [PATCH 06/65] GD Optimizations Co-authored-by: Ori Newman --- consensus/src/model/stores/ghostdag.rs | 21 ++ .../pipeline/pruning_processor/processor.rs | 3 +- consensus/src/processes/pruning_proof/mod.rs | 296 ++++++++++++++++-- database/src/registry.rs | 2 + simpa/src/main.rs | 7 + 5 files changed, 304 insertions(+), 25 deletions(-) diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index 89c4686c5..3ffe23e7e 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -271,6 +271,27 @@ impl DbGhostdagStore { } } + pub fn new_temp( + db: Arc<DB>, + level: BlockLevel, + cache_policy: CachePolicy, + compact_cache_policy: CachePolicy, + temp_index: u8, + ) -> Self { + assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); + let lvl_bytes = level.to_le_bytes(); + let temp_index_bytes = temp_index.to_le_bytes(); + let prefix = DatabaseStorePrefixes::TempGhostdag.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + let compact_prefix = + DatabaseStorePrefixes::TempGhostdagCompact.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_policy, prefix), + compact_access: CachedDbAccess::new(db, compact_cache_policy, compact_prefix), + } + } + pub fn clone_with_new_cache(&self, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { Self::new(Arc::clone(&self.db), self.level, cache_policy, compact_cache_policy) } diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index bee46834a..cd9026565 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -455,7 +455,8 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - self.assert_proof_rebuilding(proof, new_pruning_point); + // self.assert_proof_rebuilding(proof, new_pruning_point); + self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); self.assert_data_rebuilding(data, new_pruning_point); } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 31b1df833..8b4b3e299 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -25,7 +25,7 @@ use kaspa_consensus_core::{ trusted::{TrustedBlock, TrustedGhostdagData, TrustedHeader}, BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; -use kaspa_core::{debug, info, trace}; +use kaspa_core::{debug, info, trace, warn}; use kaspa_database::{ prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}, utils::DbLifetime, }; use kaspa_hashes::Hash; use kaspa_pow::calc_block_level; use kaspa_utils::{binary_heap::BinaryHeapExtensions, vec::VecExtensions}; @@ -41,11 +44,14 @@ use crate::{ storage::ConsensusStorage, }, model::{ - services::reachability::{MTReachabilityService, ReachabilityService}, + services::{ + reachability::{MTReachabilityService, ReachabilityService}, + relations::MTRelationsService, + }, stores::{ depth::DbDepthStore, ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, - headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, + headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, pruning::{DbPruningStore, PruningStoreReader}, @@ -78,7 +81,11 @@ enum PruningProofManagerInternalError { #[error("cannot find a common ancestor: {0}")] NoCommonAncestor(String), + + #[error("missing headers to build proof: {0}")] + NotEnoughHeadersToBuildProof(String), } +type PruningProofManagerInternalResult<T> = std::result::Result<T, PruningProofManagerInternalError>; struct CachedPruningPointData { pruning_point: Hash, @@ -714,40 +721,280 impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } + // TODO: Find a better name + fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { + let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); + pp_header + .parents_by_level + .iter() + .enumerate() + .skip(1) + .find_map(|(level, parents)| { + if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { + None + } else { + Some((level - 1) as BlockLevel) + } + }) + .unwrap_or(self.max_block_level) + } + + fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { + level_depth << current_dag_level.saturating_sub(level) + } + + fn find_selected_parent_header_at_level( + &self, + header: &Header, + level: BlockLevel, + ) -> PruningProofManagerInternalResult<Arc<Header>> { + let parents = self.parents_manager.parents_at_level(header, level); + let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() }; + for parent in parents.iter().copied().skip(1) { + let sblock = SortableBlock { + hash: parent, + blue_work: self + .headers_store + .get_blue_score(parent) + .unwrap_option() + .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(format!( + "find_selected_parent_header_at_level (level {level}) couldn't find the header for block {parent}" + )))?
+ .into(), }; if sblock > sp { sp = sblock; } } // TODO: For higher levels the chance of having more than two parents is very small, so it might make sense to fetch the whole header for the SortableBlock instead of blue_score (which will probably come from a compact header). self.headers_store.get_header(sp.hash).unwrap_option().ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof( format!("find_selected_parent_header_at_level (level {level}) couldn't find the header for block {}", sp.hash,), )) // Ok(self.headers_store.get_header(sp.hash).unwrap_option().expect("already checked if compact header exists above")) } fn find_sufficient_root( &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, current_dag_level: BlockLevel, required_block: Option<Hash>, temp_db: Arc<DB>, ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> { let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { self.find_selected_parent_header_at_level(&pp_header.header, level)? }; let selected_tip = selected_tip_header.hash; let pp = pp_header.header.hash; let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; let mut required_level_0_depth = if level == 0 { required_level_depth } else { self.estimated_blue_depth_at_level_0( level, required_level_depth * 5 / 4, // We take a safety margin current_dag_level, ) }; let mut tries = 0; loop { let required_block = if let Some(required_block) = required_block { // TODO: We can change it to skip related checks if `None` required_block } else { selected_tip }; let mut finished_headers = false; let mut finished_headers_for_required_block_chain = false; let mut current_header = selected_tip_header.clone(); let mut required_block_chain = BlockHashSet::new(); let mut selected_chain = BlockHashSet::new(); let mut intersected_with_required_block_chain = false; let mut current_required_chain_block = self.headers_store.get_header(required_block).unwrap(); let root_header = loop { if !intersected_with_required_block_chain { required_block_chain.insert(current_required_chain_block.hash); selected_chain.insert(current_header.hash); if required_block_chain.contains(&current_header.hash) || required_block_chain.contains(&current_required_chain_block.hash) { intersected_with_required_block_chain = true; } } if current_header.direct_parents().is_empty() // Stop at genesis || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; } current_header = match self.find_selected_parent_header_at_level(&current_header, level) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { warn!("it's unknown if the selected root for level {level} ( {} ) is in the chain of the required block {required_block}", current_header.hash) } finished_headers = true; // We want to give this root a shot if all its past is pruned break current_header; } Err(e) => return Err(e), }; if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { current_required_chain_block = match
self.find_selected_parent_header_at_level(&current_required_chain_block, level) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { finished_headers_for_required_block_chain = true; current_required_chain_block } Err(e) => return Err(e), }; } }; let root = root_header.hash; if level == 0 { return Ok((self.ghostdag_primary_store.clone(), selected_tip, root)); } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); let gd_manager = GhostdagManager::new( root, self.ghostdag_k, ghostdag_store.clone(), relations_service.clone(), self.headers_store.clone(), self.reachability_service.clone(), true, ); ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); let mut topological_heap: BinaryHeap<_> = Default::default(); let mut visited = BlockHashSet::new(); for child in relations_service.get_children(root).unwrap().read().iter().copied() { topological_heap.push(Reverse(SortableBlock { hash: child, // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? })); } let mut has_required_block = root == required_block; loop { let Some(current) = topological_heap.pop() else { break; }; let current_hash = current.0.hash; if !visited.insert(current_hash) { continue; } if !self.reachability_service.is_dag_ancestor_of(current_hash, pp) { // We don't care about blocks in the antipast of the pruning point continue; } if !has_required_block && current_hash == required_block { has_required_block = true; } let relevant_parents: Box<[Hash]> = relations_service .get_parents(current_hash) .unwrap() .iter() .copied() .filter(|parent| self.reachability_service.is_dag_ancestor_of(root, *parent)) .collect(); let current_gd = gd_manager.ghostdag(&relevant_parents); ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap(); for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { topological_heap.push(Reverse(SortableBlock { hash: child, // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? })); } } if has_required_block && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } tries += 1; if finished_headers { panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned") } required_level_0_depth <<= 1; warn!("Failed to find sufficient root for level {level} after {tries} tries.
Retrying again to find with depth {required_level_0_depth}"); } } fn calc_gd_for_all_levels( &self, pp_header: &HeaderWithBlockLevel, temp_db: Arc<DB>, ) -> (Vec<Arc<DbGhostdagStore>>, Vec<Hash>, Vec<Hash>) { let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec<Option<Arc<DbGhostdagStore>>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; for level in (0..=self.max_block_level).rev() { let level_usize = level as usize; let required_block = if level != self.max_block_level { let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone(); let block_at_depth_m_at_next_level = self .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) .unwrap(); Some(block_at_depth_m_at_next_level) } else { None }; let (store, selected_tip, root) = self .find_sufficient_root(&pp_header, level, current_dag_level, required_block, temp_db.clone()) .expect(&format!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); root_by_level[level_usize] = Some(root); } ( ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(), selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(), root_by_level.into_iter().map(Option::unwrap).collect_vec(), ) } pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { if pp == self.genesis_hash { return vec![]; } let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); - let selected_tip_by_level = (0..=self.max_block_level) - .map(|level| { - if level <= pp_header.block_level { - pp - } else { - self.ghostdag_managers[level as usize].find_selected_parent( - self.parents_manager - .parents_at_level(&pp_header.header, level) - .iter() - .filter(|parent| self.ghostdag_stores[level as usize].has(**parent).unwrap()) - .cloned(), - ) - } - }) - .collect_vec(); + let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); (0..=self.max_block_level) .map(|level| { let level = level as usize; let selected_tip = selected_tip_by_level[level]; let block_at_depth_2m = self - .block_at_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap(); - let root = if level != self.max_block_level as usize { + let root = roots_by_level[level]; + let old_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self - .block_at_depth(&*self.ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) .unwrap(); if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { block_at_depth_2m } else { self.find_common_ancestor_in_chain_of_a( &*ghostdag_stores[level], block_at_depth_m_at_next_level, block_at_depth_2m, ) .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap() } } else { block_at_depth_2m }; + // assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::<Reverse<SortableBlock>>::new(); let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, self.ghostdag_stores[level].get_blue_work(root).unwrap()))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { continue; } headers.push(self.headers_store.get_header(current).unwrap()); for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.ghostdag_stores[level].get_blue_work(child).unwrap()))); + queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); } } diff --git a/database/src/registry.rs b/database/src/registry.rs index 981af729d..0b4f6e5d0 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -38,6 +38,8 @@ pub enum DatabaseStorePrefixes { VirtualState = 28, GhostdagProof = 29, GhostdagCompactProof = 30, + TempGhostdag = 33, + TempGhostdagCompact = 34, // ---- Decomposed reachability stores ---- ReachabilityTreeChildren = 31, diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 1baecc3e7..8975e974a 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -222,6 +222,11 @@ fn main_impl(mut args: Args) { Default::default(), unix_now(), )); + + // TODO: Remove the call to get_pruning_point_proof + // let the_hash = Hash::from_str("45d0bb998ab8c3513d18fef3f70d9c686539da7cbe4fab8021e55be1b3a0f8df").unwrap(); + // assert!(topologically_ordered_hashes(&consensus, config.params.genesis.hash).into_iter().contains(&the_hash)); + let _ = consensus.get_pruning_point_proof(); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -441,6 +446,8 @@ mod tests { args.target_blocks = Some(5000); args.tpb = 1; args.test_pruning = true; + // args.output_dir = Some("/tmp/simpa".into()); + // args.input_dir = Some("/tmp/simpa".into()); kaspa_core::log::try_init_logger(&args.log_level); // As we log the panic, we want to set it up after the logger From 902b2172528982fe61c3a5ac2d41396960188d48 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:52:38 -0600 Subject: [PATCH 07/65] Remove remnant of old impl.
optimize db prefixes --- database/src/registry.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/database/src/registry.rs b/database/src/registry.rs index 0b4f6e5d0..87e89a491 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -36,14 +36,14 @@ pub enum DatabaseStorePrefixes { UtxoMultisets = 26, VirtualUtxoset = 27, VirtualState = 28, - GhostdagProof = 29, - GhostdagCompactProof = 30, - TempGhostdag = 33, - TempGhostdagCompact = 34, // ---- Decomposed reachability stores ---- - ReachabilityTreeChildren = 31, - ReachabilityFutureCoveringSet = 32, + ReachabilityTreeChildren = 30, + ReachabilityFutureCoveringSet = 31, + + // ---- Ghostdag Proof + TempGhostdag = 40, + TempGhostdagCompact = 41, // ---- Metadata ---- MultiConsensusMetadata = 124, From 7f1f412a7abc3b60fe8148483617ae9b187c6a44 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:13:31 -0600 Subject: [PATCH 08/65] Ensure parents are in relations; Add comments apply_proof only inserts parent entries for a header from the proof into the relations store for a level if there was GD data in the old stores for that header. This adds a check to filter out parent records not in relations store --- consensus/src/processes/pruning_proof/mod.rs | 62 +++++++++++++------- 1 file changed, 41 insertions(+), 21 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8b4b3e299..c2aca9f49 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -721,7 +721,8 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } - // TODO: Find a better name + /// Looks for the first level whose parents are different from the direct parents of the pp_header + /// The current DAG level is the one right below that. fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); pp_header @@ -743,12 +744,26 @@ impl PruningProofManager { level_depth << current_dag_level.saturating_sub(level) } + /// selected parent at level = the parent of the header at the level + /// with the highest blue_work (using score as work in this case) fn find_selected_parent_header_at_level( &self, header: &Header, level: BlockLevel, + relations_service: MTRelationsService<DbRelationsStore>, ) -> PruningProofManagerInternalResult<Arc<Header>> { // Logic of apply_proof only inserts parent entries for a header from the proof // into the relations store for a level if there was GD data in the old stores for that // header. To mimic that logic here, we need to filter out parents that are NOT in the relations_service let parents = self .parents_manager .parents_at_level(header, level) .iter() .copied() .filter(|parent| relations_service.has(*parent).unwrap()) .collect_vec() .push_if_empty(ORIGIN); let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() }; for parent in parents.iter().copied().skip(1) { let sblock = SortableBlock { @@ -781,14 +796,16 @@ impl PruningProofManager { required_block: Option<Hash>, temp_db: Arc<DB>, ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> { + let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { - self.find_selected_parent_header_at_level(&pp_header.header, level)? + self.find_selected_parent_header_at_level(&pp_header.header, level, relations_service.clone())? }; + let selected_tip = selected_tip_header.hash; let pp = pp_header.header.hash; - let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; let mut required_level_0_depth = if level == 0 { @@ -822,7 +839,7 @@ impl PruningProofManager { required_block_chain.insert(current_required_chain_block.hash); selected_chain.insert(current_header.hash); if required_block_chain.contains(&current_header.hash) - || required_block_chain.contains(&current_required_chain_block.hash) + || selected_chain.contains(&current_required_chain_block.hash) { intersected_with_required_block_chain = true; } } @@ -834,7 +851,7 @@ impl PruningProofManager { { break current_header; } - current_header = match self.find_selected_parent_header_at_level(&current_header, level) { + current_header = match self.find_selected_parent_header_at_level(&current_header, level, relations_service.clone()) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { warn!("it's unknown if the selected root for level {level} ( {} ) is in the chain of the required block {required_block}", current_header.hash) } finished_headers = true; // We want to give this root a shot if all its past is pruned break current_header; } Err(e) => return Err(e), }; if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { - current_required_chain_block = - match self.find_selected_parent_header_at_level(&current_required_chain_block, level) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - finished_headers_for_required_block_chain = true; - current_required_chain_block - } - Err(e) => return Err(e), - }; + current_required_chain_block = match self.find_selected_parent_header_at_level( + &current_required_chain_block, + level, + relations_service.clone(), + ) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + finished_headers_for_required_block_chain = true; + current_required_chain_block + } + Err(e) => return Err(e), + }; } }; let root = root_header.hash; @@ -1038,7 +1058,7 @@ impl PruningProofManager { // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash)); let chain_2m = self - .chain_up_to_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| { dbg!(level,
selected_tip, block_at_depth_2m, root); format!("Assert 2M chain -- level: {}, err: {}", level, err) @@ -1049,13 +1069,13 @@ impl PruningProofManager { if !set.contains(&chain_hash) { let next_level_tip = selected_tip_by_level[level + 1]; let next_level_chain_m = - self.chain_up_to_depth(&*self.ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); + self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); let next_level_block_m = next_level_chain_m.last().copied().unwrap(); dbg!(next_level_chain_m.len()); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); dbg!(level, selected_tip, block_at_depth_2m, root); panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); } From f49478af23674a30fe8b53f4a3332942e1a17603 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:18:24 -0600 Subject: [PATCH 09/65] Match depth check to block_at_depth logic --- consensus/src/processes/pruning_proof/mod.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index c2aca9f49..c03c29449 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -721,8 +721,12 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } - /// Looks for the first level whose parents are different from the direct parents of the pp_header - /// The current DAG level is the one right below that. + // The "current dag level" is the level right before the level whose parents are + // not the same as our header's direct parents + // + // Find the current DAG level by going through all the parents at each level, + // starting from the bottom level and see which is the first level that has + // parents that are NOT our current pp_header's direct parents. 
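+ // For example (an illustration of the logic, not part of the original comment): if
+ // parents_by_level[1] and parents_by_level[2] both equal the direct parents but
+ // parents_by_level[3] differs, the first differing level is 3, so the current DAG
+ // level returned below is 2.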
fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); pp_header @@ -846,7 +850,8 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + // Need to ensure this does the same 2M+1 depth that block_at_depth does + || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; } @@ -942,8 +947,9 @@ impl PruningProofManager { } } + // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } From 879c135bd09edf7f9bd5feadbd75c23f8f603519 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:21:52 -0600 Subject: [PATCH 10/65] Use singular GD store for header processing --- .../pipeline/header_processor/processor.rs | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index a90e67c50..141c15418 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>, pub(super) reachability_store: Arc<RwLock<DbReachabilityStore>>, pub(super) reachability_relations_store: Arc<RwLock<DbRelationsStore>>, - pub(super) ghostdag_stores: Arc<Vec<Arc<DbGhostdagStore>>>, + pub(super) ghostdag_primary_store: Arc<DbGhostdagStore>, pub(super) statuses_store: Arc<RwLock<DbStatusesStore>>, pub(super) pruning_point_store: Arc<RwLock<DbPruningStore>>, pub(super) block_window_cache_for_difficulty: Arc<BlockWindowCacheStore>, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc<DbDepthStore>, // Managers and services - pub(super) ghostdag_managers: Arc<Vec<DbGhostdagManager>>, + pub(super) ghostdag_primary_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_primary_store: storage.ghostdag_primary_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), + ghostdag_primary_manager: services.ghostdag_primary_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -348,14 +348,11 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if
hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { - let ghostdag_data = (0..=ctx.block_level as usize) - .map(|level| { - self.ghostdag_stores[level] - .get_data(ctx.hash) - .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) - }) - .collect_vec(); + let ghostdag_data = vec![self + .ghostdag_primary_store + .get_data(ctx.hash) + .unwrap_option() + .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0])))]; self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -373,7 +370,7 @@ impl HeaderProcessor { // for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap(); } if let Some(window) = ctx.block_window_for_difficulty { @@ -454,7 +451,7 @@ impl HeaderProcessor { for (level, datum) in ghostdag_data.iter().enumerate() { // This data might have been already written when applying the pruning proof. - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); } let mut relations_write = self.relations_stores.write(); @@ -495,8 +492,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = - Some(self.ghostdag_managers.iter().map(|manager_by_level| Arc::new(manager_by_level.genesis_ghostdag_data())).collect()); + ctx.ghostdag_data = Some(vec![Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())]); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); From 56c0b40505b70c3cb2a98a50f8628d5b3888e5e4 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 11 Jun 2024 21:52:11 -0600 Subject: [PATCH 11/65] Relax the panic to warn when finished_headers and couldn't find sufficient root This happens when there's not enough headers in the pruning proof but it satisfies validation --- consensus/src/processes/pruning_proof/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index c03c29449..35b502e33 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -956,7 +956,8 @@ impl PruningProofManager { tries += 1; if finished_headers { - panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned") + warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned. Trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; warn!("Failed to find sufficient root for level {level} after {tries} tries. 
From 43e9f9e82b85c352ce01d5173c38a0dd3bd8233d Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:26:17 -0600 Subject: [PATCH 12/65] Error handling for GD on higher levels relations.get_parents on GD gets extra parents that aren't in the current GD store, so get_blue_work throws an error. Next, ORIGIN was missing from the GD store, so add that. --- consensus/src/processes/ghostdag/ordering.rs | 12 ++++++++++-- consensus/src/processes/pruning_proof/mod.rs | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index 88b648b8c..21306e5b8 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -44,8 +44,16 @@ impl Ord for SortableBlock { impl GhostdagManager { pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks - .sort_by_cached_key(|block| SortableBlock { hash: *block, blue_work: self.ghostdag_store.get_blue_work(*block).unwrap() }); + sorted_blocks.sort_by_cached_key(|block| SortableBlock { + hash: *block, + // Since we're only calculating GD at all levels on-demand, we may get blocks from the relations + // store in the mergeset that are not on our level + // Options for fixes: + // - do this + // - guarantee that we're only getting parents that are in this store + // - make relations store only return parents at the same or higher level + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_default(), + }); sorted_blocks } } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 35b502e33..26b011134 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -900,6 +900,7 @@ impl PruningProofManager { true, ); ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); let mut topological_heap: BinaryHeap<_> = Default::default(); let mut visited = BlockHashSet::new(); for child in relations_service.get_children(root).unwrap().read().iter().copied() {
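The sort_blocks change in the patch above deserves a note: because GHOSTDAG is now computed on demand per level, the set handed to sort_blocks can contain blocks with no entry in the current level's ghostdag store, so the sort key falls back to the default (zero) blue work instead of unwrapping and panicking. A condensed, runnable illustration with a plain map standing in for the store (toy types; the real SortableBlock orders by blue work and breaks ties by hash):

    use std::collections::HashMap;

    // Blocks sort by (blue_work, hash); blocks unknown to this level's store get
    // the default blue work of zero and therefore sort before all known blocks.
    fn sort_blocks(mut blocks: Vec<u64>, blue_work: &HashMap<u64, u128>) -> Vec<u64> {
        blocks.sort_by_cached_key(|b| (blue_work.get(b).copied().unwrap_or_default(), *b));
        blocks
    }

    fn main() {
        let store = HashMap::from([(1u64, 50u128), (2, 10)]);
        // Block 3 is not in this level's store: it sorts first instead of panicking.
        assert_eq!(sort_blocks(vec![1, 2, 3], &store), vec![3, 2, 1]);
    }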
From 34f20abd64c913ebe66d007eefdc2ddbc603ad3c Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sat, 15 Jun 2024 09:57:43 -0600 Subject: [PATCH 13/65] remove using deeper requirements in lower levels --- consensus/src/processes/pruning_proof/mod.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 26b011134..313063172 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -812,15 +812,16 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; - let mut required_level_0_depth = if level == 0 { - required_level_depth - } else { - self.estimated_blue_depth_at_level_0( - level, - required_level_depth * 5 / 4, // We take a safety margin - current_dag_level, - ) - }; + let mut required_level_0_depth = required_level_depth; + // let mut required_level_0_depth = if level == 0 { + // required_level_depth + // } else { + // self.estimated_blue_depth_at_level_0( + // level, + // required_level_depth * 5 / 4, // We take a safety margin + // current_dag_level, + // ) + // }; let mut tries = 0; loop { From 2654b254b97144cdf758e121f66b261987fd7b19 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 00:04:44 -0600 Subject: [PATCH 14/65] Fix missed references to self.ghostdag_stores in validate_pruning_point_proof --- consensus/src/processes/pruning_proof/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 313063172..6f7840ea6 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -666,7 +666,7 @@ impl PruningProofManager { let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(&current_pp_header, level).iter().copied() { - let parent_blue_work = self.ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { @@ -702,7 +702,7 @@ impl PruningProofManager { if parents .iter() .copied() - .any(|parent| self.ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) + .any(|parent| ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) { return Ok(()); } From ba049296b978577b17868fa854c3ab9b5ece362a Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:03:17 -0600 Subject: [PATCH 15/65] Refactoring for single GD header processing --- .../pipeline/header_processor/processor.rs | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 141c15418..c4ccc8bae 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -55,7 +55,7 @@ pub struct HeaderProcessingContext { pub known_parents: Vec, // Staging data - pub ghostdag_data: Option>>, + pub ghostdag_data: Option>, pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this header. 
/// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap()[0] + &self.ghostdag_data.as_ref().unwrap() } } @@ -348,18 +348,17 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { - let ghostdag_data = vec![self + let ghostdag_data = self .ghostdag_primary_store .get_data(ctx.hash) .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0])))]; - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); + .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0]))); + self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { - let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); - let ghostdag_primary_data = &ghostdag_data[0]; + let ghostdag_primary_data = ctx.ghostdag_data.as_ref().unwrap(); let pp = ctx.pruning_point(); // Create a DB batch writer @@ -369,9 +368,7 @@ impl HeaderProcessor { // Append-only stores: these require no lock and hence done first in order to reduce locking time // - for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap(); - } + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); @@ -449,10 +446,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - for (level, datum) in ghostdag_data.iter().enumerate() { - // This data might have been already written when applying the pruning proof. 
- self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); - } + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -492,7 +486,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = Some(vec![Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())]); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); From a45b57122be5f9e8bec551d894f859dcae16d303 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:03:40 -0600 Subject: [PATCH 16/65] Add assertion to check root vs old_root --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6f7840ea6..8fbcb8b3c 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1042,7 +1042,7 @@ impl PruningProofManager { } else { block_at_depth_2m }; - // assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); From edb5cd3d9300f93f6b8e3f3abf9e11ff8bc627c7 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:04:57 -0600 Subject: [PATCH 17/65] Lint fix current_dag_level --- consensus/src/processes/pruning_proof/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8fbcb8b3c..e92cc6772 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -796,7 +796,6 @@ impl PruningProofManager { &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, - current_dag_level: BlockLevel, required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { @@ -971,7 +970,6 @@ impl PruningProofManager { pp_header: &HeaderWithBlockLevel, temp_db: Arc, ) -> (Vec>, Vec, Vec) { - let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; @@ -988,7 +986,7 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(&pp_header, level, current_dag_level, required_block, temp_db.clone()) + .find_sufficient_root(&pp_header, level, required_block, temp_db.clone()) .expect(&format!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); From e81394fe48bf797066407f53b220f02293472e83 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> 
Date: Tue, 18 Jun 2024 23:19:46 -0600 Subject: [PATCH 18/65] Keep DB Version at 3 The new prefixes added are compatible with the old version. We don't want to trigger a db delete with this change --- consensus/src/consensus/factory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f34aa54f9..f3ee51d9c 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 4; +const LATEST_DB_VERSION: u32 = 3; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { From 0e8c788c8e9b936df6327f4888ed3fb12400a008 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:38:47 -0600 Subject: [PATCH 19/65] Cleanup apply_proof logic and handle more ghostdag_stores logic --- .../pipeline/pruning_processor/processor.rs | 4 ++- consensus/src/processes/pruning_proof/mod.rs | 27 ++++++++++--------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index cd9026565..a6f3edf65 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -411,7 +411,9 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option(); + if level == 0 { + self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + } }); // Remove additional header related data diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e92cc6772..5db4708be 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1,7 +1,9 @@ use std::{ cmp::{max, Reverse}, - collections::{hash_map::Entry, BinaryHeap}, - collections::{hash_map::Entry::Vacant, VecDeque}, + collections::{ + hash_map::Entry::{self, Vacant}, + BinaryHeap, HashSet, VecDeque, + }, ops::{Deref, DerefMut}, sync::{ atomic::{AtomicBool, Ordering}, @@ -254,30 +256,29 @@ impl PruningProofManager { for (level, headers) in proof.iter().enumerate() { trace!("Applying level {} from the pruning point proof", level); - self.ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + let mut level_ancestors: HashSet = HashSet::new(); + level_ancestors.insert(ORIGIN); + for header in headers.iter() { let parents = Arc::new( self.parents_manager .parents_at_level(header, level as BlockLevel) .iter() .copied() - .filter(|parent| self.ghostdag_stores[level].has(*parent).unwrap()) + .filter(|parent| level_ancestors.contains(parent)) .collect_vec() .push_if_empty(ORIGIN), ); self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - let gd = if header.hash == self.genesis_hash { - self.ghostdag_managers[level].genesis_ghostdag_data() - } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; if level == 0 { + self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); + let gd = if 
let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); + let calculated_gd = self.ghostdag_primary_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, @@ -289,9 +290,9 @@ impl PruningProofManager { } }; self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); - } else { - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); } + + level_ancestors.insert(header.hash); } } From 56f9dab2059d59541bb5ebdb8df69f0814c2a2a5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:40:27 -0600 Subject: [PATCH 20/65] remove simpa changes --- simpa/src/main.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 8975e974a..1baecc3e7 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -222,11 +222,6 @@ fn main_impl(mut args: Args) { Default::default(), unix_now(), )); - - // TODO: Remove the call to get_pruning_point_proof - // let the_hash = Hash::from_str("45d0bb998ab8c3513d18fef3f70d9c686539da7cbe4fab8021e55be1b3a0f8df").unwrap(); - // assert!(topologically_ordered_hashes(&consensus, config.params.genesis.hash).into_iter().contains(&the_hash)); - let _ = consensus.get_pruning_point_proof(); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -446,8 +441,6 @@ mod tests { args.target_blocks = Some(5000); args.tpb = 1; args.test_pruning = true; - // args.output_dir = Some("/tmp/simpa".into()); - // args.input_dir = Some("/tmp/simpa".into()); kaspa_core::log::try_init_logger(&args.log_level); // As we log the panic, we want to set it up after the logger From c5be8ad40aaf0db3c75042bcdb8043aaf1c306d1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 19 Jun 2024 00:45:09 -0600 Subject: [PATCH 21/65] Remove rewriting origin to primary GD It's already in there --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 5db4708be..472e5f130 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -273,7 +273,7 @@ impl PruningProofManager { self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); if level == 0 { - self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); + // self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else {
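Together with PATCH 19 above, this leaves apply_proof writing ghostdag data only at level 0, where the locally computed data is overridden with the blue score and blue work carried by the already-validated trusted header. A condensed sketch of that override step (toy struct; the real GhostdagData also carries the mergeset blues/reds and blues anticone sizes, all kept from the calculated data):

    // Toy stand-in for GhostdagData: keep the locally computed DAG structure,
    // but adopt the blue score and blue work asserted by the trusted header.
    #[derive(Debug, PartialEq)]
    struct Gd { blue_score: u64, blue_work: u128, selected_parent: u64 }

    fn override_with_trusted(calculated: Gd, header_blue_score: u64, header_blue_work: u128) -> Gd {
        Gd { blue_score: header_blue_score, blue_work: header_blue_work, ..calculated }
    }

    fn main() {
        let calculated = Gd { blue_score: 7, blue_work: 9, selected_parent: 42 };
        let trusted = override_with_trusted(calculated, 1_000_000, 2_000_000);
        assert_eq!(trusted, Gd { blue_score: 1_000_000, blue_work: 2_000_000, selected_parent: 42 });
    }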
From 8d15e27a39baf53c4b6e6e529ff8efd577fbe4fb Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 19 Jun 2024 17:04:57 -0600 Subject: [PATCH 22/65] More refactoring to use single GD store/manager --- consensus/src/consensus/services.rs | 30 ++++++-------------- consensus/src/consensus/storage.rs | 21 ++++---------- consensus/src/processes/pruning_proof/mod.rs | 7 +---- 3 files changed, 15 insertions(+), 43 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index b5617ea76..41478580c 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,7 +53,6 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_managers: Arc>, pub ghostdag_primary_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, @@ -112,26 +111,15 @@ impl ConsensusServices { reachability_service.clone(), storage.ghostdag_primary_store.clone(), ); - let ghostdag_managers = Arc::new( - storage - .ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - params.genesis.hash, - params.ghostdag_k, - ghostdag_store, - relations_services[level].clone(), - storage.headers_store.clone(), - reachability_service.clone(), - level != 0, - ) - }) - .collect_vec(), + let ghostdag_primary_manager = GhostdagManager::new( + params.genesis.hash, + params.ghostdag_k, + storage.ghostdag_primary_store.clone(), + relations_services[0].clone(), + storage.headers_store.clone(), + reachability_service.clone(), + false, ); - let ghostdag_primary_manager = ghostdag_managers[0].clone(); let coinbase_manager = CoinbaseManager::new( params.coinbase_payload_script_public_key_max_len, @@ -185,7 +173,6 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_managers.clone(), ghostdag_primary_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, @@ -215,7 +202,6 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_managers, ghostdag_primary_manager, coinbase_manager, pruning_point_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index d53324fc6..4b9646ec2 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,7 +50,6 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub ghostdag_stores: Arc>>, pub ghostdag_primary_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, @@ -193,19 +192,12 @@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_stores = Arc::new( - (0..=params.max_block_level) - .map(|level| { - Arc::new(DbGhostdagStore::new( - db.clone(), - level, - ghostdag_builder.downscale(level).build(), - ghostdag_compact_builder.downscale(level).build(), - )) - }) - .collect_vec(), - ); - let ghostdag_primary_store = ghostdag_stores[0].clone(); + let ghostdag_primary_store = Arc::new(DbGhostdagStore::new( + db.clone(), + 0, + ghostdag_builder.downscale(0).build(), + ghostdag_compact_builder.downscale(0).build(), + )); let daa_excluded_store = Arc::new(DbDaaStore::new(db.clone(), daa_excluded_builder.build())); let headers_store = Arc::new(DbHeadersStore::new(db.clone(), headers_builder.build(), headers_compact_builder.build())); let depth_store = Arc::new(DbDepthStore::new(db.clone(), header_data_builder.build())); @@ -245,7 +237,6 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, 
reachability_store, - ghostdag_stores, ghostdag_primary_store, pruning_point_store, headers_selected_tip_store, diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 472e5f130..fb0eceb77 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -117,7 +117,6 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_stores: Arc>>, ghostdag_primary_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, @@ -128,7 +127,6 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_managers: Arc>, ghostdag_primary_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, @@ -153,7 +151,6 @@ impl PruningProofManager { storage: &Arc, parents_manager: DbParentsManager, reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, @@ -170,7 +167,6 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_stores: storage.ghostdag_stores.clone(), ghostdag_primary_store: storage.ghostdag_primary_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), @@ -181,7 +177,6 @@ impl PruningProofManager { selected_chain_store: storage.selected_chain_store.clone(), depth_store: storage.depth_store.clone(), - ghostdag_managers, traversal_manager, window_manager, parents_manager, @@ -467,7 +462,7 @@ impl PruningProofManager { let level = level as usize; reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); - ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); } db.write(batch).unwrap(); From 1c6b585d69d5cf85759d4ac65ab1c7fc0644a3f3 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 21 Jun 2024 17:01:26 -0600 Subject: [PATCH 23/65] Lint fixes --- consensus/src/pipeline/header_processor/processor.rs | 2 +- consensus/src/processes/pruning_proof/mod.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index c4ccc8bae..a04af90e6 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this header. 
/// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap() + self.ghostdag_data.as_ref().unwrap() } } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index fb0eceb77..0058408fa 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -635,7 +635,7 @@ impl PruningProofManager { let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(&proof)?; + let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; let selected_tip_by_level = self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; let ghostdag_stores = stores_and_processes.ghostdag_stores; @@ -982,8 +982,8 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(&pp_header, level, required_block, temp_db.clone()) - .expect(&format!("find_sufficient_root failed for level {level}")); + .find_sufficient_root(pp_header, level, required_block, temp_db.clone()) + .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); root_by_level[level_usize] = Some(root); From 273aa81fdd9c9a13edf2941bc8bfec0650486f30 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 21 Jun 2024 17:04:39 -0600 Subject: [PATCH 24/65] warn to trace for common retry --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 0058408fa..39bf8d756 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -957,7 +957,7 @@ impl PruningProofManager { break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; - warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); + trace!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); } } From f9b3fda63bab6d8b7e8c7bf62e72fda5f544041f Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 25 Jun 2024 23:11:06 -0600 Subject: [PATCH 25/65] Address initial comments --- .../pipeline/header_processor/processor.rs | 1 + .../pipeline/pruning_processor/processor.rs | 7 ++-- consensus/src/processes/pruning_proof/mod.rs | 41 +++++++++---------- 3 files changed, 24 insertions(+), 25 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index a04af90e6..2214d0881 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -368,6 +368,7 @@ impl HeaderProcessor { // Append-only stores: these require no lock and hence done first in order to reduce locking time // + // This data might have been already written when applying the pruning proof. 
self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index a6f3edf65..b7f46f3b0 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -411,11 +411,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - if level == 0 { - self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); - } }); + self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); self.depth_store.delete_batch(&mut batch, current).unwrap(); @@ -457,7 +456,7 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - // self.assert_proof_rebuilding(proof, new_pruning_point); + self.assert_proof_rebuilding(proof.clone(), new_pruning_point); self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); self.assert_data_rebuilding(data, new_pruning_point); } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 39bf8d756..ccc8f81ff 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -268,8 +268,6 @@ impl PruningProofManager { self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); if level == 0 { - // self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); - let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { @@ -474,13 +472,13 @@ impl PruningProofManager { fn populate_stores_for_validate_pruning_point_proof( &self, proof: &PruningPointProof, - stores_and_processes: &mut TempProofContext, + ctx: &mut TempProofContext, ) -> PruningImportResult> { - let headers_store = &stores_and_processes.headers_store; - let ghostdag_stores = &stores_and_processes.ghostdag_stores; - let mut relations_stores = stores_and_processes.relations_stores.clone(); - let reachability_stores = &stores_and_processes.reachability_stores; - let ghostdag_managers = &stores_and_processes.ghostdag_managers; + let headers_store = &ctx.headers_store; + let ghostdag_stores = &ctx.ghostdag_stores; + let mut relations_stores = ctx.relations_stores.clone(); + let reachability_stores = &ctx.reachability_stores; + let ghostdag_managers = &ctx.ghostdag_managers; let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; @@ -741,7 +739,7 @@ impl PruningProofManager { } fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { - level_depth << current_dag_level.saturating_sub(level) + level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth) } /// selected parent at level = the parent of the header at the level @@ -792,6 +790,7 @@ impl PruningProofManager { &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, + current_dag_level: BlockLevel, required_block: Option, temp_db: Arc, ) -> 
PruningProofManagerInternalResult<(Arc, Hash, Hash)> { @@ -807,16 +806,15 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; - let mut required_level_0_depth = required_level_depth; - // let mut required_level_0_depth = if level == 0 { - // required_level_depth - // } else { - // self.estimated_blue_depth_at_level_0( - // level, - // required_level_depth * 5 / 4, // We take a safety margin - // current_dag_level, - // ) - // }; + let mut required_level_0_depth = if level == 0 { + required_level_depth + } else { + self.estimated_blue_depth_at_level_0( + level, + required_level_depth * 5 / 4, // We take a safety margin + current_dag_level, + ) + }; let mut tries = 0; loop { @@ -957,7 +955,7 @@ impl PruningProofManager { break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; - trace!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); + warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); } } @@ -966,6 +964,7 @@ impl PruningProofManager { pp_header: &HeaderWithBlockLevel, temp_db: Arc, ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; @@ -982,7 +981,7 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(pp_header, level, required_block, temp_db.clone()) + .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); From ca8bb429691a3575e3b59379de1c086391879554 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 25 Jun 2024 23:27:22 -0600 Subject: [PATCH 26/65] Remove "primary" in ghostdag store/manager references --- consensus/src/consensus/mod.rs | 10 +++---- consensus/src/consensus/services.rs | 20 ++++++------- consensus/src/consensus/storage.rs | 6 ++-- consensus/src/consensus/test_consensus.rs | 6 ++-- .../body_validation_in_context.rs | 2 +- .../src/pipeline/body_processor/processor.rs | 6 ++-- .../pipeline/header_processor/processor.rs | 18 ++++++------ .../pipeline/pruning_processor/processor.rs | 6 ++-- .../pipeline/virtual_processor/processor.rs | 18 ++++++------ .../virtual_processor/utxo_validation.rs | 2 +- consensus/src/processes/pruning_proof/mod.rs | 28 +++++++++---------- simpa/src/main.rs | 12 ++++---- 12 files changed, 67 insertions(+), 67 deletions(-) diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 80babbef0..7e1690b2a 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -231,7 +231,7 @@ impl Consensus { block_processors_pool, db.clone(), storage.statuses_store.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.block_transactions_store.clone(), storage.body_tips_store.clone(), @@ -484,7 +484,7 @@ impl ConsensusApi for 
Consensus { fn get_virtual_merge_depth_blue_work_threshold(&self) -> BlueWorkType { // PRUNE SAFETY: merge depth root is never close to being pruned (in terms of block depth) - self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_primary_store.get_blue_work(root).unwrap()) + self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_store.get_blue_work(root).unwrap()) } fn get_sink(&self) -> Hash { @@ -812,7 +812,7 @@ impl ConsensusApi for Consensus { Some(BlockStatus::StatusInvalid) => return Err(ConsensusError::InvalidBlock(hash)), _ => {} }; - let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; + let ghostdag = self.ghostdag_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; Ok((&*ghostdag).into()) } @@ -864,7 +864,7 @@ impl ConsensusApi for Consensus { Ok(self .services .window_manager - .block_window(&self.ghostdag_primary_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) .unwrap() .deref() .iter() @@ -903,7 +903,7 @@ impl ConsensusApi for Consensus { match start_hash { Some(hash) => { self.validate_block_exists(hash)?; - let ghostdag_data = self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); // The selected parent header is used within to check for sampling activation, so we verify its existence first if !self.headers_store.has(ghostdag_data.selected_parent).unwrap() { return Err(ConsensusError::DifficultyError(DifficultyError::InsufficientWindowData(0))); diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 41478580c..74544c11b 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,7 +53,7 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_primary_manager: DbGhostdagManager, + pub ghostdag_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, pub pruning_proof_manager: Arc, @@ -81,13 +81,13 @@ impl ConsensusServices { let reachability_service = MTReachabilityService::new(storage.reachability_store.clone()); let dag_traversal_manager = DagTraversalManager::new( params.genesis.hash, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_service.clone(), reachability_service.clone(), ); let window_manager = DualWindowManager::new( ¶ms.genesis, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.daa_excluded_store.clone(), storage.block_window_cache_for_difficulty.clone(), @@ -109,12 +109,12 @@ impl ConsensusServices { params.genesis.hash, storage.depth_store.clone(), reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), ); - let ghostdag_primary_manager = GhostdagManager::new( + let ghostdag_manager = GhostdagManager::new( params.genesis.hash, params.ghostdag_k, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_services[0].clone(), storage.headers_store.clone(), reachability_service.clone(), @@ -154,7 +154,7 @@ impl ConsensusServices { params.finality_depth, params.genesis.hash, 
reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.past_pruning_points_store.clone(), storage.headers_selected_tip_store.clone(), @@ -173,7 +173,7 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_primary_manager.clone(), + ghostdag_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, @@ -188,7 +188,7 @@ impl ConsensusServices { params.mergeset_size_limit as usize, reachability_service.clone(), dag_traversal_manager.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.selected_chain_store.clone(), storage.headers_selected_tip_store.clone(), storage.pruning_point_store.clone(), @@ -202,7 +202,7 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_primary_manager, + ghostdag_manager, coinbase_manager, pruning_point_manager, pruning_proof_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 4b9646ec2..e170ace04 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,7 +50,7 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub ghostdag_primary_store: Arc, + pub ghostdag_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, pub past_pruning_points_store: Arc, @@ -192,7 +192,7 @@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_primary_store = Arc::new(DbGhostdagStore::new( + let ghostdag_store = Arc::new(DbGhostdagStore::new( db.clone(), 0, ghostdag_builder.downscale(0).build(), @@ -237,7 +237,7 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_primary_store, + ghostdag_store, pruning_point_store, headers_selected_tip_store, body_tips_store, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index c626e00ff..a937388ba 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -118,7 +118,7 @@ impl TestConsensus { pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { let mut header = header_from_precomputed_hash(hash, parents); - let ghostdag_data = self.consensus.services.ghostdag_primary_manager.ghostdag(header.direct_parents()); + let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents()); header.pruning_point = self .consensus .services @@ -201,7 +201,7 @@ impl TestConsensus { } pub fn ghostdag_store(&self) -> &Arc { - &self.consensus.ghostdag_primary_store + &self.consensus.ghostdag_store } pub fn reachability_store(&self) -> &Arc> { @@ -233,7 +233,7 @@ impl TestConsensus { } pub fn ghostdag_manager(&self) -> &DbGhostdagManager { - &self.consensus.services.ghostdag_primary_manager + &self.consensus.services.ghostdag_manager } } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index b437f1f13..2425556d0 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -25,7 +25,7 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) = 
self.window_manager.calc_past_median_time(&self.ghostdag_primary_store.get_data(block.hash()).unwrap())?; + let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; for tx in block.transactions.iter() { if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { return Err(RuleError::TxInContextFailed(tx.id(), e)); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 8b6d35e19..1ea674263 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -55,7 +55,7 @@ pub struct BlockBodyProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) block_transactions_store: Arc, pub(super) body_tips_store: Arc>, @@ -92,7 +92,7 @@ impl BlockBodyProcessor { db: Arc, statuses_store: Arc>, - ghostdag_primary_store: Arc, + ghostdag_store: Arc, headers_store: Arc, block_transactions_store: Arc, body_tips_store: Arc>, @@ -116,7 +116,7 @@ impl BlockBodyProcessor { db, statuses_store, reachability_service, - ghostdag_primary_store, + ghostdag_store, headers_store, block_transactions_store, body_tips_store, diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 2214d0881..22a5c566c 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) reachability_relations_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_primary_manager: DbGhostdagManager, + pub(super) ghostdag_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_primary_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -349,10 +349,10 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn 
ghostdag(&self, ctx: &mut HeaderProcessingContext) { let ghostdag_data = self - .ghostdag_primary_store + .ghostdag_store .get_data(ctx.hash) .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0]))); + .unwrap_or_else(|| Arc::new(self.ghostdag_manager.ghostdag(&ctx.known_parents[0]))); self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -369,7 +369,7 @@ impl HeaderProcessor { // // This data might have been already written when applying the pruning proof. - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); @@ -447,7 +447,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -487,7 +487,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = Some(Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index b7f46f3b0..f73f8c12e 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -280,7 +280,7 @@ impl PruningProcessor { let mut counter = 0; let mut batch = WriteBatch::default(); for kept in keep_relations.iter().copied() { - let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { + let Some(ghostdag) = self.ghostdag_store.get_data(kept).unwrap_option() else { continue; }; if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains(&h)) { @@ -292,7 +292,7 @@ impl PruningProcessor { mutable_ghostdag.selected_parent = ORIGIN; } counter += 1; - self.ghostdag_primary_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); + self.ghostdag_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); } } self.db.write(batch).unwrap(); @@ -413,7 +413,7 @@ impl PruningProcessor { staging_level_relations.commit(&mut batch).unwrap(); }); - self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index ded062251..db8efed3a 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -115,7 +115,7 @@ pub struct VirtualStateProcessor { // 
Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) daa_excluded_store: Arc, pub(super) block_transactions_store: Arc, @@ -190,7 +190,7 @@ impl VirtualStateProcessor { db, statuses_store: storage.statuses_store.clone(), headers_store: storage.headers_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), block_transactions_store: storage.block_transactions_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), @@ -205,7 +205,7 @@ impl VirtualStateProcessor { pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), - ghostdag_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), reachability_service: services.reachability_service.clone(), relations_service: services.relations_service.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), @@ -302,7 +302,7 @@ impl VirtualStateProcessor { .expect("all possible rule errors are unexpected here"); // Update the pruning processor about the virtual state change - let sink_ghostdag_data = self.ghostdag_primary_store.get_compact_data(new_sink).unwrap(); + let sink_ghostdag_data = self.ghostdag_store.get_compact_data(new_sink).unwrap(); // Empty the channel before sending the new message. If pruning processor is busy, this step makes sure // the internal channel does not grow with no need (since we only care about the most recent message) let _consume = self.pruning_receiver.try_iter().count(); @@ -401,7 +401,7 @@ impl VirtualStateProcessor { } let header = self.headers_store.get_header(current).unwrap(); - let mergeset_data = self.ghostdag_primary_store.get_data(current).unwrap(); + let mergeset_data = self.ghostdag_store.get_data(current).unwrap(); let pov_daa_score = header.daa_score; let selected_parent_multiset_hash = self.utxo_multisets_store.get(selected_parent).unwrap(); @@ -562,7 +562,7 @@ impl VirtualStateProcessor { let mut heap = tips .into_iter() - .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_primary_store.get_blue_work(block).unwrap() }) + .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_store.get_blue_work(block).unwrap() }) .collect::>(); // The initial diff point is the previous sink @@ -584,7 +584,7 @@ impl VirtualStateProcessor { // 2. will be removed eventually by the bounded merge check. // Hence as an optimization we prefer removing such blocks in advance to allow valid tips to be considered. 
let filtering_root = self.depth_store.merge_depth_root(candidate).unwrap(); - let filtering_blue_work = self.ghostdag_primary_store.get_blue_work(filtering_root).unwrap_or_default(); + let filtering_blue_work = self.ghostdag_store.get_blue_work(filtering_root).unwrap_or_default(); return ( candidate, heap.into_sorted_iter().take_while(|s| s.blue_work >= filtering_blue_work).map(|s| s.hash).collect(), @@ -602,7 +602,7 @@ impl VirtualStateProcessor { if self.reachability_service.is_dag_ancestor_of(finality_point, parent) && !self.reachability_service.is_dag_ancestor_of_any(parent, &mut heap.iter().map(|sb| sb.hash)) { - heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_primary_store.get_blue_work(parent).unwrap() }); + heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_store.get_blue_work(parent).unwrap() }); } } drop(prune_guard); @@ -1117,7 +1117,7 @@ impl VirtualStateProcessor { // in depth of 2*finality_depth, and can give false negatives for smaller finality violations. let current_pp = self.pruning_point_store.read().pruning_point().unwrap(); let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp); - let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp); + let vff = self.depth_manager.calc_finality_point(&self.ghostdag_store.get_data(vf).unwrap(), current_pp); let last_known_pp = pp_list.iter().rev().find(|pp| match self.statuses_store.read().get(pp.hash).unwrap_option() { Some(status) => status.is_valid(), diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 112976294..0e3ca7533 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -82,7 +82,7 @@ impl VirtualStateProcessor { for (i, (merged_block, txs)) in once((ctx.selected_parent(), selected_parent_transactions)) .chain( ctx.ghostdag_data - .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_primary_store.deref()) + .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_store.deref()) .map(|b| (b, self.block_transactions_store.get(b).unwrap())), ) .enumerate() diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index ccc8f81ff..82ebc7433 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -117,7 +117,7 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_primary_store: Arc, + ghostdag_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, past_pruning_points_store: Arc, @@ -127,7 +127,7 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_primary_manager: DbGhostdagManager, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -167,7 +167,7 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), 
past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -189,7 +189,7 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, - ghostdag_primary_manager: ghostdag_manager, + ghostdag_manager, is_consensus_exiting, } @@ -271,7 +271,7 @@ impl PruningProofManager { let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_primary_manager.ghostdag(&parents); + let calculated_gd = self.ghostdag_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, @@ -282,7 +282,7 @@ impl PruningProofManager { blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), } }; - self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); + self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap(); } level_ancestors.insert(header.hash); @@ -292,7 +292,7 @@ impl PruningProofManager { let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_primary_manager.ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -880,7 +880,7 @@ impl PruningProofManager { let root = root_header.hash; if level == 0 { - return Ok((self.ghostdag_primary_store.clone(), selected_tip, root)); + return Ok((self.ghostdag_store.clone(), selected_tip, root)); } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); @@ -1181,7 +1181,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -1201,7 +1201,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_primary_manager.sort_blocks(anticone); + let mut anticone = self.ghostdag_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -1212,14 +1212,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_primary_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_primary_store.get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_store.get_data(hash).unwrap()).into(), }); } } @@ -1227,7 +1227,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); 
e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -1259,7 +1259,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_primary_store.get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 1baecc3e7..368b52344 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -414,12 +414,12 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) - } fn print_stats(src_consensus: &Consensus, hashes: &[Hash], delay: f64, bps: f64, k: KType) -> usize { - let blues_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_blues.len()).sum::() as f64 - / hashes.len() as f64; - let reds_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_reds.len()).sum::() as f64 - / hashes.len() as f64; + let blues_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_blues.len()).sum::() + as f64 + / hashes.len() as f64; + let reds_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_reds.len()).sum::() + as f64 + / hashes.len() as f64; let parents_mean = hashes.iter().map(|&h| src_consensus.headers_store.get_header(h).unwrap().direct_parents().len()).sum::() as f64 / hashes.len() as f64; From 61183faba05ff020bcad48951bfc484dac93e1c1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 26 Jun 2024 22:55:59 -0600 Subject: [PATCH 27/65] Add small safety margin to proof at level 0 This prevents the case where the new root is in the anticone of the old root --- consensus/src/processes/pruning_proof/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 82ebc7433..d6c109b54 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -807,7 +807,7 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; let mut required_level_0_depth = if level == 0 { - required_level_depth + required_level_depth + 100 // smaller safety margin } else { self.estimated_blue_depth_at_level_0( level, @@ -1035,6 +1035,8 @@ impl PruningProofManager { } else { block_at_depth_2m }; + + // new root is expected to be always an ancestor of old root because new root takes a safety margin assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); From 34bc88f399f6fae547ca79500b657c18a18e3325 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 28 Jun 2024 23:23:40 -0600 Subject: [PATCH 28/65] Revert to only do proof rebuilding on sanity check --- consensus/src/pipeline/pruning_processor/processor.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index f73f8c12e..bbc1ea9a9 100644 ---
a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -456,8 +456,7 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - self.assert_proof_rebuilding(proof.clone(), new_pruning_point); - self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); + self.assert_proof_rebuilding(proof, new_pruning_point); self.assert_data_rebuilding(data, new_pruning_point); } From da1cfe34b608d32b1663d865e1c81331afc1bc65 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 28 Jun 2024 23:24:38 -0600 Subject: [PATCH 29/65] Proper "better" proof check --- consensus/src/processes/pruning_proof/mod.rs | 260 +++++++++++-------- 1 file changed, 158 insertions(+), 102 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index d6c109b54..6dac50563 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -49,7 +49,7 @@ use crate::{ }, stores::{ depth::DbDepthStore, - ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -597,32 +597,32 @@ impl PruningProofManager { Ok(()) } - // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple - // that contains the ghostdag data of the proof and current consensus common ancestor. If no - // such ancestor exists, it returns None. - fn find_proof_and_consensus_common_ancestor_ghostdag_data( + /// Returns the common ancestor of the proof and the current consensus if there is one. + /// + /// ghostdag_stores currently contain only entries for blocks in the proof. + /// While iterating through the selected parent chain of the current consensus, if we find any + /// that is already in ghostdag_stores that must mean it's a common ancestor of the proof + /// and current consensus + fn find_proof_and_consensus_common_ancestor( &self, - ghostdag_stores: &[Arc], - proof_selected_tip: Hash, + ghostdag_store: &Arc, + current_consensus_selected_tip_header: Arc
, level: BlockLevel, - proof_selected_tip_gd: CompactGhostdagData, - ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { - let mut proof_current = proof_selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - loop { - match ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); - } - }; + relations_service: &MTRelationsService, + ) -> Option { + let mut chain_block = current_consensus_selected_tip_header.clone(); + + for _ in 0..(2 * self.pruning_proof_m as usize) { + if chain_block.direct_parents().is_empty() || chain_block.hash.is_origin() { + break; + } + if ghostdag_store.has(chain_block.hash).unwrap() { + return Some(chain_block.hash); + } + chain_block = self.find_selected_parent_header_at_level(&chain_block, level, relations_service).unwrap(); } + + None } pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { @@ -640,29 +640,54 @@ impl PruningProofManager { let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; - let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); + let current_pp_header = self.headers_store.get_header_with_block_level(current_pp).unwrap(); for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + + // Next check is to see if this proof is "better" than what's in the current consensus + // Step 1 - look at only levels that have a full proof (least 2m blocks in the proof) if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( - &ghostdag_stores, - selected_tip, + // Step 2 - if we can find a common ancestor between the proof and current consensus + // we can determine if the proof is better. 
The proof is better if the score difference between the + // old current consensus's tips and the common ancestor is less than the score difference between the + // proof's tip and the common ancestor + let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); + let current_consensus_selected_tip_header = if current_pp_header.block_level >= level { + current_pp_header.header.clone() + } else { + self.find_selected_parent_header_at_level(¤t_pp_header.header, level, &relations_service).unwrap() + }; + if let Some(common_ancestor) = self.find_proof_and_consensus_common_ancestor( + &ghostdag_stores[level_idx], + current_consensus_selected_tip_header.clone(), level, - proof_selected_tip_gd, + &relations_service, ) { + // Fill the GD store with data from current consensus, + // starting from the common ancestor until the current level selected tip + let _ = self.fill_proof_ghostdag_data( + proof[level_idx].first().unwrap().hash, + common_ancestor, + current_consensus_selected_tip_header.hash, + &ghostdag_stores[level_idx], + &relations_service, + level != 0, + None, + false, + ); + let common_ancestor_blue_work = ghostdag_stores[level_idx].get_blue_work(common_ancestor).unwrap(); let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { + SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(common_ancestor_blue_work); + for parent in self.parents_manager.parents_at_level(¤t_pp_header.header, level).iter().copied() { let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = - SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); + let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { return Err(PruningImportError::PruningProofInsufficientBlueWork); } @@ -748,7 +773,7 @@ impl PruningProofManager { &self, header: &Header, level: BlockLevel, - relations_service: MTRelationsService, + relations_service: &MTRelationsService, ) -> PruningProofManagerInternalResult> { // Logic of apply_proof only inserts parent entries for a header from the proof // into the relations store for a level if there was GD data in the old stores for that @@ -798,7 +823,7 @@ impl PruningProofManager { let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { - self.find_selected_parent_header_at_level(&pp_header.header, level, relations_service.clone())? + self.find_selected_parent_header_at_level(&pp_header.header, level, &relations_service)? 
}; let selected_tip = selected_tip_header.hash; @@ -850,7 +875,7 @@ impl PruningProofManager { { break current_header; } - current_header = match self.find_selected_parent_header_at_level(¤t_header, level, relations_service.clone()) { + current_header = match self.find_selected_parent_header_at_level(¤t_header, level, &relations_service) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { @@ -863,18 +888,15 @@ impl PruningProofManager { }; if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { - current_required_chain_block = match self.find_selected_parent_header_at_level( - ¤t_required_chain_block, - level, - relations_service.clone(), - ) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - finished_headers_for_required_block_chain = true; - current_required_chain_block - } - Err(e) => return Err(e), - }; + current_required_chain_block = + match self.find_selected_parent_header_at_level(¤t_required_chain_block, level, &relations_service) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + finished_headers_for_required_block_chain = true; + current_required_chain_block + } + Err(e) => return Err(e), + }; } }; let root = root_header.hash; @@ -884,63 +906,16 @@ impl PruningProofManager { } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); - let gd_manager = GhostdagManager::new( + let has_required_block = self.fill_proof_ghostdag_data( + root, root, - self.ghostdag_k, - ghostdag_store.clone(), - relations_service.clone(), - self.headers_store.clone(), - self.reachability_service.clone(), + pp, + &ghostdag_store, + &relations_service, + level != 0, + Some(required_block), true, ); - ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); - ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); - let mut topological_heap: BinaryHeap<_> = Default::default(); - let mut visited = BlockHashSet::new(); - for child in relations_service.get_children(root).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
- })); - } - - let mut has_required_block = root == required_block; - loop { - let Some(current) = topological_heap.pop() else { - break; - }; - let current_hash = current.0.hash; - if !visited.insert(current_hash) { - continue; - } - - if !self.reachability_service.is_dag_ancestor_of(current_hash, pp) { - // We don't care about blocks in the antipast of the pruning point - continue; - } - - if !has_required_block && current_hash == required_block { - has_required_block = true; - } - - let relevant_parents: Box<[Hash]> = relations_service - .get_parents(current_hash) - .unwrap() - .iter() - .copied() - .filter(|parent| self.reachability_service.is_dag_ancestor_of(root, *parent)) - .collect(); - let current_gd = gd_manager.ghostdag(&relevant_parents); - ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap(); - for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? - })); - } - } // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block @@ -1090,6 +1065,87 @@ impl PruningProofManager { .collect_vec() } + /// BFS forward iterates from starting_hash until selected tip, ignoring blocks in the antipast of selected_tip. + /// For each block along the way, insert that hash into the ghostdag_store + /// If we have a required_block to find, this will return true if that block was found along the way + fn fill_proof_ghostdag_data( + &self, + genesis_hash: Hash, + starting_hash: Hash, + selected_tip: Hash, + ghostdag_store: &Arc, + relations_service: &MTRelationsService, + use_score_as_work: bool, + required_block: Option, + initialize_store: bool, + ) -> bool { + let gd_manager = GhostdagManager::new( + genesis_hash, + self.ghostdag_k, + ghostdag_store.clone(), + relations_service.clone(), + self.headers_store.clone(), + self.reachability_service.clone(), + use_score_as_work, + ); + + if initialize_store { + ghostdag_store.insert(genesis_hash, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); + } + + let mut topological_heap: BinaryHeap<_> = Default::default(); + let mut visited = BlockHashSet::new(); + for child in relations_service.get_children(starting_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
+ })); + } + + let mut has_required_block = required_block.is_some_and(|required_block| starting_hash == required_block); + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) { + // We don't care about blocks in the antipast of the selected tip + continue; + } + + if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) { + has_required_block = true; + } + + let relevant_parents: Box<[Hash]> = relations_service + .get_parents(current_hash) + .unwrap() + .iter() + .copied() + .filter(|parent| self.reachability_service.is_dag_ancestor_of(starting_hash, *parent)) + .collect(); + let current_gd = gd_manager.ghostdag(&relevant_parents); + + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); + + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? + })); + } + } + + has_required_block + } + /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. fn chain_up_to_depth( &self, From a23d1dd88b79dc0770b4ffa96f392024a8647dbd Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:22:30 -0600 Subject: [PATCH 30/65] Update comment on find_selected_parent_header_at_level --- consensus/src/processes/pruning_proof/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6dac50563..e0ca2a1e8 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -775,9 +775,8 @@ impl PruningProofManager { level: BlockLevel, relations_service: &MTRelationsService, ) -> PruningProofManagerInternalResult> { - // Logic of apply_proof only inserts parent entries for a header from the proof - // into the relations store for a level if there was GD data in the old stores for that - // header. To mimic that logic here, we need to filter out parents that are NOT in the relations_service + // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important + // to filter to include only parents that are in relations_service. 
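As a standalone illustration of the filtering rule this comment describes, here is a minimal sketch with simplified stand-in types (the real code works with MTRelationsService, the headers store, and SortableBlock ordering; the names and types here are illustrative only):

use std::collections::{HashMap, HashSet};

type Hash = u64;
type BlueWork = u128;

// Stand-in for the level's relations store: a block is "known" at this level
// only if it has a relations entry.
struct LevelRelations {
    known: HashSet<Hash>,
}

impl LevelRelations {
    fn has(&self, hash: Hash) -> bool {
        self.known.contains(&hash)
    }
}

// Keep only the parents the relations store knows about, then take the one with
// the highest blue work as the selected parent (ties broken by hash).
fn selected_parent_at_level(
    parents_at_level: &[Hash],
    relations: &LevelRelations,
    blue_work: &HashMap<Hash, BlueWork>,
) -> Option<Hash> {
    parents_at_level.iter().copied().filter(|p| relations.has(*p)).max_by_key(|p| (blue_work[p], *p))
}

fn main() {
    let relations = LevelRelations { known: HashSet::from([1, 2]) };
    let blue_work = HashMap::from([(1u64, 10u128), (2, 25), (3, 99)]);
    // Parent 3 comes back from the parents manager but has no relations entry at
    // this level, so it must be ignored despite its higher blue work.
    assert_eq!(selected_parent_at_level(&[1, 2, 3], &relations, &blue_work), Some(2));
}

The point is that a parent advertised by parents_at_level is only usable at a level if that level's relations store actually knows it.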
let parents = self .parents_manager .parents_at_level(header, level) From 974d2004dc427d7a56ce43e3a736d24e61c1bb69 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:45:21 -0600 Subject: [PATCH 31/65] Re-apply missed comment --- consensus/src/pipeline/header_processor/processor.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 22a5c566c..b64fe4ea2 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -447,6 +447,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); + // This data might have been already written when applying the pruning proof. self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); From 6ea832819d78732b9da3d4a0785f8e3ce7accfbc Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 3 Jul 2024 23:48:59 -0600 Subject: [PATCH 32/65] Implement db upgrade logic from 3 to 4 --- Cargo.lock | 2 + consensus/src/consensus/factory.rs | 19 ++++- kaspad/Cargo.toml | 2 + kaspad/src/daemon.rs | 109 +++++++++++++++++++++++++++-- 4 files changed, 125 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 22cd64f4f..67272dd78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3755,6 +3755,7 @@ dependencies = [ "dhat", "dirs", "futures-util", + "itertools 0.11.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", @@ -3782,6 +3783,7 @@ dependencies = [ "num_cpus", "rand 0.8.5", "rayon", + "rocksdb", "serde", "serde_with", "tempfile", diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f8af5fb5a 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { @@ -219,6 +219,23 @@ impl MultiConsensusManagementStore { } } + /// Returns the current version of this database + pub fn version(&self) -> StoreResult { + match self.metadata.read() { + Ok(data) => Ok(data.version), + Err(err) => Err(err), + } + } + + /// Set the database version to a different one + pub fn set_version(&mut self, version: u32) -> StoreResult<()> { + self.metadata.update(DirectDbWriter::new(&self.db), |mut data| { + data.version = version; + data + })?; + Ok(()) + } + pub fn should_upgrade(&self) -> StoreResult { match self.metadata.read() { Ok(data) => Ok(data.version != LATEST_DB_VERSION), diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 9f3290a51..0decbc9cc 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -46,10 +46,12 @@ dhat = { workspace = true, optional = true } serde.workspace = true dirs.workspace = true futures-util.workspace = true +itertools.workspace = true log.workspace = true num_cpus.workspace = true rand.workspace = true rayon.workspace = true +rocksdb.workspace = true tempfile.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 0950ad8fa..08dc1d87a 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -8,7 +8,10 @@ 
use kaspa_consensus_core::{ use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; use kaspa_core::{core::Core, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; -use kaspa_database::prelude::CachePolicy; +use kaspa_database::{ + prelude::{CachePolicy, DbWriter, DirectDbWriter}, + registry::DatabaseStorePrefixes, +}; use kaspa_grpc_server::service::GrpcService; use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_rpc_service::service::RpcCoreService; @@ -31,6 +34,7 @@ use kaspa_mining::{ }; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; +use itertools::Itertools; use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot}; use kaspa_utxoindex::{api::UtxoIndexProxy, UtxoIndex}; use kaspa_wrpc_server::service::{Options as WrpcServerOptions, WebSocketCounters as WrpcServerCounters, WrpcEncoding, WrpcService}; @@ -308,13 +312,106 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm && (meta_db.get_pinned(b"multi-consensus-metadata-key").is_ok_and(|r| r.is_some()) || MultiConsensusManagementStore::new(meta_db.clone()).should_upgrade().unwrap()) { - let msg = - "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; - get_user_approval_or_exit(msg, args.yes); + let mut mcms = MultiConsensusManagementStore::new(meta_db.clone()); + let version = mcms.version().unwrap(); + + // TODO: Update this entire section to a more robust implementation that allows applying multiple upgrade strategies. + // If I'm at version 3 and latest version is 7, I need to be able to upgrade to that version following the intermediate + // steps without having to delete the DB + if version == 3 { + let active_consensus_dir_name = mcms.active_consensus_dir_name().unwrap(); + + match active_consensus_dir_name { + Some(current_consensus_db) => { + // Apply soft upgrade logic: delete GD data from higher levels + // and then update DB version to 4 + let consensus_db = kaspa_database::prelude::ConnBuilder::default() + .with_db_path(consensus_db_dir.clone().join(current_consensus_db)) + .with_files_limit(1) + .build() + .unwrap(); + info!("Scanning for deprecated records to cleanup"); + + let mut gd_record_count: u32 = 0; + let mut compact_record_count: u32 = 0; + + let start_level: u8 = 1; + let start_level_bytes = start_level.to_le_bytes(); + let ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let ghostdag_prefix = ghostdag_prefix_vec.as_slice(); + + // This section is used to count the records to be deleted. It's not used for the actual delete. 
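As a standalone sketch of the counting pass this comment introduces, assuming the rust-rocksdb iterator API used in the patch (the helper name and prefix parameters are illustrative, not the real DatabaseStorePrefixes values):

use rocksdb::{Direction, IteratorMode, DB};

// Counts the keys of one store (identified by its one-byte prefix) starting at a
// given level byte. Iteration is in ascending key order, so the first key that no
// longer starts with the store prefix marks the end of that store's records.
fn count_records_from_level(db: &DB, store_prefix: u8, start_level: u8) -> u64 {
    let seek_key = [store_prefix, start_level];
    let mut count = 0u64;
    for item in db.iterator(IteratorMode::From(&seek_key, Direction::Forward)) {
        let (key, _value) = item.unwrap();
        if !key.starts_with(&[store_prefix]) {
            break;
        }
        count += 1;
    }
    count
}

Because keys are ordered byte-wise, seeking to [store_prefix, start_level] and stopping at the first key with a different store prefix visits exactly the records of that store from start_level upward, which is what the loops below do for the Ghostdag and GhostdagCompact stores.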
+ for result in consensus_db.iterator(rocksdb::IteratorMode::From(ghostdag_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::Ghostdag.into()]) { + break; + } + + gd_record_count += 1; + } + + let compact_prefix_vec = DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let compact_prefix = compact_prefix_vec.as_slice(); + + for result in consensus_db.iterator(rocksdb::IteratorMode::From(compact_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::GhostdagCompact.into()]) { + break; + } + + compact_record_count += 1; + } + + trace!("Number of Ghostdag records to cleanup: {}", gd_record_count); + trace!("Number of GhostdagCompact records to cleanup: {}", compact_record_count); + info!("Number of deprecated records to cleanup: {}", gd_record_count + compact_record_count); + + let msg = + "Node database currently at version 3. Upgrade process to version 4 needs to be applied. Continue? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + // Actual delete only happens after user consents to the upgrade: + let mut writer = DirectDbWriter::new(&consensus_db); + + let end_level: u8 = config.max_block_level + 1; + let end_level_bytes = end_level.to_le_bytes(); - info!("Deleting databases from previous Kaspad version"); + let start_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let end_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(end_level_bytes).collect_vec(); - is_db_reset_needed = true; + let start_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let end_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(end_level_bytes).collect_vec(); + + // Apply delete of range from level 1 to max (+1) for Ghostdag and GhostdagCompact: + writer.delete_range(start_ghostdag_prefix_vec.clone(), end_ghostdag_prefix_vec.clone()).unwrap(); + writer.delete_range(start_compact_prefix_vec.clone(), end_compact_prefix_vec.clone()).unwrap(); + + // Compact the deleted range to apply the delete immediately + consensus_db.compact_range(Some(start_ghostdag_prefix_vec.as_slice()), Some(end_ghostdag_prefix_vec.as_slice())); + consensus_db.compact_range(Some(start_compact_prefix_vec.as_slice()), Some(end_compact_prefix_vec.as_slice())); + + // Also update the version to one higher: + mcms.set_version(version + 1).unwrap(); + } + None => { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + is_db_reset_needed = true; + } + } + } else { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete?
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + info!("Deleting databases from previous Kaspad version"); + + is_db_reset_needed = true; + } } // Will be true if any of the other condition above except args.reset_db From f8baf69015d2b4156e6403cfe162281f23d95d3b Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 9 Jul 2024 22:23:23 -0600 Subject: [PATCH 33/65] Explain further the workaround for GD ordering.rs --- consensus/src/processes/ghostdag/ordering.rs | 28 +++++++++++++++----- consensus/src/processes/ghostdag/protocol.rs | 7 +++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index 21306e5b8..cb73c3398 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -1,7 +1,9 @@ use std::cmp::Ordering; use kaspa_consensus_core::BlueWorkType; +use kaspa_core::warn; use kaspa_hashes::Hash; +use kaspa_math::Uint192; use serde::{Deserialize, Serialize}; use crate::model::{ @@ -46,13 +48,27 @@ impl = blocks.into_iter().collect(); sorted_blocks.sort_by_cached_key(|block| SortableBlock { hash: *block, - // Since we're only calculating GD at all levels on-demand, we may get blocks from the relations - // store in the mergeset that are not on our level + // TODO: Reconsider this approach + // It's possible for mergeset.rs::unordered_mergeset_without_selected_parent (which calls this) to reference parents + // that are in a lower level when calling relations.get_parents. This will panic at self.ghostdag_store.get_blue_work(*block) + // // Options for fixes: - // - do this - // - guarantee that we're only getting parents that are in this store - // - make relations store only return parents at the same or higher level - blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_default(), + // 1) do this where we simply unwrap and default to 0 (currently implemented) + // - consequence is that it affects all GD calculations + // - I argue this is fine for the short term because GD entries not being in the GD store + // can only happen IFF the parent is on a lower level. For level 0 (primary GD), this is not a problem + // and for higher GD it's also not a problem since we only want to use blocks in the same + // level or higher. 
+ // - There is also an extra check being done in ghostdag call side to verify that the hashes in the mergeset + // belong to this + // 2) in mergeset.rs::unordered_mergeset_without_selected_parent, guarantee that we're only getting + // parents that are in this store + // 3) make relations store only return parents at the same or higher level + // - we know that realtions.get_parents can return parents in one level lower + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_else(|_| { + warn!("Tried getting blue work of hash not in GD store: {}", block); + Uint192::from_u64(0) + }), }); sorted_blocks } diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index ac9ae41d7..bfc66ebe6 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -106,6 +106,13 @@ impl Date: Tue, 9 Jul 2024 22:23:52 -0600 Subject: [PATCH 34/65] Minor update to Display of TempGD keys --- database/src/key.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/database/src/key.rs b/database/src/key.rs index e8aeff091..83fa8ebb2 100644 --- a/database/src/key.rs +++ b/database/src/key.rs @@ -73,6 +73,8 @@ impl Display for DbKey { match prefix { Ghostdag | GhostdagCompact + | TempGhostdag + | TempGhostdagCompact | RelationsParents | RelationsChildren | Reachability From bc56e65d5dd93d17c00e12e9f2c05e0a924e24b5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 9 Jul 2024 22:28:14 -0600 Subject: [PATCH 35/65] Various fixes - Keep using old root to minimize proof size. Old root is calculated using the temporary gd stores - fix the off-by-one in block_at_depth and chain_up_to_depth - revert the temp fix to sync with the off-by-one --- consensus/src/processes/pruning_proof/mod.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e0ca2a1e8..34ae371db 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -868,8 +868,7 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - // Need to ensure this does the same 2M+1 depth that block_at_depth does - || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth + || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -916,9 +915,8 @@ impl PruningProofManager { true, ); - // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } @@ -1016,7 +1014,8 @@ impl PruningProofManager { let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); + // Still use "old_root" to make sure we use the minimum amount of records for the proof + queue.push(Reverse(SortableBlock::new(old_root, self.headers_store.get_header(old_root).unwrap().blue_work))); while let Some(current) = 
queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -1158,7 +1157,7 @@ impl PruningProofManager { let mut current_gd = high_gd; let mut current = high; let mut res = vec![current]; - while current_gd.blue_score + depth >= high_gd.blue_score { + while current_gd.blue_score + depth > high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } @@ -1186,7 +1185,7 @@ impl PruningProofManager { .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; let mut current_gd = high_gd; let mut current = high; - while current_gd.blue_score + depth >= high_gd.blue_score { + while current_gd.blue_score + depth > high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } From efbb083b139c6cbe1cc46999fe4986595212725d Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sun, 14 Jul 2024 13:01:33 -0600 Subject: [PATCH 36/65] Revert "Various fixes" This reverts commit bc56e65d5dd93d17c00e12e9f2c05e0a924e24b5. This experimental commit requires a bit more thinking to apply, and optimization can be deferred. --- consensus/src/processes/pruning_proof/mod.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 34ae371db..e0ca2a1e8 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -868,7 +868,8 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + // Need to ensure this does the same 2M+1 depth that block_at_depth does + || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -915,8 +916,9 @@ impl PruningProofManager { true, ); + // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } @@ -1014,8 +1016,7 @@ impl PruningProofManager { let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - // Still use "old_root" to make sure we use the minimum amount of records for the proof - queue.push(Reverse(SortableBlock::new(old_root, self.headers_store.get_header(old_root).unwrap().blue_work))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -1157,7 +1158,7 @@ impl PruningProofManager { let mut current_gd = high_gd; let mut current = high; let mut res = vec![current]; - while current_gd.blue_score + depth > high_gd.blue_score { + while current_gd.blue_score + depth >= high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } @@ -1185,7 +1186,7 @@ impl PruningProofManager { .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; let mut current_gd = high_gd; let mut current = high; - while current_gd.blue_score + depth > 
high_gd.blue_score { + while current_gd.blue_score + depth >= high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } From a585be7e597a7c274a8be9536381aa9786c3a7d6 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 15 Jul 2024 23:12:34 -0600 Subject: [PATCH 37/65] Revert better proof check Recreates the GD stores for the current consensus by checking existing proof --- consensus/src/processes/pruning_proof/mod.rs | 146 ++++++++++--------- 1 file changed, 77 insertions(+), 69 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e0ca2a1e8..b22f66918 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -49,7 +49,7 @@ use crate::{ }, stores::{ depth::DbDepthStore, - ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -412,14 +412,8 @@ impl PruningProofManager { fn init_validate_pruning_point_proof_stores_and_processes( &self, - proof: &PruningPointProof, + headers_estimate: usize, ) -> PruningImportResult { - if proof[0].is_empty() { - return Err(PruningImportError::PruningProofNotEnoughHeaders); - } - - let headers_estimate = self.estimate_proof_unique_size(proof); - let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -473,6 +467,7 @@ impl PruningProofManager { &self, proof: &PruningPointProof, ctx: &mut TempProofContext, + log_validating: bool, ) -> PruningImportResult> { let headers_store = &ctx.headers_store; let ghostdag_stores = &ctx.ghostdag_stores; @@ -490,7 +485,9 @@ impl PruningProofManager { return Err(PruningImportError::PruningValidationInterrupted); } - info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + if log_validating { + info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + } let level_idx = level as usize; let mut selected_tip = None; for (i, header) in proof[level as usize].iter().enumerate() { @@ -597,32 +594,33 @@ impl PruningProofManager { Ok(()) } - /// Returns the common ancestor of the proof and the current consensus if there is one. - /// - /// ghostdag_stores currently contain only entries for blocks in the proof. - /// While iterating through the selected parent chain of the current consensus, if we find any - /// that is already in ghostdag_stores that must mean it's a common ancestor of the proof - /// and current consensus - fn find_proof_and_consensus_common_ancestor( + // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. If no + // such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( &self, - ghostdag_store: &Arc, - current_consensus_selected_tip_header: Arc
, + proof_ghostdag_stores: &[Arc], + current_consensus_ghostdag_stores: &[Arc], + proof_selected_tip: Hash, level: BlockLevel, - relations_service: &MTRelationsService, - ) -> Option { - let mut chain_block = current_consensus_selected_tip_header.clone(); - - for _ in 0..(2 * self.pruning_proof_m as usize) { - if chain_block.direct_parents().is_empty() || chain_block.hash.is_origin() { - break; - } - if ghostdag_store.has(chain_block.hash).unwrap() { - return Some(chain_block.hash); - } - chain_block = self.find_selected_parent_header_at_level(&chain_block, level, relations_service).unwrap(); + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match current_consensus_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = proof_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; } - - None } pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { @@ -630,23 +628,49 @@ impl PruningProofManager { return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); } + if proof[0].is_empty() { + return Err(PruningImportError::PruningProofNotEnoughHeaders); + } + + let headers_estimate = self.estimate_proof_unique_size(proof); + + // Initialize the stores for the proof let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; - let selected_tip_by_level = self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; - let ghostdag_stores = stores_and_processes.ghostdag_stores; + let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; + let proof_selected_tip_by_level = + self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; + let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; + + // Get the proof for the current consensus and recreate the stores for it + // This is expected to be fast because if a proof exists, it will be cached. + // If no proof exists, this is empty + let mut current_consensus_proof = self.get_pruning_point_proof(); + if current_consensus_proof.is_empty() { + // An empty proof can only happen if we're at genesis. 
We're going to create a proof for this case that contains the genesis header only + let genesis_header = self.headers_store.get_header(self.genesis_hash).unwrap(); + current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); + } + let mut current_consensus_stores_and_processes = + self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; + let _ = self.populate_stores_for_validate_pruning_point_proof( + ¤t_consensus_proof, + &mut current_consensus_stores_and_processes, + false, + )?; + let current_consensus_ghostdag_stores = current_consensus_stores_and_processes.ghostdag_stores; let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; - let current_pp_header = self.headers_store.get_header_with_block_level(current_pp).unwrap(); + let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { + for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; - let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); // Next check is to see if this proof is "better" than what's in the current consensus // Step 1 - look at only levels that have a full proof (least 2m blocks in the proof) @@ -658,36 +682,19 @@ impl PruningProofManager { // we can determine if the proof is better. 
The proof is better if the score difference between the // old current consensus's tips and the common ancestor is less than the score difference between the // proof's tip and the common ancestor - let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); - let current_consensus_selected_tip_header = if current_pp_header.block_level >= level { - current_pp_header.header.clone() - } else { - self.find_selected_parent_header_at_level(¤t_pp_header.header, level, &relations_service).unwrap() - }; - if let Some(common_ancestor) = self.find_proof_and_consensus_common_ancestor( - &ghostdag_stores[level_idx], - current_consensus_selected_tip_header.clone(), + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &proof_ghostdag_stores, + ¤t_consensus_ghostdag_stores, + selected_tip, level, - &relations_service, + proof_selected_tip_gd, ) { - // Fill the GD store with data from current consensus, - // starting from the common ancestor until the current level selected tip - let _ = self.fill_proof_ghostdag_data( - proof[level_idx].first().unwrap().hash, - common_ancestor, - current_consensus_selected_tip_header.hash, - &ghostdag_stores[level_idx], - &relations_service, - level != 0, - None, - false, - ); - let common_ancestor_blue_work = ghostdag_stores[level_idx].get_blue_work(common_ancestor).unwrap(); let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(common_ancestor_blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header.header, level).iter().copied() { - let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_blue_work); + SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); + for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { + let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work_diff = + SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { return Err(PruningImportError::PruningProofInsufficientBlueWork); } @@ -710,8 +717,8 @@ impl PruningProofManager { for level in (0..=self.max_block_level).rev() { let level_idx = level as usize; - let proof_selected_tip = selected_tip_by_level[level_idx]; - let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + let proof_selected_tip = proof_selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } @@ -721,7 +728,7 @@ impl PruningProofManager { if parents .iter() .copied() - .any(|parent| ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) + .any(|parent| proof_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) { return Ok(()); } @@ -735,7 +742,8 @@ impl PruningProofManager { drop(pruning_read); drop(relations_read); - drop(stores_and_processes.db_lifetime); + drop(proof_stores_and_processes.db_lifetime); + drop(current_consensus_stores_and_processes.db_lifetime); 
Err(PruningImportError::PruningProofNotEnoughHeaders) } From e7625c76f79a304e88e005ff63af43a25c03413a Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 16 Jul 2024 09:10:55 -0600 Subject: [PATCH 38/65] Fix: use cc gd store --- consensus/src/processes/pruning_proof/mod.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index b22f66918..a1c7bf6a3 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -725,11 +725,9 @@ impl PruningProofManager { match relations_read[level_idx].get_parents(current_pp).unwrap_option() { Some(parents) => { - if parents - .iter() - .copied() - .any(|parent| proof_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) - { + if parents.iter().copied().any(|parent| { + current_consensus_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m + }) { return Ok(()); } } From 0741151000924b43b895d50e498302bdae574f08 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Sun, 18 Aug 2024 12:40:18 +0300 Subject: [PATCH 39/65] When building pruning point proof ghostdag data, ignore blocks before the root --- consensus/src/processes/ghostdag/ordering.rs | 28 +------------- consensus/src/processes/ghostdag/protocol.rs | 7 ---- consensus/src/processes/pruning_proof/mod.rs | 40 +++++++++++++++++++- 3 files changed, 41 insertions(+), 34 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index cb73c3398..88b648b8c 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -1,9 +1,7 @@ use std::cmp::Ordering; use kaspa_consensus_core::BlueWorkType; -use kaspa_core::warn; use kaspa_hashes::Hash; -use kaspa_math::Uint192; use serde::{Deserialize, Serialize}; use crate::model::{ @@ -46,30 +44,8 @@ impl Ord for SortableBlock { impl GhostdagManager { pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks.sort_by_cached_key(|block| SortableBlock { - hash: *block, - // TODO: Reconsider this approach - // It's possible for mergeset.rs::unordered_mergeset_without_selected_parent (which calls this) to reference parents - // that are in a lower level when calling relations.get_parents. This will panic at self.ghostdag_store.get_blue_work(*block) - // - // Options for fixes: - // 1) do this where we simply unwrap and default to 0 (currently implemented) - // - consequence is that it affects all GD calculations - // - I argue this is fine for the short term because GD entries not being in the GD store - // can only happen IFF the parent is on a lower level. For level 0 (primary GD), this is not a problem - // and for higher GD it's also not a problem since we only want to use blocks in the same - // level or higher. 
- // - There is also an extra check being done in ghostdag call side to verify that the hashes in the mergeset - // belong to this - // 2) in mergeset.rs::unordered_mergeset_without_selected_parent, guarantee that we're only getting - // parents that are in this store - // 3) make relations store only return parents at the same or higher level - // - we know that realtions.get_parents can return parents in one level lower - blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_else(|_| { - warn!("Tried getting blue work of hash not in GD store: {}", block); - Uint192::from_u64(0) - }), - }); + sorted_blocks + .sort_by_cached_key(|block| SortableBlock { hash: *block, blue_work: self.ghostdag_store.get_blue_work(*block).unwrap() }); sorted_blocks } } diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index bfc66ebe6..ac9ae41d7 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -106,13 +106,6 @@ impl { + relations_store: T, + reachability_service: U, + root: Hash, +} + +impl RelationsStoreReader for RelationsStoreInFutureOfRoot { + fn get_parents(&self, hash: Hash) -> Result { + self.relations_store.get_parents(hash).map(|hashes| { + Arc::new(hashes.iter().copied().filter(|h| self.reachability_service.is_dag_ancestor_of(self.root, *h)).collect_vec()) + }) + } + + fn get_children(&self, hash: Hash) -> StoreResult> { + // We assume hash is in future of root + assert!(self.reachability_service.is_dag_ancestor_of(self.root, hash)); + self.relations_store.get_children(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.reachability_service.is_dag_ancestor_of(self.root, hash) { + Ok(false) + } else { + self.relations_store.has(hash) + } + } + + fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> { + panic!("unimplemented") + } +} + pub struct PruningProofManager { db: Arc, @@ -1084,6 +1117,11 @@ impl PruningProofManager { required_block: Option, initialize_store: bool, ) -> bool { + let relations_service = RelationsStoreInFutureOfRoot { + relations_store: relations_service.clone(), + reachability_service: self.reachability_service.clone(), + root: genesis_hash, + }; let gd_manager = GhostdagManager::new( genesis_hash, self.ghostdag_k, From 89f17018796c537ded9114ee8ebdec7bcd0fe2a5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 22 Aug 2024 23:02:35 -0600 Subject: [PATCH 40/65] Add trusted blocks to all relevant levels during apply_proof As opposed to applying only to level 0 --- consensus/src/processes/pruning_proof/mod.rs | 24 +++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index a1c7bf6a3..fcefb0b6e 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -225,18 +225,30 @@ impl PruningProofManager { let pruning_point_header = proof[0].last().unwrap().clone(); let pruning_point = pruning_point_header.hash; - let proof_zero_set = BlockHashSet::from_iter(proof[0].iter().map(|header| header.hash)); + // Create a copy of the proof, since we're going to be mutating the proof passed to us + let proof_sets: Vec> = (0..=self.max_block_level) + .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) + .collect(); + let mut trusted_gd_map: BlockHashMap = BlockHashMap::new(); 
for tb in trusted_set.iter() { trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into()); - if proof_zero_set.contains(&tb.block.hash()) { - continue; - } + let tb_block_level = calc_block_level(&tb.block.header, self.max_block_level); - proof[0].push(tb.block.header.clone()); + (0..=tb_block_level).for_each(|current_proof_level| { + // If this block was in the original proof, ignore it + if proof_sets[current_proof_level as usize].contains(&tb.block.hash()) { + return; + } + + proof[current_proof_level as usize].push(tb.block.header.clone()); + }); } - proof[0].sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + proof.iter_mut().for_each(|level_proof| { + level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + }); + self.populate_reachability_and_headers(&proof); { From fb3d1e9b88f670555aaa89059b5dd86b3ac37235 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:41:55 -0600 Subject: [PATCH 41/65] Calculate headers estimate in init proof stores --- consensus/src/processes/pruning_proof/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index fcefb0b6e..e199f26f2 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -424,8 +424,14 @@ impl PruningProofManager { fn init_validate_pruning_point_proof_stores_and_processes( &self, - headers_estimate: usize, + proof: &PruningPointProof, ) -> PruningImportResult { + if proof[0].is_empty() { + return Err(PruningImportError::PruningProofNotEnoughHeaders); + } + + let headers_estimate = self.estimate_proof_unique_size(proof); + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -640,17 +646,11 @@ impl PruningProofManager { return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); } - if proof[0].is_empty() { - return Err(PruningImportError::PruningProofNotEnoughHeaders); - } - - let headers_estimate = self.estimate_proof_unique_size(proof); - // Initialize the stores for the proof + let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; let proof_selected_tip_by_level = self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; @@ -665,7 +665,7 @@ impl PruningProofManager { current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); } let mut current_consensus_stores_and_processes = - self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; + self.init_validate_pruning_point_proof_stores_and_processes(¤t_consensus_proof)?; let _ = self.populate_stores_for_validate_pruning_point_proof( ¤t_consensus_proof, &mut current_consensus_stores_and_processes, From c9855d32bff5b7af8e05034686d241db4e407a68 Mon Sep 17 00:00:00 2001 From: coderofstuff 
<114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:43:00 -0600 Subject: [PATCH 42/65] Explain finished headers logic Add back the panic for when we couldn't find the required block and our headers are done. Add a comment explaining why trying anyway when finished_headers is set is acceptable. --- consensus/src/processes/pruning_proof/mod.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e199f26f2..b668a88da 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -943,8 +943,16 @@ impl PruningProofManager { tries += 1; if finished_headers { - warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned. Trying anyway."); - break Ok((ghostdag_store, selected_tip, root)); + if has_required_block { + // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place. + // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to + // try to find 2500 depth worth of headers at a level while the proof only contains about 2000 headers. To still be + // able to sync with such an older node, we proceed as long as we found the required block. + warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned. Required block found so trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); + } else { + panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned"); + } } required_level_0_depth <<= 1; warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}");
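Condensed, the retry policy this commit restores behaves as sketched below. This is an illustration only, not the literal function: Attempt and try_find_root are hypothetical stand-ins for one pass of the root search at a given depth.

    /// Hypothetical summary of one root-search attempt at a given depth.
    struct Attempt {
        deep_enough: bool,
        has_required_block: bool,
        finished_headers: bool,
    }

    fn search_root(mut depth: u64, mut try_find_root: impl FnMut(u64) -> Attempt) -> u64 {
        loop {
            let a = try_find_root(depth);
            if a.deep_enough && a.has_required_block {
                return depth; // found a sufficient root
            }
            if a.finished_headers {
                if a.has_required_block {
                    // an older node's proof may be shorter than the depth target;
                    // the required block was found, so proceed with what we have
                    return depth;
                }
                // headers exhausted and the required block was never seen
                panic!("no sufficient root: required block not found");
            }
            depth <<= 1; // retry, looking twice as deep (as in the diff above)
        }
    }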
From a63acdb02fe3161c6be0b4613677140707f51192 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:45:50 -0600 Subject: [PATCH 43/65] clarify comment --- consensus/src/processes/pruning_proof/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index b668a88da..118a79f1e 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -691,8 +691,8 @@ impl PruningProofManager { } // Step 2 - if we can find a common ancestor between the proof and current consensus - // we can determine if the proof is better. The proof is better if the score difference between the - // old current consensus's tips and the common ancestor is less than the score difference between the + // we can determine if the proof is better. The proof is better if the blue work difference between the + // old current consensus's tips and the common ancestor is less than the blue work difference between the // proof's tip and the common ancestor if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( &proof_ghostdag_stores, From 46dbac30a78fa48b503621d8970e019e9a45aa31 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:58:31 -0600 Subject: [PATCH 44/65] Rename old_root to depth_based_root; explain the logic for the two root calculations --- consensus/src/processes/pruning_proof/mod.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 118a79f1e..ceacbf12f 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1013,8 +1013,12 @@ impl PruningProofManager { .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap(); + // (New Logic) This is the root we calculated by going through block relations let root = roots_by_level[level]; - let old_root = if level != self.max_block_level as usize { + // (Old Logic) This is the root we can calculate given that the GD records are already filled + // The root calc logic below is the original logic before the on-demand higher level GD calculation + // We only need depth_based_root to sanity check the new logic + let depth_based_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) .unwrap(); @@ -1036,8 +1040,8 @@ impl PruningProofManager { block_at_depth_2m }; - // new root is expected to be always an ancestor of old root because new root takes a safety margin - assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + // new root is expected to be always an ancestor of depth_based_root because new root takes a safety margin + assert!(self.reachability_service.is_dag_ancestor_of(root, depth_based_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::<Reverse<SortableBlock>>::new(); From 4c9f9979040e4e5b3aa4adad4541d7193bf312c6 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 27 Aug 2024 01:07:07 -0600 Subject: [PATCH 45/65] More merge fixes --- Cargo.lock | 2 +- consensus/src/consensus/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1fc312db8..cd93c85b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3751,7 +3751,7 @@ dependencies = [ "dhat", "dirs", "futures-util", - "itertools 0.11.0", + "itertools 0.13.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 7c527a542..5b09b12cd 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -532,7 +532,7 @@ impl ConsensusApi for Consensus { for child in initial_children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -559,7 +559,7 @@ impl ConsensusApi for Consensus { for child in
children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } From 56b4392178bb5a751752cafc461c3b9aa9155250 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 27 Aug 2024 23:03:39 -0600 Subject: [PATCH 46/65] Refactor relations services into self --- consensus/src/processes/pruning_proof/mod.rs | 31 ++++++++------------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 17147e089..550bde740 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -152,6 +152,7 @@ pub struct PruningProofManager { reachability_service: MTReachabilityService<DbReachabilityStore>, ghostdag_store: Arc<DbGhostdagStore>, relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>, + level_relations_services: Vec<MTRelationsService<DbRelationsStore>>, pruning_point_store: Arc<RwLock<DbPruningStore>>, past_pruning_points_store: Arc<DbPastPruningPointsStore>, virtual_stores: Arc<RwLock<VirtualStores>>, @@ -225,6 +226,10 @@ impl PruningProofManager { ghostdag_manager, is_consensus_exiting, + + level_relations_services: (0..=max_block_level) + .map(|level| MTRelationsService::new(storage.relations_stores.clone(), level)) + .collect_vec(), } } @@ -824,7 +829,6 @@ impl PruningProofManager { &self, header: &Header, level: BlockLevel, - relations_service: &MTRelationsService<DbRelationsStore>, ) -> PruningProofManagerInternalResult<Arc<Header>> { // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important // to filter to include only parents that are in relations_service. @@ -833,7 +837,7 @@ impl PruningProofManager { .parents_at_level(header, level) .iter() .copied() - .filter(|parent| relations_service.has(*parent).unwrap()) + .filter(|parent| self.level_relations_services[level as usize].has(*parent).unwrap()) .collect_vec() .push_if_empty(ORIGIN); @@ -869,11 +873,10 @@ impl PruningProofManager { required_block: Option<Hash>, temp_db: Arc<DB>, ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> { - let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { - self.find_selected_parent_header_at_level(&pp_header.header, level, &relations_service)? + self.find_selected_parent_header_at_level(&pp_header.header, level)?
}; let selected_tip = selected_tip_header.hash; let pp = pp_header.header.hash; @@ -925,7 +928,7 @@ impl PruningProofManager { { break current_header; } - current_header = match self.find_selected_parent_header_at_level(&current_header, level, &relations_service) { + current_header = match self.find_selected_parent_header_at_level(&current_header, level) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { @@ -939,7 +942,7 @@ impl PruningProofManager { if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { current_required_chain_block = - match self.find_selected_parent_header_at_level(&current_required_chain_block, level, &relations_service) { + match self.find_selected_parent_header_at_level(&current_required_chain_block, level) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { finished_headers_for_required_block_chain = true; current_required_chain_block } Err(e) => return Err(e), }; @@ -956,16 +959,8 @@ impl PruningProofManager { } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); - let has_required_block = self.fill_proof_ghostdag_data( - root, - root, - pp, - &ghostdag_store, - &relations_service, - level != 0, - Some(required_block), - true, - ); + let has_required_block = + self.fill_proof_ghostdag_data(root, root, pp, &ghostdag_store, level != 0, Some(required_block), true, level); // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block @@ -1136,13 +1131,13 @@ impl PruningProofManager { starting_hash: Hash, selected_tip: Hash, ghostdag_store: &Arc<DbGhostdagStore>, - relations_service: &MTRelationsService<DbRelationsStore>, use_score_as_work: bool, required_block: Option<Hash>, initialize_store: bool, + level: BlockLevel, ) -> bool { let relations_service = RelationsStoreInFutureOfRoot { - relations_store: relations_service.clone(), + relations_store: self.level_relations_services[level as usize].clone(), reachability_service: self.reachability_service.clone(), root: genesis_hash, }; From 66d4ebe674f798ec6a2ecc7c16f5e92f9bab8e8f Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 29 Aug 2024 20:00:10 -0600 Subject: [PATCH 47/65] Use blue_work for find_selected_parent_header_at_level --- consensus/src/processes/pruning_proof/mod.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 550bde740..0ce6a1b98 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -841,18 +841,21 @@ impl PruningProofManager { .collect_vec() .push_if_empty(ORIGIN); - let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() }; + let mut sp = SortableBlock { + hash: parents[0], + blue_work: if parents[0] == ORIGIN { 0.into() } else { self.headers_store.get_header(parents[0]).unwrap().blue_work }, + }; for parent in parents.iter().copied().skip(1) { let sblock = SortableBlock { hash: parent, blue_work: self .headers_store - .get_blue_score(parent) + .get_header(parent) .unwrap_option() .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(format!( "find_selected_parent_header_at_level (level {level}) couldn't find the header for block {parent}" )))?
- .into(), + .blue_work, }; if sblock > sp { sp = sblock; } From 18158e62794b8c6943fc234f58462268ce8af935 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 29 Aug 2024 20:07:15 -0600 Subject: [PATCH 48/65] Comment fixes and small refactor --- consensus/src/processes/pruning_proof/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 0ce6a1b98..e0898b7fb 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -264,9 +264,9 @@ impl PruningProofManager { let pruning_point = pruning_point_header.hash; // Create a copy of the proof, since we're going to be mutating the proof passed to us - let proof_sets: Vec<BlockHashSet> = (0..=self.max_block_level) + let proof_sets = (0..=self.max_block_level) .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) - .collect(); + .collect_vec(); let mut trusted_gd_map: BlockHashMap<GhostdagData> = BlockHashMap::new(); for tb in trusted_set.iter() { @@ -729,9 +729,10 @@ impl PruningProofManager { } // Step 2 - if we can find a common ancestor between the proof and current consensus - // we can determine if the proof is better. The proof is better if the blue work difference between the + // we can determine if the proof is better. The proof is better if the blue work* difference between the // old current consensus's tips and the common ancestor is less than the blue work difference between the - // proof's tip and the common ancestor + // proof's tip and the common ancestor. + // *Note: blue work is the same as blue score on levels higher than 0 if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( &proof_ghostdag_stores, &current_consensus_ghostdag_stores, @@ -865,7 +866,6 @@ impl PruningProofManager { self.headers_store.get_header(sp.hash).unwrap_option().ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof( format!("find_selected_parent_header_at_level (level {level}) couldn't find the header for block {}", sp.hash,), )) - // Ok(self.headers_store.get_header(sp.hash).unwrap_option().expect("already checked if compact header exists above")) } fn find_sufficient_root( From 8d042b2d2d9568e1eeb097b66305d9b1cf69d055 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sun, 1 Sep 2024 11:03:04 -0600 Subject: [PATCH 49/65] Revert rename to old root --- consensus/src/processes/pruning_proof/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e0898b7fb..8c059963a 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1048,8 +1048,8 @@ impl PruningProofManager { let root = roots_by_level[level]; // (Old Logic) This is the root we can calculate given that the GD records are already filled // The root calc logic below is the original logic before the on-demand higher level GD calculation - // We only need depth_based_root to sanity check the new logic - let depth_based_root = if level != self.max_block_level as usize { + // We only need old_root to sanity check the new logic + let old_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self .block_at_depth(&*ghostdag_stores[level +
1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) @@ -1071,8 +1071,8 @@ impl PruningProofManager { block_at_depth_2m }; - // new root is expected to be always an ancestor of depth_based_root because new root takes a safety margin - assert!(self.reachability_service.is_dag_ancestor_of(root, depth_based_root)); + // new root is expected to be always an ancestor of old_root because new root takes a safety margin + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::<Reverse<SortableBlock>>::new(); From 65948d32b73301910d19eea8a159ea79f6840ee4 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 6 Sep 2024 17:48:54 -0600 Subject: [PATCH 50/65] Lint fix from merged code --- kaspad/src/daemon.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index b66fb80aa..db9f32c16 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -6,7 +6,7 @@ use kaspa_consensus_core::{ errors::config::{ConfigError, ConfigResult}, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; -use kaspa_core::{core::Core, debug, info}; +use kaspa_core::{core::Core, debug, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; use kaspa_database::{ prelude::{CachePolicy, DbWriter, DirectDbWriter}, From 2e8deabd2526cee8c0535b35a61aebaa25545d74 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:44:07 -0600 Subject: [PATCH 51/65] Some cleanup - use BlueWorkType - fix some comments --- consensus/src/pipeline/header_processor/processor.rs | 4 +--- consensus/src/processes/ghostdag/protocol.rs | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 5d9bf3fed..ac8cc441c 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -344,7 +344,7 @@ impl HeaderProcessor { .collect_vec() } - /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) + /// Runs the GHOSTDAG algorithm and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { let ghostdag_data = self .ghostdag_store @@ -365,8 +365,6 @@ impl HeaderProcessor { // // Append-only stores: these require no lock and hence done first in order to reduce locking time // - - // This data might have been already written when applying the pruning proof.
self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 2474c2c37..997c4eecb 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -5,7 +5,6 @@ use kaspa_consensus_core::{ BlockHashMap, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; -use kaspa_math::Uint192; use kaspa_utils::refs::Refs; use crate::{ @@ -118,7 +117,7 @@ impl<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> GhostdagManager<T, S, U, V> From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 15 Oct 2024 16:16:43 -0600 Subject: [PATCH 52/65] remove last reference to ghostdag_primary_* --- consensus/src/pipeline/header_processor/processor.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index ac8cc441c..4ecc761af 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -356,7 +356,7 @@ impl HeaderProcessor { } fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { - let ghostdag_primary_data = ctx.ghostdag_data.as_ref().unwrap(); + let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); let pp = ctx.pruning_point(); // Create a DB batch writer @@ -365,7 +365,7 @@ impl HeaderProcessor { // // Append-only stores: these require no lock and hence done first in order to reduce locking time // - self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); @@ -387,8 +387,8 @@ impl HeaderProcessor { // time, and thus serializing this part will do no harm. However this should be benchmarked. The // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - let selected_parent = ghostdag_primary_data.selected_parent; - let mut reachability_mergeset = ghostdag_primary_data.unordered_mergeset_without_selected_parent(); + let selected_parent = ghostdag_data.selected_parent; + let mut reachability_mergeset = ghostdag_data.unordered_mergeset_without_selected_parent(); reachability::add_block(&mut staging, ctx.hash, selected_parent, &mut reachability_mergeset).unwrap(); // Non-append only stores need to use write locks. From b6af1afb882424bad850695e5c9d9694af4eb9fe Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:37:58 -0600 Subject: [PATCH 53/65] Cleaner find_selected_parent_header_at_level Co-authored-by: Michael Sutton --- consensus/src/processes/pruning_proof/mod.rs | 34 ++++---------------- 1 file changed, 6 insertions(+), 28 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8c059963a..7afe28675 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -833,39 +833,17 @@ impl PruningProofManager { ) -> PruningProofManagerInternalResult<Arc<Header>> { // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important // to filter to include only parents that are in relations_service.
- let parents = self + let sp = self .parents_manager .parents_at_level(header, level) .iter() .copied() - .filter(|parent| self.level_relations_services[level as usize].has(*parent).unwrap()) - .collect_vec() - .push_if_empty(ORIGIN); + .filter(|p| self.level_relations_services[level as usize].has(*p).unwrap()) + .filter_map(|p| self.headers_store.get_header(p).unwrap_option().map(|h| SortableBlock::new(p, h.blue_work))) + .max() + .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof("no parents with header".to_string()))?; - let mut sp = SortableBlock { - hash: parents[0], - blue_work: if parents[0] == ORIGIN { 0.into() } else { self.headers_store.get_header(parents[0]).unwrap().blue_work }, - }; - for parent in parents.iter().copied().skip(1) { - let sblock = SortableBlock { - hash: parent, - blue_work: self - .headers_store - .get_header(parent) - .unwrap_option() - .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(format!( - "find_selected_parent_header_at_level (level {level}) couldn't find the header for block {parent}" - )))? - .blue_work, - }; - if sblock > sp { - sp = sblock; - } - } - // TODO: For higher levels the chance of having more than two parents is very small, so it might make sense to fetch the whole header for the SortableBlock instead of blue_score (which will probably come from a compact header). - self.headers_store.get_header(sp.hash).unwrap_option().ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof( - format!("find_selected_parent_header_at_level (level {level}) couldn't find the header for block {}", sp.hash,), - )) + Ok(self.headers_store.get_header(sp.hash).expect("unwrapped above")) } fn find_sufficient_root( From dcd3f5c2f0a4a089e19f1c54a37c74473cfdbba5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 22 Oct 2024 23:00:52 -0600 Subject: [PATCH 54/65] Refactor for better readability and add more docs --- consensus/src/processes/pruning_proof/mod.rs | 189 +++++++++++-------- 1 file changed, 107 insertions(+), 82 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 7afe28675..01d210405 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -842,10 +842,17 @@ impl PruningProofManager { .filter_map(|p| self.headers_store.get_header(p).unwrap_option().map(|h| SortableBlock::new(p, h.blue_work))) .max() .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof("no parents with header".to_string()))?; Ok(self.headers_store.get_header(sp.hash).expect("unwrapped above")) } + /// Find a sufficient root at a given level by going through the headers store and looking + /// for a deep enough block at that level. + /// For each root candidate, fill in the ghostdag data to see if it actually is deep enough. + /// If the root is deep enough, it will satisfy these conditions: + /// 1. block at depth 2m at this level ∈ Future(root) + /// 2.
block at depth m at the next level ∈ Future(root) + /// + /// Returns: the filled ghostdag store from root to tip, the selected tip and the root fn find_sufficient_root( &self, pp_header: &HeaderWithBlockLevel, @@ -854,18 +861,19 @@ impl PruningProofManager { required_block: Option<Hash>, temp_db: Arc<DB>, ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> { - let selected_tip_header = if pp_header.block_level >= level { - pp_header.header.clone() + // Step 1: Determine which selected tip to use + let selected_tip = if pp_header.block_level >= level { + pp_header.header.hash } else { - self.find_selected_parent_header_at_level(&pp_header.header, level)? + self.find_selected_parent_header_at_level(&pp_header.header, level)?.hash }; - let selected_tip = selected_tip_header.hash; - let pp = pp_header.header.hash; - let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let required_level_depth = 2 * self.pruning_proof_m; - let mut required_level_0_depth = if level == 0 { + + // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from. + // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level + let mut required_base_level_depth = if level == 0 { required_level_depth + 100 // smaller safety margin } else { self.estimated_blue_depth_at_level_0( level, required_level_depth * 5 / 4, // We take a safety margin current_dag_level, ) }; + let mut is_last_level_header; let mut tries = 0; - loop { - let required_block = if let Some(required_block) = required_block { - // TODO: We can change it to skip related checks if `None` - required_block - } else { - selected_tip - }; - let mut finished_headers = false; - let mut finished_headers_for_required_block_chain = false; - let mut current_header = selected_tip_header.clone(); - let mut required_block_chain = BlockHashSet::new(); - let mut selected_chain = BlockHashSet::new(); - let mut intersected_with_required_block_chain = false; - let mut current_required_chain_block = self.headers_store.get_header(required_block).unwrap(); - let root_header = loop { - if !intersected_with_required_block_chain { - required_block_chain.insert(current_required_chain_block.hash); - selected_chain.insert(current_header.hash); - if required_block_chain.contains(&current_header.hash) - || selected_chain.contains(&current_required_chain_block.hash) - { - intersected_with_required_block_chain = true; - } - } + let block_at_depth_m_at_next_level = required_block.unwrap_or(selected_tip); - if current_header.direct_parents().is_empty() // Stop at genesis - // Need to ensure this does the same 2M+1 depth that block_at_depth does - || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth - && intersected_with_required_block_chain) - { - break current_header; + loop { + // Step 2 - Find a deep enough root candidate + let block_at_depth_2m = match self.level_block_at_base_depth(level, selected_tip, required_base_level_depth) { + Ok((header, is_last_header)) => { + is_last_level_header = is_last_header; + header } - current_header = match self.find_selected_parent_header_at_level(&current_header, level) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - if !intersected_with_required_block_chain { - warn!("it's unknown if the selected root for level {level} ( {} ) is in the chain of the required block {required_block}", current_header.hash) - } - finished_headers = true; // We want to give this root a
shot if all its past is pruned - break current_header; - } - Err(e) => return Err(e), - }; + Err(e) => return Err(e), + }; - if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { - current_required_chain_block = - match self.find_selected_parent_header_at_level(&current_required_chain_block, level) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - finished_headers_for_required_block_chain = true; - current_required_chain_block - } - Err(e) => return Err(e), - }; + let root = if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) { + block_at_depth_2m + } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { + block_at_depth_m_at_next_level + } else { + // find common ancestor of block_at_depth_m_at_next_level and block_at_depth_2m in chain of block_at_depth_m_at_next_level + let mut common_ancestor = self.headers_store.get_header(block_at_depth_m_at_next_level).unwrap(); + + while !self.reachability_service.is_dag_ancestor_of(common_ancestor.hash, block_at_depth_2m) { + common_ancestor = match self.find_selected_parent_header_at_level(&common_ancestor, level) { + Ok(header) => header, + // Try to give this last header a chance at being root + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => break, + Err(e) => return Err(e), + }; } + + common_ancestor.hash }; - let root = root_header.hash; if level == 0 { return Ok((self.ghostdag_store.clone(), selected_tip, root)); } + // Step 3 - Fill the ghostdag data from root to tip let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); - let has_required_block = - self.fill_proof_ghostdag_data(root, root, pp, &ghostdag_store, level != 0, Some(required_block), true, level); + let has_required_block = self.fill_level_proof_ghostdag_data( + root, + pp_header.header.hash, + &ghostdag_store, + Some(block_at_depth_m_at_next_level), + level, + ); + // Step 4 - Check if we actually have enough depth. // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } tries += 1; - if finished_headers { + if is_last_level_header { if has_required_block { // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place. // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to // try to find 2500 depth worth of headers at a level while the proof only contains about 2000 headers. To still be // able to sync with such an older node, we proceed as long as we found the required block. warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned.
Required block found so trying anyway."); break Ok((ghostdag_store, selected_tip, root)); } else { - panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned"); + panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned"); } } - required_level_0_depth <<= 1; - warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); + + // If we don't have enough depth now, we need to look deeper + required_base_level_depth <<= 1; + warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_base_level_depth}"); } } @@ -1062,7 +1054,9 @@ impl PruningProofManager { continue; } - if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) { + if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) + || ghostdag_stores[level as usize].has(current).is_err() + { continue; } @@ -1103,18 +1097,15 @@ impl PruningProofManager { .collect_vec() } - /// BFS forward iterates from starting_hash until selected tip, ignoring blocks in the antipast of selected_tip. + /// BFS forward iterates from genesis_hash until selected tip, ignoring blocks in the antipast of selected_tip. /// For each block along the way, insert that hash into the ghostdag_store /// If we have a required_block to find, this will return true if that block was found along the way - fn fill_proof_ghostdag_data( + fn fill_level_proof_ghostdag_data( &self, genesis_hash: Hash, - starting_hash: Hash, selected_tip: Hash, ghostdag_store: &Arc, - use_score_as_work: bool, required_block: Option, - initialize_store: bool, level: BlockLevel, ) -> bool { let relations_service = RelationsStoreInFutureOfRoot { @@ -1129,17 +1120,15 @@ impl PruningProofManager { relations_service.clone(), self.headers_store.clone(), self.reachability_service.clone(), - use_score_as_work, + level != 0, ); - if initialize_store { - ghostdag_store.insert(genesis_hash, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); - ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); - } + ghostdag_store.insert(genesis_hash, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); let mut topological_heap: BinaryHeap<_> = Default::default(); let mut visited = BlockHashSet::new(); - for child in relations_service.get_children(starting_hash).unwrap().read().iter().copied() { + for child in relations_service.get_children(genesis_hash).unwrap().read().iter().copied() { topological_heap.push(Reverse(SortableBlock { hash: child, // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology @@ -1147,7 +1136,7 @@ impl PruningProofManager { })); } - let mut has_required_block = required_block.is_some_and(|required_block| starting_hash == required_block); + let mut has_required_block = required_block.is_some_and(|required_block| genesis_hash == required_block); loop { let Some(current) = topological_heap.pop() else { break; @@ -1171,7 +1160,7 @@ impl PruningProofManager { .unwrap() .iter() .copied() - .filter(|parent| self.reachability_service.is_dag_ancestor_of(starting_hash, *parent)) + .filter(|parent| self.reachability_service.is_dag_ancestor_of(genesis_hash, *parent)) .collect(); let 
current_gd = gd_manager.ghostdag(&relevant_parents); ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); @@ -1246,6 +1235,42 @@ impl PruningProofManager { Ok(current) } + /// Finds the block on a given level that is at base_depth deep from it. + /// Also returns if the block was the last one in the level + /// base_depth = the blue score depth at level 0 + fn level_block_at_base_depth( + &self, + level: BlockLevel, + high: Hash, + base_depth: u64, + ) -> PruningProofManagerInternalResult<(Hash, bool)> { + let high_header = self + .headers_store + .get_header(high) + .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {base_depth}, {err}")))?; + let high_header_score = high_header.blue_score; + let mut current_header = high_header; + + let mut is_last_header = false; + + while current_header.blue_score + base_depth >= high_header_score { + if current_header.direct_parents().is_empty() { + break; + } + + current_header = match self.find_selected_parent_header_at_level(&current_header, level) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + // We want to give this root a shot if all its past is pruned + is_last_header = true; + break; + } + Err(e) => return Err(e), + }; + } + Ok((current_header.hash, is_last_header)) + } + fn find_common_ancestor_in_chain_of_a( &self, ghostdag_store: &impl GhostdagStoreReader,
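A rough way to read the depth translation this refactor documents: blocks of level L occur about half as often per level above the current DAG level, so a depth target at level L corresponds to roughly depth << (L - current_dag_level) level-0 blocks. The sketch below captures that model; it is an assumption about estimated_blue_depth_at_level_0, whose exact clamping behavior is not shown in these diffs:

    /// Hypothetical model of the level-0 depth estimate (assumed, not quoted from the source):
    /// each level above the current DAG level roughly doubles how deep we must look at level 0.
    fn estimated_base_depth(level: u32, level_depth: u64, current_dag_level: u32) -> u64 {
        let shift = level.saturating_sub(current_dag_level);
        level_depth.checked_shl(shift).unwrap_or(u64::MAX)
    }
    // e.g. estimated_base_depth(3, 4200, 1) == 16800: two levels above the DAG level means 4x deeper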
From 32f890def1fccdf8ec743162e6780d9801af1980 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:41:45 -0600 Subject: [PATCH 55/65] Smaller safety margin for all --- consensus/src/processes/pruning_proof/mod.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 01d210405..cb787dfea 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -873,15 +873,11 @@ impl PruningProofManager { // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from. // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level - let mut required_base_level_depth = if level == 0 { - required_level_depth + 100 // smaller safety margin - } else { - self.estimated_blue_depth_at_level_0( - level, - required_level_depth * 5 / 4, // We take a safety margin - current_dag_level, - ) - }; + let mut required_base_level_depth = self.estimated_blue_depth_at_level_0( + level, + required_level_depth + 100, // We take a safety margin + current_dag_level, + ); let mut is_last_level_header; let mut tries = 0; From 15486f86ea2fbb043a3d26688e15940ba80aee07 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sat, 26 Oct 2024 15:50:03 -0600 Subject: [PATCH 56/65] Lint and logic fix --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index cb787dfea..ff3795bdd 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1051,7 +1051,7 @@ impl PruningProofManager { } if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) - || ghostdag_stores[level as usize].has(current).is_err() + || !ghostdag_stores[level].has(current).is_ok_and(|found| found) { continue; } From 6a41f2633a850f2a5bfdef7098170a12ea1e418c Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:28:13 -0600 Subject: [PATCH 57/65] Reduce loop depth increase on level proof retries Co-authored-by: Michael Sutton --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index ff3795bdd..baea088d7 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -951,7 +951,7 @@ impl PruningProofManager { } // If we don't have enough depth now, we need to look deeper - required_base_level_depth <<= 1; + required_base_level_depth = (required_base_level_depth as f64 * 1.1) as u64; warn!("Failed to find sufficient root for level {level} after {tries} tries.
Retrying again to find with depth {required_base_level_depth}"); } } From cf476f282b97730bdbdea8424eb1a07c012357e1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:29:24 -0600 Subject: [PATCH 58/65] Update consensus/src/processes/pruning_proof/mod.rs Co-authored-by: Michael Sutton --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index baea088d7..8b56d9171 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -139,7 +139,7 @@ impl RelationsStoreReader for R } fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> { - panic!("unimplemented") + unimplemented!() } } From be105f99a69919ffe633675fb3a978b2138441ec Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:33:11 -0600 Subject: [PATCH 59/65] Comment cleanup --- consensus/src/processes/pruning_proof/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8b56d9171..700a7132f 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -825,7 +825,7 @@ impl PruningProofManager { } /// selected parent at level = the parent of the header at the level - /// with the highest blue_work (using score as work in this case) + /// with the highest blue_work fn find_selected_parent_header_at_level( &self, header: &Header, @@ -868,7 +868,7 @@ impl PruningProofManager { self.find_selected_parent_header_at_level(&pp_header.header, level)?.hash }; - let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let required_level_depth = 2 * self.pruning_proof_m; // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from. @@ -1128,7 +1128,7 @@ impl PruningProofManager { topological_heap.push(Reverse(SortableBlock { hash: child, // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? + blue_work: self.headers_store.get_header(child).unwrap().blue_work, })); } @@ -1166,7 +1166,7 @@ impl PruningProofManager { topological_heap.push(Reverse(SortableBlock { hash: child, // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
+ blue_work: self.headers_store.get_header(child).unwrap().blue_work, })); } } From 59b1a050cdf3a8a84dd9da664db5bfc26831cde4 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:37:19 -0600 Subject: [PATCH 60/65] Remove unnecessary clone Co-authored-by: Michael Sutton --- consensus/src/processes/pruning_proof/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 700a7132f..38cd95320 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -328,8 +328,8 @@ impl PruningProofManager { blue_work: header.blue_work, selected_parent: calculated_gd.selected_parent, mergeset_blues: calculated_gd.mergeset_blues.clone(), - mergeset_reds: calculated_gd.mergeset_reds.clone(), - blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), + mergeset_reds: calculated_gd.mergeset_reds, + blues_anticone_sizes: calculated_gd.blues_anticone_sizes, } }; self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap(); From 5cd27f3b71d60f0f9b646a67d75a5a3d50d6b2c2 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:45:41 -0600 Subject: [PATCH 61/65] Rename genesis_hash to root; Remove redundant filter --- consensus/src/processes/pruning_proof/mod.rs | 22 +++++++------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 38cd95320..e1c68277c 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1093,12 +1093,12 @@ impl PruningProofManager { .collect_vec() } - /// BFS forward iterates from genesis_hash until selected tip, ignoring blocks in the antipast of selected_tip. + /// BFS forward iterates from root until selected tip, ignoring blocks in the antipast of selected_tip. 
/// For each block along the way, insert that hash into the ghostdag_store /// If we have a required_block to find, this will return true if that block was found along the way fn fill_level_proof_ghostdag_data( &self, - genesis_hash: Hash, + root: Hash, selected_tip: Hash, ghostdag_store: &Arc<DbGhostdagStore>, required_block: Option<Hash>, level: BlockLevel, ) -> bool { let relations_service = RelationsStoreInFutureOfRoot { relations_store: self.level_relations_services[level as usize].clone(), reachability_service: self.reachability_service.clone(), - root: genesis_hash, + root, }; let gd_manager = GhostdagManager::new( - genesis_hash, + root, self.ghostdag_k, ghostdag_store.clone(), relations_service.clone(), self.headers_store.clone(), self.reachability_service.clone(), level != 0, ); - ghostdag_store.insert(genesis_hash, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); let mut topological_heap: BinaryHeap<_> = Default::default(); let mut visited = BlockHashSet::new(); - for child in relations_service.get_children(genesis_hash).unwrap().read().iter().copied() { + for child in relations_service.get_children(root).unwrap().read().iter().copied() { topological_heap.push(Reverse(SortableBlock { hash: child, // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology blue_work: self.headers_store.get_header(child).unwrap().blue_work, })); } - let mut has_required_block = required_block.is_some_and(|required_block| genesis_hash == required_block); + let mut has_required_block = required_block.is_some_and(|required_block| root == required_block); loop { let Some(current) = topological_heap.pop() else { break; }; has_required_block = true; } - let relevant_parents: Box<[Hash]> = relations_service - .get_parents(current_hash) - .unwrap() - .iter() - .copied() - .filter(|parent| self.reachability_service.is_dag_ancestor_of(genesis_hash, *parent)) - .collect(); + let relevant_parents: Box<[Hash]> = relations_service.get_parents(current_hash).unwrap().iter().copied().collect(); let current_gd = gd_manager.ghostdag(&relevant_parents); ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists();
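The correctness of this forward BFS hinges on the property called out in the diff's comment: blue work never decreases from parent to child, so a min-heap keyed by blue work pops every block only after all of its retained parents. A toy, self-contained rendering of that ordering, with u64 hashes and an adjacency list in place of the real stores:

    use std::cmp::Reverse;
    use std::collections::{BinaryHeap, HashSet};

    /// blocks: (blue_work, hash, parents). Root's own data is assumed already in place,
    /// mirroring how fill_level_proof_ghostdag_data seeds root before the loop.
    fn forward_order(blocks: &[(u64, u64, Vec<u64>)], root: u64) -> Vec<u64> {
        let mut heap = BinaryHeap::new();
        let mut visited = HashSet::new();
        for &(work, hash, ref parents) in blocks {
            if parents.contains(&root) {
                heap.push(Reverse((work, hash))); // seed with children of root
            }
        }
        let mut order = Vec::new();
        while let Some(Reverse((_, hash))) = heap.pop() {
            if !visited.insert(hash) {
                continue; // a block can be pushed once per parent
            }
            order.push(hash); // all of its in-sub-DAG parents were already popped
            for &(work, child, ref parents) in blocks {
                if parents.contains(&hash) {
                    heap.push(Reverse((work, child)));
                }
            }
        }
        order
    }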
From 8e034525920126fcd949e126648bf04517f5ba8c Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:50:33 -0600 Subject: [PATCH 62/65] Cleaner reachability_stores type Co-authored-by: Michael Sutton --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e1c68277c..f4e66f43a 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -104,7 +104,7 @@ struct TempProofContext { headers_store: Arc<DbHeadersStore>, ghostdag_stores: Vec<Arc<DbGhostdagStore>>, relations_stores: Vec<DbRelationsStore>, - reachability_stores: Vec<Arc<parking_lot::lock_api::RwLock<parking_lot::RawRwLock, DbReachabilityStore>>>, + reachability_stores: Vec<Arc<RwLock<DbReachabilityStore>>>, ghostdag_managers: Vec<GhostdagManager<DbGhostdagStore, MTRelationsService<DbRelationsStore>, MTReachabilityService<DbReachabilityStore>, DbHeadersStore>>, db_lifetime: DbLifetime, From 01024b1366f7521812476eccc141c40de3047816 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 18:56:33 -0600 Subject: [PATCH 63/65] Change failed to find sufficient root log to debug --- consensus/src/processes/pruning_proof/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index f4e66f43a..5a949cc4e 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -27,7 +27,7 @@ use kaspa_consensus_core::{ trusted::{TrustedBlock, TrustedGhostdagData, TrustedHeader}, BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; -use kaspa_core::{debug, info, trace, warn}; +use kaspa_core::{debug, info, trace}; use kaspa_database::{ prelude::{CachePolicy, ConnBuilder, StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}, utils::DbLifetime, @@ -943,7 +943,7 @@ impl PruningProofManager { // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to // try to find 2500 depth worth of headers at a level while the proof only contains about 2000 headers. To still be // able to sync with such an older node, we proceed as long as we found the required block. - warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned. Required block found so trying anyway."); + debug!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned. Required block found so trying anyway."); break Ok((ghostdag_store, selected_tip, root)); } else { panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned"); @@ -952,7 +952,7 @@ impl PruningProofManager { // If we don't have enough depth now, we need to look deeper required_base_level_depth = (required_base_level_depth as f64 * 1.1) as u64; - warn!("Failed to find sufficient root for level {level} after {tries} tries.
Retrying again to find with depth {required_base_level_depth}"); } } From 1777b8061858d75fcf9f9411d3ed5da5fd304ae7 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 28 Oct 2024 21:06:18 -0600 Subject: [PATCH 64/65] Bump node version to 0.15.3 --- Cargo.lock | 116 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 112 +++++++++++++++++++++++++-------------------------- 2 files changed, 114 insertions(+), 114 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47db7f667..a951993e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2265,7 +2265,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "criterion", @@ -2282,7 +2282,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "igd-next", @@ -2304,14 +2304,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.15.2" +version = "0.15.3" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "bs58", @@ -2338,7 +2338,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2385,7 +2385,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "duration-string", "futures-util", @@ -2402,7 +2402,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2446,7 +2446,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2474,7 +2474,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-trait", @@ -2513,7 +2513,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -2532,7 +2532,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2556,7 +2556,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "duration-string", "futures", @@ -2574,7 +2574,7 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2592,7 +2592,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2614,7 +2614,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "enum-primitive-derive", @@ -2636,7 +2636,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2668,7 +2668,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2733,7 +2733,7 @@ dependencies = [ [[package]] 
name = "kaspa-hashes" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -2754,7 +2754,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2773,7 +2773,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2801,7 +2801,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "criterion", @@ -2822,14 +2822,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2845,7 +2845,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "futures-util", @@ -2872,7 +2872,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -2880,7 +2880,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "kaspa-hashes", @@ -2893,7 +2893,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2929,7 +2929,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "chrono", @@ -2960,7 +2960,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "ctrlc", @@ -2991,7 +2991,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-core", "log", @@ -3003,7 +3003,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "js-sys", @@ -3019,7 +3019,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3061,7 +3061,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3073,7 +3073,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "kaspa-addresses", @@ -3102,7 +3102,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -3194,7 +3194,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "secp256k1", "thiserror", @@ -3202,7 +3202,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bytes", "cfg-if 1.0.0", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = 
"kaspa-utxoindex" -version = "0.15.2" +version = "0.15.3" dependencies = [ "futures", "kaspa-consensus", @@ -3275,7 +3275,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-std", "async-trait", @@ -3287,7 +3287,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "js-sys", @@ -3301,7 +3301,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "aes", "ahash", @@ -3382,7 +3382,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -3415,7 +3415,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3428,7 +3428,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "derive_builder", @@ -3455,7 +3455,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3483,7 +3483,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "faster-hex", "hexplay", @@ -3494,7 +3494,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-std", "async-trait", @@ -3530,7 +3530,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ctrlc", "futures", @@ -3545,7 +3545,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "clap 4.5.19", @@ -3564,7 +3564,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -3592,7 +3592,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-simple-client-example" -version = "0.15.2" +version = "0.15.3" dependencies = [ "futures", "kaspa-rpc-core", @@ -3602,7 +3602,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ahash", "async-std", @@ -3632,7 +3632,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -4974,7 +4974,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "clap 4.5.19", @@ -5387,7 +5387,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index dd5eb3132..7141101f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ members = [ [workspace.package] rust-version = "1.81.0" -version = "0.15.2" +version = "0.15.3" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -80,61 +80,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.15.2", path = "testing/integration" } -kaspa-addresses = { version = "0.15.2", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.15.2", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.15.2", path = "wallet/bip32" } 
-kaspa-cli = { version = "0.15.2", path = "cli" } -kaspa-connectionmanager = { version = "0.15.2", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.15.2", path = "consensus" } -kaspa-consensus-core = { version = "0.15.2", path = "consensus/core" } -kaspa-consensus-client = { version = "0.15.2", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.15.2", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.15.2", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.15.2", path = "components/consensusmanager" } -kaspa-core = { version = "0.15.2", path = "core" } -kaspa-daemon = { version = "0.15.2", path = "daemon" } -kaspa-database = { version = "0.15.2", path = "database" } -kaspa-grpc-client = { version = "0.15.2", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.15.2", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.15.2", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.15.2", path = "crypto/hashes" } -kaspa-index-core = { version = "0.15.2", path = "indexes/core" } -kaspa-index-processor = { version = "0.15.2", path = "indexes/processor" } -kaspa-math = { version = "0.15.2", path = "math" } -kaspa-merkle = { version = "0.15.2", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.15.2", path = "metrics/core" } -kaspa-mining = { version = "0.15.2", path = "mining" } -kaspa-mining-errors = { version = "0.15.2", path = "mining/errors" } -kaspa-muhash = { version = "0.15.2", path = "crypto/muhash" } -kaspa-notify = { version = "0.15.2", path = "notify" } -kaspa-p2p-flows = { version = "0.15.2", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.15.2", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.15.2", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.15.2", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.15.2", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.15.2", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.15.2", path = "rpc/service" } -kaspa-txscript = { version = "0.15.2", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.15.2", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.15.2", path = "utils" } -kaspa-utils-tower = { version = "0.15.2", path = "utils/tower" } -kaspa-utxoindex = { version = "0.15.2", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.15.2", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.15.2", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.15.2", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.15.2", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.15.2", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.15.2", path = "wallet/macros" } -kaspa-wasm = { version = "0.15.2", path = "wasm" } -kaspa-wasm-core = { version = "0.15.2", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.15.2", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.15.2", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.15.2", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.15.2", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.15.2", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.15.2", path = "kaspad" } -kaspa-alloc = { version = "0.15.2", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.15.3", path = "testing/integration" } +kaspa-addresses = { version = "0.15.3", path = "crypto/addresses" } +kaspa-addressmanager 
= { version = "0.15.3", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.15.3", path = "wallet/bip32" } +kaspa-cli = { version = "0.15.3", path = "cli" } +kaspa-connectionmanager = { version = "0.15.3", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.15.3", path = "consensus" } +kaspa-consensus-core = { version = "0.15.3", path = "consensus/core" } +kaspa-consensus-client = { version = "0.15.3", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.15.3", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.15.3", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.15.3", path = "components/consensusmanager" } +kaspa-core = { version = "0.15.3", path = "core" } +kaspa-daemon = { version = "0.15.3", path = "daemon" } +kaspa-database = { version = "0.15.3", path = "database" } +kaspa-grpc-client = { version = "0.15.3", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.15.3", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.15.3", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.15.3", path = "crypto/hashes" } +kaspa-index-core = { version = "0.15.3", path = "indexes/core" } +kaspa-index-processor = { version = "0.15.3", path = "indexes/processor" } +kaspa-math = { version = "0.15.3", path = "math" } +kaspa-merkle = { version = "0.15.3", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.15.3", path = "metrics/core" } +kaspa-mining = { version = "0.15.3", path = "mining" } +kaspa-mining-errors = { version = "0.15.3", path = "mining/errors" } +kaspa-muhash = { version = "0.15.3", path = "crypto/muhash" } +kaspa-notify = { version = "0.15.3", path = "notify" } +kaspa-p2p-flows = { version = "0.15.3", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.15.3", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.15.3", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.15.3", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.15.3", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.15.3", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.15.3", path = "rpc/service" } +kaspa-txscript = { version = "0.15.3", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.15.3", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.15.3", path = "utils" } +kaspa-utils-tower = { version = "0.15.3", path = "utils/tower" } +kaspa-utxoindex = { version = "0.15.3", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.15.3", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.15.3", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.15.3", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.15.3", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.15.3", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.15.3", path = "wallet/macros" } +kaspa-wasm = { version = "0.15.3", path = "wasm" } +kaspa-wasm-core = { version = "0.15.3", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.15.3", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.15.3", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.15.3", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.15.3", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.15.3", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.15.3", path = "kaspad" } +kaspa-alloc = { version = "0.15.3", path = "utils/alloc" } # external aes = "0.8.3" From 
f83a70fdcc75583d5d3d6d4dd09d5fe4ff3efc34 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Tue, 29 Oct 2024 13:59:18 +0000
Subject: [PATCH 65/65] A few minor leftovers

---
 consensus/src/processes/pruning_proof/mod.rs | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs
index 5a949cc4e..e9690ec38 100644
--- a/consensus/src/processes/pruning_proof/mod.rs
+++ b/consensus/src/processes/pruning_proof/mod.rs
@@ -327,7 +327,7 @@ impl PruningProofManager {
                     blue_score: header.blue_score,
                     blue_work: header.blue_work,
                     selected_parent: calculated_gd.selected_parent,
-                    mergeset_blues: calculated_gd.mergeset_blues.clone(),
+                    mergeset_blues: calculated_gd.mergeset_blues,
                     mergeset_reds: calculated_gd.mergeset_reds,
                     blues_anticone_sizes: calculated_gd.blues_anticone_sizes,
                 }
@@ -1010,6 +1010,7 @@ impl PruningProofManager {
             .map_err(|err| format!("level: {}, err: {}", level, err))
             .unwrap();
 
+        // TODO (relaxed): remove the assertion below
         // (New Logic) This is the root we calculated by going through block relations
         let root = roots_by_level[level];
         // (Old Logic) This is the root we can calculate given that the GD records are already filled
@@ -1050,6 +1051,9 @@ impl PruningProofManager {
                 continue;
             }
 
+            // The second condition is always expected to be true (ghostdag store will have the entry)
+            // because we are traversing the exact diamond (future(root) ⋂ past(tip)) for which we
+            // calculated GD (see fill_level_proof_ghostdag_data). TODO (relaxed): remove the condition or turn it into an assertion
             if !self.reachability_service.is_dag_ancestor_of(current, selected_tip)
                 || !ghostdag_stores[level].has(current).is_ok_and(|found| found)
             {
@@ -1062,6 +1066,7 @@ impl PruningProofManager {
             }
         }
 
+        // TODO (relaxed): remove the assertion below
        // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof
         let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash));
         let chain_2m = self
@@ -1151,8 +1156,7 @@ impl PruningProofManager {
                 has_required_block = true;
             }
 
-            let relevant_parents: Box<[Hash]> = relations_service.get_parents(current_hash).unwrap().iter().copied().collect();
-            let current_gd = gd_manager.ghostdag(&relevant_parents);
+            let current_gd = gd_manager.ghostdag(&relations_service.get_parents(current_hash).unwrap());
             ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists();
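
Editor's note: the two non-comment hunks in this final patch are mechanical cleanups. A value consumed for the last time is moved instead of cloned, and the parents are fed to `ghostdag` directly instead of first being copied into a fresh `Box<[Hash]>`. A minimal standalone sketch of the same two patterns, using hypothetical `Data`/`total` stand-ins rather than the real consensus types:

```rust
// Sketch of the two cleanup patterns from PATCH 65/65 (hypothetical types).

#[derive(Debug)]
struct Data {
    blues: Vec<u64>,
    reds: Vec<u64>,
}

// Pattern 1: `source` is owned and never used again, so its fields can be
// moved out directly; `source.blues.clone()` would allocate a copy for nothing.
fn repackage(source: Data) -> Data {
    Data { blues: source.blues, reds: source.reds }
}

// Pattern 2: accept a borrowed slice and let the caller pass its existing
// storage, instead of requiring a freshly collected `Box<[u64]>`.
fn total(values: &[u64]) -> u64 {
    values.iter().sum()
}

fn main() {
    let repacked = repackage(Data { blues: vec![1, 2], reds: vec![3] });
    let parents = vec![4u64, 5, 6];
    // No intermediate `parents.iter().copied().collect::<Box<[u64]>>()` needed:
    // `&Vec<u64>` deref-coerces to the `&[u64]` the function expects.
    println!("{:?}, total = {}", repacked, total(&parents));
}
```

The direct call in the patch presumably type-checks the same way: `get_parents` hands back shared ownership of the parents list, and a reference to it deref-coerces down to the slice parameter `ghostdag` takes, so the intermediate `relevant_parents` collection was pure overhead.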
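
The new three-line comment in the traversal loop also records an invariant worth making explicit: `fill_level_proof_ghostdag_data` computed GHOSTDAG data for exactly the diamond future(root) ⋂ past(tip), so any block that passes the reachability filter must already have a store entry, and only the first half of the condition is load-bearing. A hedged sketch of what acting on that TODO could look like, with simplified stand-in traits rather than the real store and reachability APIs:

```rust
// Illustrative only: turning the defensive store-membership check into an
// assertion, per the TODO. `GhostdagStoreLike`, `ReachabilityLike` and `Hash`
// are simplified stand-ins, not the actual rusty-kaspa interfaces.
type Hash = u64;

trait GhostdagStoreLike {
    fn has(&self, hash: Hash) -> bool;
}

trait ReachabilityLike {
    fn is_dag_ancestor_of(&self, ancestor: Hash, descendant: Hash) -> bool;
}

fn should_include(
    store: &impl GhostdagStoreLike,
    reachability: &impl ReachabilityLike,
    current: Hash,
    selected_tip: Hash,
) -> bool {
    // Load-bearing filter: blocks outside past(selected_tip) are legitimately skipped.
    if !reachability.is_dag_ancestor_of(current, selected_tip) {
        return false;
    }
    // Inside future(root) ⋂ past(tip) the GD entry was filled by the earlier
    // pass, so a miss here would indicate a logic error rather than valid input.
    debug_assert!(store.has(current), "GD entry missing inside the traversed diamond");
    true
}
```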