From 5d3b10f924f9ffecd997f3cf7af2a528efb38fab Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:31:34 -0600 Subject: [PATCH 01/58] Refactor pruning proof validation to many functions Co-authored-by: Ori Newman --- consensus/src/consensus/services.rs | 1 + consensus/src/processes/pruning_proof/mod.rs | 194 ++++++++++++++----- 2 files changed, 145 insertions(+), 50 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 4afb5938a..3db1e8d38 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -185,6 +185,7 @@ impl ConsensusServices { parents_manager.clone(), reachability_service.clone(), ghostdag_managers.clone(), + ghostdag_primary_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6324aa4ee..3dfed8660 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -26,7 +26,10 @@ use kaspa_consensus_core::{ BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; use kaspa_core::{debug, info, trace}; -use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; +use kaspa_database::{ + prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}, + utils::DbLifetime, +}; use kaspa_hashes::Hash; use kaspa_pow::calc_block_level; use kaspa_utils::{binary_heap::BinaryHeapExtensions, vec::VecExtensions}; @@ -41,7 +44,7 @@ use crate::{ services::reachability::{MTReachabilityService, ReachabilityService}, stores::{ depth::DbDepthStore, - ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, 
headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -88,6 +91,16 @@ impl Clone for CachedPruningPointData { } } +struct TempProofContext { + headers_store: Arc, + ghostdag_stores: Vec>, + relations_stores: Vec, + reachability_stores: Vec>>, + ghostdag_managers: + Vec, DbHeadersStore>>, + db_lifetime: DbLifetime, +} + pub struct PruningProofManager { db: Arc, @@ -96,6 +109,7 @@ pub struct PruningProofManager { reachability_relations_store: Arc>, reachability_service: MTReachabilityService, ghostdag_stores: Arc>>, + ghostdag_primary_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, past_pruning_points_store: Arc, @@ -106,6 +120,7 @@ pub struct PruningProofManager { selected_chain_store: Arc>, ghostdag_managers: Arc>, + ghostdag_primary_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -130,6 +145,7 @@ impl PruningProofManager { parents_manager: DbParentsManager, reachability_service: MTReachabilityService, ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, max_block_level: BlockLevel, @@ -146,6 +162,7 @@ impl PruningProofManager { reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_primary_store: storage.ghostdag_primary_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -168,6 +185,7 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, + ghostdag_primary_manager: ghostdag_manager, is_consensus_exiting, } @@ -244,8 +262,12 @@ impl PruningProofManager { 
self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); let gd = if header.hash == self.genesis_hash { self.ghostdag_managers[level].genesis_ghostdag_data() - } else if level == 0 { - if let Some(gd) = trusted_gd_map.get(&header.hash) { + } else { + self.ghostdag_managers[level].ghostdag(&parents) + }; + + if level == 0 { + let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); @@ -258,18 +280,18 @@ impl PruningProofManager { mergeset_reds: calculated_gd.mergeset_reds.clone(), blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), } - } + }; + self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + } } } let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_managers[0].ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_primary_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -387,18 +409,16 @@ impl PruningProofManager { } } - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { - if proof.len() != self.max_block_level as usize + 1 { - return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); - } + fn init_validate_pruning_point_proof_stores_and_processes( + &self, + proof: &PruningPointProof, + ) -> PruningImportResult { if proof[0].is_empty() { return Err(PruningImportError::PruningProofNotEnoughHeaders); } let headers_estimate = self.estimate_proof_unique_size(proof); - let proof_pp_header = proof[0].last().expect("checked if empty"); - let 
proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -444,6 +464,23 @@ impl PruningProofManager { db.write(batch).unwrap(); } + Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, relations_stores, reachability_stores, ghostdag_managers }) + } + + fn populate_stores_for_validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + stores_and_processes: &mut TempProofContext, + ) -> PruningImportResult> { + let headers_store = &stores_and_processes.headers_store; + let ghostdag_stores = &stores_and_processes.ghostdag_stores; + let mut relations_stores = stores_and_processes.relations_stores.clone(); + let reachability_stores = &stores_and_processes.reachability_stores; + let ghostdag_managers = &stores_and_processes.ghostdag_managers; + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; for level in (0..=self.max_block_level).rev() { // Before processing this level, check if the process is exiting so we can end early @@ -533,45 +570,91 @@ impl PruningProofManager { selected_tip_by_level[level_idx] = selected_tip; } + Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) + } + + fn validate_proof_selected_tip( + &self, + proof_selected_tip: Hash, + level: BlockLevel, + proof_pp_level: BlockLevel, + proof_pp: Hash, + proof_pp_header: &Header, + ) -> PruningImportResult<()> { + // A proof selected tip of some level has to be the proof suggested pruning point itself if its level + // is lower or equal to the pruning point level, or a parent of the pruning point on the relevant level + // otherwise.
+ if level <= proof_pp_level { + if proof_selected_tip != proof_pp { + return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); + } + } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { + return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); + } + + Ok(()) + } + + // find_proof_and_consensus_common_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. If no + // such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( + &self, + ghostdag_stores: &[Arc], + proof_selected_tip: Hash, + level: BlockLevel, + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match self.ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; + } + } + + pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + if proof.len() != self.max_block_level as usize + 1 { + return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); + } + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(&proof)?; + let selected_tip_by_level = 
self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; + let ghostdag_stores = stores_and_processes.ghostdag_stores; + let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - for (level_idx, selected_tip) in selected_tip_by_level.into_iter().enumerate() { + for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; - let selected_tip = selected_tip.unwrap(); - if level <= proof_pp_level { - if selected_tip != proof_pp { - return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(selected_tip, level)); - } - } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip) { - return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(selected_tip, level)); - } + self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - let mut proof_current = selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - let common_ancestor_data = loop { - match self.ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap(); - } - }; - }; - - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = common_ancestor_data { + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = 
self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &ghostdag_stores, + selected_tip, + level, + proof_selected_tip_gd, + ) { let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(&current_pp_header, level).iter().copied() { @@ -593,8 +676,19 @@ impl PruningProofManager { return Ok(()); } + // If we got here it means there's no level with shared blocks + // between the proof and the current consensus. In this case we + // consider the proof to be better if it has at least one level + // with 2*self.pruning_proof_m blue blocks where consensus doesn't. for level in (0..=self.max_block_level).rev() { let level_idx = level as usize; + + let proof_selected_tip = selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + match relations_read[level_idx].get_parents(current_pp).unwrap_option() { Some(parents) => { if parents @@ -614,7 +708,7 @@ impl PruningProofManager { drop(pruning_read); drop(relations_read); - drop(db_lifetime); + drop(stores_and_processes.db_lifetime); Err(PruningImportError::PruningProofNotEnoughHeaders) } @@ -816,7 +910,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_stores[0].get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -836,7 +930,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_managers[0].sort_blocks(anticone); + 
let mut anticone = self.ghostdag_primary_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -847,14 +941,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_stores[0].get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_primary_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_stores[0].get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_primary_store.get_data(hash).unwrap()).into(), }); } } @@ -862,7 +956,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = self.ghostdag_stores[0].get_data(hash).unwrap(); + let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -894,7 +988,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_stores[0].get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_primary_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); From 40f1cc9bbe24031f9aefaae0db9bef7e1897bbb2 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:32:26 -0600 Subject: [PATCH 02/58] Use blue score as work for higher levels Co-authored-by: Ori Newman --- 
consensus/src/consensus/services.rs | 1 + consensus/src/processes/ghostdag/protocol.rs | 24 +++++++++++++------- consensus/src/processes/pruning_proof/mod.rs | 1 + 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 3db1e8d38..b5617ea76 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -126,6 +126,7 @@ impl ConsensusServices { relations_services[level].clone(), storage.headers_store.clone(), reachability_service.clone(), + level != 0, ) }) .collect_vec(), diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 87beeb565..ac9ae41d7 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -5,6 +5,7 @@ use kaspa_consensus_core::{ BlockHashMap, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; +use kaspa_math::Uint192; use kaspa_utils::refs::Refs; use crate::{ @@ -29,6 +30,7 @@ pub struct GhostdagManager, pub(super) reachability_service: U, + use_score_as_work: bool, } impl GhostdagManager { @@ -39,8 +41,9 @@ impl, reachability_service: U, + use_score_as_work: bool, ) -> Self { - Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store } + Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work } } pub fn genesis_ghostdag_data(&self) -> GhostdagData { @@ -115,14 +118,19 @@ impl Date: Mon, 3 Jun 2024 23:34:17 -0600 Subject: [PATCH 03/58] Remove pruning processor dependency on gd managers Co-authored-by: Ori Newman --- consensus/src/pipeline/pruning_processor/processor.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 8cded745a..bee46834a 100644 --- 
a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -2,7 +2,7 @@ use crate::{ consensus::{ - services::{ConsensusServices, DbGhostdagManager, DbPruningPointManager}, + services::{ConsensusServices, DbPruningPointManager}, storage::ConsensusStorage, }, model::{ @@ -69,7 +69,6 @@ pub struct PruningProcessor { // Managers and Services reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, pruning_point_manager: DbPruningPointManager, pruning_proof_manager: Arc, @@ -106,7 +105,6 @@ impl PruningProcessor { db, storage: storage.clone(), reachability_service: services.reachability_service.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), pruning_point_manager: services.pruning_point_manager.clone(), pruning_proof_manager: services.pruning_proof_manager.clone(), pruning_lock, From 1df5a22e2c5afb8b45a7cc59ae2579ac640cf238 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:35:17 -0600 Subject: [PATCH 04/58] Consistency renaming Co-authored-by: Ori Newman --- .../pipeline/body_processor/body_validation_in_context.rs | 2 +- consensus/src/pipeline/body_processor/processor.rs | 6 +++--- consensus/src/pipeline/header_processor/processor.rs | 7 ++++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 2425556d0..b437f1f13 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -25,7 +25,7 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; + let (pmt, _) = 
self.window_manager.calc_past_median_time(&self.ghostdag_primary_store.get_data(block.hash()).unwrap())?; for tx in block.transactions.iter() { if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { return Err(RuleError::TxInContextFailed(tx.id(), e)); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 1ea674263..8b6d35e19 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -55,7 +55,7 @@ pub struct BlockBodyProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_store: Arc, + pub(super) ghostdag_primary_store: Arc, pub(super) headers_store: Arc, pub(super) block_transactions_store: Arc, pub(super) body_tips_store: Arc>, @@ -92,7 +92,7 @@ impl BlockBodyProcessor { db: Arc, statuses_store: Arc>, - ghostdag_store: Arc, + ghostdag_primary_store: Arc, headers_store: Arc, block_transactions_store: Arc, body_tips_store: Arc>, @@ -116,7 +116,7 @@ impl BlockBodyProcessor { db, statuses_store, reachability_service, - ghostdag_store, + ghostdag_primary_store, headers_store, block_transactions_store, body_tips_store, diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index d1b74aeb5..a90e67c50 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -356,13 +356,13 @@ impl HeaderProcessor { .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) }) .collect_vec(); - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); + let ghostdag_primary_data = &ghostdag_data[0]; let pp 
= ctx.pruning_point(); // Create a DB batch writer @@ -375,6 +375,7 @@ impl HeaderProcessor { for (level, datum) in ghostdag_data.iter().enumerate() { self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); } + if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); } @@ -395,8 +396,8 @@ impl HeaderProcessor { // time, and thus serializing this part will do no harm. However this should be benchmarked. The // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - let selected_parent = ghostdag_data[0].selected_parent; - let mut reachability_mergeset = ghostdag_data[0].unordered_mergeset_without_selected_parent(); + let selected_parent = ghostdag_primary_data.selected_parent; + let mut reachability_mergeset = ghostdag_primary_data.unordered_mergeset_without_selected_parent(); reachability::add_block(&mut staging, ctx.hash, selected_parent, &mut reachability_mergeset).unwrap(); // Non-append only stores need to use write locks. 
From d12592c34f3055fd474c3ae8847c4c83c853aff1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:35:37 -0600 Subject: [PATCH 05/58] Update db version Co-authored-by: Ori Newman --- consensus/src/consensus/factory.rs | 2 +- database/src/registry.rs | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f34aa54f9 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { diff --git a/database/src/registry.rs b/database/src/registry.rs index 9e1b129d6..981af729d 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -36,10 +36,12 @@ pub enum DatabaseStorePrefixes { UtxoMultisets = 26, VirtualUtxoset = 27, VirtualState = 28, + GhostdagProof = 29, + GhostdagCompactProof = 30, // ---- Decomposed reachability stores ---- - ReachabilityTreeChildren = 30, - ReachabilityFutureCoveringSet = 31, + ReachabilityTreeChildren = 31, + ReachabilityFutureCoveringSet = 32, // ---- Metadata ---- MultiConsensusMetadata = 124, From 2bea765a2228f2c003589c89193ce66307087c62 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 22:44:21 -0600 Subject: [PATCH 06/58] GD Optimizations Co-authored-by: Ori Newman --- consensus/src/model/stores/ghostdag.rs | 21 ++ .../pipeline/pruning_processor/processor.rs | 3 +- consensus/src/processes/pruning_proof/mod.rs | 296 ++++++++++++++++-- database/src/registry.rs | 2 + simpa/src/main.rs | 7 + 5 files changed, 304 insertions(+), 25 deletions(-) diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index 89c4686c5..3ffe23e7e 
100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -271,6 +271,27 @@ impl DbGhostdagStore { } } + pub fn new_temp( + db: Arc, + level: BlockLevel, + cache_policy: CachePolicy, + compact_cache_policy: CachePolicy, + temp_index: u8, + ) -> Self { + assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); + let lvl_bytes = level.to_le_bytes(); + let temp_index_bytes = temp_index.to_le_bytes(); + let prefix = DatabaseStorePrefixes::TempGhostdag.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + let compact_prefix = + DatabaseStorePrefixes::TempGhostdagCompact.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_policy, prefix), + compact_access: CachedDbAccess::new(db, compact_cache_policy, compact_prefix), + } + } + pub fn clone_with_new_cache(&self, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { Self::new(Arc::clone(&self.db), self.level, cache_policy, compact_cache_policy) } diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index bee46834a..cd9026565 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -455,7 +455,8 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - self.assert_proof_rebuilding(proof, new_pruning_point); + // self.assert_proof_rebuilding(proof, new_pruning_point); + self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); self.assert_data_rebuilding(data, new_pruning_point); } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 31b1df833..8b4b3e299 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -25,7 +25,7 @@ use 
kaspa_consensus_core::{ trusted::{TrustedBlock, TrustedGhostdagData, TrustedHeader}, BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; -use kaspa_core::{debug, info, trace}; +use kaspa_core::{debug, info, trace, warn}; use kaspa_database::{ prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}, utils::DbLifetime, @@ -41,11 +41,14 @@ use crate::{ storage::ConsensusStorage, }, model::{ - services::reachability::{MTReachabilityService, ReachabilityService}, + services::{ + reachability::{MTReachabilityService, ReachabilityService}, + relations::MTRelationsService, + }, stores::{ depth::DbDepthStore, ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, - headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, + headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, pruning::{DbPruningStore, PruningStoreReader}, @@ -78,7 +81,11 @@ enum PruningProofManagerInternalError { #[error("cannot find a common ancestor: {0}")] NoCommonAncestor(String), + + #[error("missing headers to build proof: {0}")] + NotEnoughHeadersToBuildProof(String), } +type PruningProofManagerInternalResult = std::result::Result; struct CachedPruningPointData { pruning_point: Hash, @@ -714,40 +721,280 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } + // TODO: Find a better name + fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { + let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); + pp_header + .parents_by_level + .iter() + .enumerate() + .skip(1) + .find_map(|(level, parents)| { + if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { + None + } else { + Some((level - 1) as BlockLevel) + } + }) + .unwrap_or(self.max_block_level) + } + + fn 
estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { + level_depth << current_dag_level.saturating_sub(level) + } + + fn find_selected_parent_header_at_level( + &self, + header: &Header, + level: BlockLevel, + ) -> PruningProofManagerInternalResult> { + let parents = self.parents_manager.parents_at_level(header, level); + let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() }; + for parent in parents.iter().copied().skip(1) { + let sblock = SortableBlock { + hash: parent, + blue_work: self + .headers_store + .get_blue_score(parent) + .unwrap_option() + .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(format!( + "find_selected_parent_header_at_level (level {level}) couldn't find the header for block {parent}" + )))? + .into(), + }; + if sblock > sp { + sp = sblock; + } + } + // TODO: For higher levels the chance of having more than two parents is very small, so it might make sense to fetch the whole header for the SortableBlock instead of blue_score (which will probably come from a compact header). + self.headers_store.get_header(sp.hash).unwrap_option().ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof( + format!("find_selected_parent_header_at_level (level {level}) couldn't find the header for block {}", sp.hash,), + )) + // Ok(self.headers_store.get_header(sp.hash).unwrap_option().expect("already checked if compact header exists above")) + } + + fn find_sufficient_root( + &self, + pp_header: &HeaderWithBlockLevel, + level: BlockLevel, + current_dag_level: BlockLevel, + required_block: Option, + temp_db: Arc, + ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { + let selected_tip_header = if pp_header.block_level >= level { + pp_header.header.clone() + } else { + self.find_selected_parent_header_at_level(&pp_header.header, level)? 
+ }; + let selected_tip = selected_tip_header.hash; + let pp = pp_header.header.hash; + let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size + let required_level_depth = 2 * self.pruning_proof_m; + let mut required_level_0_depth = if level == 0 { + required_level_depth + } else { + self.estimated_blue_depth_at_level_0( + level, + required_level_depth * 5 / 4, // We take a safety margin + current_dag_level, + ) + }; + + let mut tries = 0; + loop { + let required_block = if let Some(required_block) = required_block { + // TODO: We can change it to skip related checks if `None` + required_block + } else { + selected_tip + }; + + let mut finished_headers = false; + let mut finished_headers_for_required_block_chain = false; + let mut current_header = selected_tip_header.clone(); + let mut required_block_chain = BlockHashSet::new(); + let mut selected_chain = BlockHashSet::new(); + let mut intersected_with_required_block_chain = false; + let mut current_required_chain_block = self.headers_store.get_header(required_block).unwrap(); + let root_header = loop { + if !intersected_with_required_block_chain { + required_block_chain.insert(current_required_chain_block.hash); + selected_chain.insert(current_header.hash); + if required_block_chain.contains(&current_header.hash) + || required_block_chain.contains(&current_required_chain_block.hash) + { + intersected_with_required_block_chain = true; + } + } + + if current_header.direct_parents().is_empty() // Stop at genesis + || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + && intersected_with_required_block_chain) + { + break current_header; + } + current_header = match self.find_selected_parent_header_at_level(&current_header, level) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + if 
!intersected_with_required_block_chain { + warn!("it's unknown if the selected root for level {level} ( {} ) is in the chain of the required block {required_block}", current_header.hash) + } + finished_headers = true; // We want to give this root a shot if all its past is pruned + break current_header; + } + Err(e) => return Err(e), + }; + + if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { + current_required_chain_block = + match self.find_selected_parent_header_at_level(&current_required_chain_block, level) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + finished_headers_for_required_block_chain = true; + current_required_chain_block + } + Err(e) => return Err(e), + }; + } + }; + let root = root_header.hash; + + if level == 0 { + return Ok((self.ghostdag_primary_store.clone(), selected_tip, root)); + } + + let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); + let gd_manager = GhostdagManager::new( + root, + self.ghostdag_k, + ghostdag_store.clone(), + relations_service.clone(), + self.headers_store.clone(), + self.reachability_service.clone(), + true, + ); + ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + let mut topological_heap: BinaryHeap<_> = Default::default(); + let mut visited = BlockHashSet::new(); + for child in relations_service.get_children(root).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
+ })); + } + + let mut has_required_block = root == required_block; + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, pp) { + // We don't care about blocks in the antipast of the pruning point + continue; + } + + if !has_required_block && current_hash == required_block { + has_required_block = true; + } + + let relevant_parents: Box<[Hash]> = relations_service + .get_parents(current_hash) + .unwrap() + .iter() + .copied() + .filter(|parent| self.reachability_service.is_dag_ancestor_of(root, *parent)) + .collect(); + let current_gd = gd_manager.ghostdag(&relevant_parents); + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap(); + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? + })); + } + } + + if has_required_block + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + { + break Ok((ghostdag_store, selected_tip, root)); + } + + tries += 1; + if finished_headers { + panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned") + } + required_level_0_depth <<= 1; + warn!("Failed to find sufficient root for level {level} after {tries} tries. 
Retrying again to find with depth {required_level_0_depth}"); + } + } + + fn calc_gd_for_all_levels( + &self, + pp_header: &HeaderWithBlockLevel, + temp_db: Arc, + ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); + let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; + let mut root_by_level = vec![None; self.max_block_level as usize + 1]; + for level in (0..=self.max_block_level).rev() { + let level_usize = level as usize; + let required_block = if level != self.max_block_level { + let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone(); + let block_at_depth_m_at_next_level = self + .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m) + .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) + .unwrap(); + Some(block_at_depth_m_at_next_level) + } else { + None + }; + let (store, selected_tip, root) = self + .find_sufficient_root(&pp_header, level, current_dag_level, required_block, temp_db.clone()) + .expect(&format!("find_sufficient_root failed for level {level}")); + ghostdag_stores[level_usize] = Some(store); + selected_tip_by_level[level_usize] = Some(selected_tip); + root_by_level[level_usize] = Some(root); + } + + ( + ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(), + selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(), + root_by_level.into_iter().map(Option::unwrap).collect_vec(), + ) + } + pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { if pp == self.genesis_hash { return vec![]; } + let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); - let selected_tip_by_level = (0..=self.max_block_level) - .map(|level| { - if level <= 
pp_header.block_level { - pp - } else { - self.ghostdag_managers[level as usize].find_selected_parent( - self.parents_manager - .parents_at_level(&pp_header.header, level) - .iter() - .filter(|parent| self.ghostdag_stores[level as usize].has(**parent).unwrap()) - .cloned(), - ) - } - }) - .collect_vec(); + let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); (0..=self.max_block_level) .map(|level| { let level = level as usize; let selected_tip = selected_tip_by_level[level]; let block_at_depth_2m = self - .block_at_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap(); - let root = if level != self.max_block_level as usize { + let root = roots_by_level[level]; + let old_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self - .block_at_depth(&*self.ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) .unwrap(); if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { @@ -756,7 +1003,7 @@ impl PruningProofManager { block_at_depth_2m } else { self.find_common_ancestor_in_chain_of_a( - &*self.ghostdag_stores[level], + &*ghostdag_stores[level], block_at_depth_m_at_next_level, block_at_depth_2m, ) @@ -766,11 +1013,12 @@ impl PruningProofManager { } else { block_at_depth_2m }; + // assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, 
self.ghostdag_stores[level].get_blue_work(root).unwrap()))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -783,7 +1031,7 @@ impl PruningProofManager { headers.push(self.headers_store.get_header(current).unwrap()); for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.ghostdag_stores[level].get_blue_work(child).unwrap()))); + queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); } } diff --git a/database/src/registry.rs b/database/src/registry.rs index 981af729d..0b4f6e5d0 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -38,6 +38,8 @@ pub enum DatabaseStorePrefixes { VirtualState = 28, GhostdagProof = 29, GhostdagCompactProof = 30, + TempGhostdag = 33, + TempGhostdagCompact = 34, // ---- Decomposed reachability stores ---- ReachabilityTreeChildren = 31, diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 1baecc3e7..8975e974a 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -222,6 +222,11 @@ fn main_impl(mut args: Args) { Default::default(), unix_now(), )); + + // TODO: Remove the call to get_pruning_point_proof + // let the_hash = Hash::from_str("45d0bb998ab8c3513d18fef3f70d9c686539da7cbe4fab8021e55be1b3a0f8df").unwrap(); + // assert!(topologically_ordered_hashes(&consensus, config.params.genesis.hash).into_iter().contains(&the_hash)); + let _ = consensus.get_pruning_point_proof(); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -441,6 +446,8 @@ mod tests { args.target_blocks = Some(5000); args.tpb = 1; args.test_pruning = true; + // args.output_dir = Some("/tmp/simpa".into()); + // args.input_dir = 
Some("/tmp/simpa".into()); kaspa_core::log::try_init_logger(&args.log_level); // As we log the panic, we want to set it up after the logger From 902b2172528982fe61c3a5ac2d41396960188d48 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:52:38 -0600 Subject: [PATCH 07/58] Remove remnant of old impl. optimize db prefixes --- database/src/registry.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/database/src/registry.rs b/database/src/registry.rs index 0b4f6e5d0..87e89a491 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -36,14 +36,14 @@ pub enum DatabaseStorePrefixes { UtxoMultisets = 26, VirtualUtxoset = 27, VirtualState = 28, - GhostdagProof = 29, - GhostdagCompactProof = 30, - TempGhostdag = 33, - TempGhostdagCompact = 34, // ---- Decomposed reachability stores ---- - ReachabilityTreeChildren = 31, - ReachabilityFutureCoveringSet = 32, + ReachabilityTreeChildren = 30, + ReachabilityFutureCoveringSet = 31, + + // ---- Ghostdag Proof + TempGhostdag = 40, + TempGhostdagCompact = 41, // ---- Metadata ---- MultiConsensusMetadata = 124, From 7f1f412a7abc3b60fe8148483617ae9b187c6a44 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:13:31 -0600 Subject: [PATCH 08/58] Ensure parents are in relations; Add comments apply_proof only inserts parent entries for a header from the proof into the relations store for a level if there was GD data in the old stores for that header. 
This adds a check to filter out parent records not in relations store --- consensus/src/processes/pruning_proof/mod.rs | 62 +++++++++++++------- 1 file changed, 41 insertions(+), 21 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8b4b3e299..c2aca9f49 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -721,7 +721,8 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } - // TODO: Find a better name + /// Looks for the first level whose parents are different from the direct parents of the pp_header + /// The current DAG level is the one right below that. fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); pp_header @@ -743,12 +744,26 @@ impl PruningProofManager { level_depth << current_dag_level.saturating_sub(level) } + /// selected parent at level = the parent of the header at the level + /// with the highest blue_work (using score as work in this case) fn find_selected_parent_header_at_level( &self, header: &Header, level: BlockLevel, + relations_service: MTRelationsService, ) -> PruningProofManagerInternalResult> { - let parents = self.parents_manager.parents_at_level(header, level); + // Logic of apply_proof only inserts parent entries for a header from the proof + // into the relations store for a level if there was GD data in the old stores for that + // header. 
To mimic that logic here, we need to filter out parents that are NOT in the relations_service + let parents = self + .parents_manager + .parents_at_level(header, level) + .iter() + .copied() + .filter(|parent| relations_service.has(*parent).unwrap()) + .collect_vec() + .push_if_empty(ORIGIN); + let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() }; for parent in parents.iter().copied().skip(1) { let sblock = SortableBlock { @@ -781,14 +796,16 @@ required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { + let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { - self.find_selected_parent_header_at_level(&pp_header.header, level)? + self.find_selected_parent_header_at_level(&pp_header.header, level, relations_service.clone())? }; + let selected_tip = selected_tip_header.hash; let pp = pp_header.header.hash; - let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; let mut required_level_0_depth = if level == 0 { @@ -822,7 +839,7 @@ required_block_chain.insert(current_required_chain_block.hash); selected_chain.insert(current_header.hash); if required_block_chain.contains(&current_header.hash) - || required_block_chain.contains(&current_required_chain_block.hash) + || selected_chain.contains(&current_required_chain_block.hash) { intersected_with_required_block_chain = true; } @@ -834,7 +851,7 @@ { break current_header; } - current_header = match self.find_selected_parent_header_at_level(&current_header, level) { + current_header = match 
self.find_selected_parent_header_at_level(&current_header, level, relations_service.clone()) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { @@ -847,15 +864,18 @@ }; if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { - current_required_chain_block = - match self.find_selected_parent_header_at_level(&current_required_chain_block, level) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - finished_headers_for_required_block_chain = true; - current_required_chain_block - } - Err(e) => return Err(e), - }; + current_required_chain_block = match self.find_selected_parent_header_at_level( + &current_required_chain_block, + level, + relations_service.clone(), + ) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + finished_headers_for_required_block_chain = true; + current_required_chain_block + } + Err(e) => return Err(e), + }; } }; let root = root_header.hash; @@ -1038,7 +1058,7 @@ // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash)); let chain_2m = self - .chain_up_to_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| { dbg!(level, selected_tip, block_at_depth_2m, root); format!("Assert 2M chain -- level: {}, err: {}", level, err) @@ -1049,13 +1069,13 @@ if !set.contains(&chain_hash) { let next_level_tip = selected_tip_by_level[level + 1]; let next_level_chain_m = 
self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); let next_level_block_m = next_level_chain_m.last().copied().unwrap(); dbg!(next_level_chain_m.len()); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); dbg!(level, selected_tip, block_at_depth_2m, root); panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); } From f49478af23674a30fe8b53f4a3332942e1a17603 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:18:24 -0600 Subject: [PATCH 09/58] Match depth check to block_at_depth logic --- consensus/src/processes/pruning_proof/mod.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index c2aca9f49..c03c29449 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -721,8 +721,12 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } - /// Looks for the first level whose parents are different from the direct parents of the pp_header - /// The current DAG level is the one right below that. 
+ // The "current dag level" is the level right before the level whose parents are + // not the same as our header's direct parents + // + // Find the current DAG level by going through all the parents at each level, + // starting from the bottom level and see which is the first level that has + // parents that are NOT our current pp_header's direct parents. fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); pp_header @@ -846,7 +850,8 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + // Need to ensure this does the same 2M+1 depth that block_at_depth does + || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -942,8 +947,9 @@ impl PruningProofManager { } } + // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } From 879c135bd09edf7f9bd5feadbd75c23f8f603519 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:21:52 -0600 Subject: [PATCH 10/58] Use singular GD store for header processing --- .../pipeline/header_processor/processor.rs | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index a90e67c50..141c15418 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ 
b/consensus/src/pipeline/header_processor/processor.rs @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) reachability_relations_store: Arc>, - pub(super) ghostdag_stores: Arc>>, + pub(super) ghostdag_primary_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_managers: Arc>, + pub(super) ghostdag_primary_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_primary_store: storage.ghostdag_primary_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), + ghostdag_primary_manager: services.ghostdag_primary_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -348,14 +348,11 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut 
HeaderProcessingContext) { - let ghostdag_data = (0..=ctx.block_level as usize) - .map(|level| { - self.ghostdag_stores[level] - .get_data(ctx.hash) - .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) - }) - .collect_vec(); + let ghostdag_data = vec![self + .ghostdag_primary_store + .get_data(ctx.hash) + .unwrap_option() + .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0])))]; self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -373,7 +370,7 @@ impl HeaderProcessor { // for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap(); } if let Some(window) = ctx.block_window_for_difficulty { @@ -454,7 +451,7 @@ impl HeaderProcessor { for (level, datum) in ghostdag_data.iter().enumerate() { // This data might have been already written when applying the pruning proof. 
- self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); } let mut relations_write = self.relations_stores.write(); @@ -495,8 +492,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = - Some(self.ghostdag_managers.iter().map(|manager_by_level| Arc::new(manager_by_level.genesis_ghostdag_data())).collect()); + ctx.ghostdag_data = Some(vec![Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())]); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); From 56c0b40505b70c3cb2a98a50f8628d5b3888e5e4 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 11 Jun 2024 21:52:11 -0600 Subject: [PATCH 11/58] Relax the panic to warn when finished_headers and couldn't find sufficient root This happens when there's not enough headers in the pruning proof but it satisfies validation --- consensus/src/processes/pruning_proof/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index c03c29449..35b502e33 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -956,7 +956,8 @@ impl PruningProofManager { tries += 1; if finished_headers { - panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned") + warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned. 
Trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); From 43e9f9e82b85c352ce01d5173c38a0dd3bd8233d Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:26:17 -0600 Subject: [PATCH 12/58] Error handling for gd on higher levels relations.get_parents on GD gets extra parents that aren't in the current GD store. so get_blue_work throws an error next, ORIGIN was mising from the GD so add that --- consensus/src/processes/ghostdag/ordering.rs | 12 ++++++++++-- consensus/src/processes/pruning_proof/mod.rs | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index 88b648b8c..21306e5b8 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -44,8 +44,16 @@ impl Ord for SortableBlock { impl GhostdagManager { pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks - .sort_by_cached_key(|block| SortableBlock { hash: *block, blue_work: self.ghostdag_store.get_blue_work(*block).unwrap() }); + sorted_blocks.sort_by_cached_key(|block| SortableBlock { + hash: *block, + // Since we're only calculating GD at all levels on-demand, we may get blocks from the relations + // store in the mergeset that are not on our level + // Options for fixes: + // - do this + // - guarantee that we're only getting parents that are in this store + // - make relations store only return parents at the same or higher level + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_default(), + }); sorted_blocks } } diff --git a/consensus/src/processes/pruning_proof/mod.rs 
b/consensus/src/processes/pruning_proof/mod.rs index 35b502e33..26b011134 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -900,6 +900,7 @@ impl PruningProofManager { true, ); ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); let mut topological_heap: BinaryHeap<_> = Default::default(); let mut visited = BlockHashSet::new(); for child in relations_service.get_children(root).unwrap().read().iter().copied() { From 34f20abd64c913ebe66d007eefdc2ddbc603ad3c Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sat, 15 Jun 2024 09:57:43 -0600 Subject: [PATCH 13/58] remove using deeper requirements in lower levels --- consensus/src/processes/pruning_proof/mod.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 26b011134..313063172 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -812,15 +812,16 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; - let mut required_level_0_depth = if level == 0 { - required_level_depth - } else { - self.estimated_blue_depth_at_level_0( - level, - required_level_depth * 5 / 4, // We take a safety margin - current_dag_level, - ) - }; + let mut required_level_0_depth = required_level_depth; + // let mut required_level_0_depth = if level == 0 { + // required_level_depth + // } else { + // self.estimated_blue_depth_at_level_0( + // level, + // required_level_depth * 5 / 4, // We take a safety margin + // current_dag_level, + // ) + // }; let mut tries = 0; loop { From 
2654b254b97144cdf758e121f66b261987fd7b19 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 00:04:44 -0600 Subject: [PATCH 14/58] Fix missed references to self.ghostdag_stores in validate_pruning_point_proof --- consensus/src/processes/pruning_proof/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 313063172..6f7840ea6 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -666,7 +666,7 @@ impl PruningProofManager { let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(&current_pp_header, level).iter().copied() { - let parent_blue_work = self.ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { @@ -702,7 +702,7 @@ impl PruningProofManager { if parents .iter() .copied() - .any(|parent| self.ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) + .any(|parent| ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) { return Ok(()); } From ba049296b978577b17868fa854c3ab9b5ece362a Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:03:17 -0600 Subject: [PATCH 15/58] Refactoring for single GD header processing --- .../pipeline/header_processor/processor.rs | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs 
b/consensus/src/pipeline/header_processor/processor.rs index 141c15418..c4ccc8bae 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -55,7 +55,7 @@ pub struct HeaderProcessingContext { pub known_parents: Vec, // Staging data - pub ghostdag_data: Option>>, + pub ghostdag_data: Option>, pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this header. /// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap()[0] + &self.ghostdag_data.as_ref().unwrap() } } @@ -348,18 +348,17 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { - let ghostdag_data = vec![self + let ghostdag_data = self .ghostdag_primary_store .get_data(ctx.hash) .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0])))]; - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); + .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0]))); + self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { - let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); - let ghostdag_primary_data = &ghostdag_data[0]; + let ghostdag_primary_data = ctx.ghostdag_data.as_ref().unwrap(); let pp = ctx.pruning_point(); // Create a DB batch writer @@ -369,9 +368,7 @@ impl HeaderProcessor { // Append-only stores: these require no lock and hence done 
first in order to reduce locking time // - for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap(); - } + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); @@ -449,10 +446,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - for (level, datum) in ghostdag_data.iter().enumerate() { - // This data might have been already written when applying the pruning proof. - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); - } + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -492,7 +486,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = Some(vec![Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())]); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); From a45b57122be5f9e8bec551d894f859dcae16d303 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:03:40 -0600 Subject: [PATCH 16/58] Add assertion to check root vs old_root --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6f7840ea6..8fbcb8b3c 100644 --- 
a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1042,7 +1042,7 @@ impl PruningProofManager { } else { block_at_depth_2m }; - // assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); From edb5cd3d9300f93f6b8e3f3abf9e11ff8bc627c7 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:04:57 -0600 Subject: [PATCH 17/58] Lint fix current_dag_level --- consensus/src/processes/pruning_proof/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8fbcb8b3c..e92cc6772 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -796,7 +796,6 @@ impl PruningProofManager { &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, - current_dag_level: BlockLevel, required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { @@ -971,7 +970,6 @@ impl PruningProofManager { pp_header: &HeaderWithBlockLevel, temp_db: Arc, ) -> (Vec>, Vec, Vec) { - let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; @@ -988,7 +986,7 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(&pp_header, level, current_dag_level, required_block, temp_db.clone()) + .find_sufficient_root(&pp_header, level, required_block, temp_db.clone()) .expect(&format!("find_sufficient_root failed for level 
{level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); From e81394fe48bf797066407f53b220f02293472e83 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:19:46 -0600 Subject: [PATCH 18/58] Keep DB Version at 3 The new prefixes added are compatible with the old version. We don't want to trigger a db delete with this change --- consensus/src/consensus/factory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f34aa54f9..f3ee51d9c 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 4; +const LATEST_DB_VERSION: u32 = 3; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { From 0e8c788c8e9b936df6327f4888ed3fb12400a008 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:38:47 -0600 Subject: [PATCH 19/58] Cleanup apply_proof logic and handle more ghostdag_stores logic --- .../pipeline/pruning_processor/processor.rs | 4 ++- consensus/src/processes/pruning_proof/mod.rs | 27 ++++++++++--------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index cd9026565..a6f3edf65 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -411,7 +411,9 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - 
self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option(); + if level == 0 { + self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + } }); // Remove additional header related data diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e92cc6772..5db4708be 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1,7 +1,9 @@ use std::{ cmp::{max, Reverse}, - collections::{hash_map::Entry, BinaryHeap}, - collections::{hash_map::Entry::Vacant, VecDeque}, + collections::{ + hash_map::Entry::{self, Vacant}, + BinaryHeap, HashSet, VecDeque, + }, ops::{Deref, DerefMut}, sync::{ atomic::{AtomicBool, Ordering}, @@ -254,30 +256,29 @@ impl PruningProofManager { for (level, headers) in proof.iter().enumerate() { trace!("Applying level {} from the pruning point proof", level); - self.ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + let mut level_ancestors: HashSet = HashSet::new(); + level_ancestors.insert(ORIGIN); + for header in headers.iter() { let parents = Arc::new( self.parents_manager .parents_at_level(header, level as BlockLevel) .iter() .copied() - .filter(|parent| self.ghostdag_stores[level].has(*parent).unwrap()) + .filter(|parent| level_ancestors.contains(parent)) .collect_vec() .push_if_empty(ORIGIN), ); self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - let gd = if header.hash == self.genesis_hash { - self.ghostdag_managers[level].genesis_ghostdag_data() - } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; if level == 0 { + self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); + let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); + let calculated_gd = 
self.ghostdag_primary_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, @@ -289,9 +290,9 @@ impl PruningProofManager { } }; self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); - } else { - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); } + + level_ancestors.insert(header.hash); } } @@ -616,7 +617,7 @@ impl PruningProofManager { let mut proof_current = proof_selected_tip; let mut proof_current_gd = proof_selected_tip_gd; loop { - match self.ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + match ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { Some(current_gd) => { break Some((proof_current_gd, current_gd)); } From 56f9dab2059d59541bb5ebdb8df69f0814c2a2a5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:40:27 -0600 Subject: [PATCH 20/58] remove simpa changes --- simpa/src/main.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 8975e974a..1baecc3e7 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -222,11 +222,6 @@ fn main_impl(mut args: Args) { Default::default(), unix_now(), )); - - // TODO: Remove the call to get_pruning_point_proof - // let the_hash = Hash::from_str("45d0bb998ab8c3513d18fef3f70d9c686539da7cbe4fab8021e55be1b3a0f8df").unwrap(); - // assert!(topologically_ordered_hashes(&consensus, config.params.genesis.hash).into_iter().contains(&the_hash)); - let _ = consensus.get_pruning_point_proof(); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -446,8 +441,6 @@ mod tests { args.target_blocks = Some(5000); args.tpb = 1; args.test_pruning = true; - // args.output_dir = Some("/tmp/simpa".into()); - // args.input_dir 
= Some("/tmp/simpa".into()); kaspa_core::log::try_init_logger(&args.log_level); // As we log the panic, we want to set it up after the logger From c5be8ad40aaf0db3c75042bcdb8043aaf1c306d1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 19 Jun 2024 00:45:09 -0600 Subject: [PATCH 21/58] Remove rewriting origin to primary GD It's already on there --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 5db4708be..472e5f130 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -273,7 +273,7 @@ impl PruningProofManager { self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); if level == 0 { - self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); + // self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() From 8d15e27a39baf53c4b6e6e529ff8efd577fbe4fb Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 19 Jun 2024 17:04:57 -0600 Subject: [PATCH 22/58] More refactoring to use single GD store/manager --- consensus/src/consensus/services.rs | 30 ++++++-------------- consensus/src/consensus/storage.rs | 21 ++++---------- consensus/src/processes/pruning_proof/mod.rs | 7 +---- 3 files changed, 15 insertions(+), 43 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index b5617ea76..41478580c 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,7 +53,6 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: 
DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_managers: Arc>, pub ghostdag_primary_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, @@ -112,26 +111,15 @@ impl ConsensusServices { reachability_service.clone(), storage.ghostdag_primary_store.clone(), ); - let ghostdag_managers = Arc::new( - storage - .ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - params.genesis.hash, - params.ghostdag_k, - ghostdag_store, - relations_services[level].clone(), - storage.headers_store.clone(), - reachability_service.clone(), - level != 0, - ) - }) - .collect_vec(), + let ghostdag_primary_manager = GhostdagManager::new( + params.genesis.hash, + params.ghostdag_k, + storage.ghostdag_primary_store.clone(), + relations_services[0].clone(), + storage.headers_store.clone(), + reachability_service.clone(), + false, ); - let ghostdag_primary_manager = ghostdag_managers[0].clone(); let coinbase_manager = CoinbaseManager::new( params.coinbase_payload_script_public_key_max_len, @@ -185,7 +173,6 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_managers.clone(), ghostdag_primary_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), @@ -215,7 +202,6 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_managers, ghostdag_primary_manager, coinbase_manager, pruning_point_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index d53324fc6..4b9646ec2 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,7 +50,6 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub ghostdag_stores: Arc>>, pub ghostdag_primary_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, @@ -193,19 +192,12 
@@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_stores = Arc::new( - (0..=params.max_block_level) - .map(|level| { - Arc::new(DbGhostdagStore::new( - db.clone(), - level, - ghostdag_builder.downscale(level).build(), - ghostdag_compact_builder.downscale(level).build(), - )) - }) - .collect_vec(), - ); - let ghostdag_primary_store = ghostdag_stores[0].clone(); + let ghostdag_primary_store = Arc::new(DbGhostdagStore::new( + db.clone(), + 0, + ghostdag_builder.downscale(0).build(), + ghostdag_compact_builder.downscale(0).build(), + )); let daa_excluded_store = Arc::new(DbDaaStore::new(db.clone(), daa_excluded_builder.build())); let headers_store = Arc::new(DbHeadersStore::new(db.clone(), headers_builder.build(), headers_compact_builder.build())); let depth_store = Arc::new(DbDepthStore::new(db.clone(), header_data_builder.build())); @@ -245,7 +237,6 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_stores, ghostdag_primary_store, pruning_point_store, headers_selected_tip_store, diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 472e5f130..fb0eceb77 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -117,7 +117,6 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_stores: Arc>>, ghostdag_primary_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, @@ -128,7 +127,6 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_managers: Arc>, ghostdag_primary_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, @@ -153,7 +151,6 @@ impl PruningProofManager { storage: &Arc, parents_manager: DbParentsManager, reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, 
ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, @@ -170,7 +167,6 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_stores: storage.ghostdag_stores.clone(), ghostdag_primary_store: storage.ghostdag_primary_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), @@ -181,7 +177,6 @@ impl PruningProofManager { selected_chain_store: storage.selected_chain_store.clone(), depth_store: storage.depth_store.clone(), - ghostdag_managers, traversal_manager, window_manager, parents_manager, @@ -467,7 +462,7 @@ impl PruningProofManager { let level = level as usize; reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); - ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); } db.write(batch).unwrap(); From 1c6b585d69d5cf85759d4ac65ab1c7fc0644a3f3 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 21 Jun 2024 17:01:26 -0600 Subject: [PATCH 23/58] Lint fixes --- consensus/src/pipeline/header_processor/processor.rs | 2 +- consensus/src/processes/pruning_proof/mod.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index c4ccc8bae..a04af90e6 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this 
header. /// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap() + self.ghostdag_data.as_ref().unwrap() } } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index fb0eceb77..0058408fa 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -635,7 +635,7 @@ impl PruningProofManager { let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(&proof)?; + let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; let selected_tip_by_level = self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; let ghostdag_stores = stores_and_processes.ghostdag_stores; @@ -982,8 +982,8 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(&pp_header, level, required_block, temp_db.clone()) - .expect(&format!("find_sufficient_root failed for level {level}")); + .find_sufficient_root(pp_header, level, required_block, temp_db.clone()) + .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); root_by_level[level_usize] = Some(root); From 273aa81fdd9c9a13edf2941bc8bfec0650486f30 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 21 Jun 2024 17:04:39 -0600 Subject: [PATCH 24/58] warn to trace for common retry --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 0058408fa..39bf8d756 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -957,7 +957,7 @@ impl PruningProofManager { break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; - warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); + trace!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); } } From f9b3fda63bab6d8b7e8c7bf62e72fda5f544041f Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 25 Jun 2024 23:11:06 -0600 Subject: [PATCH 25/58] Address initial comments --- .../pipeline/header_processor/processor.rs | 1 + .../pipeline/pruning_processor/processor.rs | 7 ++-- consensus/src/processes/pruning_proof/mod.rs | 41 +++++++++---------- 3 files changed, 24 insertions(+), 25 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index a04af90e6..2214d0881 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -368,6 +368,7 @@ impl HeaderProcessor { // Append-only stores: these require no lock and hence done first in order to reduce locking time // + // This data might have been already written when applying the pruning proof. 
self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index a6f3edf65..b7f46f3b0 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -411,11 +411,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - if level == 0 { - self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); - } }); + self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); self.depth_store.delete_batch(&mut batch, current).unwrap(); @@ -457,7 +456,7 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - // self.assert_proof_rebuilding(proof, new_pruning_point); + self.assert_proof_rebuilding(proof.clone(), new_pruning_point); self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); self.assert_data_rebuilding(data, new_pruning_point); } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 39bf8d756..ccc8f81ff 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -268,8 +268,6 @@ impl PruningProofManager { self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); if level == 0 { - // self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); - let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { 
gd.clone() } else { @@ -474,13 +472,13 @@ impl PruningProofManager { fn populate_stores_for_validate_pruning_point_proof( &self, proof: &PruningPointProof, - stores_and_processes: &mut TempProofContext, + ctx: &mut TempProofContext, ) -> PruningImportResult> { - let headers_store = &stores_and_processes.headers_store; - let ghostdag_stores = &stores_and_processes.ghostdag_stores; - let mut relations_stores = stores_and_processes.relations_stores.clone(); - let reachability_stores = &stores_and_processes.reachability_stores; - let ghostdag_managers = &stores_and_processes.ghostdag_managers; + let headers_store = &ctx.headers_store; + let ghostdag_stores = &ctx.ghostdag_stores; + let mut relations_stores = ctx.relations_stores.clone(); + let reachability_stores = &ctx.reachability_stores; + let ghostdag_managers = &ctx.ghostdag_managers; let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; @@ -741,7 +739,7 @@ impl PruningProofManager { } fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { - level_depth << current_dag_level.saturating_sub(level) + level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth) } /// selected parent at level = the parent of the header at the level @@ -792,6 +790,7 @@ impl PruningProofManager { &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, + current_dag_level: BlockLevel, required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { @@ -807,16 +806,15 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; - let mut required_level_0_depth = required_level_depth; - // let mut required_level_0_depth = if level == 0 { - // required_level_depth - // } else { - // self.estimated_blue_depth_at_level_0( - // 
level, - // required_level_depth * 5 / 4, // We take a safety margin - // current_dag_level, - // ) - // }; + let mut required_level_0_depth = if level == 0 { + required_level_depth + } else { + self.estimated_blue_depth_at_level_0( + level, + required_level_depth * 5 / 4, // We take a safety margin + current_dag_level, + ) + }; let mut tries = 0; loop { @@ -957,7 +955,7 @@ impl PruningProofManager { break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; - trace!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); + warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); } } @@ -966,6 +964,7 @@ impl PruningProofManager { pp_header: &HeaderWithBlockLevel, temp_db: Arc, ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; @@ -982,7 +981,7 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(pp_header, level, required_block, temp_db.clone()) + .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); From ca8bb429691a3575e3b59379de1c086391879554 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 25 Jun 2024 23:27:22 -0600 Subject: [PATCH 26/58] Remove "primary" in ghostdag store/manager references --- consensus/src/consensus/mod.rs | 10 +++---- consensus/src/consensus/services.rs | 20 ++++++------- 
consensus/src/consensus/storage.rs | 6 ++-- consensus/src/consensus/test_consensus.rs | 6 ++-- .../body_validation_in_context.rs | 2 +- .../src/pipeline/body_processor/processor.rs | 6 ++-- .../pipeline/header_processor/processor.rs | 18 ++++++------ .../pipeline/pruning_processor/processor.rs | 6 ++-- .../pipeline/virtual_processor/processor.rs | 18 ++++++------ .../virtual_processor/utxo_validation.rs | 2 +- consensus/src/processes/pruning_proof/mod.rs | 28 +++++++++---------- simpa/src/main.rs | 12 ++++---- 12 files changed, 67 insertions(+), 67 deletions(-) diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 80babbef0..7e1690b2a 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -231,7 +231,7 @@ impl Consensus { block_processors_pool, db.clone(), storage.statuses_store.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.block_transactions_store.clone(), storage.body_tips_store.clone(), @@ -484,7 +484,7 @@ impl ConsensusApi for Consensus { fn get_virtual_merge_depth_blue_work_threshold(&self) -> BlueWorkType { // PRUNE SAFETY: merge depth root is never close to being pruned (in terms of block depth) - self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_primary_store.get_blue_work(root).unwrap()) + self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_store.get_blue_work(root).unwrap()) } fn get_sink(&self) -> Hash { @@ -812,7 +812,7 @@ impl ConsensusApi for Consensus { Some(BlockStatus::StatusInvalid) => return Err(ConsensusError::InvalidBlock(hash)), _ => {} }; - let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; + let ghostdag = self.ghostdag_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; Ok((&*ghostdag).into()) } @@ -864,7 +864,7 @@ impl ConsensusApi for Consensus { 
Ok(self .services .window_manager - .block_window(&self.ghostdag_primary_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) .unwrap() .deref() .iter() @@ -903,7 +903,7 @@ impl ConsensusApi for Consensus { match start_hash { Some(hash) => { self.validate_block_exists(hash)?; - let ghostdag_data = self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); // The selected parent header is used within to check for sampling activation, so we verify its existence first if !self.headers_store.has(ghostdag_data.selected_parent).unwrap() { return Err(ConsensusError::DifficultyError(DifficultyError::InsufficientWindowData(0))); diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 41478580c..74544c11b 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,7 +53,7 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_primary_manager: DbGhostdagManager, + pub ghostdag_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, pub pruning_proof_manager: Arc, @@ -81,13 +81,13 @@ impl ConsensusServices { let reachability_service = MTReachabilityService::new(storage.reachability_store.clone()); let dag_traversal_manager = DagTraversalManager::new( params.genesis.hash, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_service.clone(), reachability_service.clone(), ); let window_manager = DualWindowManager::new( ¶ms.genesis, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.daa_excluded_store.clone(), 
storage.block_window_cache_for_difficulty.clone(), @@ -109,12 +109,12 @@ impl ConsensusServices { params.genesis.hash, storage.depth_store.clone(), reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), ); - let ghostdag_primary_manager = GhostdagManager::new( + let ghostdag_manager = GhostdagManager::new( params.genesis.hash, params.ghostdag_k, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_services[0].clone(), storage.headers_store.clone(), reachability_service.clone(), @@ -154,7 +154,7 @@ impl ConsensusServices { params.finality_depth, params.genesis.hash, reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.past_pruning_points_store.clone(), storage.headers_selected_tip_store.clone(), @@ -173,7 +173,7 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_primary_manager.clone(), + ghostdag_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, @@ -188,7 +188,7 @@ impl ConsensusServices { params.mergeset_size_limit as usize, reachability_service.clone(), dag_traversal_manager.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.selected_chain_store.clone(), storage.headers_selected_tip_store.clone(), storage.pruning_point_store.clone(), @@ -202,7 +202,7 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_primary_manager, + ghostdag_manager, coinbase_manager, pruning_point_manager, pruning_proof_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 4b9646ec2..e170ace04 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,7 +50,7 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub 
ghostdag_primary_store: Arc, + pub ghostdag_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, pub past_pruning_points_store: Arc, @@ -192,7 +192,7 @@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_primary_store = Arc::new(DbGhostdagStore::new( + let ghostdag_store = Arc::new(DbGhostdagStore::new( db.clone(), 0, ghostdag_builder.downscale(0).build(), @@ -237,7 +237,7 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_primary_store, + ghostdag_store, pruning_point_store, headers_selected_tip_store, body_tips_store, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index c626e00ff..a937388ba 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -118,7 +118,7 @@ impl TestConsensus { pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { let mut header = header_from_precomputed_hash(hash, parents); - let ghostdag_data = self.consensus.services.ghostdag_primary_manager.ghostdag(header.direct_parents()); + let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents()); header.pruning_point = self .consensus .services @@ -201,7 +201,7 @@ impl TestConsensus { } pub fn ghostdag_store(&self) -> &Arc { - &self.consensus.ghostdag_primary_store + &self.consensus.ghostdag_store } pub fn reachability_store(&self) -> &Arc> { @@ -233,7 +233,7 @@ impl TestConsensus { } pub fn ghostdag_manager(&self) -> &DbGhostdagManager { - &self.consensus.services.ghostdag_primary_manager + &self.consensus.services.ghostdag_manager } } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index b437f1f13..2425556d0 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ 
b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -25,7 +25,7 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_primary_store.get_data(block.hash()).unwrap())?; + let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; for tx in block.transactions.iter() { if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { return Err(RuleError::TxInContextFailed(tx.id(), e)); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 8b6d35e19..1ea674263 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -55,7 +55,7 @@ pub struct BlockBodyProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) block_transactions_store: Arc, pub(super) body_tips_store: Arc>, @@ -92,7 +92,7 @@ impl BlockBodyProcessor { db: Arc, statuses_store: Arc>, - ghostdag_primary_store: Arc, + ghostdag_store: Arc, headers_store: Arc, block_transactions_store: Arc, body_tips_store: Arc>, @@ -116,7 +116,7 @@ impl BlockBodyProcessor { db, statuses_store, reachability_service, - ghostdag_primary_store, + ghostdag_store, headers_store, block_transactions_store, body_tips_store, diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 2214d0881..22a5c566c 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) 
reachability_relations_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_primary_manager: DbGhostdagManager, + pub(super) ghostdag_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_primary_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -349,10 +349,10 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { let ghostdag_data = self - .ghostdag_primary_store + .ghostdag_store .get_data(ctx.hash) .unwrap_option() - .unwrap_or_else(|| 
Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0]))); + .unwrap_or_else(|| Arc::new(self.ghostdag_manager.ghostdag(&ctx.known_parents[0]))); self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -369,7 +369,7 @@ impl HeaderProcessor { // // This data might have been already written when applying the pruning proof. - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); @@ -447,7 +447,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -487,7 +487,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = Some(Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index b7f46f3b0..f73f8c12e 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -280,7 +280,7 @@ impl PruningProcessor { let mut counter = 0; let mut 
batch = WriteBatch::default(); for kept in keep_relations.iter().copied() { - let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { + let Some(ghostdag) = self.ghostdag_store.get_data(kept).unwrap_option() else { continue; }; if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains(&h)) { @@ -292,7 +292,7 @@ impl PruningProcessor { mutable_ghostdag.selected_parent = ORIGIN; } counter += 1; - self.ghostdag_primary_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); + self.ghostdag_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); } } self.db.write(batch).unwrap(); @@ -413,7 +413,7 @@ impl PruningProcessor { staging_level_relations.commit(&mut batch).unwrap(); }); - self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index ded062251..db8efed3a 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -115,7 +115,7 @@ pub struct VirtualStateProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) daa_excluded_store: Arc, pub(super) block_transactions_store: Arc, @@ -190,7 +190,7 @@ impl VirtualStateProcessor { db, statuses_store: storage.statuses_store.clone(), headers_store: storage.headers_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), block_transactions_store: storage.block_transactions_store.clone(), 
pruning_point_store: storage.pruning_point_store.clone(), @@ -205,7 +205,7 @@ impl VirtualStateProcessor { pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), - ghostdag_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), reachability_service: services.reachability_service.clone(), relations_service: services.relations_service.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), @@ -302,7 +302,7 @@ impl VirtualStateProcessor { .expect("all possible rule errors are unexpected here"); // Update the pruning processor about the virtual state change - let sink_ghostdag_data = self.ghostdag_primary_store.get_compact_data(new_sink).unwrap(); + let sink_ghostdag_data = self.ghostdag_store.get_compact_data(new_sink).unwrap(); // Empty the channel before sending the new message. If pruning processor is busy, this step makes sure // the internal channel does not grow with no need (since we only care about the most recent message) let _consume = self.pruning_receiver.try_iter().count(); @@ -401,7 +401,7 @@ impl VirtualStateProcessor { } let header = self.headers_store.get_header(current).unwrap(); - let mergeset_data = self.ghostdag_primary_store.get_data(current).unwrap(); + let mergeset_data = self.ghostdag_store.get_data(current).unwrap(); let pov_daa_score = header.daa_score; let selected_parent_multiset_hash = self.utxo_multisets_store.get(selected_parent).unwrap(); @@ -562,7 +562,7 @@ impl VirtualStateProcessor { let mut heap = tips .into_iter() - .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_primary_store.get_blue_work(block).unwrap() }) + .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_store.get_blue_work(block).unwrap() }) .collect::>(); // The initial diff point is the previous sink @@ -584,7 +584,7 @@ impl VirtualStateProcessor { // 2. will be removed eventually by the bounded merge check. 
// Hence as an optimization we prefer removing such blocks in advance to allow valid tips to be considered. let filtering_root = self.depth_store.merge_depth_root(candidate).unwrap(); - let filtering_blue_work = self.ghostdag_primary_store.get_blue_work(filtering_root).unwrap_or_default(); + let filtering_blue_work = self.ghostdag_store.get_blue_work(filtering_root).unwrap_or_default(); return ( candidate, heap.into_sorted_iter().take_while(|s| s.blue_work >= filtering_blue_work).map(|s| s.hash).collect(), @@ -602,7 +602,7 @@ impl VirtualStateProcessor { if self.reachability_service.is_dag_ancestor_of(finality_point, parent) && !self.reachability_service.is_dag_ancestor_of_any(parent, &mut heap.iter().map(|sb| sb.hash)) { - heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_primary_store.get_blue_work(parent).unwrap() }); + heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_store.get_blue_work(parent).unwrap() }); } } drop(prune_guard); @@ -1117,7 +1117,7 @@ impl VirtualStateProcessor { // in depth of 2*finality_depth, and can give false negatives for smaller finality violations. 
let current_pp = self.pruning_point_store.read().pruning_point().unwrap(); let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp); - let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp); + let vff = self.depth_manager.calc_finality_point(&self.ghostdag_store.get_data(vf).unwrap(), current_pp); let last_known_pp = pp_list.iter().rev().find(|pp| match self.statuses_store.read().get(pp.hash).unwrap_option() { Some(status) => status.is_valid(), diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 112976294..0e3ca7533 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -82,7 +82,7 @@ impl VirtualStateProcessor { for (i, (merged_block, txs)) in once((ctx.selected_parent(), selected_parent_transactions)) .chain( ctx.ghostdag_data - .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_primary_store.deref()) + .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_store.deref()) .map(|b| (b, self.block_transactions_store.get(b).unwrap())), ) .enumerate() diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index ccc8f81ff..82ebc7433 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -117,7 +117,7 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_primary_store: Arc, + ghostdag_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, past_pruning_points_store: Arc, @@ -127,7 +127,7 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_primary_manager: DbGhostdagManager, + ghostdag_manager: DbGhostdagManager, 
traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -167,7 +167,7 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -189,7 +189,7 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, - ghostdag_primary_manager: ghostdag_manager, + ghostdag_manager, is_consensus_exiting, } @@ -271,7 +271,7 @@ impl PruningProofManager { let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_primary_manager.ghostdag(&parents); + let calculated_gd = self.ghostdag_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, @@ -282,7 +282,7 @@ impl PruningProofManager { blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), } }; - self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); + self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap(); } level_ancestors.insert(header.hash); @@ -292,7 +292,7 @@ impl PruningProofManager { let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_primary_manager.ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -880,7 +880,7 @@ impl PruningProofManager { let root = root_header.hash; if level == 0 { - return 
Ok((self.ghostdag_primary_store.clone(), selected_tip, root)); + return Ok((self.ghostdag_store.clone(), selected_tip, root)); } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); @@ -1181,7 +1181,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -1201,7 +1201,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_primary_manager.sort_blocks(anticone); + let mut anticone = self.ghostdag_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -1212,14 +1212,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_primary_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_primary_store.get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_store.get_data(hash).unwrap()).into(), }); } } @@ -1227,7 +1227,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = 
self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -1259,7 +1259,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_primary_store.get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 1baecc3e7..368b52344 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -414,12 +414,12 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) - } fn print_stats(src_consensus: &Consensus, hashes: &[Hash], delay: f64, bps: f64, k: KType) -> usize { - let blues_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_blues.len()).sum::() as f64 - / hashes.len() as f64; - let reds_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_reds.len()).sum::() as f64 - / hashes.len() as f64; + let blues_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_blues.len()).sum::() + as f64 + / hashes.len() as f64; + let reds_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_reds.len()).sum::() + as f64 + / hashes.len() as f64; let parents_mean = hashes.iter().map(|&h| src_consensus.headers_store.get_header(h).unwrap().direct_parents().len()).sum::() as f64 / hashes.len() as f64; From 61183faba05ff020bcad48951bfc484dac93e1c1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 26 Jun 2024 22:55:59 -0600 Subject: [PATCH 27/58] Add small safety margin to proof at level 0 This 
prevents the case where the new root is in the anticone of the old root --- consensus/src/processes/pruning_proof/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 82ebc7433..d6c109b54 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -807,7 +807,7 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; let mut required_level_0_depth = if level == 0 { - required_level_depth + required_level_depth + 100 // smaller safety margin } else { self.estimated_blue_depth_at_level_0( level, @@ -1035,6 +1035,8 @@ impl PruningProofManager { } else { block_at_depth_2m }; + + // the new root is expected to always be an ancestor of the old root because the new root takes a safety margin assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); From 34bc88f399f6fae547ca79500b657c18a18e3325 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 28 Jun 2024 23:23:40 -0600 Subject: [PATCH 28/58] Revert to only do proof rebuilding on sanity check --- consensus/src/pipeline/pruning_processor/processor.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index f73f8c12e..bbc1ea9a9 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -456,8 +456,7 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - self.assert_proof_rebuilding(proof.clone(), new_pruning_point); - self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); 
+ self.assert_proof_rebuilding(proof, new_pruning_point); self.assert_data_rebuilding(data, new_pruning_point); } From da1cfe34b608d32b1663d865e1c81331afc1bc65 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 28 Jun 2024 23:24:38 -0600 Subject: [PATCH 29/58] Proper "better" proof check --- consensus/src/processes/pruning_proof/mod.rs | 260 +++++++++++-------- 1 file changed, 158 insertions(+), 102 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index d6c109b54..6dac50563 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -49,7 +49,7 @@ use crate::{ }, stores::{ depth::DbDepthStore, - ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -597,32 +597,32 @@ impl PruningProofManager { Ok(()) } - // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple - // that contains the ghostdag data of the proof and current consensus common ancestor. If no - // such ancestor exists, it returns None. - fn find_proof_and_consensus_common_ancestor_ghostdag_data( + /// Returns the common ancestor of the proof and the current consensus if there is one. + /// + /// ghostdag_stores currently contain only entries for blocks in the proof. 
+ /// While iterating through the selected parent chain of the current consensus, if we find any + /// block that is already in ghostdag_stores, that must mean it's a common ancestor of the proof + /// and the current consensus fn find_proof_and_consensus_common_ancestor( &self, - ghostdag_stores: &[Arc], - proof_selected_tip: Hash, + ghostdag_store: &Arc, + current_consensus_selected_tip_header: Arc
, level: BlockLevel, - proof_selected_tip_gd: CompactGhostdagData, - ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { - let mut proof_current = proof_selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - loop { - match ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); - } - }; + relations_service: &MTRelationsService, + ) -> Option { + let mut chain_block = current_consensus_selected_tip_header.clone(); + + for _ in 0..(2 * self.pruning_proof_m as usize) { + if chain_block.direct_parents().is_empty() || chain_block.hash.is_origin() { + break; + } + if ghostdag_store.has(chain_block.hash).unwrap() { + return Some(chain_block.hash); + } + chain_block = self.find_selected_parent_header_at_level(&chain_block, level, relations_service).unwrap(); } + + None } pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { @@ -640,29 +640,54 @@ impl PruningProofManager { let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; - let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); + let current_pp_header = self.headers_store.get_header_with_block_level(current_pp).unwrap(); for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + + // Next check is to see if this proof is "better" than what's in the current consensus + // 
Step 1 - look at only levels that have a full proof (least 2m blocks in the proof) if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( - &ghostdag_stores, - selected_tip, + // Step 2 - if we can find a common ancestor between the proof and current consensus + // we can determine if the proof is better. The proof is better if the score difference between the + // old current consensus's tips and the common ancestor is less than the score difference between the + // proof's tip and the common ancestor + let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); + let current_consensus_selected_tip_header = if current_pp_header.block_level >= level { + current_pp_header.header.clone() + } else { + self.find_selected_parent_header_at_level(¤t_pp_header.header, level, &relations_service).unwrap() + }; + if let Some(common_ancestor) = self.find_proof_and_consensus_common_ancestor( + &ghostdag_stores[level_idx], + current_consensus_selected_tip_header.clone(), level, - proof_selected_tip_gd, + &relations_service, ) { + // Fill the GD store with data from current consensus, + // starting from the common ancestor until the current level selected tip + let _ = self.fill_proof_ghostdag_data( + proof[level_idx].first().unwrap().hash, + common_ancestor, + current_consensus_selected_tip_header.hash, + &ghostdag_stores[level_idx], + &relations_service, + level != 0, + None, + false, + ); + let common_ancestor_blue_work = ghostdag_stores[level_idx].get_blue_work(common_ancestor).unwrap(); let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { + SignedInteger::from(proof_selected_tip_gd.blue_work) - 
SignedInteger::from(common_ancestor_blue_work); + for parent in self.parents_manager.parents_at_level(¤t_pp_header.header, level).iter().copied() { let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = - SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); + let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { return Err(PruningImportError::PruningProofInsufficientBlueWork); } @@ -748,7 +773,7 @@ impl PruningProofManager { &self, header: &Header, level: BlockLevel, - relations_service: MTRelationsService, + relations_service: &MTRelationsService, ) -> PruningProofManagerInternalResult> { // Logic of apply_proof only inserts parent entries for a header from the proof // into the relations store for a level if there was GD data in the old stores for that @@ -798,7 +823,7 @@ impl PruningProofManager { let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { - self.find_selected_parent_header_at_level(&pp_header.header, level, relations_service.clone())? + self.find_selected_parent_header_at_level(&pp_header.header, level, &relations_service)? 
}; let selected_tip = selected_tip_header.hash; @@ -850,7 +875,7 @@ impl PruningProofManager { { break current_header; } - current_header = match self.find_selected_parent_header_at_level(¤t_header, level, relations_service.clone()) { + current_header = match self.find_selected_parent_header_at_level(¤t_header, level, &relations_service) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { @@ -863,18 +888,15 @@ impl PruningProofManager { }; if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { - current_required_chain_block = match self.find_selected_parent_header_at_level( - ¤t_required_chain_block, - level, - relations_service.clone(), - ) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - finished_headers_for_required_block_chain = true; - current_required_chain_block - } - Err(e) => return Err(e), - }; + current_required_chain_block = + match self.find_selected_parent_header_at_level(¤t_required_chain_block, level, &relations_service) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + finished_headers_for_required_block_chain = true; + current_required_chain_block + } + Err(e) => return Err(e), + }; } }; let root = root_header.hash; @@ -884,63 +906,16 @@ impl PruningProofManager { } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); - let gd_manager = GhostdagManager::new( + let has_required_block = self.fill_proof_ghostdag_data( + root, root, - self.ghostdag_k, - ghostdag_store.clone(), - relations_service.clone(), - self.headers_store.clone(), - self.reachability_service.clone(), + pp, + &ghostdag_store, + &relations_service, + level != 0, + Some(required_block), true, ); - ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); - 
ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); - let mut topological_heap: BinaryHeap<_> = Default::default(); - let mut visited = BlockHashSet::new(); - for child in relations_service.get_children(root).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? - })); - } - - let mut has_required_block = root == required_block; - loop { - let Some(current) = topological_heap.pop() else { - break; - }; - let current_hash = current.0.hash; - if !visited.insert(current_hash) { - continue; - } - - if !self.reachability_service.is_dag_ancestor_of(current_hash, pp) { - // We don't care about blocks in the antipast of the pruning point - continue; - } - - if !has_required_block && current_hash == required_block { - has_required_block = true; - } - - let relevant_parents: Box<[Hash]> = relations_service - .get_parents(current_hash) - .unwrap() - .iter() - .copied() - .filter(|parent| self.reachability_service.is_dag_ancestor_of(root, *parent)) - .collect(); - let current_gd = gd_manager.ghostdag(&relevant_parents); - ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap(); - for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
- })); - } - } // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block @@ -1090,6 +1065,87 @@ impl PruningProofManager { .collect_vec() } + /// BFS forward iterates from starting_hash until selected tip, ignoring blocks in the antipast of selected_tip. + /// For each block along the way, insert that hash into the ghostdag_store + /// If we have a required_block to find, this will return true if that block was found along the way + fn fill_proof_ghostdag_data( + &self, + genesis_hash: Hash, + starting_hash: Hash, + selected_tip: Hash, + ghostdag_store: &Arc, + relations_service: &MTRelationsService, + use_score_as_work: bool, + required_block: Option, + initialize_store: bool, + ) -> bool { + let gd_manager = GhostdagManager::new( + genesis_hash, + self.ghostdag_k, + ghostdag_store.clone(), + relations_service.clone(), + self.headers_store.clone(), + self.reachability_service.clone(), + use_score_as_work, + ); + + if initialize_store { + ghostdag_store.insert(genesis_hash, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); + } + + let mut topological_heap: BinaryHeap<_> = Default::default(); + let mut visited = BlockHashSet::new(); + for child in relations_service.get_children(starting_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
+ })); + } + + let mut has_required_block = required_block.is_some_and(|required_block| starting_hash == required_block); + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) { + // We don't care about blocks in the antipast of the selected tip + continue; + } + + if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) { + has_required_block = true; + } + + let relevant_parents: Box<[Hash]> = relations_service + .get_parents(current_hash) + .unwrap() + .iter() + .copied() + .filter(|parent| self.reachability_service.is_dag_ancestor_of(starting_hash, *parent)) + .collect(); + let current_gd = gd_manager.ghostdag(&relevant_parents); + + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); + + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? + })); + } + } + + has_required_block + } + /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. 
fn chain_up_to_depth( &self, From a23d1dd88b79dc0770b4ffa96f392024a8647dbd Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:22:30 -0600 Subject: [PATCH 30/58] Update comment on find_selected_parent_header_at_level --- consensus/src/processes/pruning_proof/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6dac50563..e0ca2a1e8 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -775,9 +775,8 @@ impl PruningProofManager { level: BlockLevel, relations_service: &MTRelationsService, ) -> PruningProofManagerInternalResult> { - // Logic of apply_proof only inserts parent entries for a header from the proof - // into the relations store for a level if there was GD data in the old stores for that - // header. To mimic that logic here, we need to filter out parents that are NOT in the relations_service + // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important + // to filter to include only parents that are in relations_service. 
let parents = self .parents_manager .parents_at_level(header, level) From 974d2004dc427d7a56ce43e3a736d24e61c1bb69 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:45:21 -0600 Subject: [PATCH 31/58] Re-apply missed comment --- consensus/src/pipeline/header_processor/processor.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 22a5c566c..b64fe4ea2 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -447,6 +447,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); + // This data might have been already written when applying the pruning proof. self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); From 6ea832819d78732b9da3d4a0785f8e3ce7accfbc Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 3 Jul 2024 23:48:59 -0600 Subject: [PATCH 32/58] Implement db upgrade logic from 3 to 4 --- Cargo.lock | 2 + consensus/src/consensus/factory.rs | 19 ++++- kaspad/Cargo.toml | 2 + kaspad/src/daemon.rs | 109 +++++++++++++++++++++++++++-- 4 files changed, 125 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 22cd64f4f..67272dd78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3755,6 +3755,7 @@ dependencies = [ "dhat", "dirs", "futures-util", + "itertools 0.11.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", @@ -3782,6 +3783,7 @@ dependencies = [ "num_cpus", "rand 0.8.5", "rayon", + "rocksdb", "serde", "serde_with", "tempfile", diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f8af5fb5a 100644 --- a/consensus/src/consensus/factory.rs +++ 
b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { @@ -219,6 +219,23 @@ impl MultiConsensusManagementStore { } } + /// Returns the current version of this database + pub fn version(&self) -> StoreResult { + match self.metadata.read() { + Ok(data) => Ok(data.version), + Err(err) => Err(err), + } + } + + /// Set the database version to a different one + pub fn set_version(&mut self, version: u32) -> StoreResult<()> { + self.metadata.update(DirectDbWriter::new(&self.db), |mut data| { + data.version = version; + data + })?; + Ok(()) + } + pub fn should_upgrade(&self) -> StoreResult { match self.metadata.read() { Ok(data) => Ok(data.version != LATEST_DB_VERSION), diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 9f3290a51..0decbc9cc 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -46,10 +46,12 @@ dhat = { workspace = true, optional = true } serde.workspace = true dirs.workspace = true futures-util.workspace = true +itertools.workspace = true log.workspace = true num_cpus.workspace = true rand.workspace = true rayon.workspace = true +rocksdb.workspace = true tempfile.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 0950ad8fa..08dc1d87a 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -8,7 +8,10 @@ use kaspa_consensus_core::{ use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; use kaspa_core::{core::Core, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; -use kaspa_database::prelude::CachePolicy; +use kaspa_database::{ + prelude::{CachePolicy, DbWriter, DirectDbWriter}, + registry::DatabaseStorePrefixes, +}; use kaspa_grpc_server::service::GrpcService; 
use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_rpc_service::service::RpcCoreService; @@ -31,6 +34,7 @@ use kaspa_mining::{ }; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; +use itertools::Itertools; use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot}; use kaspa_utxoindex::{api::UtxoIndexProxy, UtxoIndex}; use kaspa_wrpc_server::service::{Options as WrpcServerOptions, WebSocketCounters as WrpcServerCounters, WrpcEncoding, WrpcService}; @@ -308,13 +312,106 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm && (meta_db.get_pinned(b"multi-consensus-metadata-key").is_ok_and(|r| r.is_some()) || MultiConsensusManagementStore::new(meta_db.clone()).should_upgrade().unwrap()) { - let msg = - "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; - get_user_approval_or_exit(msg, args.yes); + let mut mcms = MultiConsensusManagementStore::new(meta_db.clone()); + let version = mcms.version().unwrap(); + + // TODO: Update this entire section to a more robust implementation that allows applying multiple upgrade strategies. 
+ // If I'm at version 3 and latest version is 7, I need to be able to upgrade to that version following the intermediate + // steps without having to delete the DB + if version == 3 { + let active_consensus_dir_name = mcms.active_consensus_dir_name().unwrap(); + + match active_consensus_dir_name { + Some(current_consensus_db) => { + // Apply soft upgrade logic: delete GD data from higher levels + // and then update DB version to 4 + let consensus_db = kaspa_database::prelude::ConnBuilder::default() + .with_db_path(consensus_db_dir.clone().join(current_consensus_db)) + .with_files_limit(1) + .build() + .unwrap(); + info!("Scanning for deprecated records to cleanup"); + + let mut gd_record_count: u32 = 0; + let mut compact_record_count: u32 = 0; + + let start_level: u8 = 1; + let start_level_bytes = start_level.to_le_bytes(); + let ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let ghostdag_prefix = ghostdag_prefix_vec.as_slice(); + + // This section is used to count the records to be deleted. It's not used for the actual delete. 
+ for result in consensus_db.iterator(rocksdb::IteratorMode::From(ghostdag_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::Ghostdag.into()]) { + break; + } + + gd_record_count += 1; + } + + let compact_prefix_vec = DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let compact_prefix = compact_prefix_vec.as_slice(); + + for result in consensus_db.iterator(rocksdb::IteratorMode::From(compact_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::GhostdagCompact.into()]) { + break; + } + + compact_record_count += 1; + } + + trace!("Number of Ghostdag records to cleanup: {}", gd_record_count); + trace!("Number of GhostdagCompact records to cleanup: {}", compact_record_count); + info!("Number of deprecated records to cleanup: {}", gd_record_count + compact_record_count); + + let msg = + "Node database currently at version 3. Upgrade process to version 4 needs to be applied. Continue? 
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + // Actual delete only happens after user consents to the upgrade: + let mut writer = DirectDbWriter::new(&consensus_db); + + let end_level: u8 = config.max_block_level + 1; + let end_level_bytes = end_level.to_le_bytes(); - info!("Deleting databases from previous Kaspad version"); + let start_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let end_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(end_level_bytes).collect_vec(); - is_db_reset_needed = true; + let start_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let end_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(end_level_bytes).collect_vec(); + + // Apply delete of range from level 1 to max (+1) for Ghostdag and GhostdagCompact: + writer.delete_range(start_ghostdag_prefix_vec.clone(), end_ghostdag_prefix_vec.clone()).unwrap(); + writer.delete_range(start_compact_prefix_vec.clone(), end_compact_prefix_vec.clone()).unwrap(); + + // Compact the deleted rangeto apply the delete immediately + consensus_db.compact_range(Some(start_ghostdag_prefix_vec.as_slice()), Some(end_ghostdag_prefix_vec.as_slice())); + consensus_db.compact_range(Some(start_compact_prefix_vec.as_slice()), Some(end_compact_prefix_vec.as_slice())); + + // Also update the version to one higher: + mcms.set_version(version + 1).unwrap(); + } + None => { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + is_db_reset_needed = true; + } + } + } else { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? 
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + info!("Deleting databases from previous Kaspad version"); + + is_db_reset_needed = true; + } } // Will be true if any of the other condition above except args.reset_db From f8baf69015d2b4156e6403cfe162281f23d95d3b Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 9 Jul 2024 22:23:23 -0600 Subject: [PATCH 33/58] Explain further the workaround for GD ordering.rs --- consensus/src/processes/ghostdag/ordering.rs | 28 +++++++++++++++----- consensus/src/processes/ghostdag/protocol.rs | 7 +++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index 21306e5b8..cb73c3398 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -1,7 +1,9 @@ use std::cmp::Ordering; use kaspa_consensus_core::BlueWorkType; +use kaspa_core::warn; use kaspa_hashes::Hash; +use kaspa_math::Uint192; use serde::{Deserialize, Serialize}; use crate::model::{ @@ -46,13 +48,27 @@ impl = blocks.into_iter().collect(); sorted_blocks.sort_by_cached_key(|block| SortableBlock { hash: *block, - // Since we're only calculating GD at all levels on-demand, we may get blocks from the relations - // store in the mergeset that are not on our level + // TODO: Reconsider this approach + // It's possible for mergeset.rs::unordered_mergeset_without_selected_parent (which calls this) to reference parents + // that are in a lower level when calling relations.get_parents. 
This will panic at self.ghostdag_store.get_blue_work(*block) + // // Options for fixes: - // - do this - // - guarantee that we're only getting parents that are in this store - // - make relations store only return parents at the same or higher level - blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_default(), + // 1) do this where we simply unwrap and default to 0 (currently implemented) + // - consequence is that it affects all GD calculations + // - I argue this is fine for the short term because GD entries not being in the GD store + // can only happen IFF the parent is on a lower level. For level 0 (primary GD), this is not a problem + // and for higher GD it's also not a problem since we only want to use blocks in the same + // level or higher. + // - There is also an extra check being done in ghostdag call side to verify that the hashes in the mergeset + // belong to this + // 2) in mergeset.rs::unordered_mergeset_without_selected_parent, guarantee that we're only getting + // parents that are in this store + // 3) make relations store only return parents at the same or higher level + // - we know that realtions.get_parents can return parents in one level lower + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_else(|_| { + warn!("Tried getting blue work of hash not in GD store: {}", block); + Uint192::from_u64(0) + }), }); sorted_blocks } diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index ac9ae41d7..bfc66ebe6 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -106,6 +106,13 @@ impl Date: Tue, 9 Jul 2024 22:23:52 -0600 Subject: [PATCH 34/58] Minor update to Display of TempGD keys --- database/src/key.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/database/src/key.rs b/database/src/key.rs index e8aeff091..83fa8ebb2 100644 --- a/database/src/key.rs +++ b/database/src/key.rs @@ -73,6 +73,8 @@ impl Display 
for DbKey { match prefix { Ghostdag | GhostdagCompact + | TempGhostdag + | TempGhostdagCompact | RelationsParents | RelationsChildren | Reachability From bc56e65d5dd93d17c00e12e9f2c05e0a924e24b5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 9 Jul 2024 22:28:14 -0600 Subject: [PATCH 35/58] Various fixes - Keep using old root to minimize proof size. Old root is calculated using the temporary gd stores - fix the off-by-one in block_at_depth and chain_up_to_depth - revert the temp fix to sync with the off-by-one --- consensus/src/processes/pruning_proof/mod.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e0ca2a1e8..34ae371db 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -868,8 +868,7 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - // Need to ensure this does the same 2M+1 depth that block_at_depth does - || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth + || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -916,9 +915,8 @@ impl PruningProofManager { true, ); - // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } @@ -1016,7 +1014,8 @@ impl PruningProofManager { let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - 
queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); + // Still use "old_root" to make sure we use the minimum amount of records for the proof + queue.push(Reverse(SortableBlock::new(old_root, self.headers_store.get_header(old_root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -1158,7 +1157,7 @@ impl PruningProofManager { let mut current_gd = high_gd; let mut current = high; let mut res = vec![current]; - while current_gd.blue_score + depth >= high_gd.blue_score { + while current_gd.blue_score + depth > high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } @@ -1186,7 +1185,7 @@ impl PruningProofManager { .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; let mut current_gd = high_gd; let mut current = high; - while current_gd.blue_score + depth >= high_gd.blue_score { + while current_gd.blue_score + depth > high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } From efbb083b139c6cbe1cc46999fe4986595212725d Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sun, 14 Jul 2024 13:01:33 -0600 Subject: [PATCH 36/58] Revert "Various fixes" This reverts commit bc56e65d5dd93d17c00e12e9f2c05e0a924e24b5. This experimental commit requires a bit more thinking to apply, and optimization can be deferred. 
--- consensus/src/processes/pruning_proof/mod.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 34ae371db..e0ca2a1e8 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -868,7 +868,8 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + // Need to ensure this does the same 2M+1 depth that block_at_depth does + || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -915,8 +916,9 @@ impl PruningProofManager { true, ); + // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } @@ -1014,8 +1016,7 @@ impl PruningProofManager { let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - // Still use "old_root" to make sure we use the minimum amount of records for the proof - queue.push(Reverse(SortableBlock::new(old_root, self.headers_store.get_header(old_root).unwrap().blue_work))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -1157,7 +1158,7 @@ impl PruningProofManager { let mut current_gd = high_gd; let mut current = high; let mut res = vec![current]; - while current_gd.blue_score + depth > 
high_gd.blue_score { + while current_gd.blue_score + depth >= high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } @@ -1185,7 +1186,7 @@ impl PruningProofManager { .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; let mut current_gd = high_gd; let mut current = high; - while current_gd.blue_score + depth > high_gd.blue_score { + while current_gd.blue_score + depth >= high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } From a585be7e597a7c274a8be9536381aa9786c3a7d6 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 15 Jul 2024 23:12:34 -0600 Subject: [PATCH 37/58] Revert better proof check Recreates the GD stores for the current consensus by checking existing proof --- consensus/src/processes/pruning_proof/mod.rs | 146 ++++++++++--------- 1 file changed, 77 insertions(+), 69 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e0ca2a1e8..b22f66918 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -49,7 +49,7 @@ use crate::{ }, stores::{ depth::DbDepthStore, - ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -412,14 +412,8 @@ impl PruningProofManager { fn init_validate_pruning_point_proof_stores_and_processes( &self, - proof: &PruningPointProof, + headers_estimate: usize, ) -> PruningImportResult { - if proof[0].is_empty() { - return Err(PruningImportError::PruningProofNotEnoughHeaders); - } - - let headers_estimate = 
self.estimate_proof_unique_size(proof); - let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -473,6 +467,7 @@ impl PruningProofManager { &self, proof: &PruningPointProof, ctx: &mut TempProofContext, + log_validating: bool, ) -> PruningImportResult> { let headers_store = &ctx.headers_store; let ghostdag_stores = &ctx.ghostdag_stores; @@ -490,7 +485,9 @@ impl PruningProofManager { return Err(PruningImportError::PruningValidationInterrupted); } - info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + if log_validating { + info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + } let level_idx = level as usize; let mut selected_tip = None; for (i, header) in proof[level as usize].iter().enumerate() { @@ -597,32 +594,33 @@ impl PruningProofManager { Ok(()) } - /// Returns the common ancestor of the proof and the current consensus if there is one. - /// - /// ghostdag_stores currently contain only entries for blocks in the proof. - /// While iterating through the selected parent chain of the current consensus, if we find any - /// that is already in ghostdag_stores that must mean it's a common ancestor of the proof - /// and current consensus - fn find_proof_and_consensus_common_ancestor( + // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. If no + // such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( &self, - ghostdag_store: &Arc, - current_consensus_selected_tip_header: Arc
, + proof_ghostdag_stores: &[Arc], + current_consensus_ghostdag_stores: &[Arc], + proof_selected_tip: Hash, level: BlockLevel, - relations_service: &MTRelationsService, - ) -> Option { - let mut chain_block = current_consensus_selected_tip_header.clone(); - - for _ in 0..(2 * self.pruning_proof_m as usize) { - if chain_block.direct_parents().is_empty() || chain_block.hash.is_origin() { - break; - } - if ghostdag_store.has(chain_block.hash).unwrap() { - return Some(chain_block.hash); - } - chain_block = self.find_selected_parent_header_at_level(&chain_block, level, relations_service).unwrap(); + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match current_consensus_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = proof_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; } - - None } pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { @@ -630,23 +628,49 @@ impl PruningProofManager { return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); } + if proof[0].is_empty() { + return Err(PruningImportError::PruningProofNotEnoughHeaders); + } + + let headers_estimate = self.estimate_proof_unique_size(proof); + + // Initialize the stores for the proof let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; - let selected_tip_by_level = 
self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; - let ghostdag_stores = stores_and_processes.ghostdag_stores; + let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; + let proof_selected_tip_by_level = + self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; + let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; + + // Get the proof for the current consensus and recreate the stores for it + // This is expected to be fast because if a proof exists, it will be cached. + // If no proof exists, this is empty + let mut current_consensus_proof = self.get_pruning_point_proof(); + if current_consensus_proof.is_empty() { + // An empty proof can only happen if we're at genesis. We're going to create a proof for this case that contains the genesis header only + let genesis_header = self.headers_store.get_header(self.genesis_hash).unwrap(); + current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); + } + let mut current_consensus_stores_and_processes = + self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; + let _ = self.populate_stores_for_validate_pruning_point_proof( + ¤t_consensus_proof, + &mut current_consensus_stores_and_processes, + false, + )?; + let current_consensus_ghostdag_stores = current_consensus_stores_and_processes.ghostdag_stores; let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; - let current_pp_header = self.headers_store.get_header_with_block_level(current_pp).unwrap(); + let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { + for (level_idx, selected_tip) in 
proof_selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; - let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); // Next check is to see if this proof is "better" than what's in the current consensus // Step 1 - look at only levels that have a full proof (least 2m blocks in the proof) @@ -658,36 +682,19 @@ impl PruningProofManager { // we can determine if the proof is better. The proof is better if the score difference between the // old current consensus's tips and the common ancestor is less than the score difference between the // proof's tip and the common ancestor - let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); - let current_consensus_selected_tip_header = if current_pp_header.block_level >= level { - current_pp_header.header.clone() - } else { - self.find_selected_parent_header_at_level(¤t_pp_header.header, level, &relations_service).unwrap() - }; - if let Some(common_ancestor) = self.find_proof_and_consensus_common_ancestor( - &ghostdag_stores[level_idx], - current_consensus_selected_tip_header.clone(), + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &proof_ghostdag_stores, + ¤t_consensus_ghostdag_stores, + selected_tip, level, - &relations_service, + proof_selected_tip_gd, ) { - // Fill the GD store with data from current consensus, - // starting from the common ancestor until the current level selected tip - let _ = self.fill_proof_ghostdag_data( - proof[level_idx].first().unwrap().hash, - common_ancestor, - current_consensus_selected_tip_header.hash, - &ghostdag_stores[level_idx], - &relations_service, - level != 0, - None, - false, - ); - let 
common_ancestor_blue_work = ghostdag_stores[level_idx].get_blue_work(common_ancestor).unwrap(); let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(common_ancestor_blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header.header, level).iter().copied() { - let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_blue_work); + SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); + for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { + let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work_diff = + SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { return Err(PruningImportError::PruningProofInsufficientBlueWork); } @@ -710,8 +717,8 @@ impl PruningProofManager { for level in (0..=self.max_block_level).rev() { let level_idx = level as usize; - let proof_selected_tip = selected_tip_by_level[level_idx]; - let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + let proof_selected_tip = proof_selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } @@ -721,7 +728,7 @@ impl PruningProofManager { if parents .iter() .copied() - .any(|parent| ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) + .any(|parent| proof_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) { return Ok(()); } @@ -735,7 +742,8 @@ impl PruningProofManager { 
drop(pruning_read); drop(relations_read); - drop(stores_and_processes.db_lifetime); + drop(proof_stores_and_processes.db_lifetime); + drop(current_consensus_stores_and_processes.db_lifetime); Err(PruningImportError::PruningProofNotEnoughHeaders) } From e7625c76f79a304e88e005ff63af43a25c03413a Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 16 Jul 2024 09:10:55 -0600 Subject: [PATCH 38/58] Fix: use cc gd store --- consensus/src/processes/pruning_proof/mod.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index b22f66918..a1c7bf6a3 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -725,11 +725,9 @@ impl PruningProofManager { match relations_read[level_idx].get_parents(current_pp).unwrap_option() { Some(parents) => { - if parents - .iter() - .copied() - .any(|parent| proof_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) - { + if parents.iter().copied().any(|parent| { + current_consensus_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m + }) { return Ok(()); } } From 0741151000924b43b895d50e498302bdae574f08 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Sun, 18 Aug 2024 12:40:18 +0300 Subject: [PATCH 39/58] When building pruning point proof ghostdag data, ignore blocks before the root --- consensus/src/processes/ghostdag/ordering.rs | 28 +------------- consensus/src/processes/ghostdag/protocol.rs | 7 ---- consensus/src/processes/pruning_proof/mod.rs | 40 +++++++++++++++++++- 3 files changed, 41 insertions(+), 34 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index cb73c3398..88b648b8c 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs 
@@ -1,9 +1,7 @@ use std::cmp::Ordering; use kaspa_consensus_core::BlueWorkType; -use kaspa_core::warn; use kaspa_hashes::Hash; -use kaspa_math::Uint192; use serde::{Deserialize, Serialize}; use crate::model::{ @@ -46,30 +44,8 @@ impl Ord for SortableBlock { impl GhostdagManager { pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks.sort_by_cached_key(|block| SortableBlock { - hash: *block, - // TODO: Reconsider this approach - // It's possible for mergeset.rs::unordered_mergeset_without_selected_parent (which calls this) to reference parents - // that are in a lower level when calling relations.get_parents. This will panic at self.ghostdag_store.get_blue_work(*block) - // - // Options for fixes: - // 1) do this where we simply unwrap and default to 0 (currently implemented) - // - consequence is that it affects all GD calculations - // - I argue this is fine for the short term because GD entries not being in the GD store - // can only happen IFF the parent is on a lower level. For level 0 (primary GD), this is not a problem - // and for higher GD it's also not a problem since we only want to use blocks in the same - // level or higher. 
- // - There is also an extra check being done in ghostdag call side to verify that the hashes in the mergeset - // belong to this - // 2) in mergeset.rs::unordered_mergeset_without_selected_parent, guarantee that we're only getting - // parents that are in this store - // 3) make relations store only return parents at the same or higher level - // - we know that realtions.get_parents can return parents in one level lower - blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_else(|_| { - warn!("Tried getting blue work of hash not in GD store: {}", block); - Uint192::from_u64(0) - }), - }); + sorted_blocks + .sort_by_cached_key(|block| SortableBlock { hash: *block, blue_work: self.ghostdag_store.get_blue_work(*block).unwrap() }); sorted_blocks } } diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index bfc66ebe6..ac9ae41d7 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -106,13 +106,6 @@ impl { + relations_store: T, + reachability_service: U, + root: Hash, +} + +impl RelationsStoreReader for RelationsStoreInFutureOfRoot { + fn get_parents(&self, hash: Hash) -> Result { + self.relations_store.get_parents(hash).map(|hashes| { + Arc::new(hashes.iter().copied().filter(|h| self.reachability_service.is_dag_ancestor_of(self.root, *h)).collect_vec()) + }) + } + + fn get_children(&self, hash: Hash) -> StoreResult> { + // We assume hash is in future of root + assert!(self.reachability_service.is_dag_ancestor_of(self.root, hash)); + self.relations_store.get_children(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.reachability_service.is_dag_ancestor_of(self.root, hash) { + Ok(false) + } else { + self.relations_store.has(hash) + } + } + + fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> { + panic!("unimplemented") + } +} + pub struct PruningProofManager { db: Arc, @@ -1084,6 +1117,11 @@ impl 
PruningProofManager { required_block: Option, initialize_store: bool, ) -> bool { + let relations_service = RelationsStoreInFutureOfRoot { + relations_store: relations_service.clone(), + reachability_service: self.reachability_service.clone(), + root: genesis_hash, + }; let gd_manager = GhostdagManager::new( genesis_hash, self.ghostdag_k, From 89f17018796c537ded9114ee8ebdec7bcd0fe2a5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 22 Aug 2024 23:02:35 -0600 Subject: [PATCH 40/58] Add trusted blocks to all relevant levels during apply_proof As opposed to applying only to level 0 --- consensus/src/processes/pruning_proof/mod.rs | 24 +++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index a1c7bf6a3..fcefb0b6e 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -225,18 +225,30 @@ impl PruningProofManager { let pruning_point_header = proof[0].last().unwrap().clone(); let pruning_point = pruning_point_header.hash; - let proof_zero_set = BlockHashSet::from_iter(proof[0].iter().map(|header| header.hash)); + // Create a copy of the proof, since we're going to be mutating the proof passed to us + let proof_sets: Vec> = (0..=self.max_block_level) + .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) + .collect(); + let mut trusted_gd_map: BlockHashMap = BlockHashMap::new(); for tb in trusted_set.iter() { trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into()); - if proof_zero_set.contains(&tb.block.hash()) { - continue; - } + let tb_block_level = calc_block_level(&tb.block.header, self.max_block_level); - proof[0].push(tb.block.header.clone()); + (0..=tb_block_level).for_each(|current_proof_level| { + // If this block was in the original proof, ignore it + if 
proof_sets[current_proof_level as usize].contains(&tb.block.hash()) { + return; + } + + proof[current_proof_level as usize].push(tb.block.header.clone()); + }); } - proof[0].sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + proof.iter_mut().for_each(|level_proof| { + level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + }); + self.populate_reachability_and_headers(&proof); { From fb3d1e9b88f670555aaa89059b5dd86b3ac37235 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:41:55 -0600 Subject: [PATCH 41/58] Calculate headers estimate in init proof stores --- consensus/src/processes/pruning_proof/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index fcefb0b6e..e199f26f2 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -424,8 +424,14 @@ impl PruningProofManager { fn init_validate_pruning_point_proof_stores_and_processes( &self, - headers_estimate: usize, + proof: &PruningPointProof, ) -> PruningImportResult { + if proof[0].is_empty() { + return Err(PruningImportError::PruningProofNotEnoughHeaders); + } + + let headers_estimate = self.estimate_proof_unique_size(proof); + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -640,17 +646,11 @@ impl PruningProofManager { return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); } - if proof[0].is_empty() { - return Err(PruningImportError::PruningProofNotEnoughHeaders); - } - - let headers_estimate = self.estimate_proof_unique_size(proof); - // Initialize the stores for the proof + let mut proof_stores_and_processes = 
self.init_validate_pruning_point_proof_stores_and_processes(proof)?; let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; let proof_selected_tip_by_level = self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; @@ -665,7 +665,7 @@ impl PruningProofManager { current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); } let mut current_consensus_stores_and_processes = - self.init_validate_pruning_point_proof_stores_and_processes(headers_estimate)?; + self.init_validate_pruning_point_proof_stores_and_processes(¤t_consensus_proof)?; let _ = self.populate_stores_for_validate_pruning_point_proof( ¤t_consensus_proof, &mut current_consensus_stores_and_processes, From c9855d32bff5b7af8e05034686d241db4e407a68 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:43:00 -0600 Subject: [PATCH 42/58] Explain finished headers logic Add back the panic if we couldn't find the required block and our headers are done Add explanation in comment for why trying anyway if finished_headers is acceptable --- consensus/src/processes/pruning_proof/mod.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e199f26f2..b668a88da 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -943,8 +943,16 @@ impl PruningProofManager { tries += 1; if finished_headers { - warn!("Failed to find sufficient root for level {level} after {tries} 
tries. Headers below the current depth of {required_level_0_depth} are already pruned. Trying anyway."); - break Ok((ghostdag_store, selected_tip, root)); + if has_required_block { + // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place. + // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to + // try to find 2500 depth worth of headers at a level, but the proof only contains about 2000 headers. To be able to sync + // with such an older node. As long as we found the required block, we can still proceed. + warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned. Required block found so trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); + } else { + panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned"); + } } required_level_0_depth <<= 1; warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); From a63acdb02fe3161c6be0b4613677140707f51192 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:45:50 -0600 Subject: [PATCH 43/58] clarify comment --- consensus/src/processes/pruning_proof/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index b668a88da..118a79f1e 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -691,8 +691,8 @@ impl PruningProofManager { } // Step 2 - if we can find a common ancestor between the proof and current consensus - // we can determine if the proof is better. 
The proof is better if the score difference between the - // old current consensus's tips and the common ancestor is less than the score difference between the + // we can determine if the proof is better. The proof is better if the blue work difference between the + // old current consensus's tips and the common ancestor is less than the blue work difference between the // proof's tip and the common ancestor if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( &proof_ghostdag_stores, From 46dbac30a78fa48b503621d8970e019e9a45aa31 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:58:31 -0600 Subject: [PATCH 44/58] Rename old_root to depth_based_root explain logic for the two root calculation --- consensus/src/processes/pruning_proof/mod.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 118a79f1e..ceacbf12f 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1013,8 +1013,12 @@ impl PruningProofManager { .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap(); + // (New Logic) This is the root we calculated by going through block relations let root = roots_by_level[level]; - let old_root = if level != self.max_block_level as usize { + // (Old Logic) This is the root we can calculate given that the GD records are already filled + // The root calc logic below is the original logic before the on-demand higher level GD calculation + // We only need depth_based_root to sanity check the new logic + let depth_based_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: 
{}, err: {}", level + 1, err)) @@ -1036,8 +1040,8 @@ impl PruningProofManager { block_at_depth_2m }; - // new root is expected to be always an ancestor of old root because new root takes a safety margin - assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + // new root is expected to be always an ancestor of depth_based_root because new root takes a safety margin + assert!(self.reachability_service.is_dag_ancestor_of(root, depth_based_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); From 4c9f9979040e4e5b3aa4adad4541d7193bf312c6 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 27 Aug 2024 01:07:07 -0600 Subject: [PATCH 45/58] More merge fixes --- Cargo.lock | 2 +- consensus/src/consensus/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1fc312db8..cd93c85b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3751,7 +3751,7 @@ dependencies = [ "dhat", "dirs", "futures-util", - "itertools 0.11.0", + "itertools 0.13.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 7c527a542..5b09b12cd 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -532,7 +532,7 @@ impl ConsensusApi for Consensus { for child in initial_children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -559,7 +559,7 @@ impl ConsensusApi for Consensus { for child in children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); 
} } From 56b4392178bb5a751752cafc461c3b9aa9155250 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 27 Aug 2024 23:03:39 -0600 Subject: [PATCH 46/58] Refactor relations services into self --- consensus/src/processes/pruning_proof/mod.rs | 31 ++++++++------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 17147e089..550bde740 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -152,6 +152,7 @@ pub struct PruningProofManager { reachability_service: MTReachabilityService, ghostdag_store: Arc, relations_stores: Arc>>, + level_relations_services: Vec>, pruning_point_store: Arc>, past_pruning_points_store: Arc, virtual_stores: Arc>, @@ -225,6 +226,10 @@ impl PruningProofManager { ghostdag_manager, is_consensus_exiting, + + level_relations_services: (0..=max_block_level) + .map(|level| MTRelationsService::new(storage.relations_stores.clone().clone(), level)) + .collect_vec(), } } @@ -824,7 +829,6 @@ impl PruningProofManager { &self, header: &Header, level: BlockLevel, - relations_service: &MTRelationsService, ) -> PruningProofManagerInternalResult> { // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important // to filter to include only parents that are in relations_service. 
@@ -833,7 +837,7 @@ impl PruningProofManager { .parents_at_level(header, level) .iter() .copied() - .filter(|parent| relations_service.has(*parent).unwrap()) + .filter(|parent| self.level_relations_services[level as usize].has(*parent).unwrap()) .collect_vec() .push_if_empty(ORIGIN); @@ -869,11 +873,10 @@ impl PruningProofManager { required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { - let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { - self.find_selected_parent_header_at_level(&pp_header.header, level, &relations_service)? + self.find_selected_parent_header_at_level(&pp_header.header, level)? }; let selected_tip = selected_tip_header.hash; @@ -925,7 +928,7 @@ impl PruningProofManager { { break current_header; } - current_header = match self.find_selected_parent_header_at_level(¤t_header, level, &relations_service) { + current_header = match self.find_selected_parent_header_at_level(¤t_header, level) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { @@ -939,7 +942,7 @@ impl PruningProofManager { if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { current_required_chain_block = - match self.find_selected_parent_header_at_level(¤t_required_chain_block, level, &relations_service) { + match self.find_selected_parent_header_at_level(¤t_required_chain_block, level) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { finished_headers_for_required_block_chain = true; @@ -956,16 +959,8 @@ impl PruningProofManager { } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); - let has_required_block = self.fill_proof_ghostdag_data( - root, - root, - pp, - 
&ghostdag_store, - &relations_service, - level != 0, - Some(required_block), - true, - ); + let has_required_block = + self.fill_proof_ghostdag_data(root, root, pp, &ghostdag_store, level != 0, Some(required_block), true, level); // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block @@ -1136,13 +1131,13 @@ impl PruningProofManager { starting_hash: Hash, selected_tip: Hash, ghostdag_store: &Arc, - relations_service: &MTRelationsService, use_score_as_work: bool, required_block: Option, initialize_store: bool, + level: BlockLevel, ) -> bool { let relations_service = RelationsStoreInFutureOfRoot { - relations_store: relations_service.clone(), + relations_store: self.level_relations_services[level as usize].clone(), reachability_service: self.reachability_service.clone(), root: genesis_hash, }; From 14376d2ae77df0d57e9e389653b58a0da85df317 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 29 Aug 2024 22:10:13 +0000 Subject: [PATCH 47/58] Squashed commit of the following: commit 3820fdddc6ec089f68501070edf540d7c13b0cc2 Merge: 5b59ea98d c839a9d3f Author: Michael Sutton Date: Thu Aug 29 22:09:09 2024 +0000 Merge branch 'dev' into bcm-parallel-processing commit 5b59ea98d2d807e1ceff4f458b19f3f37d321be2 Author: max143672 Date: Tue Jun 25 19:28:11 2024 +0300 suppress warnings commit ed3f76bd9f1a928dd1cb72ba8b3355e0b90de8de Author: max143672 Date: Tue Jun 25 15:27:17 2024 +0300 style: fmt commit 0e4629dfcdaad279f79fff66202ba569b017dc23 Author: max143672 Date: Tue Jun 25 15:26:07 2024 +0300 add bench with custom threadpool commit 5dc827b85140985a2cbfdd31686b6ca24682c882 Author: max143672 Date: Tue Jun 25 15:20:28 2024 +0300 fix clippy commit 1e6fd8ec493e361bf6f7d1ba61fbb7764794616b Author: max143672 Date: Tue Jun 25 11:51:54 2024 +0300 refactor check_scripts fn, fix tests commit 5b76a6445f576c918783c52d29a8456d41af6644 Author: max143672 Date: Mon Jun 24 22:52:23 2024 +0300 apply par iter to `check_scripts` commit 
93c5b1adafe9e0ff7c28c2c48236985a9b643175 Author: max143672 Date: Mon Jun 24 01:16:51 2024 +0300 remove scc commit de650c32242dd2a6e44283e44c4fc94d3405e816 Author: max143672 Date: Mon Jun 24 01:07:00 2024 +0300 rollback rwlock and indexmap. commit 5f3fe0280de7a8145e7e51856acb2b61e003ccf9 Author: max143672 Date: Mon Jun 24 00:37:12 2024 +0300 dont apply cache commit 258b40fa7057a93886765c7dc398fc37844bf1dc Author: max143672 Date: Sun Jun 23 14:39:59 2024 +0300 use hashcache commit 3edd435b8109b7311c6f6ea89de964acd7e1ab85 Author: max143672 Date: Sun Jun 23 00:28:22 2024 +0300 use concurrent cache commit 0c2acdafbd154f7acbe946df20f9dcac7421b432 Author: max143672 Date: Sun Jun 23 00:08:52 2024 +0300 use upgreadable read commit 91364d132cedd69b95e0eed310bbcfd0ff88f127 Author: max143672 Date: Sun Jun 23 00:06:52 2024 +0300 fix benches commit d3d0716bac1881fa042853f64582a48a4c402b18 Author: max143672 Date: Sat Jun 22 11:54:43 2024 +0300 fix par versions commit b756df4305ccfdae95546ab279cde0f8fa9e38ff Author: max143672 Date: Fri Jun 21 22:39:28 2024 +0300 use cache per iteration per function commit cfcd7e13b12e080a65761004f6a7d3b2033804a5 Author: max143672 Date: Fri Jun 21 21:38:07 2024 +0300 benches are implemented commit 25f1087c6caaad446c5f91e32ac770e3bf9d2d9a Author: max143672 Date: Fri Jun 21 20:22:14 2024 +0300 sighash reused trait --- Cargo.lock | 2 + consensus/Cargo.toml | 5 + consensus/benches/check_scripts.rs | 126 +++++++++++ consensus/client/src/sign.rs | 6 +- consensus/client/src/signing.rs | 10 +- consensus/core/Cargo.toml | 1 + consensus/core/src/hashing/sighash.rs | 195 +++++++++++++----- consensus/core/src/sign.rs | 22 +- .../pipeline/virtual_processor/processor.rs | 15 +- .../transaction_validator_populated.rs | 84 ++++++-- consensus/wasm/src/utils.rs | 6 +- crypto/txscript/src/caches.rs | 2 +- crypto/txscript/src/lib.rs | 58 +++--- crypto/txscript/src/opcodes/macros.rs | 22 +- crypto/txscript/src/opcodes/mod.rs | 70 ++++--- 
crypto/txscript/src/standard/multisig.rs | 10 +- .../src/mempool/check_transaction_standard.rs | 9 +- wallet/core/src/account/pskb.rs | 8 +- wallet/pskt/examples/multisig.rs | 8 +- wallet/pskt/src/pskt.rs | 7 +- 20 files changed, 497 insertions(+), 169 deletions(-) create mode 100644 consensus/benches/check_scripts.rs diff --git a/Cargo.lock b/Cargo.lock index 80526e0a5..fe92dbaaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2543,6 +2543,7 @@ dependencies = [ "futures-util", "indexmap 2.4.0", "itertools 0.13.0", + "kaspa-addresses", "kaspa-consensus-core", "kaspa-consensus-notify", "kaspa-consensusmanager", @@ -2604,6 +2605,7 @@ dependencies = [ name = "kaspa-consensus-core" version = "0.14.3" dependencies = [ + "arc-swap", "async-trait", "bincode", "borsh", diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 3f4a1b456..b9a183ea8 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -54,11 +54,16 @@ serde_json.workspace = true flate2.workspace = true rand_distr.workspace = true kaspa-txscript-errors.workspace = true +kaspa-addresses.workspace = true [[bench]] name = "hash_benchmarks" harness = false +[[bench]] +name = "check_scripts" +harness = false + [features] html_reports = [] devnet-prealloc = ["kaspa-consensus-core/devnet-prealloc"] diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs new file mode 100644 index 000000000..b6a8402d4 --- /dev/null +++ b/consensus/benches/check_scripts.rs @@ -0,0 +1,126 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode}; +use kaspa_addresses::{Address, Prefix, Version}; +use kaspa_consensus::processes::transaction_validator::transaction_validator_populated::{ + check_scripts_par_iter, check_scripts_par_iter_thread, check_scripts_single_threaded, +}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; +use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL; +use 
kaspa_consensus_core::subnets::SubnetworkId; +use kaspa_consensus_core::tx::{MutableTransaction, Transaction, TransactionInput, TransactionOutpoint, UtxoEntry}; +use kaspa_txscript::caches::Cache; +use kaspa_txscript::pay_to_address_script; +use rand::{thread_rng, Rng}; +use secp256k1::Keypair; +use std::thread::available_parallelism; + +// You may need to add more detailed mocks depending on your actual code. +fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec) { + let reused_values = SigHashReusedValuesUnsync::new(); + let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let mut tx = Transaction::new( + 0, + vec![], + vec![], + 0, + SubnetworkId::from_bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + 0, + vec![], + ); + let mut utxos = vec![]; + let mut kps = vec![]; + for _ in 0..inputs_count - non_uniq_signatures { + let kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 }); + let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize()); + utxos.push(UtxoEntry { + amount: thread_rng().gen::() as u64, + script_public_key: pay_to_address_script(&address), + block_daa_score: 333, + is_coinbase: false, + }); + kps.push(kp); + } + for _ in 0..non_uniq_signatures { + let kp = kps.last().unwrap(); + tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 }); + let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize()); + utxos.push(UtxoEntry { + amount: thread_rng().gen::() as u64, + script_public_key: pay_to_address_script(&address), + block_daa_score: 444, + is_coinbase: false, + }); + } + for (i, kp) in kps.iter().enumerate().take(inputs_count - non_uniq_signatures) { + let mut_tx = 
MutableTransaction::with_entries(&tx, utxos.clone()); + let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); + // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) + tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); + } + let length = tx.inputs.len(); + for i in (inputs_count - non_uniq_signatures)..length { + let kp = kps.last().unwrap(); + let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); + // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) + tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); + } + (tx, utxos) +} + +fn benchmark_check_scripts(c: &mut Criterion) { + for inputs_count in [100, 50, 25, 10, 5, 2] { + for non_uniq_signatures in [0, inputs_count / 2] { + let (tx, utxos) = mock_tx(inputs_count, non_uniq_signatures); + let mut group = c.benchmark_group(format!("inputs: {inputs_count}, non uniq: {non_uniq_signatures}")); + group.sampling_mode(SamplingMode::Flat); + + group.bench_function("single_thread", |b| { + let tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.map.write().clear(); + check_scripts_single_threaded(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); + }) + }); + + group.bench_function("rayon par iter", |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let cache = 
Cache::new(inputs_count as u64); + b.iter(|| { + cache.map.write().clear(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); + }) + }); + + for i in (2..=available_parallelism().unwrap().get()).step_by(2) { + if inputs_count >= i { + group.bench_function(&format!("rayon, custom threadpool, thread count {i}"), |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let cache = Cache::new(inputs_count as u64); + let pool = rayon::ThreadPoolBuilder::new().num_threads(i).build().unwrap(); + b.iter(|| { + // Create a custom thread pool with the specified number of threads + cache.map.write().clear(); + check_scripts_par_iter_thread(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool)) + .unwrap(); + }) + }); + } + } + } + } +} + +criterion_group! { + name = benches; + // This can be any expression that returns a `Criterion` object. + config = Criterion::default().with_output_color(true).measurement_time(std::time::Duration::new(20, 0)); + targets = benchmark_check_scripts +} + +criterion_main!(benches); diff --git a/consensus/client/src/sign.rs b/consensus/client/src/sign.rs index c254aee07..6dae4ec3a 100644 --- a/consensus/client/src/sign.rs +++ b/consensus/client/src/sign.rs @@ -3,7 +3,7 @@ use core::iter::once; use itertools::Itertools; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, tx::PopulatedTransaction, @@ -40,7 +40,7 @@ pub fn sign_with_multiple_v3<'a>(tx: &'a Transaction, privkeys: &[[u8; 32]]) -> map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; { let input_len = tx.inner().inputs.len(); @@ -55,7 +55,7 @@ pub fn sign_with_multiple_v3<'a>(tx: &'a Transaction, privkeys: &[[u8; 
32]]) -> }; let script = script_pub_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) diff --git a/consensus/client/src/signing.rs b/consensus/client/src/signing.rs index ef993d011..f7fe8cee6 100644 --- a/consensus/client/src/signing.rs +++ b/consensus/client/src/signing.rs @@ -75,7 +75,7 @@ impl SigHashCache { } } - pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { + pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } @@ -185,16 +185,16 @@ pub fn calc_schnorr_signature_hash( let mut hasher = TransactionSigningHash::new(); hasher .write_u16(tx.version) - .update(previous_outputs_hash(&tx, hash_type, &mut reused_values)) - .update(sequences_hash(&tx, hash_type, &mut reused_values)) - .update(sig_op_counts_hash(&tx, hash_type, &mut reused_values)); + .update(previous_outputs_hash(&tx, hash_type, &reused_values)) + .update(sequences_hash(&tx, hash_type, &reused_values)) + .update(sig_op_counts_hash(&tx, hash_type, &reused_values)); hash_outpoint(&mut hasher, input.previous_outpoint); hash_script_public_key(&mut hasher, &utxo.script_public_key); hasher .write_u64(utxo.amount) .write_u64(input.sequence) .write_u8(input.sig_op_count) - .update(outputs_hash(&tx, hash_type, &mut reused_values, input_index)) + .update(outputs_hash(&tx, hash_type, &reused_values, input_index)) .write_u64(tx.lock_time) 
.update(&tx.subnetwork_id) .write_u64(tx.gas) diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 44dbedd38..228b4ac11 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -15,6 +15,7 @@ wasm32-sdk = [] default = [] [dependencies] +arc-swap.workspace = true async-trait.workspace = true borsh.workspace = true cfg-if.workspace = true diff --git a/consensus/core/src/hashing/sighash.rs b/consensus/core/src/hashing/sighash.rs index c1b6133e8..f237e773c 100644 --- a/consensus/core/src/hashing/sighash.rs +++ b/consensus/core/src/hashing/sighash.rs @@ -1,4 +1,7 @@ +use arc_swap::ArcSwapOption; use kaspa_hashes::{Hash, Hasher, HasherBase, TransactionSigningHash, TransactionSigningHashECDSA, ZERO_HASH}; +use std::cell::Cell; +use std::sync::Arc; use crate::{ subnets::SUBNETWORK_ID_NATIVE, @@ -11,72 +14,174 @@ use super::{sighash_type::SigHashType, HasherExtensions}; /// the same for all transaction inputs. /// Reuse of such values prevents the quadratic hashing problem. 
#[derive(Default)] -pub struct SigHashReusedValues { - previous_outputs_hash: Option, - sequences_hash: Option, - sig_op_counts_hash: Option, - outputs_hash: Option, +pub struct SigHashReusedValuesUnsync { + previous_outputs_hash: Cell>, + sequences_hash: Cell>, + sig_op_counts_hash: Cell>, + outputs_hash: Cell>, } -impl SigHashReusedValues { +impl SigHashReusedValuesUnsync { pub fn new() -> Self { - Self { previous_outputs_hash: None, sequences_hash: None, sig_op_counts_hash: None, outputs_hash: None } + Self::default() } } -pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +#[derive(Default)] +pub struct SigHashReusedValuesSync { + previous_outputs_hash: ArcSwapOption, + sequences_hash: ArcSwapOption, + sig_op_counts_hash: ArcSwapOption, + outputs_hash: ArcSwapOption, +} + +impl SigHashReusedValuesSync { + pub fn new() -> Self { + Self::default() + } +} + +pub trait SigHashReusedValues { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash; + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash; + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; +} + +impl SigHashReusedValues for Arc { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().previous_outputs_hash(set) + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().sequences_hash(set) + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().sig_op_counts_hash(set) + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().outputs_hash(set) + } +} + +impl SigHashReusedValues for SigHashReusedValuesUnsync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.previous_outputs_hash.get().unwrap_or_else(|| { + let hash = set(); + self.previous_outputs_hash.set(Some(hash)); + hash + }) + } + + fn sequences_hash(&self, set: 
impl Fn() -> Hash) -> Hash { + self.sequences_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sequences_hash.set(Some(hash)); + hash + }) + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.sig_op_counts_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sig_op_counts_hash.set(Some(hash)); + hash + }) + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.outputs_hash.get().unwrap_or_else(|| { + let hash = set(); + self.outputs_hash.set(Some(hash)); + hash + }) + } +} + +impl SigHashReusedValues for SigHashReusedValuesSync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.previous_outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.previous_outputs_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sequences_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sequences_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sig_op_counts_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sig_op_counts_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.outputs_hash.rcu(|_| Arc::new(hash)); + hash + } +} + +pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - - if let Some(previous_outputs_hash) = reused_values.previous_outputs_hash { - previous_outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.update(input.previous_outpoint.transaction_id.as_bytes()); 
hasher.write_u32(input.previous_outpoint.index); } - let previous_outputs_hash = hasher.finalize(); - reused_values.previous_outputs_hash = Some(previous_outputs_hash); - previous_outputs_hash - } + hasher.finalize() + }; + reused_values.previous_outputs_hash(hash) } -pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_single() || hash_type.is_sighash_anyone_can_pay() || hash_type.is_sighash_none() { return ZERO_HASH; } - - if let Some(sequences_hash) = reused_values.sequences_hash { - sequences_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u64(input.sequence); } - let sequence_hash = hasher.finalize(); - reused_values.sequences_hash = Some(sequence_hash); - sequence_hash - } + hasher.finalize() + }; + reused_values.sequences_hash(hash) } -pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - if let Some(sig_op_counts_hash) = reused_values.sig_op_counts_hash { - sig_op_counts_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u8(input.sig_op_count); } - let sig_op_counts_hash = hasher.finalize(); - reused_values.sig_op_counts_hash = Some(sig_op_counts_hash); - sig_op_counts_hash - } + hasher.finalize() + }; + reused_values.sig_op_counts_hash(hash) } pub fn payload_hash(tx: &Transaction) -> Hash { @@ -92,7 +197,7 @@ pub fn payload_hash(tx: &Transaction) -> Hash { hasher.finalize() } -pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut 
SigHashReusedValues, input_index: usize) -> Hash { +pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues, input_index: usize) -> Hash { if hash_type.is_sighash_none() { return ZERO_HASH; } @@ -107,19 +212,15 @@ pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mu hash_output(&mut hasher, &tx.outputs[input_index]); return hasher.finalize(); } - - // Otherwise, return hash of all outputs. Re-use hash if available. - if let Some(outputs_hash) = reused_values.outputs_hash { - outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for output in tx.outputs.iter() { hash_output(&mut hasher, output); } - let outputs_hash = hasher.finalize(); - reused_values.outputs_hash = Some(outputs_hash); - outputs_hash - } + hasher.finalize() + }; + // Otherwise, return hash of all outputs. Re-use hash if available. + reused_values.outputs_hash(hash) } pub fn hash_outpoint(hasher: &mut impl Hasher, outpoint: TransactionOutpoint) { @@ -141,7 +242,7 @@ pub fn calc_schnorr_signature_hash( verifiable_tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let input = verifiable_tx.populated_input(input_index); let tx = verifiable_tx.tx(); @@ -170,7 +271,7 @@ pub fn calc_ecdsa_signature_hash( tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, reused_values); let mut hasher = TransactionSigningHashECDSA::new(); @@ -573,9 +674,9 @@ mod tests { } } let populated_tx = PopulatedTransaction::new(&tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); assert_eq!( - calc_schnorr_signature_hash(&populated_tx, 
test.input_index, test.hash_type, &mut reused_values).to_string(), + calc_schnorr_signature_hash(&populated_tx, test.input_index, test.hash_type, &reused_values).to_string(), test.expected_hash, "test {} failed", test.name diff --git a/consensus/core/src/sign.rs b/consensus/core/src/sign.rs index a40b949e3..1a87d03f1 100644 --- a/consensus/core/src/sign.rs +++ b/consensus/core/src/sign.rs @@ -1,6 +1,6 @@ use crate::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::{SigHashType, SIG_HASH_ALL}, }, tx::{SignableTransaction, VerifiableTransaction}, @@ -84,9 +84,9 @@ pub fn sign(mut signable_tx: SignableTransaction, schnorr_key: secp256k1::Keypai signable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..signable_tx.tx.inputs.len() { - let sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -106,11 +106,11 @@ pub fn sign_with_multiple(mut mutable_tx: SignableTransaction, privkeys: Vec<[u8 mutable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = 
calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -132,12 +132,12 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -155,9 +155,9 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u /// Sign a transaction input with a sighash_type using schnorr pub fn sign_input(tx: &impl VerifiableTransaction, input_index: usize, private_key: &[u8; 32], hash_type: SigHashType) -> Vec { - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); - let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, &mut reused_values); + let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); let schnorr_key = 
secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, private_key).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); @@ -167,7 +167,7 @@ pub fn sign_input(tx: &impl VerifiableTransaction, input_index: usize, private_k } pub fn verify(tx: &impl VerifiableTransaction) -> Result<(), Error> { - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { if input.signature_script.is_empty() { return Err(Error::Message(format!("Signature is empty for input: {i}"))); @@ -175,7 +175,7 @@ pub fn verify(tx: &impl VerifiableTransaction) -> Result<(), Error> { let pk = &entry.script_public_key.script()[1..33]; let pk = secp256k1::XOnlyPublicKey::from_slice(pk)?; let sig = secp256k1::schnorr::Signature::from_slice(&input.signature_script[1..65])?; - let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice())?; sig.verify(&msg, &pk)?; } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 6596f624c..d36c2edac 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -772,7 +772,20 @@ impl VirtualStateProcessor { let virtual_utxo_view = &virtual_read.utxo_set; let virtual_daa_score = virtual_state.daa_score; let virtual_past_median_time = virtual_state.past_median_time; - self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time, args) + if mutable_tx.tx.inputs.len() > 1 { + // use pool to apply par_iter to inputs + self.thread_pool.install(|| { + self.validate_mempool_transaction_impl( + mutable_tx, + virtual_utxo_view, + virtual_daa_score, + 
virtual_past_median_time, + args, + ) + }) + } else { + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time, args) + } } pub fn validate_mempool_transactions_in_parallel( diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index f7a43aad2..d6884ca02 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -1,11 +1,16 @@ use crate::constants::{MAX_SOMPI, SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK}; +use kaspa_consensus_core::hashing::sighash::{SigHashReusedValues, SigHashReusedValuesSync}; use kaspa_consensus_core::{ - hashing::sighash::SigHashReusedValues, + hashing::sighash::SigHashReusedValuesUnsync, tx::{TransactionInput, VerifiableTransaction}, }; use kaspa_core::warn; -use kaspa_txscript::{get_sig_op_count, TxScriptEngine}; +use kaspa_txscript::caches::Cache; +use kaspa_txscript::{get_sig_op_count, SigCacheKey, TxScriptEngine}; use kaspa_txscript_errors::TxScriptError; +use rayon::iter::IntoParallelIterator; +use rayon::ThreadPool; +use std::sync::Arc; use super::{ errors::{TxResult, TxRuleError}, @@ -28,7 +33,7 @@ pub enum TxValidationFlags { impl TransactionValidator { pub fn validate_populated_transaction_and_get_fee( &self, - tx: &impl VerifiableTransaction, + tx: &(impl VerifiableTransaction + std::marker::Sync), pov_daa_score: u64, flags: TxValidationFlags, mass_and_feerate_threshold: Option<(u64, f64)>, @@ -53,7 +58,7 @@ impl TransactionValidator { match flags { TxValidationFlags::Full | TxValidationFlags::SkipMassCheck => { - Self::check_sig_op_counts(tx)?; + Self::check_sig_op_counts::<_, SigHashReusedValuesUnsync>(tx)?; self.check_scripts(tx)?; } TxValidationFlags::SkipScriptChecks => {} @@ -157,9 +162,9 @@ impl TransactionValidator 
{ Ok(()) } - fn check_sig_op_counts(tx: &T) -> TxResult<()> { + fn check_sig_op_counts(tx: &T) -> TxResult<()> { for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let calculated = get_sig_op_count::(&input.signature_script, &entry.script_public_key); + let calculated = get_sig_op_count::(&input.signature_script, &entry.script_public_key); if calculated != input.sig_op_count as u64 { return Err(TxRuleError::WrongSigOpCount(i, input.sig_op_count as u64, calculated)); } @@ -167,18 +172,66 @@ impl TransactionValidator { Ok(()) } - pub fn check_scripts(&self, tx: &impl VerifiableTransaction) -> TxResult<()> { - let mut reused_values = SigHashReusedValues::new(); - for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let mut engine = TxScriptEngine::from_transaction_input(tx, input, i, entry, &mut reused_values, &self.sig_cache) - .map_err(|err| map_script_err(err, input))?; - engine.execute().map_err(|err| map_script_err(err, input))?; - } + pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + std::marker::Sync)) -> TxResult<()> { + check_scripts(&self.sig_cache, tx) + } +} - Ok(()) +pub fn check_scripts(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { + if tx.inputs().len() > 1 { + check_scripts_par_iter(sig_cache, tx) + } else { + check_scripts_single_threaded(sig_cache, tx) } } +pub fn check_scripts_single_threaded(sig_cache: &Cache, tx: &impl VerifiableTransaction) -> TxResult<()> { + let reused_values = SigHashReusedValuesUnsync::new(); + for (i, (input, entry)) in tx.populated_inputs().enumerate() { + let mut engine = TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache) + .map_err(|err| map_script_err(err, input))?; + engine.execute().map_err(|err| map_script_err(err, input))?; + } + Ok(()) +} + +pub fn check_scripts_par_iter( + sig_cache: &Cache, + tx: &(impl VerifiableTransaction + std::marker::Sync), +) -> TxResult<()> { + use rayon::iter::ParallelIterator; + 
let reused_values = std::sync::Arc::new(SigHashReusedValuesSync::new()); + (0..tx.inputs().len()) + .into_par_iter() + .try_for_each(|idx| { + let reused_values = reused_values.clone(); // Clone the Arc to share ownership + let (input, utxo) = tx.populated_input(idx); + let mut engine = TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache)?; + engine.execute() + }) + .map_err(TxRuleError::SignatureInvalid) +} + +pub fn check_scripts_par_iter_thread( + sig_cache: &Cache, + tx: &(impl VerifiableTransaction + std::marker::Sync), + pool: &ThreadPool, +) -> TxResult<()> { + use rayon::iter::ParallelIterator; + pool.install(|| { + let reused_values = Arc::new(SigHashReusedValuesSync::new()); + (0..tx.inputs().len()) + .into_par_iter() + .try_for_each(|idx| { + let reused_values = reused_values.clone(); // Clone the Arc to share ownership + let (input, utxo) = tx.populated_input(idx); + let mut engine = TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache)?; + engine.execute() + }) + .map_err(TxRuleError::SignatureInvalid) + }) +} + fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRuleError { if input.signature_script.is_empty() { TxRuleError::SignatureEmpty(script_err) @@ -192,6 +245,7 @@ mod tests { use super::super::errors::TxRuleError; use core::str::FromStr; use itertools::Itertools; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::sign::sign; use kaspa_consensus_core::subnets::SubnetworkId; use kaspa_consensus_core::tx::{MutableTransaction, PopulatedTransaction, ScriptVec, TransactionId, UtxoEntry}; @@ -710,6 +764,6 @@ mod tests { let signed_tx = sign(MutableTransaction::with_entries(unsigned_tx, entries), schnorr_key); let populated_tx = signed_tx.as_verifiable(); assert_eq!(tv.check_scripts(&populated_tx), Ok(())); - assert_eq!(TransactionValidator::check_sig_op_counts(&populated_tx), Ok(())); + 
assert_eq!(TransactionValidator::check_sig_op_counts::<_, SigHashReusedValuesUnsync>(&populated_tx), Ok(())); } } diff --git a/consensus/wasm/src/utils.rs b/consensus/wasm/src/utils.rs index 0139b573f..b70664e1e 100644 --- a/consensus/wasm/src/utils.rs +++ b/consensus/wasm/src/utils.rs @@ -1,5 +1,5 @@ use crate::result::Result; -use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL; use kaspa_consensus_core::tx; @@ -9,9 +9,9 @@ pub fn script_hashes(mut mutable_tx: tx::SignableTransaction) -> Result { // We use IndexMap and not HashMap, because it makes it cheaper to remove a random element when the cache is full. - map: Arc>>, + pub map: Arc>>, size: usize, counters: Arc, } diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index b145fb90e..2a00bd080 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -45,6 +45,8 @@ pub const MAX_PUB_KEYS_PER_MUTLTISIG: i32 = 20; // Note that this includes OP_RESERVED which counts as a push operation. 
pub const NO_COST_OPCODE: u8 = 0x60; +type DynOpcodeImplementation = Box>; + #[derive(Clone, Hash, PartialEq, Eq)] enum Signature { Secp256k1(secp256k1::schnorr::Signature), @@ -70,7 +72,7 @@ enum ScriptSource<'a, T: VerifiableTransaction> { StandAloneScripts(Vec<&'a [u8]>), } -pub struct TxScriptEngine<'a, T: VerifiableTransaction> { +pub struct TxScriptEngine<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> { dstack: Stack, astack: Stack, @@ -78,7 +80,7 @@ pub struct TxScriptEngine<'a, T: VerifiableTransaction> { // Outer caches for quicker calculation // TODO:: make it compatible with threading - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, cond_stack: Vec, // Following if stacks, and whether it is running @@ -86,30 +88,35 @@ pub struct TxScriptEngine<'a, T: VerifiableTransaction> { num_ops: i32, } -fn parse_script( +fn parse_script( script: &[u8], -) -> impl Iterator>, TxScriptError>> + '_ { +) -> impl Iterator, TxScriptError>> + '_ { script.iter().batching(|it| deserialize_next_opcode(it)) } -pub fn get_sig_op_count(signature_script: &[u8], prev_script_public_key: &ScriptPublicKey) -> u64 { +pub fn get_sig_op_count( + signature_script: &[u8], + prev_script_public_key: &ScriptPublicKey, +) -> u64 { let is_p2sh = ScriptClass::is_pay_to_script_hash(prev_script_public_key.script()); - let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); + let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); if !is_p2sh { return get_sig_op_count_by_opcodes(&script_pub_key_ops); } - let signature_script_ops = parse_script::(signature_script).collect_vec(); + let signature_script_ops = parse_script::(signature_script).collect_vec(); if signature_script_ops.is_empty() || signature_script_ops.iter().any(|op| op.is_err() || !op.as_ref().unwrap().is_push_opcode()) { return 0; } let p2sh_script = signature_script_ops.last().expect("checked if empty 
above").as_ref().expect("checked if err above").get_data(); - let p2sh_ops = parse_script::(p2sh_script).collect_vec(); + let p2sh_ops = parse_script::(p2sh_script).collect_vec(); get_sig_op_count_by_opcodes(&p2sh_ops) } -fn get_sig_op_count_by_opcodes(opcodes: &[Result>, TxScriptError>]) -> u64 { +fn get_sig_op_count_by_opcodes( + opcodes: &[Result, TxScriptError>], +) -> u64 { // TODO: Check for overflows let mut num_sigs: u64 = 0; for (i, op) in opcodes.iter().enumerate() { @@ -142,12 +149,12 @@ fn get_sig_op_count_by_opcodes(opcodes: &[Result(script: &[u8]) -> bool { - parse_script::(script).enumerate().any(|(index, op)| op.is_err() || (index == 0 && op.unwrap().value() == OpReturn)) +pub fn is_unspendable(script: &[u8]) -> bool { + parse_script::(script).enumerate().any(|(index, op)| op.is_err() || (index == 0 && op.unwrap().value() == OpReturn)) } -impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { - pub fn new(reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { +impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<'a, T, Reused> { + pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { Self { dstack: vec![], astack: vec![], @@ -164,7 +171,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { input: &'a TransactionInput, input_idx: usize, utxo_entry: &'a UtxoEntry, - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, ) -> Result { let script_public_key = utxo_entry.script_public_key.script(); @@ -185,7 +192,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { } } - pub fn from_script(script: &'a [u8], reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { + pub fn from_script(script: &'a [u8], reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { Self { dstack: Default::default(), astack: Default::default(), @@ -202,7 +209,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { return 
self.cond_stack.is_empty() || *self.cond_stack.last().expect("Checked not empty") == OpCond::True; } - fn execute_opcode(&mut self, opcode: Box>) -> Result<(), TxScriptError> { + fn execute_opcode(&mut self, opcode: DynOpcodeImplementation) -> Result<(), TxScriptError> { // Different from kaspad: Illegal and disabled opcode are checked on execute instead // Note that this includes OP_RESERVED which counts as a push operation. if !opcode.is_push_opcode() { @@ -512,6 +519,7 @@ mod tests { use crate::opcodes::codes::{OpBlake2b, OpCheckSig, OpData1, OpData2, OpData32, OpDup, OpEqual, OpPushData1, OpTrue}; use super::*; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -542,7 +550,7 @@ mod tests { fn run_test_script_cases(test_cases: Vec) { let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for test in test_cases { // Ensure encapsulation of variables (no leaking between tests) @@ -565,7 +573,7 @@ mod tests { let populated_tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Script creation failed"); assert_eq!(vm.execute(), test.expected_result); } @@ -783,7 +791,7 @@ mod tests { ]; for test in test_cases { - let check = TxScriptEngine::::check_pub_key_encoding(test.key); + let check = TxScriptEngine::::check_pub_key_encoding(test.key); if test.is_valid { assert_eq!( check, @@ -880,7 +888,10 @@ mod tests { for test in tests { assert_eq!( - get_sig_op_count::(test.signature_script, &test.prev_script_public_key), + get_sig_op_count::( + 
test.signature_script, + &test.prev_script_public_key + ), test.expected_sig_ops, "failed for '{}'", test.name @@ -909,7 +920,7 @@ mod tests { for test in tests { assert_eq!( - is_unspendable::(test.script_public_key), + is_unspendable::(test.script_public_key), test.expected, "failed for '{}'", test.name @@ -929,6 +940,7 @@ mod bitcoind_tests { use super::*; use crate::script_builder::ScriptBuilderError; use kaspa_consensus_core::constants::MAX_TX_IN_SEQUENCE_NUM; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -1019,13 +1031,13 @@ mod bitcoind_tests { // Run transaction let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut vm = TxScriptEngine::from_transaction_input( &populated_tx, &populated_tx.tx().inputs[0], 0, &populated_tx.entries[0], - &mut reused_values, + &reused_values, &sig_cache, ) .map_err(UnifiedError::TxScriptError)?; diff --git a/crypto/txscript/src/opcodes/macros.rs b/crypto/txscript/src/opcodes/macros.rs index b3db98829..c4d161d40 100644 --- a/crypto/txscript/src/opcodes/macros.rs +++ b/crypto/txscript/src/opcodes/macros.rs @@ -6,9 +6,9 @@ macro_rules! opcode_serde { [[self.value()].as_slice(), length.to_le_bytes().as_slice(), self.data.as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { match it.take(size_of::<$type>()).copied().collect::>().try_into() { Ok(bytes) => { let length = <$type>::from_le_bytes(bytes) as usize; @@ -32,9 +32,9 @@ macro_rules! 
opcode_serde { [[self.value()].as_slice(), self.data.clone().as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { // Static length includes the opcode itself let data: Vec = it.take($length - 1).copied().collect(); Self::new(data) @@ -44,7 +44,7 @@ macro_rules! opcode_serde { macro_rules! opcode_init { ($type:ty) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() > <$type>::MAX as usize { return Err(TxScriptError::MalformedPush(<$type>::MAX as usize, data.len())); } @@ -52,7 +52,7 @@ macro_rules! opcode_init { } }; ($length: literal) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() != $length - 1 { return Err(TxScriptError::MalformedPush($length - 1, data.len())); } @@ -69,20 +69,20 @@ macro_rules! opcode_impl { opcode_serde!($length); } - impl OpCodeExecution for $name { - fn empty() -> Result>, TxScriptError> { + impl OpCodeExecution for $name { + fn empty() -> Result>, TxScriptError> { Self::new(vec![]) } opcode_init!($length); #[allow(unused_variables)] - fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { + fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { $code } } - impl OpCodeImplementation for $name {} + impl OpCodeImplementation for $name {} } } @@ -111,7 +111,7 @@ macro_rules! opcode_list { )? 
)* - pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction>(it: &mut I) -> Option>, TxScriptError>> { + pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>(it: &mut I) -> Option>, TxScriptError>> { match it.next() { Some(opcode_num) => match opcode_num { $( diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index 5d6096b7a..7b66da27f 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -10,6 +10,7 @@ use crate::{ }; use blake2b_simd::Params; use core::cmp::{max, min}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; use kaspa_consensus_core::hashing::sighash_type::SigHashType; use kaspa_consensus_core::tx::VerifiableTransaction; use sha2::{Digest, Sha256}; @@ -75,28 +76,31 @@ pub trait OpCodeMetadata: Debug { } } -pub trait OpCodeExecution { - fn empty() -> Result>, TxScriptError> +pub trait OpCodeExecution { + fn empty() -> Result>, TxScriptError> where Self: Sized; #[allow(clippy::new_ret_no_self)] - fn new(data: Vec) -> Result>, TxScriptError> + fn new(data: Vec) -> Result>, TxScriptError> where Self: Sized; - fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; + fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; } pub trait OpcodeSerialization { fn serialize(&self) -> Vec; - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> + ) -> Result>, TxScriptError> where Self: Sized; } -pub trait OpCodeImplementation: OpCodeExecution + OpCodeMetadata + OpcodeSerialization {} +pub trait OpCodeImplementation: + OpCodeExecution + OpCodeMetadata + OpcodeSerialization +{ +} impl OpCodeMetadata for OpCode { fn value(&self) -> u8 { @@ -195,13 +199,19 @@ impl OpCodeMetadata for OpCode { // Helpers for some opcodes with shared data #[inline] -fn push_data(data: Vec, 
vm: &mut TxScriptEngine) -> OpCodeResult { +fn push_data( + data: Vec, + vm: &mut TxScriptEngine, +) -> OpCodeResult { vm.dstack.push(data); Ok(()) } #[inline] -fn push_number(number: i64, vm: &mut TxScriptEngine) -> OpCodeResult { +fn push_number( + number: i64, + vm: &mut TxScriptEngine, +) -> OpCodeResult { vm.dstack.push_item(number); Ok(()) } @@ -960,7 +970,7 @@ opcode_list! { // converts an opcode from the list of Op0 to Op16 to its associated value #[allow(clippy::borrowed_box)] -pub fn to_small_int(opcode: &Box>) -> u8 { +pub fn to_small_int(opcode: &Box>) -> u8 { let value = opcode.value(); if value == codes::OpFalse { return 0; @@ -978,7 +988,7 @@ mod test { use crate::{opcodes, pay_to_address_script, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD}; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::constants::{SOMPI_PER_KASPA, TX_VERSION}; - use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry, @@ -987,21 +997,21 @@ mod test { struct TestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, dstack: Stack, } struct ErrorTestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, error: TxScriptError, } fn run_success_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for TestCase { init, code, dstack } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache); vm.dstack = init; code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); assert_eq!(*vm.dstack, dstack, 
"OpCode {} Pushed wrong value", code.value()); @@ -1010,9 +1020,9 @@ mod test { fn run_error_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for ErrorTestCase { init, code, error } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache); vm.dstack.clone_from(&init); assert_eq!( code.execute(&mut vm) @@ -1027,7 +1037,7 @@ mod test { #[test] fn test_opcode_disabled() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpCat::empty().expect("Should accept empty"), opcodes::OpSubStr::empty().expect("Should accept empty"), opcodes::OpLeft::empty().expect("Should accept empty"), @@ -1046,8 +1056,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1059,7 +1069,7 @@ mod test { #[test] fn test_opcode_reserved() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpReserved::empty().expect("Should accept empty"), opcodes::OpVer::empty().expect("Should accept empty"), opcodes::OpVerIf::empty().expect("Should accept empty"), @@ -1069,8 +1079,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1082,7 +1092,7 @@ mod test { #[test] fn test_opcode_invalid() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpUnknown166::empty().expect("Should accept empty"), 
opcodes::OpUnknown167::empty().expect("Should accept empty"), opcodes::OpUnknown178::empty().expect("Should accept empty"), @@ -1160,8 +1170,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -2741,7 +2751,7 @@ mod test { let (base_tx, input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckLockTimeVerify::empty().expect("Should accept empty"); @@ -2753,7 +2763,7 @@ mod test { ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Shouldn't fail"); vm.dstack = vec![lock_time.clone()]; match code.execute(&mut vm) { @@ -2783,7 +2793,7 @@ mod test { let (tx, base_input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckSequenceVerify::empty().expect("Should accept empty"); @@ -2796,7 +2806,7 @@ mod test { ] { let mut input = base_input.clone(); input.sequence = tx_sequence; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Shouldn't fail"); vm.dstack = vec![sequence.clone()]; match code.execute(&mut vm) { diff --git 
a/crypto/txscript/src/standard/multisig.rs b/crypto/txscript/src/standard/multisig.rs index 79c74c7b3..cbd9dbe6d 100644 --- a/crypto/txscript/src/standard/multisig.rs +++ b/crypto/txscript/src/standard/multisig.rs @@ -74,7 +74,7 @@ mod tests { use core::str::FromStr; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, subnets::SubnetworkId, @@ -154,11 +154,11 @@ mod tests { }]; let mut tx = MutableTransaction::with_entries(tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let sig_hash = if !is_ecdsa { - calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) } else { - calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) }; let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let signatures: Vec<_> = inputs @@ -184,7 +184,7 @@ mod tests { let (input, entry) = tx.populated_inputs().next().unwrap(); let cache = Cache::new(10_000); - let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &mut reused_values, &cache).unwrap(); + let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache).unwrap(); assert_eq!(engine.execute().is_ok(), is_ok); } #[test] diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index e759a9e50..060677a1e 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -2,6 +2,7 @@ use crate::mempool::{ errors::{NonStandardError, 
NonStandardResult}, Mempool, }; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::{ constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI}, mass, @@ -114,7 +115,7 @@ impl Mempool { /// It is exposed by [MiningManager] for use by transaction generators and wallets. pub(crate) fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { // Unspendable outputs are considered dust. - if is_unspendable::(transaction_output.script_public_key.script()) { + if is_unspendable::(transaction_output.script_public_key.script()) { return true; } @@ -175,7 +176,6 @@ impl Mempool { if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { return Err(NonStandardError::RejectContextualMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } - for (i, input) in transaction.tx.inputs.iter().enumerate() { // It is safe to elide existence and index checks here since // they have already been checked prior to calling this @@ -188,7 +188,10 @@ impl Mempool { ScriptClass::PubKey => {} ScriptClass::PubKeyECDSA => {} ScriptClass::ScriptHash => { - get_sig_op_count::(&input.signature_script, &entry.script_public_key); + get_sig_op_count::( + &input.signature_script, + &entry.script_public_key, + ); let num_sig_ops = 1; if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS { return Err(NonStandardError::RejectSignatureCount(transaction_id, i, num_sig_ops, MAX_STANDARD_P2SH_SIG_OPS)); diff --git a/wallet/core/src/account/pskb.rs b/wallet/core/src/account/pskb.rs index 5cf1eeea9..f8ed74470 100644 --- a/wallet/core/src/account/pskb.rs +++ b/wallet/core/src/account/pskb.rs @@ -4,7 +4,7 @@ use crate::tx::PaymentOutputs; use futures::stream; use kaspa_bip32::{DerivationPath, KeyFingerprint, PrivateKey}; use kaspa_consensus_client::UtxoEntry as ClientUTXO; -use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}; +use 
kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; use kaspa_consensus_core::tx::VerifiableTransaction; use kaspa_consensus_core::tx::{TransactionInput, UtxoEntry}; use kaspa_txscript::extract_script_pub_key_address; @@ -155,7 +155,7 @@ pub async fn pskb_signer_for_address( key_fingerprint: KeyFingerprint, ) -> Result { let mut signed_bundle = Bundle::new(); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); // If set, sign-for address is used for signing. // Else, all addresses from inputs are. @@ -181,7 +181,7 @@ pub async fn pskb_signer_for_address( for pskt_inner in bundle.iter().cloned() { let pskt: PSKT = PSKT::from(pskt_inner); - let mut sign = |signer_pskt: PSKT| { + let sign = |signer_pskt: PSKT| { signer_pskt .pass_signature_sync(|tx, sighash| -> Result, String> { tx.tx @@ -189,7 +189,7 @@ pub async fn pskb_signer_for_address( .iter() .enumerate() .map(|(idx, _input)| { - let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); // When address represents a locked UTXO, no private key is available. 
diff --git a/wallet/pskt/examples/multisig.rs b/wallet/pskt/examples/multisig.rs index fb011402f..7a9ca190e 100644 --- a/wallet/pskt/examples/multisig.rs +++ b/wallet/pskt/examples/multisig.rs @@ -1,5 +1,5 @@ use kaspa_consensus_core::{ - hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, tx::{TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_txscript::{multisig_redeem_script, opcodes::codes::OpData65, pay_to_script_hash_script, script_builder::ScriptBuilder}; @@ -51,8 +51,8 @@ fn main() { println!("Serialized after setting sequence: {}", ser_updated); let signer_pskt: PSKT = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); - let mut reused_values = SigHashReusedValues::new(); - let mut sign = |signer_pskt: PSKT, kp: &Keypair| { + let reused_values = SigHashReusedValuesUnsync::new(); + let sign = |signer_pskt: PSKT, kp: &Keypair| { signer_pskt .pass_signature_sync(|tx, sighash| -> Result, String> { let tx = dbg!(tx); @@ -61,7 +61,7 @@ fn main() { .iter() .enumerate() .map(|(idx, _input)| { - let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); Ok(SignInputOk { signature: Signature::Schnorr(kp.sign_schnorr(msg)), diff --git a/wallet/pskt/src/pskt.rs b/wallet/pskt/src/pskt.rs index 245609803..6c1a38abb 100644 --- a/wallet/pskt/src/pskt.rs +++ b/wallet/pskt/src/pskt.rs @@ -1,4 +1,5 @@ use kaspa_bip32::{secp256k1, DerivationPath, KeyFingerprint}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use serde::{Deserialize, Serialize}; use serde_repr::{Deserialize_repr, Serialize_repr}; use std::{collections::BTreeMap, fmt::Display, fmt::Formatter, future::Future, marker::PhantomData, ops::Deref}; @@ 
-10,7 +11,7 @@ pub use crate::output::{Output, OutputBuilder}; pub use crate::role::{Combiner, Constructor, Creator, Extractor, Finalizer, Signer, Updater}; use kaspa_consensus_core::tx::UtxoEntry; use kaspa_consensus_core::{ - hashing::{sighash::SigHashReusedValues, sighash_type::SigHashType}, + hashing::sighash_type::SigHashType, subnets::SUBNETWORK_ID_NATIVE, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, }; @@ -411,10 +412,10 @@ impl PSKT { { let tx = tx.as_verifiable(); let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| { - TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &mut reused_values, &cache)?.execute()?; + TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache)?.execute()?; >::Ok(()) })?; } From 5742d252324d93273359c5610f490817d2450a76 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 29 Aug 2024 22:15:40 +0000 Subject: [PATCH 48/58] reachability constants --- consensus/core/src/config/constants.rs | 2 +- consensus/src/consensus/storage.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index c4635083b..0c021c99d 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -117,7 +117,7 @@ pub mod perf { /// The default slack interval used by the reachability /// algorithm to encounter for blocks out of the selected chain. 
- pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 12; + pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 14; const BASELINE_HEADER_DATA_CACHE_SIZE: usize = 10_000; const BASELINE_BLOCK_DATA_CACHE_SIZE: usize = 200; diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index e170ace04..fc7a6246d 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -88,12 +88,12 @@ impl ConsensusStorage { // Budgets in bytes. All byte budgets overall sum up to ~1GB of memory (which obviously takes more low level alloc space) let daa_excluded_budget = scaled(30_000_000); let statuses_budget = scaled(30_000_000); - let reachability_data_budget = scaled(20_000_000); - let reachability_sets_budget = scaled(20_000_000); // x 2 for tree children and future covering set + let reachability_data_budget = scaled(200_000_000); + let reachability_sets_budget = scaled(200_000_000); // x 2 for tree children and future covering set let ghostdag_compact_budget = scaled(15_000_000); let headers_compact_budget = scaled(5_000_000); - let parents_budget = scaled(40_000_000); // x 3 for reachability and levels - let children_budget = scaled(5_000_000); // x 3 for reachability and levels + let parents_budget = scaled(80_000_000); // x 3 for reachability and levels + let children_budget = scaled(20_000_000); // x 3 for reachability and levels let ghostdag_budget = scaled(80_000_000); // x 2 for levels let headers_budget = scaled(80_000_000); let transactions_budget = scaled(40_000_000); From 5446898bc16755c916282cf4054c60293bb0f82d Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 29 Aug 2024 22:15:56 +0000 Subject: [PATCH 49/58] bump version to 0.14.4 --- Cargo.lock | 114 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 112 ++++++++++++++++++++++++++-------------------------- 2 files changed, 113 insertions(+), 113 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fe92dbaaf..a0a8b86ac 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -2393,7 +2393,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.14.3" +version = "0.14.4" dependencies = [ "borsh", "criterion", @@ -2410,7 +2410,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.14.3" +version = "0.14.4" dependencies = [ "borsh", "igd-next", @@ -2433,14 +2433,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.14.3" +version = "0.14.4" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.14.3" +version = "0.14.4" dependencies = [ "borsh", "bs58", @@ -2467,7 +2467,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "borsh", @@ -2514,7 +2514,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.14.3" +version = "0.14.4" dependencies = [ "duration-string", "futures-util", @@ -2531,7 +2531,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.14.3" +version = "0.14.4" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2575,7 +2575,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.14.3" +version = "0.14.4" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2603,7 +2603,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.14.3" +version = "0.14.4" dependencies = [ "arc-swap", "async-trait", @@ -2642,7 +2642,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -2661,7 +2661,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.14.3" +version = "0.14.4" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2685,7 +2685,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.14.3" +version = "0.14.4" dependencies = [ "duration-string", "futures", @@ -2703,7 +2703,7 @@ dependencies = [ [[package]] name = "kaspa-core" 
-version = "0.14.3" +version = "0.14.4" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2721,7 +2721,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "borsh", @@ -2743,7 +2743,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.14.3" +version = "0.14.4" dependencies = [ "bincode", "enum-primitive-derive", @@ -2765,7 +2765,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2796,7 +2796,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2825,7 +2825,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2860,7 +2860,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.14.3" +version = "0.14.4" dependencies = [ "blake2b_simd", "borsh", @@ -2881,7 +2881,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2900,7 +2900,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2928,7 +2928,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.14.3" +version = "0.14.4" dependencies = [ "borsh", "criterion", @@ -2949,14 +2949,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.14.3" +version = "0.14.4" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "borsh", @@ -2972,7 +2972,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.14.3" +version = "0.14.4" dependencies = [ "criterion", 
"futures-util", @@ -2999,7 +2999,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.14.3" +version = "0.14.4" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -3007,7 +3007,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.14.3" +version = "0.14.4" dependencies = [ "criterion", "kaspa-hashes", @@ -3020,7 +3020,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3056,7 +3056,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "chrono", @@ -3087,7 +3087,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.14.3" +version = "0.14.4" dependencies = [ "borsh", "ctrlc", @@ -3118,7 +3118,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.14.3" +version = "0.14.4" dependencies = [ "kaspa-core", "log", @@ -3130,7 +3130,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.14.3" +version = "0.14.4" dependencies = [ "criterion", "js-sys", @@ -3146,7 +3146,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3188,7 +3188,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.14.3" +version = "0.14.4" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3200,7 +3200,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "kaspa-addresses", @@ -3229,7 +3229,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3289,7 +3289,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.14.3" +version = "0.14.4" dependencies = [ "blake2b_simd", "borsh", @@ -3321,7 
+3321,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.14.3" +version = "0.14.4" dependencies = [ "secp256k1", "thiserror", @@ -3329,7 +3329,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.14.3" +version = "0.14.4" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.14.3" +version = "0.14.4" dependencies = [ "cfg-if 1.0.0", "futures", @@ -3379,7 +3379,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.14.3" +version = "0.14.4" dependencies = [ "futures", "kaspa-consensus", @@ -3400,7 +3400,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-std", "async-trait", @@ -3412,7 +3412,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "js-sys", @@ -3426,7 +3426,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.14.3" +version = "0.14.4" dependencies = [ "aes", "ahash", @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "borsh", @@ -3540,7 +3540,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.14.3" +version = "0.14.4" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3553,7 +3553,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.14.3" +version = "0.14.4" dependencies = [ "bincode", "derive_builder", @@ -3580,7 +3580,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.14.3" +version = "0.14.4" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3608,7 +3608,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.14.3" +version = "0.14.4" dependencies = [ "faster-hex", "hexplay", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = 
"kaspa-wrpc-client" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-std", "async-trait", @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.14.3" +version = "0.14.4" dependencies = [ "ctrlc", "futures", @@ -3669,7 +3669,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "clap 4.5.16", @@ -3688,7 +3688,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-trait", "borsh", @@ -3716,7 +3716,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.14.3" +version = "0.14.4" dependencies = [ "ahash", "async-std", @@ -3745,7 +3745,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -5192,7 +5192,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "clap 4.5.16", @@ -5669,7 +5669,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.14.3" +version = "0.14.4" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 0a6799e43..5a13b045b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ members = [ [workspace.package] rust-version = "1.80.0" -version = "0.14.3" +version = "0.14.4" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -79,61 +79,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.14.3", path = "testing/integration" } -kaspa-addresses = { version = "0.14.3", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.14.3", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.14.3", path = "wallet/bip32" } -kaspa-cli = { version = "0.14.3", path = "cli" } -kaspa-connectionmanager = { 
version = "0.14.3", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.14.3", path = "consensus" } -kaspa-consensus-core = { version = "0.14.3", path = "consensus/core" } -kaspa-consensus-client = { version = "0.14.3", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.14.3", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.14.3", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.14.3", path = "components/consensusmanager" } -kaspa-core = { version = "0.14.3", path = "core" } -kaspa-daemon = { version = "0.14.3", path = "daemon" } -kaspa-database = { version = "0.14.3", path = "database" } -kaspa-grpc-client = { version = "0.14.3", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.14.3", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.14.3", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.14.3", path = "crypto/hashes" } -kaspa-index-core = { version = "0.14.3", path = "indexes/core" } -kaspa-index-processor = { version = "0.14.3", path = "indexes/processor" } -kaspa-math = { version = "0.14.3", path = "math" } -kaspa-merkle = { version = "0.14.3", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.14.3", path = "metrics/core" } -kaspa-mining = { version = "0.14.3", path = "mining" } -kaspa-mining-errors = { version = "0.14.3", path = "mining/errors" } -kaspa-muhash = { version = "0.14.3", path = "crypto/muhash" } -kaspa-notify = { version = "0.14.3", path = "notify" } -kaspa-p2p-flows = { version = "0.14.3", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.14.3", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.14.3", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.14.3", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.14.3", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.14.3", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.14.3", path = "rpc/service" } -kaspa-txscript = { version = 
"0.14.3", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.14.3", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.14.3", path = "utils" } -kaspa-utils-tower = { version = "0.14.3", path = "utils/tower" } -kaspa-utxoindex = { version = "0.14.3", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.14.3", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.14.3", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.14.3", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.14.3", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.14.3", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.14.3", path = "wallet/macros" } -kaspa-wasm = { version = "0.14.3", path = "wasm" } -kaspa-wasm-core = { version = "0.14.3", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.14.3", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.14.3", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.14.3", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.14.3", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.14.3", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.14.3", path = "kaspad" } -kaspa-alloc = { version = "0.14.3", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.14.4", path = "testing/integration" } +kaspa-addresses = { version = "0.14.4", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.14.4", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.14.4", path = "wallet/bip32" } +kaspa-cli = { version = "0.14.4", path = "cli" } +kaspa-connectionmanager = { version = "0.14.4", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.14.4", path = "consensus" } +kaspa-consensus-core = { version = "0.14.4", path = "consensus/core" } +kaspa-consensus-client = { version = "0.14.4", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.14.4", 
path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.14.4", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.14.4", path = "components/consensusmanager" } +kaspa-core = { version = "0.14.4", path = "core" } +kaspa-daemon = { version = "0.14.4", path = "daemon" } +kaspa-database = { version = "0.14.4", path = "database" } +kaspa-grpc-client = { version = "0.14.4", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.14.4", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.14.4", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.14.4", path = "crypto/hashes" } +kaspa-index-core = { version = "0.14.4", path = "indexes/core" } +kaspa-index-processor = { version = "0.14.4", path = "indexes/processor" } +kaspa-math = { version = "0.14.4", path = "math" } +kaspa-merkle = { version = "0.14.4", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.14.4", path = "metrics/core" } +kaspa-mining = { version = "0.14.4", path = "mining" } +kaspa-mining-errors = { version = "0.14.4", path = "mining/errors" } +kaspa-muhash = { version = "0.14.4", path = "crypto/muhash" } +kaspa-notify = { version = "0.14.4", path = "notify" } +kaspa-p2p-flows = { version = "0.14.4", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.14.4", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.14.4", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.14.4", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.14.4", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.14.4", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.14.4", path = "rpc/service" } +kaspa-txscript = { version = "0.14.4", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.14.4", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.14.4", path = "utils" } +kaspa-utils-tower = { version = "0.14.4", path = "utils/tower" } +kaspa-utxoindex = { version = "0.14.4", path = "indexes/utxoindex" } 
+kaspa-wallet = { version = "0.14.4", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.14.4", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.14.4", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.14.4", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.14.4", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.14.4", path = "wallet/macros" } +kaspa-wasm = { version = "0.14.4", path = "wasm" } +kaspa-wasm-core = { version = "0.14.4", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.14.4", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.14.4", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.14.4", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.14.4", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.14.4", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.14.4", path = "kaspad" } +kaspa-alloc = { version = "0.14.4", path = "utils/alloc" } # external aes = "0.8.3" From a633da83cbc857579b62e5461474d82093696302 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Mon, 2 Sep 2024 08:46:14 +0000 Subject: [PATCH 50/58] Squashed commit of the following: commit 09f1443a22c53806e16997e51cad45dad2786878 Merge: 8d042b2d2 864aaf674 Author: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sun Sep 1 18:54:03 2024 -0600 Merge branch 'dev' into gd-optimization commit 8d042b2d2d9568e1eeb097b66305d9b1cf69d055 Author: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sun Sep 1 11:03:04 2024 -0600 Revert rename to old root commit 18158e62794b8c6943fc234f58462268ce8af935 Author: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu Aug 29 20:07:15 2024 -0600 Comment fixes and small refactor commit 66d4ebe674f798ec6a2ecc7c16f5e92f9bab8e8f Author: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu Aug 29 20:00:10 2024 -0600 Use blue_work for 
find_selected_parent_header_at_level --- consensus/src/processes/pruning_proof/mod.rs | 27 +++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 550bde740..8c059963a 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -264,9 +264,9 @@ impl PruningProofManager { let pruning_point = pruning_point_header.hash; // Create a copy of the proof, since we're going to be mutating the proof passed to us - let proof_sets: Vec> = (0..=self.max_block_level) + let proof_sets = (0..=self.max_block_level) .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) - .collect(); + .collect_vec(); let mut trusted_gd_map: BlockHashMap = BlockHashMap::new(); for tb in trusted_set.iter() { @@ -729,9 +729,10 @@ impl PruningProofManager { } // Step 2 - if we can find a common ancestor between the proof and current consensus - // we can determine if the proof is better. The proof is better if the blue work difference between the + // we can determine if the proof is better. The proof is better if the blue work* difference between the // old current consensus's tips and the common ancestor is less than the blue work difference between the - // proof's tip and the common ancestor + // proof's tip and the common ancestor. 
+ // *Note: blue work is the same as blue score on levels higher than 0 if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( &proof_ghostdag_stores, ¤t_consensus_ghostdag_stores, @@ -841,18 +842,21 @@ impl PruningProofManager { .collect_vec() .push_if_empty(ORIGIN); - let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() }; + let mut sp = SortableBlock { + hash: parents[0], + blue_work: if parents[0] == ORIGIN { 0.into() } else { self.headers_store.get_header(parents[0]).unwrap().blue_work }, + }; for parent in parents.iter().copied().skip(1) { let sblock = SortableBlock { hash: parent, blue_work: self .headers_store - .get_blue_score(parent) + .get_header(parent) .unwrap_option() .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(format!( "find_selected_parent_header_at_level (level {level}) couldn't find the header for block {parent}" )))? 
- .into(), + .blue_work, }; if sblock > sp { sp = sblock; @@ -862,7 +866,6 @@ impl PruningProofManager { self.headers_store.get_header(sp.hash).unwrap_option().ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof( format!("find_selected_parent_header_at_level (level {level}) couldn't find the header for block {}", sp.hash,), )) - // Ok(self.headers_store.get_header(sp.hash).unwrap_option().expect("already checked if compact header exists above")) } fn find_sufficient_root( @@ -1045,8 +1048,8 @@ impl PruningProofManager { let root = roots_by_level[level]; // (Old Logic) This is the root we can calculate given that the GD records are already filled // The root calc logic below is the original logic before the on-demand higher level GD calculation - // We only need depth_based_root to sanity check the new logic - let depth_based_root = if level != self.max_block_level as usize { + // We only need old_root to sanity check the new logic + let old_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) @@ -1068,8 +1071,8 @@ impl PruningProofManager { block_at_depth_2m }; - // new root is expected to be always an ancestor of depth_based_root because new root takes a safety margin - assert!(self.reachability_service.is_dag_ancestor_of(root, depth_based_root)); + // new root is expected to be always an ancestor of old_root because new root takes a safety margin + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); From 81fe3d19e4cca3dd4e52736ec217c875b73b25ca Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 6 Sep 2024 05:30:42 +0000 Subject: [PATCH 51/58] Squashed commit of the following: commit 
afbcf9e473acfe238b43785cab52f09387307588 Author: starkbamse <139136798+starkbamse@users.noreply.github.com> Date: Fri Sep 6 05:40:53 2024 +0200 Change directory back to repo root & Fix Rust v1.81 lints (#545) * Change directory back to repodir Change directory back to repodir after building toolchain * Clippy * Update crypto/txscript/src/caches.rs Co-authored-by: Maxim <59533214+biryukovmaxim@users.noreply.github.com> * Update crypto/txscript/src/caches.rs * rename `is_none_or` -> `is_none_or_ex` to avoid conflict with future std * remove `use std::mem::size_of` wherever possible (added to std prelude recently) --------- Co-authored-by: Maxim <59533214+biryukovmaxim@users.noreply.github.com> Co-authored-by: Michael Sutton commit 06a874f4ee174d056d0c5c3d11ba1c3be545b8f4 Author: starkbamse <139136798+starkbamse@users.noreply.github.com> Date: Thu Sep 5 18:51:04 2024 +0200 Deprecate zigbuild and glibc in favor of static musl binaries for Linux builds. (#541) * CTNG Config file, Musl build instructions - Crosstools-ng configuration file for quick reproducable builds on musl. - Instructions for how to build RK on musl. * Test * Update ci.yaml * Test crosstools install * Cache ct-ng build * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Fix error in command * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Update ci.yaml * Comments, naming * Update ci.yaml * Update ci.yaml * Update ci.yaml * Merge new musl build with old CI, Release builds - Merges the old CI script with the new musl build. - Update deploy.yaml to use updated musl toolchain to build musl target for linux. 
* Move to workspace * Delete musl-build.md * Lock to ctng version 1.26 * Checkout fix * Revert master change * Indentation * Revert "Indentation" This reverts commit 6a7e6c094052f1b5848d1e3e9154f4f0164fad05. * Revert "Revert master change" This reverts commit 1a047e46863ca9383eea169fd2665a333c9d5431. * Update ci.yaml * Force mimalloc * Compress into script * Fix typo * Update build.sh * Replace bloaded config file * Update build.sh * Update build.sh * Source script * Revert vendor * Update defconfig * Update defconfig * Update defconfig * Update build.sh * Update build.sh * Update build.sh * Update build.sh * Update defconfig * Delete defconfig * Create defconfig * Update build.sh * Deprecate config, use default preset * Update build.sh * Add preset hash logic in script * Move preset hash update Move preset hash update after openssl build * Use openssl crate * Update exports, cache config * Remove spaces in export command * Update names Should not trigger cache * Move source preset * CD before preset * Add comment Adds comment and should invalidate cache. 
commit b04092e41e3c86c9bf7d7f7733eaabd563131eb7 Author: aspect Date: Thu Sep 5 18:00:24 2024 +0300 add proxy limit field to sysinfo (#544) commit f866dfad16eb54d67d36a127435d81fa00b39680 Author: Michael Sutton Date: Thu Sep 5 14:04:16 2024 +0300 Various miscellaneous changes towards 0.15.1 RC2 (#543) * infrequent logs should be debug * cleanup some todos * when a network starts, genesis has a body, so there's no need for a special exception * remove unneeded method and add an error just in case it is added in the future * count and log chain disqualified blocks * count and log mempool evictions * bump version to 0.14.5 --- .github/workflows/ci.yaml | 30 +++-- .github/workflows/deploy.yaml | 34 ++++-- Cargo.lock | 114 +++++++++--------- Cargo.toml | 112 ++++++++--------- consensus/benches/check_scripts.rs | 2 +- consensus/core/src/api/counters.rs | 4 + consensus/core/src/errors/block.rs | 4 + consensus/core/src/utxo/utxo_diff.rs | 2 +- consensus/src/consensus/storage.rs | 2 +- consensus/src/model/stores/acceptance_data.rs | 1 - .../src/model/stores/block_transactions.rs | 1 - consensus/src/model/stores/ghostdag.rs | 1 - consensus/src/model/stores/headers.rs | 1 - consensus/src/model/stores/mod.rs | 7 +- consensus/src/model/stores/utxo_set.rs | 5 +- .../body_validation_in_context.rs | 17 +-- .../src/pipeline/body_processor/processor.rs | 4 +- .../pipeline/header_processor/processor.rs | 3 - consensus/src/pipeline/monitor.rs | 9 +- .../pipeline/virtual_processor/processor.rs | 16 ++- .../virtual_processor/utxo_validation.rs | 1 - consensus/src/processes/coinbase.rs | 2 +- consensus/src/processes/pruning.rs | 2 +- consensus/src/processes/sync/mod.rs | 2 +- crypto/muhash/fuzz/fuzz_targets/u3072.rs | 1 - crypto/muhash/src/u3072.rs | 4 +- crypto/txscript/src/caches.rs | 7 +- crypto/txscript/src/data_stack.rs | 1 - crypto/txscript/src/opcodes/mod.rs | 2 - database/src/registry.rs | 4 +- indexes/utxoindex/src/stores/indexed_utxos.rs | 4 +- kaspad/src/daemon.rs | 8 +- 
math/src/uint.rs | 2 +- mining/src/lib.rs | 23 ++-- .../validate_and_insert_transaction.rs | 36 +++--- mining/src/monitor.rs | 6 + mining/src/testutils/coinbase_mock.rs | 1 - musl-toolchain/build.sh | 96 +++++++++++++++ musl-toolchain/preset.sh | 4 + protocol/flows/src/flowcontext/orphans.rs | 7 +- protocol/p2p/src/convert/net_address.rs | 5 +- rpc/core/src/model/message.rs | 11 +- rpc/core/src/model/tests.rs | 1 + rpc/grpc/core/proto/rpc.proto | 1 + rpc/grpc/core/src/convert/message.rs | 2 + rpc/service/src/service.rs | 1 + rpc/wrpc/server/Cargo.toml | 8 +- utils/src/mem_size.rs | 2 +- utils/src/option.rs | 5 +- utils/src/sysinfo.rs | 50 +++++++- 50 files changed, 426 insertions(+), 242 deletions(-) create mode 100755 musl-toolchain/build.sh create mode 100755 musl-toolchain/preset.sh diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 27fe1376a..693266885 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -353,9 +353,8 @@ jobs: with: name: kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip path: wasm/release/kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip - build-release: - name: Build Ubuntu Release + name: Build Linux Release runs-on: ubuntu-latest steps: - name: Checkout sources @@ -369,7 +368,7 @@ jobs: - name: Install stable toolchain uses: dtolnay/rust-toolchain@stable - - name: Cache + - name: Cache Cargo Build Outputs uses: actions/cache@v4 with: path: | @@ -379,14 +378,23 @@ jobs: ~/.cargo/git/db/ target/ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Cache Toolchain + uses: actions/cache@v4 + with: + path: | + ~/x-tools + key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }} + restore-keys: | + ${{ runner.os }}-musl- - - name: Install zig - if: runner.os == 'Linux' - uses: goto-bus-stop/setup-zig@v2 # needed for cargo-zigbuild - - - name: Build on Linux + + - name: Build RK with musl toolchain if: runner.os == 'Linux' - # We're using 
musl to make the binaries statically linked and portable run: | - cargo install cargo-zigbuild - cargo --verbose zigbuild --bin kaspad --bin simpa --bin rothschild --release --target x86_64-unknown-linux-gnu.2.27 # Use an older glibc version + # Run build script for musl toolchain + source musl-toolchain/build.sh + # Build for musl + cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml index 567adb557..8f46cb1fe 100644 --- a/.github/workflows/deploy.yaml +++ b/.github/workflows/deploy.yaml @@ -29,7 +29,7 @@ jobs: - name: Install stable toolchain uses: dtolnay/rust-toolchain@stable - - name: Cache + - name: Cache Cargo Build Outputs uses: actions/cache@v3 with: path: | @@ -40,23 +40,33 @@ jobs: target/ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - name: Install zig on linux - if: runner.os == 'Linux' - uses: goto-bus-stop/setup-zig@v2 # needed for cargo-zigbuild + - name: Cache Toolchain + uses: actions/cache@v4 + with: + path: | + ~/x-tools + key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }} + restore-keys: | + ${{ runner.os }}-musl- - name: Build on Linux if: runner.os == 'Linux' # We're using musl to make the binaries statically linked and portable run: | - cargo install cargo-zigbuild - cargo --verbose zigbuild --bin kaspad --bin simpa --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-gnu.2.27 # Use an older glibc version + # Run build script for musl toolchain + source musl-toolchain/build.sh + + # Go back to the workspace + cd $GITHUB_WORKSPACE + + # Build for musl + cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl mkdir bin || true - cp target/x86_64-unknown-linux-gnu/release/kaspad bin/ - cp target/x86_64-unknown-linux-gnu/release/simpa bin/ - cp 
target/x86_64-unknown-linux-gnu/release/rothschild bin/ - cp target/x86_64-unknown-linux-gnu/release/kaspa-wallet bin/ - archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-gnu-amd64.zip" - asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-gnu-amd64.zip" + cp target/x86_64-unknown-linux-musl/release/kaspad bin/ + cp target/x86_64-unknown-linux-musl/release/rothschild bin/ + cp target/x86_64-unknown-linux-musl/release/kaspa-wallet bin/ + archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-musl-amd64.zip" + asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-musl-amd64.zip" zip -r "${archive}" ./bin/* echo "archive=${archive}" >> $GITHUB_ENV echo "asset_name=${asset_name}" >> $GITHUB_ENV diff --git a/Cargo.lock b/Cargo.lock index a0a8b86ac..1665219c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2393,7 +2393,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.14.4" +version = "0.14.5" dependencies = [ "borsh", "criterion", @@ -2410,7 +2410,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.14.4" +version = "0.14.5" dependencies = [ "borsh", "igd-next", @@ -2433,14 +2433,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.14.4" +version = "0.14.5" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.14.4" +version = "0.14.5" dependencies = [ "borsh", "bs58", @@ -2467,7 +2467,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "borsh", @@ -2514,7 +2514,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.14.4" +version = "0.14.5" dependencies = [ "duration-string", "futures-util", @@ -2531,7 +2531,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.14.4" +version = "0.14.5" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2575,7 +2575,7 @@ dependencies = [ [[package]] name = 
"kaspa-consensus-client" -version = "0.14.4" +version = "0.14.5" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2603,7 +2603,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "arc-swap", "async-trait", @@ -2642,7 +2642,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -2661,7 +2661,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.14.4" +version = "0.14.5" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2685,7 +2685,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.14.4" +version = "0.14.5" dependencies = [ "duration-string", "futures", @@ -2703,7 +2703,7 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2721,7 +2721,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "borsh", @@ -2743,7 +2743,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.14.4" +version = "0.14.5" dependencies = [ "bincode", "enum-primitive-derive", @@ -2765,7 +2765,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2796,7 +2796,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2825,7 +2825,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2860,7 +2860,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.14.4" +version = "0.14.5" dependencies = [ "blake2b_simd", "borsh", @@ -2881,7 +2881,7 @@ dependencies = [ [[package]] name = 
"kaspa-index-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2900,7 +2900,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2928,7 +2928,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.14.4" +version = "0.14.5" dependencies = [ "borsh", "criterion", @@ -2949,14 +2949,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.14.4" +version = "0.14.5" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "borsh", @@ -2972,7 +2972,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.14.4" +version = "0.14.5" dependencies = [ "criterion", "futures-util", @@ -2999,7 +2999,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.14.4" +version = "0.14.5" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -3007,7 +3007,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.14.4" +version = "0.14.5" dependencies = [ "criterion", "kaspa-hashes", @@ -3020,7 +3020,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3056,7 +3056,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "chrono", @@ -3087,7 +3087,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.14.4" +version = "0.14.5" dependencies = [ "borsh", "ctrlc", @@ -3118,7 +3118,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.14.4" +version = "0.14.5" dependencies = [ "kaspa-core", "log", @@ -3130,7 +3130,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.14.4" +version = "0.14.5" dependencies = [ "criterion", "js-sys", @@ -3146,7 
+3146,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3188,7 +3188,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.14.4" +version = "0.14.5" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3200,7 +3200,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "kaspa-addresses", @@ -3229,7 +3229,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3289,7 +3289,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.14.4" +version = "0.14.5" dependencies = [ "blake2b_simd", "borsh", @@ -3321,7 +3321,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.14.4" +version = "0.14.5" dependencies = [ "secp256k1", "thiserror", @@ -3329,7 +3329,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.14.4" +version = "0.14.5" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.14.4" +version = "0.14.5" dependencies = [ "cfg-if 1.0.0", "futures", @@ -3379,7 +3379,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.14.4" +version = "0.14.5" dependencies = [ "futures", "kaspa-consensus", @@ -3400,7 +3400,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-std", "async-trait", @@ -3412,7 +3412,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "js-sys", @@ -3426,7 +3426,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "aes", "ahash", @@ -3507,7 +3507,7 @@ 
dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "borsh", @@ -3540,7 +3540,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.14.4" +version = "0.14.5" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3553,7 +3553,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.14.4" +version = "0.14.5" dependencies = [ "bincode", "derive_builder", @@ -3580,7 +3580,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.14.4" +version = "0.14.5" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3608,7 +3608,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.14.4" +version = "0.14.5" dependencies = [ "faster-hex", "hexplay", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-std", "async-trait", @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.14.4" +version = "0.14.5" dependencies = [ "ctrlc", "futures", @@ -3669,7 +3669,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "clap 4.5.16", @@ -3688,7 +3688,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-trait", "borsh", @@ -3716,7 +3716,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.14.4" +version = "0.14.5" dependencies = [ "ahash", "async-std", @@ -3745,7 +3745,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -5192,7 +5192,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "clap 4.5.16", @@ -5669,7 +5669,7 @@ dependencies = [ [[package]] name = "simpa" 
-version = "0.14.4" +version = "0.14.5" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 5a13b045b..0fdc4a499 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ members = [ [workspace.package] rust-version = "1.80.0" -version = "0.14.4" +version = "0.14.5" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -79,61 +79,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.14.4", path = "testing/integration" } -kaspa-addresses = { version = "0.14.4", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.14.4", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.14.4", path = "wallet/bip32" } -kaspa-cli = { version = "0.14.4", path = "cli" } -kaspa-connectionmanager = { version = "0.14.4", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.14.4", path = "consensus" } -kaspa-consensus-core = { version = "0.14.4", path = "consensus/core" } -kaspa-consensus-client = { version = "0.14.4", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.14.4", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.14.4", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.14.4", path = "components/consensusmanager" } -kaspa-core = { version = "0.14.4", path = "core" } -kaspa-daemon = { version = "0.14.4", path = "daemon" } -kaspa-database = { version = "0.14.4", path = "database" } -kaspa-grpc-client = { version = "0.14.4", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.14.4", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.14.4", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.14.4", path = "crypto/hashes" } -kaspa-index-core = { version = "0.14.4", path = "indexes/core" } -kaspa-index-processor = { version = "0.14.4", path = "indexes/processor" } -kaspa-math = { version = "0.14.4", path = "math" } 
-kaspa-merkle = { version = "0.14.4", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.14.4", path = "metrics/core" } -kaspa-mining = { version = "0.14.4", path = "mining" } -kaspa-mining-errors = { version = "0.14.4", path = "mining/errors" } -kaspa-muhash = { version = "0.14.4", path = "crypto/muhash" } -kaspa-notify = { version = "0.14.4", path = "notify" } -kaspa-p2p-flows = { version = "0.14.4", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.14.4", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.14.4", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.14.4", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.14.4", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.14.4", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.14.4", path = "rpc/service" } -kaspa-txscript = { version = "0.14.4", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.14.4", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.14.4", path = "utils" } -kaspa-utils-tower = { version = "0.14.4", path = "utils/tower" } -kaspa-utxoindex = { version = "0.14.4", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.14.4", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.14.4", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.14.4", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.14.4", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.14.4", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.14.4", path = "wallet/macros" } -kaspa-wasm = { version = "0.14.4", path = "wasm" } -kaspa-wasm-core = { version = "0.14.4", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.14.4", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.14.4", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.14.4", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.14.4", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { 
version = "0.14.4", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.14.4", path = "kaspad" } -kaspa-alloc = { version = "0.14.4", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.14.5", path = "testing/integration" } +kaspa-addresses = { version = "0.14.5", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.14.5", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.14.5", path = "wallet/bip32" } +kaspa-cli = { version = "0.14.5", path = "cli" } +kaspa-connectionmanager = { version = "0.14.5", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.14.5", path = "consensus" } +kaspa-consensus-core = { version = "0.14.5", path = "consensus/core" } +kaspa-consensus-client = { version = "0.14.5", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.14.5", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.14.5", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.14.5", path = "components/consensusmanager" } +kaspa-core = { version = "0.14.5", path = "core" } +kaspa-daemon = { version = "0.14.5", path = "daemon" } +kaspa-database = { version = "0.14.5", path = "database" } +kaspa-grpc-client = { version = "0.14.5", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.14.5", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.14.5", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.14.5", path = "crypto/hashes" } +kaspa-index-core = { version = "0.14.5", path = "indexes/core" } +kaspa-index-processor = { version = "0.14.5", path = "indexes/processor" } +kaspa-math = { version = "0.14.5", path = "math" } +kaspa-merkle = { version = "0.14.5", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.14.5", path = "metrics/core" } +kaspa-mining = { version = "0.14.5", path = "mining" } +kaspa-mining-errors = { version = "0.14.5", path = "mining/errors" } +kaspa-muhash = { version = "0.14.5", path = "crypto/muhash" 
} +kaspa-notify = { version = "0.14.5", path = "notify" } +kaspa-p2p-flows = { version = "0.14.5", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.14.5", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.14.5", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.14.5", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.14.5", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.14.5", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.14.5", path = "rpc/service" } +kaspa-txscript = { version = "0.14.5", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.14.5", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.14.5", path = "utils" } +kaspa-utils-tower = { version = "0.14.5", path = "utils/tower" } +kaspa-utxoindex = { version = "0.14.5", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.14.5", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.14.5", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.14.5", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.14.5", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.14.5", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.14.5", path = "wallet/macros" } +kaspa-wasm = { version = "0.14.5", path = "wasm" } +kaspa-wasm-core = { version = "0.14.5", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.14.5", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.14.5", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.14.5", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.14.5", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.14.5", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.14.5", path = "kaspad" } +kaspa-alloc = { version = "0.14.5", path = "utils/alloc" } # external aes = "0.8.3" diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs index b6a8402d4..eb0d8e84f 100644 --- 
a/consensus/benches/check_scripts.rs +++ b/consensus/benches/check_scripts.rs @@ -99,7 +99,7 @@ fn benchmark_check_scripts(c: &mut Criterion) { for i in (2..=available_parallelism().unwrap().get()).step_by(2) { if inputs_count >= i { - group.bench_function(&format!("rayon, custom threadpool, thread count {i}"), |b| { + group.bench_function(format!("rayon, custom threadpool, thread count {i}"), |b| { let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); let cache = Cache::new(inputs_count as u64); let pool = rayon::ThreadPoolBuilder::new().num_threads(i).build().unwrap(); diff --git a/consensus/core/src/api/counters.rs b/consensus/core/src/api/counters.rs index 5faee5bc3..0297dab26 100644 --- a/consensus/core/src/api/counters.rs +++ b/consensus/core/src/api/counters.rs @@ -9,6 +9,7 @@ pub struct ProcessingCounters { pub body_counts: AtomicU64, pub txs_counts: AtomicU64, pub chain_block_counts: AtomicU64, + pub chain_disqualified_counts: AtomicU64, pub mass_counts: AtomicU64, } @@ -22,6 +23,7 @@ impl ProcessingCounters { body_counts: self.body_counts.load(Ordering::Relaxed), txs_counts: self.txs_counts.load(Ordering::Relaxed), chain_block_counts: self.chain_block_counts.load(Ordering::Relaxed), + chain_disqualified_counts: self.chain_disqualified_counts.load(Ordering::Relaxed), mass_counts: self.mass_counts.load(Ordering::Relaxed), } } @@ -36,6 +38,7 @@ pub struct ProcessingCountersSnapshot { pub body_counts: u64, pub txs_counts: u64, pub chain_block_counts: u64, + pub chain_disqualified_counts: u64, pub mass_counts: u64, } @@ -51,6 +54,7 @@ impl core::ops::Sub for &ProcessingCountersSnapshot { body_counts: self.body_counts.saturating_sub(rhs.body_counts), txs_counts: self.txs_counts.saturating_sub(rhs.txs_counts), chain_block_counts: self.chain_block_counts.saturating_sub(rhs.chain_block_counts), + chain_disqualified_counts: self.chain_disqualified_counts.saturating_sub(rhs.chain_disqualified_counts), mass_counts: 
self.mass_counts.saturating_sub(rhs.mass_counts), } } diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index 9aab18905..f5c235476 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -147,6 +147,10 @@ pub enum RuleError { #[error("DAA window data has only {0} entries")] InsufficientDaaWindowSize(usize), + + /// Currently this error is never created because it is impossible to submit such a block + #[error("cannot add block body to a pruned block")] + PrunedBlock, } pub type BlockProcessResult = std::result::Result; diff --git a/consensus/core/src/utxo/utxo_diff.rs b/consensus/core/src/utxo/utxo_diff.rs index 3cd12f3f5..fb4ffbcba 100644 --- a/consensus/core/src/utxo/utxo_diff.rs +++ b/consensus/core/src/utxo/utxo_diff.rs @@ -5,7 +5,7 @@ use super::{ use crate::tx::{TransactionOutpoint, UtxoEntry, VerifiableTransaction}; use kaspa_utils::mem_size::MemSizeEstimator; use serde::{Deserialize, Serialize}; -use std::{collections::hash_map::Entry::Vacant, mem::size_of}; +use std::collections::hash_map::Entry::Vacant; pub trait ImmutableUtxoDiff { fn added(&self) -> &UtxoCollection; diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index fc7a6246d..ac2ab40b2 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -31,7 +31,7 @@ use kaspa_consensus_core::{blockstatus::BlockStatus, BlockHashSet}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; use parking_lot::RwLock; -use std::{mem::size_of, ops::DerefMut, sync::Arc}; +use std::{ops::DerefMut, sync::Arc}; pub struct ConsensusStorage { // DB diff --git a/consensus/src/model/stores/acceptance_data.rs b/consensus/src/model/stores/acceptance_data.rs index a66fcdcfe..83f6c8f13 100644 --- a/consensus/src/model/stores/acceptance_data.rs +++ b/consensus/src/model/stores/acceptance_data.rs @@ -12,7 +12,6 @@ use kaspa_utils::mem_size::MemSizeEstimator; use 
rocksdb::WriteBatch; use serde::Deserialize; use serde::Serialize; -use std::mem::size_of; use std::sync::Arc; pub trait AcceptanceDataStoreReader { diff --git a/consensus/src/model/stores/block_transactions.rs b/consensus/src/model/stores/block_transactions.rs index 050606d3c..504268288 100644 --- a/consensus/src/model/stores/block_transactions.rs +++ b/consensus/src/model/stores/block_transactions.rs @@ -9,7 +9,6 @@ use kaspa_hashes::Hash; use kaspa_utils::mem_size::MemSizeEstimator; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; -use std::mem::size_of; use std::sync::Arc; pub trait BlockTransactionsStoreReader { diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index 3ffe23e7e..fd2600a1c 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -14,7 +14,6 @@ use kaspa_utils::mem_size::MemSizeEstimator; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; use std::iter::once; -use std::mem::size_of; use std::{cell::RefCell, sync::Arc}; /// Re-export for convenience diff --git a/consensus/src/model/stores/headers.rs b/consensus/src/model/stores/headers.rs index 64e10a90b..85668f699 100644 --- a/consensus/src/model/stores/headers.rs +++ b/consensus/src/model/stores/headers.rs @@ -1,4 +1,3 @@ -use std::mem::size_of; use std::sync::Arc; use kaspa_consensus_core::{header::Header, BlockHasher, BlockLevel}; diff --git a/consensus/src/model/stores/mod.rs b/consensus/src/model/stores/mod.rs index 839755886..9fda33296 100644 --- a/consensus/src/model/stores/mod.rs +++ b/consensus/src/model/stores/mod.rs @@ -3,10 +3,6 @@ pub mod block_transactions; pub mod block_window_cache; pub mod children; pub mod daa; -pub mod selected_chain; -use std::{fmt::Display, mem::size_of}; - -pub use kaspa_database; pub mod depth; pub mod ghostdag; pub mod headers; @@ -16,6 +12,7 @@ pub mod pruning; pub mod pruning_utxoset; pub mod reachability; pub mod relations; +pub mod 
selected_chain; pub mod statuses; pub mod tips; pub mod utxo_diffs; @@ -23,7 +20,9 @@ pub mod utxo_multisets; pub mod utxo_set; pub mod virtual_state; +pub use kaspa_database; pub use kaspa_database::prelude::DB; +use std::fmt::Display; #[derive(PartialEq, Eq, Clone, Copy, Hash)] pub(crate) struct U64Key([u8; size_of::()]); diff --git a/consensus/src/model/stores/utxo_set.rs b/consensus/src/model/stores/utxo_set.rs index fbe64deaf..03add0948 100644 --- a/consensus/src/model/stores/utxo_set.rs +++ b/consensus/src/model/stores/utxo_set.rs @@ -28,7 +28,7 @@ pub trait UtxoSetStore: UtxoSetStoreReader { fn write_many(&mut self, utxos: &[(TransactionOutpoint, UtxoEntry)]) -> Result<(), StoreError>; } -pub const UTXO_KEY_SIZE: usize = kaspa_hashes::HASH_SIZE + std::mem::size_of::(); +pub const UTXO_KEY_SIZE: usize = kaspa_hashes::HASH_SIZE + size_of::(); #[derive(Eq, Hash, PartialEq, Debug, Copy, Clone)] struct UtxoKey([u8; UTXO_KEY_SIZE]); @@ -81,8 +81,7 @@ impl From for TransactionOutpoint { fn from(k: UtxoKey) -> Self { let transaction_id = Hash::from_slice(&k.0[..kaspa_hashes::HASH_SIZE]); let index = TransactionIndexType::from_le_bytes( - <[u8; std::mem::size_of::()]>::try_from(&k.0[kaspa_hashes::HASH_SIZE..]) - .expect("expecting index size"), + <[u8; size_of::()]>::try_from(&k.0[kaspa_hashes::HASH_SIZE..]).expect("expecting index size"), ); Self::new(transaction_id, index) } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 042410fa8..b03643df8 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -14,14 +14,7 @@ impl BlockBodyProcessor { pub fn validate_body_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { self.check_parent_bodies_exist(block)?; self.check_coinbase_blue_score_and_subsidy(block)?; - 
self.check_block_transactions_in_context(block)?; - self.check_block_is_not_pruned(block) - } - - fn check_block_is_not_pruned(self: &Arc, _block: &Block) -> BlockProcessResult<()> { - // TODO: In kaspad code it checks that the block is not in the past of the current tips. - // We should decide what's the best indication that a block was pruned. - Ok(()) + self.check_block_transactions_in_context(block) } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { @@ -36,12 +29,6 @@ impl BlockBodyProcessor { } fn check_parent_bodies_exist(self: &Arc, block: &Block) -> BlockProcessResult<()> { - // TODO: Skip this check for blocks in PP anticone that comes as part of the pruning proof. - - if block.header.direct_parents().len() == 1 && block.header.direct_parents()[0] == self.genesis.hash { - return Ok(()); - } - let statuses_read_guard = self.statuses_store.read(); let missing: Vec = block .header @@ -50,7 +37,7 @@ impl BlockBodyProcessor { .copied() .filter(|parent| { let status_option = statuses_read_guard.get(*parent).unwrap_option(); - status_option.is_none_or(|s| !s.has_block_body()) + status_option.is_none_or_ex(|s| !s.has_block_body()) }) .collect(); if !missing.is_empty() { diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index ae9c07b8a..4191a01ce 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -201,8 +201,7 @@ impl BlockBodyProcessor { // transactions that fits the merkle root. // PrunedBlock - PrunedBlock is an error that rejects a block body and // not the block as a whole, so we shouldn't mark it as invalid. - // TODO: implement the last part. 
- if !matches!(e, RuleError::BadMerkleRoot(_, _) | RuleError::MissingParents(_)) { + if !matches!(e, RuleError::BadMerkleRoot(_, _) | RuleError::MissingParents(_) | RuleError::PrunedBlock) { self.statuses_store.write().set(block.hash(), BlockStatus::StatusInvalid).unwrap(); } return Err(e); @@ -226,7 +225,6 @@ impl BlockBodyProcessor { fn validate_body(self: &Arc, block: &Block, is_trusted: bool) -> BlockProcessResult { let mass = self.validate_body_in_isolation(block)?; if !is_trusted { - // TODO: Check that it's safe to skip this check if the block is trusted. self.validate_body_in_context(block)?; } Ok(mass) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index b64fe4ea2..5d9bf3fed 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -308,8 +308,6 @@ impl HeaderProcessor { // Runs partial header validation for trusted blocks (currently validates only header-in-isolation and computes GHOSTDAG). fn validate_trusted_header(&self, header: &Arc
) -> BlockProcessResult { - // TODO: For now we skip most validations for trusted blocks, but in the future we should - // employ some validations to avoid spam etc. let block_level = self.validate_header_in_isolation(header)?; let mut ctx = self.build_processing_context(header, block_level); self.ghostdag(&mut ctx); @@ -403,7 +401,6 @@ impl HeaderProcessor { && reachability::is_chain_ancestor_of(&staging, pp, ctx.hash).unwrap() { // Hint reachability about the new tip. - // TODO: identify a disqualified hst and make sure to use sink instead reachability::hint_virtual_selected_parent(&mut staging, ctx.hash).unwrap(); hst_write.set_batch(&mut batch, SortableBlock::new(ctx.hash, header.blue_work)).unwrap(); } diff --git a/consensus/src/pipeline/monitor.rs b/consensus/src/pipeline/monitor.rs index 600059f0a..ca370a2f8 100644 --- a/consensus/src/pipeline/monitor.rs +++ b/consensus/src/pipeline/monitor.rs @@ -5,7 +5,7 @@ use kaspa_core::{ service::{AsyncService, AsyncServiceFuture}, tick::{TickReason, TickService}, }, - trace, + trace, warn, }; use std::{ sync::Arc, @@ -62,6 +62,13 @@ impl ConsensusMonitor { if delta.body_counts != 0 { delta.mass_counts as f64 / delta.body_counts as f64 } else{ 0f64 }, ); + if delta.chain_disqualified_counts > 0 { + warn!( + "Consensus detected UTXO-invalid blocks which are disqualified from the virtual selected chain (possibly due to inheritance): {} disqualified vs. 
{} valid chain blocks", + delta.chain_disqualified_counts, delta.chain_block_counts + ); + } + last_snapshot = snapshot; last_log_time = now; } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index d36c2edac..5a3cf1682 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -383,10 +383,12 @@ impl VirtualStateProcessor { // Walk back up to the new virtual selected parent candidate let mut chain_block_counter = 0; + let mut chain_disqualified_counter = 0; for (selected_parent, current) in self.reachability_service.forward_chain_iterator(split_point, to, true).tuple_windows() { if selected_parent != diff_point { // This indicates that the selected parent is disqualified, propagate up and continue self.statuses_store.write().set(current, StatusDisqualifiedFromChain).unwrap(); + chain_disqualified_counter += 1; continue; } @@ -416,6 +418,7 @@ impl VirtualStateProcessor { if let Err(rule_error) = res { info!("Block {} is disqualified from virtual chain: {}", current, rule_error); self.statuses_store.write().set(current, StatusDisqualifiedFromChain).unwrap(); + chain_disqualified_counter += 1; } else { debug!("VIRTUAL PROCESSOR, UTXO validated for {current}"); @@ -434,6 +437,9 @@ impl VirtualStateProcessor { } // Report counters self.counters.chain_block_counts.fetch_add(chain_block_counter, Ordering::Relaxed); + if chain_disqualified_counter > 0 { + self.counters.chain_disqualified_counts.fetch_add(chain_disqualified_counter, Ordering::Relaxed); + } diff_point } @@ -559,7 +565,7 @@ impl VirtualStateProcessor { finality_point: Hash, pruning_point: Hash, ) -> (Hash, VecDeque) { - // TODO: tests + // TODO (relaxed): additional tests let mut heap = tips .into_iter() @@ -621,7 +627,7 @@ impl VirtualStateProcessor { mut candidates: VecDeque, pruning_point: Hash, ) -> (Vec, GhostdagData) { - // TODO: tests + // TODO (relaxed): 
additional tests // Mergeset increasing might traverse DAG areas which are below the finality point and which theoretically // can borderline with pruned data, hence we acquire the prune lock to ensure data consistency. Note that @@ -670,7 +676,7 @@ impl VirtualStateProcessor { MergesetIncreaseResult::Rejected { new_candidate } => { // If we already have a candidate in the past of new candidate then skip. if self.reachability_service.is_any_dag_ancestor(&mut candidates.iter().copied(), new_candidate) { - continue; // TODO: not sure this test is needed if candidates invariant as antichain is kept + continue; // TODO (optimization): not sure this check is needed if candidates invariant as antichain is kept } // Remove all candidates which are in the future of the new candidate candidates.retain(|&h| !self.reachability_service.is_dag_ancestor_of(new_candidate, h)); @@ -873,7 +879,7 @@ impl VirtualStateProcessor { build_mode: TemplateBuildMode, ) -> Result { // - // TODO: tests + // TODO (relaxed): additional tests // // We call for the initial tx batch before acquiring the virtual read lock, @@ -1061,7 +1067,7 @@ impl VirtualStateProcessor { ); } - // TODO: rename to reflect finalizing pruning point utxoset state and importing *to* virtual utxoset + /// Finalizes the pruning point utxoset state and imports the pruning point utxoset *to* virtual utxoset pub fn import_pruning_point_utxo_set( &self, new_pruning_point: Hash, diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 32276e6ff..6d734fc29 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -267,7 +267,6 @@ impl VirtualStateProcessor { for i in 0..mutable_tx.tx.inputs.len() { if mutable_tx.entries[i].is_some() { // We prefer a previously populated entry if such exists - // TODO: consider re-checking the utxo view to get the most up-to-date 
entry (since DAA score can change) continue; } if let Some(entry) = utxo_view.get(&mutable_tx.tx.inputs[i].previous_outpoint) { diff --git a/consensus/src/processes/coinbase.rs b/consensus/src/processes/coinbase.rs index 4e3c36b79..f79bbed75 100644 --- a/consensus/src/processes/coinbase.rs +++ b/consensus/src/processes/coinbase.rs @@ -5,7 +5,7 @@ use kaspa_consensus_core::{ tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionOutput}, BlockHashMap, BlockHashSet, }; -use std::{convert::TryInto, mem::size_of}; +use std::convert::TryInto; use crate::{constants, model::stores::ghostdag::GhostdagData}; diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs index 0fa1f7624..7c534af8e 100644 --- a/consensus/src/processes/pruning.rs +++ b/consensus/src/processes/pruning.rs @@ -213,7 +213,7 @@ impl< let mut expected_pps_queue = VecDeque::new(); for current in self.reachability_service.backward_chain_iterator(hst, pruning_info.pruning_point, false) { let current_header = self.headers_store.get_header(current).unwrap(); - if expected_pps_queue.back().is_none_or(|&&h| h != current_header.pruning_point) { + if expected_pps_queue.back().is_none_or_ex(|&&h| h != current_header.pruning_point) { expected_pps_queue.push_back(current_header.pruning_point); } } diff --git a/consensus/src/processes/sync/mod.rs b/consensus/src/processes/sync/mod.rs index 7b8480111..847222968 100644 --- a/consensus/src/processes/sync/mod.rs +++ b/consensus/src/processes/sync/mod.rs @@ -191,7 +191,7 @@ impl< } } - if highest_with_body.is_none_or(|&h| h == high) { + if highest_with_body.is_none_or_ex(|&h| h == high) { return Ok(vec![]); }; diff --git a/crypto/muhash/fuzz/fuzz_targets/u3072.rs b/crypto/muhash/fuzz/fuzz_targets/u3072.rs index 584006628..115c6f4a6 100644 --- a/crypto/muhash/fuzz/fuzz_targets/u3072.rs +++ b/crypto/muhash/fuzz/fuzz_targets/u3072.rs @@ -4,7 +4,6 @@ use kaspa_muhash::u3072::{self, U3072}; use num_bigint::BigInt; use num_integer::Integer; use 
num_traits::{One, Signed}; -use std::mem::size_of; fuzz_target!(|data: &[u8]| { if data.len() < muhash::SERIALIZED_MUHASH_SIZE { diff --git a/crypto/muhash/src/u3072.rs b/crypto/muhash/src/u3072.rs index 8d37f8381..82021eb88 100644 --- a/crypto/muhash/src/u3072.rs +++ b/crypto/muhash/src/u3072.rs @@ -15,8 +15,8 @@ pub(crate) type DoubleLimb = u128; //#[cfg(target_pointer_width = "32")] //pub(crate) type DoubleLimb = u64; -const LIMB_SIZE_BYTES: usize = std::mem::size_of::(); -const LIMB_SIZE: usize = std::mem::size_of::() * 8; +const LIMB_SIZE_BYTES: usize = size_of::(); +const LIMB_SIZE: usize = Limb::BITS as usize; pub const LIMBS: usize = crate::ELEMENT_BYTE_SIZE / LIMB_SIZE_BYTES; pub const PRIME_DIFF: Limb = 1103717; diff --git a/crypto/txscript/src/caches.rs b/crypto/txscript/src/caches.rs index 4fbc37a1c..8906f4670 100644 --- a/crypto/txscript/src/caches.rs +++ b/crypto/txscript/src/caches.rs @@ -32,9 +32,8 @@ impl Option { - self.map.read().get(key).cloned().map(|data| { + self.map.read().get(key).cloned().inspect(|_data| { self.counters.get_counts.fetch_add(1, Ordering::Relaxed); - data }) } @@ -87,8 +86,8 @@ impl core::ops::Sub for &TxScriptCacheCountersSnapshot { fn sub(self, rhs: Self) -> Self::Output { Self::Output { - insert_counts: self.insert_counts.checked_sub(rhs.insert_counts).unwrap_or_default(), - get_counts: self.get_counts.checked_sub(rhs.get_counts).unwrap_or_default(), + insert_counts: self.insert_counts.saturating_sub(rhs.insert_counts), + get_counts: self.get_counts.saturating_sub(rhs.get_counts), } } } diff --git a/crypto/txscript/src/data_stack.rs b/crypto/txscript/src/data_stack.rs index 74988042a..5d8ea18ed 100644 --- a/crypto/txscript/src/data_stack.rs +++ b/crypto/txscript/src/data_stack.rs @@ -1,7 +1,6 @@ use crate::TxScriptError; use core::fmt::Debug; use core::iter; -use core::mem::size_of; const DEFAULT_SCRIPT_NUM_LEN: usize = 4; diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index 
7b66da27f..ec5c5a2e6 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -1,5 +1,3 @@ -use core::mem::size_of; - #[macro_use] mod macros; diff --git a/database/src/registry.rs b/database/src/registry.rs index 87e89a491..36a728ebe 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -99,8 +99,8 @@ mod tests { let prefix = DatabaseStorePrefixes::AcceptanceData; assert_eq!(&[prefix as u8], prefix.as_ref()); assert_eq!( - std::mem::size_of::(), - std::mem::size_of::(), + size_of::(), + size_of::(), "DatabaseStorePrefixes is expected to have the same memory layout of u8" ); } diff --git a/indexes/utxoindex/src/stores/indexed_utxos.rs b/indexes/utxoindex/src/stores/indexed_utxos.rs index a96f5e46b..c9bce2c71 100644 --- a/indexes/utxoindex/src/stores/indexed_utxos.rs +++ b/indexes/utxoindex/src/stores/indexed_utxos.rs @@ -11,7 +11,6 @@ use kaspa_index_core::indexed_utxos::BalanceByScriptPublicKey; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::fmt::Display; -use std::mem::size_of; use std::sync::Arc; pub const VERSION_TYPE_SIZE: usize = size_of::(); // Const since we need to re-use this a few times. 
@@ -67,8 +66,7 @@ impl From for TransactionOutpoint { fn from(key: TransactionOutpointKey) -> Self { let transaction_id = Hash::from_slice(&key.0[..kaspa_hashes::HASH_SIZE]); let index = TransactionIndexType::from_le_bytes( - <[u8; std::mem::size_of::()]>::try_from(&key.0[kaspa_hashes::HASH_SIZE..]) - .expect("expected index size"), + <[u8; size_of::()]>::try_from(&key.0[kaspa_hashes::HASH_SIZE..]).expect("expected index size"), ); Self::new(transaction_id, index) } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index d519f27cb..db9f32c16 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -6,7 +6,7 @@ use kaspa_consensus_core::{ errors::config::{ConfigError, ConfigResult}, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; -use kaspa_core::{core::Core, info, trace}; +use kaspa_core::{core::Core, debug, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; use kaspa_database::{ prelude::{CachePolicy, DbWriter, DirectDbWriter}, @@ -497,10 +497,10 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm .with_tick_service(tick_service.clone()); let perf_monitor = if args.perf_metrics { let cb = move |counters: CountersSnapshot| { - trace!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_process_metrics_display()); - trace!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_io_metrics_display()); + debug!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_process_metrics_display()); + debug!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_io_metrics_display()); #[cfg(feature = "heap")] - trace!("[{}] heap stats: {:?}", kaspa_perf_monitor::SERVICE_NAME, dhat::HeapStats::get()); + debug!("[{}] heap stats: {:?}", kaspa_perf_monitor::SERVICE_NAME, dhat::HeapStats::get()); }; Arc::new(perf_monitor_builder.with_fetch_cb(cb).build()) } else { diff --git a/math/src/uint.rs b/math/src/uint.rs index deb01496e..4ecc1fe12 100644 --- a/math/src/uint.rs +++ b/math/src/uint.rs @@ -15,7 +15,7 @@ macro_rules! construct_uint { pub const MIN: Self = Self::ZERO; pub const MAX: Self = $name([u64::MAX; $n_words]); pub const BITS: u32 = $n_words * u64::BITS; - pub const BYTES: usize = $n_words * core::mem::size_of::(); + pub const BYTES: usize = $n_words * size_of::(); pub const LIMBS: usize = $n_words; #[inline] diff --git a/mining/src/lib.rs b/mining/src/lib.rs index 745fb63f9..141d9d283 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -30,6 +30,7 @@ pub struct MiningCounters { pub low_priority_tx_counts: AtomicU64, pub block_tx_counts: AtomicU64, pub tx_accepted_counts: AtomicU64, + pub tx_evicted_counts: AtomicU64, pub input_counts: AtomicU64, pub output_counts: AtomicU64, @@ -48,6 +49,7 @@ impl Default for MiningCounters { low_priority_tx_counts: Default::default(), block_tx_counts: Default::default(), tx_accepted_counts: Default::default(), + tx_evicted_counts: Default::default(), input_counts: Default::default(), output_counts: Default::default(), ready_txs_sample: 
Default::default(), @@ -66,6 +68,7 @@ impl MiningCounters { low_priority_tx_counts: self.low_priority_tx_counts.load(Ordering::Relaxed), block_tx_counts: self.block_tx_counts.load(Ordering::Relaxed), tx_accepted_counts: self.tx_accepted_counts.load(Ordering::Relaxed), + tx_evicted_counts: self.tx_evicted_counts.load(Ordering::Relaxed), input_counts: self.input_counts.load(Ordering::Relaxed), output_counts: self.output_counts.load(Ordering::Relaxed), ready_txs_sample: self.ready_txs_sample.load(Ordering::Relaxed), @@ -101,6 +104,7 @@ pub struct MempoolCountersSnapshot { pub low_priority_tx_counts: u64, pub block_tx_counts: u64, pub tx_accepted_counts: u64, + pub tx_evicted_counts: u64, pub input_counts: u64, pub output_counts: u64, pub ready_txs_sample: u64, @@ -151,13 +155,14 @@ impl core::ops::Sub for &MempoolCountersSnapshot { fn sub(self, rhs: Self) -> Self::Output { Self::Output { - elapsed_time: self.elapsed_time.checked_sub(rhs.elapsed_time).unwrap_or_default(), - high_priority_tx_counts: self.high_priority_tx_counts.checked_sub(rhs.high_priority_tx_counts).unwrap_or_default(), - low_priority_tx_counts: self.low_priority_tx_counts.checked_sub(rhs.low_priority_tx_counts).unwrap_or_default(), - block_tx_counts: self.block_tx_counts.checked_sub(rhs.block_tx_counts).unwrap_or_default(), - tx_accepted_counts: self.tx_accepted_counts.checked_sub(rhs.tx_accepted_counts).unwrap_or_default(), - input_counts: self.input_counts.checked_sub(rhs.input_counts).unwrap_or_default(), - output_counts: self.output_counts.checked_sub(rhs.output_counts).unwrap_or_default(), + elapsed_time: self.elapsed_time.saturating_sub(rhs.elapsed_time), + high_priority_tx_counts: self.high_priority_tx_counts.saturating_sub(rhs.high_priority_tx_counts), + low_priority_tx_counts: self.low_priority_tx_counts.saturating_sub(rhs.low_priority_tx_counts), + block_tx_counts: self.block_tx_counts.saturating_sub(rhs.block_tx_counts), + tx_accepted_counts: 
self.tx_accepted_counts.saturating_sub(rhs.tx_accepted_counts), + tx_evicted_counts: self.tx_evicted_counts.saturating_sub(rhs.tx_evicted_counts), + input_counts: self.input_counts.saturating_sub(rhs.input_counts), + output_counts: self.output_counts.saturating_sub(rhs.output_counts), ready_txs_sample: (self.ready_txs_sample + rhs.ready_txs_sample) / 2, txs_sample: (self.txs_sample + rhs.txs_sample) / 2, orphans_sample: (self.orphans_sample + rhs.orphans_sample) / 2, @@ -177,8 +182,8 @@ impl core::ops::Sub for &P2pTxCountSample { fn sub(self, rhs: Self) -> Self::Output { Self::Output { - elapsed_time: self.elapsed_time.checked_sub(rhs.elapsed_time).unwrap_or_default(), - low_priority_tx_counts: self.low_priority_tx_counts.checked_sub(rhs.low_priority_tx_counts).unwrap_or_default(), + elapsed_time: self.elapsed_time.saturating_sub(rhs.elapsed_time), + low_priority_tx_counts: self.low_priority_tx_counts.saturating_sub(rhs.low_priority_tx_counts), } } } diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 3eddac459..69e08019b 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -1,3 +1,5 @@ +use std::sync::atomic::Ordering; + use crate::mempool::{ errors::{RuleError, RuleResult}, model::{ @@ -84,21 +86,27 @@ impl Mempool { // Before adding the transaction, check if there is room in the pool let transaction_size = transaction.mempool_estimated_bytes(); let txs_to_remove = self.transaction_pool.limit_transaction_count(&transaction, transaction_size)?; - for x in txs_to_remove.iter() { - self.remove_transaction(x, true, TxRemovalReason::MakingRoom, format!(" for {}", transaction_id).as_str())?; - // self.transaction_pool.limit_transaction_count(&transaction) returns the - // smallest prefix of `ready_transactions` (sorted by ascending fee-rate) - // that makes enough room for `transaction`, but since each call to 
`self.remove_transaction` - // also removes all transactions dependant on `x` we might already have sufficient space, so - // we constantly check the break condition. - // - // Note that self.transaction_pool.len() < self.config.maximum_transaction_count means we have - // at least one available slot in terms of the count limit - if self.transaction_pool.len() < self.config.maximum_transaction_count - && self.transaction_pool.get_estimated_size() + transaction_size <= self.config.mempool_size_limit - { - break; + if !txs_to_remove.is_empty() { + let transaction_pool_len_before = self.transaction_pool.len(); + for x in txs_to_remove.iter() { + self.remove_transaction(x, true, TxRemovalReason::MakingRoom, format!(" for {}", transaction_id).as_str())?; + // self.transaction_pool.limit_transaction_count(&transaction) returns the + // smallest prefix of `ready_transactions` (sorted by ascending fee-rate) + // that makes enough room for `transaction`, but since each call to `self.remove_transaction` + // also removes all transactions dependant on `x` we might already have sufficient space, so + // we constantly check the break condition. 
+ // + // Note that self.transaction_pool.len() < self.config.maximum_transaction_count means we have + // at least one available slot in terms of the count limit + if self.transaction_pool.len() < self.config.maximum_transaction_count + && self.transaction_pool.get_estimated_size() + transaction_size <= self.config.mempool_size_limit + { + break; + } } + self.counters + .tx_evicted_counts + .fetch_add(transaction_pool_len_before.saturating_sub(self.transaction_pool.len()) as u64, Ordering::Relaxed); } assert!( diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index 876ce9b7a..74449424c 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -69,6 +69,12 @@ impl MiningMonitor { let feerate_estimations = self.mining_manager.clone().get_realtime_feerate_estimations().await; debug!("Realtime feerate estimations: {}", feerate_estimations); } + if delta.tx_evicted_counts > 0 { + info!( + "Mempool stats: {} transactions were evicted from the mempool in favor of incoming higher feerate transactions", + delta.tx_evicted_counts + ); + } if tx_script_cache_snapshot != last_tx_script_cache_snapshot { debug!( "UTXO set stats: {} spent, {} created ({} signatures validated, {} cache hits, {:.2} hit ratio)", diff --git a/mining/src/testutils/coinbase_mock.rs b/mining/src/testutils/coinbase_mock.rs index 12e0905a8..8d19c2fcd 100644 --- a/mining/src/testutils/coinbase_mock.rs +++ b/mining/src/testutils/coinbase_mock.rs @@ -4,7 +4,6 @@ use kaspa_consensus_core::{ subnets::SUBNETWORK_ID_COINBASE, tx::{Transaction, TransactionOutput}, }; -use std::mem::size_of; const LENGTH_OF_BLUE_SCORE: usize = size_of::(); const LENGTH_OF_SUBSIDY: usize = size_of::(); diff --git a/musl-toolchain/build.sh b/musl-toolchain/build.sh new file mode 100755 index 000000000..b32314082 --- /dev/null +++ b/musl-toolchain/build.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +PRESET_HASH_FILE="$HOME/x-tools/preset_hash" + +# Calculate the hash of the preset file +CURRENT_PRESET_HASH=$(sha256sum 
$GITHUB_WORKSPACE/musl-toolchain/preset.sh | awk '{print $1}') + +echo "Current preset hash: $CURRENT_PRESET_HASH" + +# Traverse to working directory +cd $GITHUB_WORKSPACE/musl-toolchain + +# Set the preset +source preset.sh + +# If the toolchain is not installed or the preset has changed or the preset hash file does not exist +if [ ! -d "$HOME/x-tools" ] || [ ! -f "$PRESET_HASH_FILE" ] || [ "$(cat $PRESET_HASH_FILE)" != "$CURRENT_PRESET_HASH" ]; then + # Install dependencies + sudo apt-get update + sudo apt-get install -y autoconf automake libtool libtool-bin unzip help2man python3.10-dev gperf bison flex texinfo gawk libncurses5-dev + + # Clone crosstool-ng + git clone https://github.com/crosstool-ng/crosstool-ng + + # Configure and build crosstool-ng + cd crosstool-ng + # Use version 1.26 + git checkout crosstool-ng-1.26.0 + ./bootstrap + ./configure --prefix=$HOME/ctng + make + make install + # Add crosstool-ng to PATH + export PATH=$HOME/ctng/bin:$PATH + + + + # Load toolchainc configuration + ct-ng $CTNG_PRESET + + # Build the toolchain + ct-ng build > build.log 2>&1 + + # Set status to the exit code of the build + status=$? + + # We store the log in a file because it bloats the screen too much + # on GitHub Actions. We print it only if the build fails. 
+ echo "Build result:" + if [ $status -eq 0 ]; then + echo "Build succeeded" + ls -la $HOME/x-tools + # Store the current hash of preset.sh after successful build + echo "$CURRENT_PRESET_HASH" > "$PRESET_HASH_FILE" + else + echo "Build failed, here's the log:" + cat .config + cat build.log + fi +fi + +# Update toolchain variables: C compiler, C++ compiler, linker, and archiver +export CC=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-gcc +export CXX=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-g++ +export LD=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-ld +export AR=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-ar + +# Exports for cc crate +# https://docs.rs/cc/latest/cc/#external-configuration-via-environment-variables +export RANLIB_x86_64_unknown_linux_musl=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-ranlib +export CC_x86_64_unknown_linux_musl=$CC +export CXX_x86_64_unknown_linux_musl=$CXX +export AR_x86_64_unknown_linux_musl=$AR +export LD_x86_64_unknown_linux_musl=$LD + +# Set environment variables for static linking +export OPENSSL_STATIC=true +export RUSTFLAGS="-C link-arg=-static" + +# We specify the compiler that will invoke linker +export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=$CC + +# Add target +rustup target add x86_64-unknown-linux-musl + +# Install missing dependencies +cargo fetch --target x86_64-unknown-linux-musl + +# Patch missing include in librocksdb-sys-0.16.0+8.10.0. Credit: @supertypo +FILE_PATH=$(find $HOME/.cargo/registry/src/ -path "*/librocksdb-sys-0.16.0+8.10.0/*/offpeak_time_info.h") + +if [ -n "$FILE_PATH" ]; then + sed -i '1i #include ' "$FILE_PATH" +else + echo "No such file for sed modification." 
+fi \ No newline at end of file diff --git a/musl-toolchain/preset.sh b/musl-toolchain/preset.sh new file mode 100755 index 000000000..63e98a768 --- /dev/null +++ b/musl-toolchain/preset.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Sets the preset that will be used by crosstool-ng +# Available presets can be fetched with: ct-ng list-samples +export CTNG_PRESET="x86_64-multilib-linux-musl" \ No newline at end of file diff --git a/protocol/flows/src/flowcontext/orphans.rs b/protocol/flows/src/flowcontext/orphans.rs index d1122b0e0..f18649e55 100644 --- a/protocol/flows/src/flowcontext/orphans.rs +++ b/protocol/flows/src/flowcontext/orphans.rs @@ -166,7 +166,7 @@ impl OrphanBlocksPool { } } else { let status = consensus.async_get_block_status(current).await; - if status.is_none_or(|s| s.is_header_only()) { + if status.is_none_or_ex(|s| s.is_header_only()) { // Block is not in the orphan pool nor does its body exist consensus-wise, so it is a root roots.push(current); } @@ -193,7 +193,8 @@ impl OrphanBlocksPool { if let Occupied(entry) = self.orphans.entry(orphan_hash) { let mut processable = true; for p in entry.get().block.header.direct_parents().iter().copied() { - if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or(|s| s.is_header_only()) { + if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or_ex(|s| s.is_header_only()) + { processable = false; break; } @@ -249,7 +250,7 @@ impl OrphanBlocksPool { let mut processable = true; for parent in block.block.header.direct_parents().iter().copied() { if self.orphans.contains_key(&parent) - || consensus.async_get_block_status(parent).await.is_none_or(|status| status.is_header_only()) + || consensus.async_get_block_status(parent).await.is_none_or_ex(|status| status.is_header_only()) { processable = false; break; diff --git a/protocol/p2p/src/convert/net_address.rs b/protocol/p2p/src/convert/net_address.rs index 5a2ffec0e..c525300ef 100644 --- 
a/protocol/p2p/src/convert/net_address.rs +++ b/protocol/p2p/src/convert/net_address.rs @@ -1,7 +1,4 @@ -use std::{ - mem::size_of, - net::{IpAddr, Ipv4Addr, Ipv6Addr}, -}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::error::ConversionError; use crate::pb as protowire; diff --git a/rpc/core/src/model/message.rs b/rpc/core/src/model/message.rs index 779f7593a..ba8d6abf7 100644 --- a/rpc/core/src/model/message.rs +++ b/rpc/core/src/model/message.rs @@ -1899,6 +1899,7 @@ pub struct GetSystemInfoResponse { pub cpu_physical_cores: u16, pub total_memory: u64, pub fd_limit: u32, + pub proxy_socket_limit_per_cpu_core: Option, } impl std::fmt::Debug for GetSystemInfoResponse { @@ -1910,19 +1911,21 @@ impl std::fmt::Debug for GetSystemInfoResponse { .field("cpu_physical_cores", &self.cpu_physical_cores) .field("total_memory", &self.total_memory) .field("fd_limit", &self.fd_limit) + .field("proxy_socket_limit_per_cpu_core", &self.proxy_socket_limit_per_cpu_core) .finish() } } impl Serializer for GetSystemInfoResponse { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - store!(u16, &1, writer)?; + store!(u16, &2, writer)?; store!(String, &self.version, writer)?; store!(Option>, &self.system_id, writer)?; store!(Option>, &self.git_hash, writer)?; store!(u16, &self.cpu_physical_cores, writer)?; store!(u64, &self.total_memory, writer)?; store!(u32, &self.fd_limit, writer)?; + store!(Option, &self.proxy_socket_limit_per_cpu_core, writer)?; Ok(()) } @@ -1930,7 +1933,7 @@ impl Serializer for GetSystemInfoResponse { impl Deserializer for GetSystemInfoResponse { fn deserialize(reader: &mut R) -> std::io::Result { - let _version = load!(u16, reader)?; + let payload_version = load!(u16, reader)?; let version = load!(String, reader)?; let system_id = load!(Option>, reader)?; let git_hash = load!(Option>, reader)?; @@ -1938,7 +1941,9 @@ impl Deserializer for GetSystemInfoResponse { let total_memory = load!(u64, reader)?; let fd_limit = load!(u32, reader)?; - Ok(Self 
{ version, system_id, git_hash, cpu_physical_cores, total_memory, fd_limit }) + let proxy_socket_limit_per_cpu_core = if payload_version > 1 { load!(Option, reader)? } else { None }; + + Ok(Self { version, system_id, git_hash, cpu_physical_cores, total_memory, fd_limit, proxy_socket_limit_per_cpu_core }) } } diff --git a/rpc/core/src/model/tests.rs b/rpc/core/src/model/tests.rs index 11ebe7de1..d931f5ac2 100644 --- a/rpc/core/src/model/tests.rs +++ b/rpc/core/src/model/tests.rs @@ -970,6 +970,7 @@ mod mockery { cpu_physical_cores: mock(), total_memory: mock(), fd_limit: mock(), + proxy_socket_limit_per_cpu_core: mock(), } } } diff --git a/rpc/grpc/core/proto/rpc.proto b/rpc/grpc/core/proto/rpc.proto index 0147e15e2..7a38f8852 100644 --- a/rpc/grpc/core/proto/rpc.proto +++ b/rpc/grpc/core/proto/rpc.proto @@ -851,6 +851,7 @@ message GetSystemInfoResponseMessage{ uint32 coreNum = 4; uint64 totalMemory = 5; uint32 fdLimit = 6; + uint32 proxySocketLimitPerCpuCore = 7; RPCError error = 1000; } diff --git a/rpc/grpc/core/src/convert/message.rs b/rpc/grpc/core/src/convert/message.rs index a04f9c863..c0e75cf03 100644 --- a/rpc/grpc/core/src/convert/message.rs +++ b/rpc/grpc/core/src/convert/message.rs @@ -480,6 +480,7 @@ from!(item: RpcResult<&kaspa_rpc_core::GetSystemInfoResponse>, protowire::GetSys total_memory : item.total_memory, core_num : item.cpu_physical_cores as u32, fd_limit : item.fd_limit, + proxy_socket_limit_per_cpu_core : item.proxy_socket_limit_per_cpu_core.unwrap_or_default(), error: None, } }); @@ -962,6 +963,7 @@ try_from!(item: &protowire::GetSystemInfoResponseMessage, RpcResult 0).then_some(item.proxy_socket_limit_per_cpu_core), } }); diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs index d498f522f..2c22fd6bb 100644 --- a/rpc/service/src/service.rs +++ b/rpc/service/src/service.rs @@ -1067,6 +1067,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and cpu_physical_cores: 
self.system_info.cpu_physical_cores, total_memory: self.system_info.total_memory, fd_limit: self.system_info.fd_limit, + proxy_socket_limit_per_cpu_core: self.system_info.proxy_socket_limit_per_cpu_core, }; Ok(response) diff --git a/rpc/wrpc/server/Cargo.toml b/rpc/wrpc/server/Cargo.toml index f0f55aed7..3bf285c3f 100644 --- a/rpc/wrpc/server/Cargo.toml +++ b/rpc/wrpc/server/Cargo.toml @@ -35,8 +35,12 @@ workflow-log.workspace = true workflow-rpc.workspace = true workflow-serializer.workspace = true -[target.x86_64-unknown-linux-gnu.dependencies] -# Adding explicitely the openssl dependency here is needed for a successful build with zigbuild +# Adding explicitely the openssl dependency here is needed for a successful build with zigbuild and musl # as used in the release deployment in GitHub CI # see: https://github.com/rust-cross/cargo-zigbuild/issues/127 + +[target.x86_64-unknown-linux-gnu.dependencies] +openssl = { version = "0.10", features = ["vendored"] } + +[target.x86_64-unknown-linux-musl.dependencies] openssl = { version = "0.10", features = ["vendored"] } diff --git a/utils/src/mem_size.rs b/utils/src/mem_size.rs index c7963a40c..449f649bc 100644 --- a/utils/src/mem_size.rs +++ b/utils/src/mem_size.rs @@ -2,7 +2,7 @@ //! estimate sizes of run-time objects in memory, including deep heap allocations. See //! struct-level docs for more details. 
-use std::{collections::HashSet, mem::size_of, sync::Arc}; +use std::{collections::HashSet, sync::Arc}; use parking_lot::RwLock; diff --git a/utils/src/option.rs b/utils/src/option.rs index 9ccf96c90..ff4779dc1 100644 --- a/utils/src/option.rs +++ b/utils/src/option.rs @@ -1,9 +1,10 @@ pub trait OptionExtensions { - fn is_none_or(&self, f: impl FnOnce(&T) -> bool) -> bool; + /// Substitute for unstable [Option::is_non_or] + fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool; } impl OptionExtensions for Option { - fn is_none_or(&self, f: impl FnOnce(&T) -> bool) -> bool { + fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool { match self { Some(v) => f(v), None => true, diff --git a/utils/src/sysinfo.rs b/utils/src/sysinfo.rs index 4e009d449..ba6f25110 100644 --- a/utils/src/sysinfo.rs +++ b/utils/src/sysinfo.rs @@ -2,21 +2,32 @@ use crate::fd_budget; use crate::git; use crate::hex::ToHex; use sha2::{Digest, Sha256}; -use std::fs::File; +use std::fs::{read_to_string, File}; use std::io::Read; +use std::path::PathBuf; +// use std::fs::read_to_string; use std::sync::OnceLock; static SYSTEM_INFO: OnceLock = OnceLock::new(); #[derive(Clone)] pub struct SystemInfo { + /// unique system (machine) identifier pub system_id: Option>, + /// full git commit hash pub git_hash: Option>, + /// short git commit hash pub git_short_hash: Option>, + /// crate (workspace) version pub version: String, + /// number of physical CPU cores pub cpu_physical_cores: u16, + /// total system memory in bytes pub total_memory: u64, + /// file descriptor limit of the current process pub fd_limit: u32, + /// maximum number of sockets per CPU core + pub proxy_socket_limit_per_cpu_core: Option, } // provide hex encoding for system_id, git_hash, and git_short_hash @@ -30,6 +41,7 @@ impl std::fmt::Debug for SystemInfo { .field("cpu_physical_cores", &self.cpu_physical_cores) .field("total_memory", &self.total_memory) .field("fd_limit", &self.fd_limit) + 
.field("proxy_socket_limit_per_cpu_core", &self.proxy_socket_limit_per_cpu_core) .finish() } } @@ -46,8 +58,18 @@ impl Default for SystemInfo { let git_hash = git::hash(); let git_short_hash = git::short_hash(); let version = git::version(); + let proxy_socket_limit_per_cpu_core = Self::try_proxy_socket_limit_per_cpu_core(); - SystemInfo { system_id, git_hash, git_short_hash, version, cpu_physical_cores, total_memory, fd_limit } + SystemInfo { + system_id, + git_hash, + git_short_hash, + version, + cpu_physical_cores, + total_memory, + fd_limit, + proxy_socket_limit_per_cpu_core, + } }); (*system_info).clone() } @@ -72,6 +94,19 @@ impl SystemInfo { sha256.update(some_id.as_bytes()); Some(sha256.finalize().to_vec()) } + + fn try_proxy_socket_limit_per_cpu_core() -> Option { + let nginx_config_path = PathBuf::from("/etc/nginx/nginx.conf"); + if nginx_config_path.exists() { + read_to_string(nginx_config_path) + .ok() + .and_then(|content| content.lines().find(|line| line.trim().starts_with("worker_connections")).map(String::from)) + .and_then(|line| line.split_whitespace().nth(1).map(|v| v.replace(";", ""))) + .and_then(|value| value.parse::().ok()) + } else { + None + } + } } impl AsRef for SystemInfo { @@ -79,3 +114,14 @@ impl AsRef for SystemInfo { self } } + +// #[cfg(test)] +// mod tests { +// use super::*; + +// #[test] +// fn test_system_info() { +// let system_info = SystemInfo::default(); +// println!("{:#?}", system_info); +// } +// } From a9888c4651d373f0ce0ac7c2ee2b8cb0877930c0 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 6 Sep 2024 05:31:43 +0000 Subject: [PATCH 52/58] bump to version 0.14.6 --- Cargo.lock | 114 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 112 ++++++++++++++++++++++++++-------------------------- 2 files changed, 113 insertions(+), 113 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1665219c6..37f8960dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2393,7 +2393,7 @@ dependencies = [ [[package]] name 
= "kaspa-addresses" -version = "0.14.5" +version = "0.14.6" dependencies = [ "borsh", "criterion", @@ -2410,7 +2410,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.14.5" +version = "0.14.6" dependencies = [ "borsh", "igd-next", @@ -2433,14 +2433,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.14.5" +version = "0.14.6" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.14.5" +version = "0.14.6" dependencies = [ "borsh", "bs58", @@ -2467,7 +2467,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "borsh", @@ -2514,7 +2514,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.14.5" +version = "0.14.6" dependencies = [ "duration-string", "futures-util", @@ -2531,7 +2531,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.14.5" +version = "0.14.6" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2575,7 +2575,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.14.5" +version = "0.14.6" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2603,7 +2603,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "arc-swap", "async-trait", @@ -2642,7 +2642,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -2661,7 +2661,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.14.5" +version = "0.14.6" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2685,7 +2685,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.14.5" +version = "0.14.6" dependencies = [ "duration-string", "futures", @@ -2703,7 +2703,7 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "cfg-if 1.0.0", 
"ctrlc", @@ -2721,7 +2721,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "borsh", @@ -2743,7 +2743,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.14.5" +version = "0.14.6" dependencies = [ "bincode", "enum-primitive-derive", @@ -2765,7 +2765,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2796,7 +2796,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2825,7 +2825,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2860,7 +2860,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.14.5" +version = "0.14.6" dependencies = [ "blake2b_simd", "borsh", @@ -2881,7 +2881,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2900,7 +2900,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2928,7 +2928,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.14.5" +version = "0.14.6" dependencies = [ "borsh", "criterion", @@ -2949,14 +2949,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.14.5" +version = "0.14.6" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "borsh", @@ -2972,7 +2972,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.14.5" +version = "0.14.6" dependencies = [ "criterion", "futures-util", @@ -2999,7 +2999,7 @@ dependencies = [ [[package]] name = 
"kaspa-mining-errors" -version = "0.14.5" +version = "0.14.6" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -3007,7 +3007,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.14.5" +version = "0.14.6" dependencies = [ "criterion", "kaspa-hashes", @@ -3020,7 +3020,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3056,7 +3056,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "chrono", @@ -3087,7 +3087,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.14.5" +version = "0.14.6" dependencies = [ "borsh", "ctrlc", @@ -3118,7 +3118,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.14.5" +version = "0.14.6" dependencies = [ "kaspa-core", "log", @@ -3130,7 +3130,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.14.5" +version = "0.14.6" dependencies = [ "criterion", "js-sys", @@ -3146,7 +3146,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3188,7 +3188,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.14.5" +version = "0.14.6" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3200,7 +3200,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "kaspa-addresses", @@ -3229,7 +3229,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3289,7 +3289,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.14.5" +version = "0.14.6" dependencies = [ "blake2b_simd", "borsh", @@ -3321,7 +3321,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" 
-version = "0.14.5" +version = "0.14.6" dependencies = [ "secp256k1", "thiserror", @@ -3329,7 +3329,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.14.5" +version = "0.14.6" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.14.5" +version = "0.14.6" dependencies = [ "cfg-if 1.0.0", "futures", @@ -3379,7 +3379,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.14.5" +version = "0.14.6" dependencies = [ "futures", "kaspa-consensus", @@ -3400,7 +3400,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-std", "async-trait", @@ -3412,7 +3412,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "js-sys", @@ -3426,7 +3426,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "aes", "ahash", @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "borsh", @@ -3540,7 +3540,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.14.5" +version = "0.14.6" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3553,7 +3553,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.14.5" +version = "0.14.6" dependencies = [ "bincode", "derive_builder", @@ -3580,7 +3580,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.14.5" +version = "0.14.6" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3608,7 +3608,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.14.5" +version = "0.14.6" dependencies = [ "faster-hex", "hexplay", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.14.5" +version = "0.14.6" dependencies = [ 
"async-std", "async-trait", @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.14.5" +version = "0.14.6" dependencies = [ "ctrlc", "futures", @@ -3669,7 +3669,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "clap 4.5.16", @@ -3688,7 +3688,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-trait", "borsh", @@ -3716,7 +3716,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.14.5" +version = "0.14.6" dependencies = [ "ahash", "async-std", @@ -3745,7 +3745,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -5192,7 +5192,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "clap 4.5.16", @@ -5669,7 +5669,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.14.5" +version = "0.14.6" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 0fdc4a499..732279bd5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ members = [ [workspace.package] rust-version = "1.80.0" -version = "0.14.5" +version = "0.14.6" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -79,61 +79,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.14.5", path = "testing/integration" } -kaspa-addresses = { version = "0.14.5", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.14.5", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.14.5", path = "wallet/bip32" } -kaspa-cli = { version = "0.14.5", path = "cli" } -kaspa-connectionmanager = { version = "0.14.5", path = "components/connectionmanager" } -kaspa-consensus = 
{ version = "0.14.5", path = "consensus" } -kaspa-consensus-core = { version = "0.14.5", path = "consensus/core" } -kaspa-consensus-client = { version = "0.14.5", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.14.5", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.14.5", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.14.5", path = "components/consensusmanager" } -kaspa-core = { version = "0.14.5", path = "core" } -kaspa-daemon = { version = "0.14.5", path = "daemon" } -kaspa-database = { version = "0.14.5", path = "database" } -kaspa-grpc-client = { version = "0.14.5", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.14.5", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.14.5", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.14.5", path = "crypto/hashes" } -kaspa-index-core = { version = "0.14.5", path = "indexes/core" } -kaspa-index-processor = { version = "0.14.5", path = "indexes/processor" } -kaspa-math = { version = "0.14.5", path = "math" } -kaspa-merkle = { version = "0.14.5", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.14.5", path = "metrics/core" } -kaspa-mining = { version = "0.14.5", path = "mining" } -kaspa-mining-errors = { version = "0.14.5", path = "mining/errors" } -kaspa-muhash = { version = "0.14.5", path = "crypto/muhash" } -kaspa-notify = { version = "0.14.5", path = "notify" } -kaspa-p2p-flows = { version = "0.14.5", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.14.5", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.14.5", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.14.5", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.14.5", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.14.5", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.14.5", path = "rpc/service" } -kaspa-txscript = { version = "0.14.5", path = "crypto/txscript" } -kaspa-txscript-errors = { version = 
"0.14.5", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.14.5", path = "utils" } -kaspa-utils-tower = { version = "0.14.5", path = "utils/tower" } -kaspa-utxoindex = { version = "0.14.5", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.14.5", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.14.5", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.14.5", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.14.5", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.14.5", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.14.5", path = "wallet/macros" } -kaspa-wasm = { version = "0.14.5", path = "wasm" } -kaspa-wasm-core = { version = "0.14.5", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.14.5", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.14.5", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.14.5", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.14.5", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.14.5", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.14.5", path = "kaspad" } -kaspa-alloc = { version = "0.14.5", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.14.6", path = "testing/integration" } +kaspa-addresses = { version = "0.14.6", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.14.6", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.14.6", path = "wallet/bip32" } +kaspa-cli = { version = "0.14.6", path = "cli" } +kaspa-connectionmanager = { version = "0.14.6", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.14.6", path = "consensus" } +kaspa-consensus-core = { version = "0.14.6", path = "consensus/core" } +kaspa-consensus-client = { version = "0.14.6", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.14.6", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.14.6", 
path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.14.6", path = "components/consensusmanager" } +kaspa-core = { version = "0.14.6", path = "core" } +kaspa-daemon = { version = "0.14.6", path = "daemon" } +kaspa-database = { version = "0.14.6", path = "database" } +kaspa-grpc-client = { version = "0.14.6", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.14.6", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.14.6", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.14.6", path = "crypto/hashes" } +kaspa-index-core = { version = "0.14.6", path = "indexes/core" } +kaspa-index-processor = { version = "0.14.6", path = "indexes/processor" } +kaspa-math = { version = "0.14.6", path = "math" } +kaspa-merkle = { version = "0.14.6", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.14.6", path = "metrics/core" } +kaspa-mining = { version = "0.14.6", path = "mining" } +kaspa-mining-errors = { version = "0.14.6", path = "mining/errors" } +kaspa-muhash = { version = "0.14.6", path = "crypto/muhash" } +kaspa-notify = { version = "0.14.6", path = "notify" } +kaspa-p2p-flows = { version = "0.14.6", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.14.6", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.14.6", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.14.6", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.14.6", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.14.6", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.14.6", path = "rpc/service" } +kaspa-txscript = { version = "0.14.6", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.14.6", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.14.6", path = "utils" } +kaspa-utils-tower = { version = "0.14.6", path = "utils/tower" } +kaspa-utxoindex = { version = "0.14.6", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.14.6", path = "wallet/native" } +kaspa-wallet-cli-wasm 
= { version = "0.14.6", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.14.6", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.14.6", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.14.6", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.14.6", path = "wallet/macros" } +kaspa-wasm = { version = "0.14.6", path = "wasm" } +kaspa-wasm-core = { version = "0.14.6", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.14.6", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.14.6", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.14.6", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.14.6", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.14.6", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.14.6", path = "kaspad" } +kaspa-alloc = { version = "0.14.6", path = "utils/alloc" } # external aes = "0.8.3" From 5a80f53863df0450b23e7e420653563c75c64b39 Mon Sep 17 00:00:00 2001 From: max143672 Date: Wed, 11 Sep 2024 23:57:35 +0300 Subject: [PATCH 53/58] introduce db trait --- components/addressmanager/src/lib.rs | 10 +-- .../src/stores/address_store.rs | 6 +- .../src/stores/banned_address_store.rs | 6 +- consensus/src/consensus/ctl.rs | 6 +- consensus/src/consensus/factory.rs | 9 ++- consensus/src/consensus/mod.rs | 6 +- consensus/src/consensus/services.rs | 4 +- consensus/src/consensus/storage.rs | 6 +- consensus/src/consensus/test_consensus.rs | 4 +- consensus/src/model/stores/acceptance_data.rs | 6 +- .../src/model/stores/block_transactions.rs | 6 +- consensus/src/model/stores/children.rs | 8 +- consensus/src/model/stores/daa.rs | 6 +- consensus/src/model/stores/depth.rs | 6 +- consensus/src/model/stores/ghostdag.rs | 8 +- consensus/src/model/stores/headers.rs | 6 +- .../src/model/stores/headers_selected_tip.rs | 6 +- consensus/src/model/stores/mod.rs | 2 +- .../src/model/stores/past_pruning_points.rs | 6 +- consensus/src/model/stores/pruning.rs | 6 
+- consensus/src/model/stores/pruning_utxoset.rs | 4 +- consensus/src/model/stores/reachability.rs | 11 +-- consensus/src/model/stores/relations.rs | 8 +- consensus/src/model/stores/selected_chain.rs | 6 +- consensus/src/model/stores/statuses.rs | 6 +- consensus/src/model/stores/tips.rs | 6 +- consensus/src/model/stores/utxo_diffs.rs | 6 +- consensus/src/model/stores/utxo_multisets.rs | 6 +- consensus/src/model/stores/utxo_set.rs | 6 +- consensus/src/model/stores/virtual_state.rs | 8 +- .../src/pipeline/body_processor/processor.rs | 6 +- .../pipeline/header_processor/processor.rs | 6 +- .../pipeline/pruning_processor/processor.rs | 6 +- .../pipeline/virtual_processor/processor.rs | 6 +- consensus/src/processes/pruning_proof/mod.rs | 10 +-- database/src/access.rs | 78 ++++++++++--------- database/src/db.rs | 67 ++++++++++++++-- database/src/db/conn_builder.rs | 17 ++-- database/src/item.rs | 8 +- database/src/key.rs | 6 ++ database/src/lib.rs | 2 +- database/src/set_access.rs | 8 +- database/src/utils.rs | 8 +- database/src/writer.rs | 8 +- indexes/utxoindex/src/index.rs | 4 +- indexes/utxoindex/src/stores/indexed_utxos.rs | 6 +- indexes/utxoindex/src/stores/store_manager.rs | 4 +- indexes/utxoindex/src/stores/supply.rs | 6 +- indexes/utxoindex/src/stores/tips.rs | 6 +- 49 files changed, 258 insertions(+), 194 deletions(-) diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index 85f9acb3e..7e7e7a719 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -15,7 +15,7 @@ use itertools::{ }; use kaspa_consensus_core::config::Config; use kaspa_core::{debug, info, task::tick::TickService, time::unix_now, warn}; -use kaspa_database::prelude::{CachePolicy, StoreResultExtensions, DB}; +use kaspa_database::prelude::{CachePolicy, RocksDB, StoreResultExtensions}; use kaspa_utils::networking::IpAddress; use local_ip_address::list_afinet_netifas; use parking_lot::Mutex; @@ -59,7 +59,7 @@ pub struct 
AddressManager { } impl AddressManager { - pub fn new(config: Arc, db: Arc, tick_service: Arc) -> (Arc>, Option) { + pub fn new(config: Arc, db: Arc, tick_service: Arc) -> (Arc>, Option) { let mut instance = Self { banned_address_store: DbBannedAddressesStore::new(db.clone(), CachePolicy::Count(MAX_ADDRESSES)), address_store: address_store_with_cache::new(db), @@ -337,7 +337,7 @@ mod address_store_with_cache { }; use itertools::Itertools; - use kaspa_database::prelude::{CachePolicy, DB}; + use kaspa_database::prelude::{CachePolicy, RocksDB}; use kaspa_utils::networking::PrefixBucket; use rand::{ distributions::{WeightedError, WeightedIndex}, @@ -358,7 +358,7 @@ mod address_store_with_cache { } impl Store { - fn new(db: Arc) -> Self { + fn new(db: Arc) -> Self { // We manage the cache ourselves on this level, so we disable the inner builtin cache let db_store = DbAddressesStore::new(db, CachePolicy::Empty); let mut addresses = HashMap::new(); @@ -457,7 +457,7 @@ mod address_store_with_cache { } } - pub fn new(db: Arc) -> Store { + pub fn new(db: Arc) -> Store { Store::new(db) } diff --git a/components/addressmanager/src/stores/address_store.rs b/components/addressmanager/src/stores/address_store.rs index fe4ddb244..676e49cfe 100644 --- a/components/addressmanager/src/stores/address_store.rs +++ b/components/addressmanager/src/stores/address_store.rs @@ -1,5 +1,5 @@ use kaspa_database::{ - prelude::DB, + prelude::RocksDB, prelude::{CachePolicy, StoreError, StoreResult}, prelude::{CachedDbAccess, DirectDbWriter}, registry::DatabaseStorePrefixes, @@ -74,12 +74,12 @@ impl From for AddressKey { #[derive(Clone)] pub struct DbAddressesStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbAddressesStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::Addresses.into()) } } diff --git 
a/components/addressmanager/src/stores/banned_address_store.rs b/components/addressmanager/src/stores/banned_address_store.rs index b530af0ce..800aeb6c9 100644 --- a/components/addressmanager/src/stores/banned_address_store.rs +++ b/components/addressmanager/src/stores/banned_address_store.rs @@ -1,6 +1,6 @@ use kaspa_database::{ prelude::{CachePolicy, StoreError, StoreResult}, - prelude::{CachedDbAccess, DirectDbWriter, DB}, + prelude::{CachedDbAccess, DirectDbWriter, RocksDB}, registry::DatabaseStorePrefixes, }; use kaspa_utils::mem_size::MemSizeEstimator; @@ -68,12 +68,12 @@ impl From for IpAddr { #[derive(Clone)] pub struct DbBannedAddressesStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbBannedAddressesStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::BannedAddresses.into()) } } diff --git a/consensus/src/consensus/ctl.rs b/consensus/src/consensus/ctl.rs index 4b2eb1511..7bd35595b 100644 --- a/consensus/src/consensus/ctl.rs +++ b/consensus/src/consensus/ctl.rs @@ -1,6 +1,6 @@ use super::{factory::MultiConsensusManagementStore, Consensus}; use kaspa_consensusmanager::ConsensusCtl; -use kaspa_database::prelude::DB; +use kaspa_database::prelude::RocksDB; use parking_lot::RwLock; use std::{ path::PathBuf, @@ -10,7 +10,7 @@ use std::{ pub struct Ctl { management_store: Arc>, - consensus_db_ref: Weak, + consensus_db_ref: Weak, consensus_db_path: PathBuf, consensus: Arc, } @@ -18,7 +18,7 @@ pub struct Ctl { impl Ctl { pub fn new( management_store: Arc>, - consensus_db: Arc, + consensus_db: Arc, consensus: Arc, ) -> Self { let consensus_db_path = consensus_db.path().to_owned(); diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f8af5fb5a..dec3149c3 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -9,7 
+9,8 @@ use kaspa_consensusmanager::{ConsensusFactory, ConsensusInstance, DynConsensusCt use kaspa_core::{debug, time::unix_now, warn}; use kaspa_database::{ prelude::{ - BatchDbWriter, CachePolicy, CachedDbAccess, CachedDbItem, DirectDbWriter, StoreError, StoreResult, StoreResultExtensions, DB, + BatchDbWriter, CachePolicy, CachedDbAccess, CachedDbItem, DirectDbWriter, RocksDB, StoreError, StoreResult, + StoreResultExtensions, }, registry::DatabaseStorePrefixes, }; @@ -75,13 +76,13 @@ impl Default for MultiConsensusMetadata { #[derive(Clone)] pub struct MultiConsensusManagementStore { - db: Arc, + db: Arc, entries: CachedDbAccess, metadata: CachedDbItem, } impl MultiConsensusManagementStore { - pub fn new(db: Arc) -> Self { + pub fn new(db: Arc) -> Self { let mut store = Self { db: db.clone(), entries: CachedDbAccess::new(db.clone(), CachePolicy::Count(16), DatabaseStorePrefixes::ConsensusEntries.into()), @@ -258,7 +259,7 @@ pub struct Factory { impl Factory { pub fn new( - management_db: Arc, + management_db: Arc, config: &Config, db_root_dir: PathBuf, db_parallelism: usize, diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 5b09b12cd..843fb1620 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -25,7 +25,7 @@ use crate::{ statuses::StatusesStoreReader, tips::TipsStoreReader, utxo_set::{UtxoSetStore, UtxoSetStoreReader}, - DB, + RocksDB, }, }, pipeline::{ @@ -104,7 +104,7 @@ use std::cmp; pub struct Consensus { // DB - db: Arc, + db: Arc, // Channels block_sender: CrossbeamSender, @@ -150,7 +150,7 @@ impl Deref for Consensus { impl Consensus { pub fn new( - db: Arc, + db: Arc, config: Arc, pruning_lock: SessionLock, notification_root: Arc, diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 97c6d0b76..7f66bb8b7 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -7,7 +7,7 @@ use crate::{ 
block_window_cache::BlockWindowCacheStore, daa::DbDaaStore, depth::DbDepthStore, ghostdag::DbGhostdagStore, headers::DbHeadersStore, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::DbPastPruningPointsStore, pruning::DbPruningStore, reachability::DbReachabilityStore, relations::DbRelationsStore, - selected_chain::DbSelectedChainStore, statuses::DbStatusesStore, DB, + selected_chain::DbSelectedChainStore, statuses::DbStatusesStore, RocksDB, }, }, processes::{ @@ -66,7 +66,7 @@ pub struct ConsensusServices { impl ConsensusServices { pub fn new( - db: Arc, + db: Arc, storage: Arc, config: Arc, tx_script_cache_counters: Arc, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index ac2ab40b2..d5e73d42c 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -20,7 +20,7 @@ use crate::{ utxo_diffs::DbUtxoDiffsStore, utxo_multisets::DbUtxoMultisetsStore, virtual_state::{LkgVirtualState, VirtualStores}, - DB, + RocksDB, }, processes::{ghostdag::ordering::SortableBlock, reachability::inquirer as reachability, relations}, }; @@ -35,7 +35,7 @@ use std::{ops::DerefMut, sync::Arc}; pub struct ConsensusStorage { // DB - db: Arc, + db: Arc, // Locked stores pub statuses_store: Arc>, @@ -73,7 +73,7 @@ pub struct ConsensusStorage { } impl ConsensusStorage { - pub fn new(db: Arc, config: Arc) -> Arc { + pub fn new(db: Arc, config: Arc) -> Arc { let scale_factor = config.ram_scale; let scaled = |s| (s as f64 * scale_factor) as usize; diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index 472bdbd83..e9700495f 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -28,7 +28,7 @@ use crate::{ services::reachability::MTReachabilityService, stores::{ ghostdag::DbGhostdagStore, headers::HeaderStoreReader, pruning::PruningStoreReader, reachability::DbReachabilityStore, - virtual_state::VirtualStores, 
DB, + virtual_state::VirtualStores, RocksDB, }, }, params::Params, @@ -48,7 +48,7 @@ pub struct TestConsensus { impl TestConsensus { /// Creates a test consensus instance based on `config` with the provided `db` and `notification_sender` - pub fn with_db(db: Arc, config: &Config, notification_sender: Sender) -> Self { + pub fn with_db(db: Arc, config: &Config, notification_sender: Sender) -> Self { let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_sender)); let counters = Default::default(); let tx_script_cache_counters = Default::default(); diff --git a/consensus/src/model/stores/acceptance_data.rs b/consensus/src/model/stores/acceptance_data.rs index 83f6c8f13..aa5d8a048 100644 --- a/consensus/src/model/stores/acceptance_data.rs +++ b/consensus/src/model/stores/acceptance_data.rs @@ -3,8 +3,8 @@ use kaspa_consensus_core::acceptance_data::AcceptedTxEntry; use kaspa_consensus_core::acceptance_data::MergesetBlockAcceptanceData; use kaspa_consensus_core::BlockHasher; use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreError; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -39,12 +39,12 @@ impl MemSizeEstimator for AcceptanceDataEntry { /// A DB + cache implementation of `DbAcceptanceDataStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbAcceptanceDataStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbAcceptanceDataStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::AcceptanceData.into()) } } diff --git a/consensus/src/model/stores/block_transactions.rs b/consensus/src/model/stores/block_transactions.rs index 504268288..c63634479 100644 --- a/consensus/src/model/stores/block_transactions.rs +++ b/consensus/src/model/stores/block_transactions.rs @@ -1,8 +1,8 @@ use kaspa_consensus_core::tx::{TransactionInput, TransactionOutput}; use kaspa_consensus_core::{tx::Transaction, BlockHasher}; use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreError; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -44,12 +44,12 @@ impl MemSizeEstimator for BlockBody { /// A DB + cache implementation of `BlockTransactionsStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbBlockTransactionsStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbBlockTransactionsStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::BlockTransactions.into()) } } diff --git a/consensus/src/model/stores/children.rs b/consensus/src/model/stores/children.rs index 2b45f342a..a3e96f0ca 100644 --- a/consensus/src/model/stores/children.rs +++ b/consensus/src/model/stores/children.rs @@ -6,9 +6,9 @@ use kaspa_database::prelude::CachePolicy; use kaspa_database::prelude::CachedDbSetAccess; use kaspa_database::prelude::DbWriter; use kaspa_database::prelude::ReadLock; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreError; use kaspa_database::prelude::StoreResult; -use kaspa_database::prelude::DB; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; use rocksdb::WriteBatch; @@ -26,12 +26,12 @@ pub trait ChildrenStore { /// A DB + cache implementation of `DbChildrenStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbChildrenStore { - db: Arc, + db: Arc, access: CachedDbSetAccess, } impl DbChildrenStore { - pub fn new(db: Arc, level: BlockLevel, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, level: BlockLevel, cache_policy: CachePolicy) -> Self { let lvl_bytes = level.to_le_bytes(); Self { db: Arc::clone(&db), @@ -43,7 +43,7 @@ impl DbChildrenStore { } } - pub fn with_prefix(db: Arc, prefix: &[u8], cache_policy: CachePolicy) -> Self { + pub fn with_prefix(db: Arc, prefix: &[u8], cache_policy: CachePolicy) -> Self { let db_prefix = prefix.iter().copied().chain(DatabaseStorePrefixes::RelationsChildren).collect(); Self { db: Arc::clone(&db), access: CachedDbSetAccess::new(db, cache_policy, db_prefix) } } diff --git a/consensus/src/model/stores/daa.rs b/consensus/src/model/stores/daa.rs index b87ed9b64..f4d97317e 100644 --- a/consensus/src/model/stores/daa.rs +++ b/consensus/src/model/stores/daa.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use kaspa_consensus_core::{BlockHashSet, BlockHasher}; use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreError; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -22,12 +22,12 @@ pub trait DaaStore: DaaStoreReader { /// A DB + cache implementation of `DaaStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbDaaStore { - db: Arc, + db: Arc, access: CachedDbAccess, BlockHasher>, } impl DbDaaStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::NonDaaMergeset.into()) } } diff --git a/consensus/src/model/stores/depth.rs b/consensus/src/model/stores/depth.rs index 730b442d4..8cc6f5bc4 100644 --- a/consensus/src/model/stores/depth.rs +++ b/consensus/src/model/stores/depth.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use kaspa_consensus_core::BlockHasher; use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreError; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -33,12 +33,12 @@ impl MemSizeEstimator for BlockDepthInfo {} /// A DB + cache implementation of `DepthStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbDepthStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbDepthStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::BlockDepth.into()) } } diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index fd2600a1c..befa682e2 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -2,7 +2,7 @@ use crate::processes::ghostdag::ordering::SortableBlock; use kaspa_consensus_core::trusted::ExternalGhostdagData; use kaspa_consensus_core::{blockhash::BlockHashes, BlueWorkType}; use kaspa_consensus_core::{BlockHashMap, BlockHasher, BlockLevel, HashMapCustomHasher}; -use kaspa_database::prelude::DB; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DbKey}; use kaspa_database::prelude::{CachePolicy, StoreError}; use kaspa_database::registry::{DatabaseStorePrefixes, SEPARATOR}; @@ -250,14 +250,14 @@ pub trait GhostdagStore: GhostdagStoreReader { /// A DB + cache implementation of `GhostdagStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbGhostdagStore { - db: Arc, + db: Arc, level: BlockLevel, access: CachedDbAccess, BlockHasher>, compact_access: CachedDbAccess, } impl DbGhostdagStore { - pub fn new(db: Arc, level: BlockLevel, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, level: BlockLevel, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); let lvl_bytes = level.to_le_bytes(); let prefix = DatabaseStorePrefixes::Ghostdag.into_iter().chain(lvl_bytes).collect_vec(); @@ -271,7 +271,7 @@ impl DbGhostdagStore { } pub fn new_temp( - db: Arc, + db: Arc, level: BlockLevel, cache_policy: CachePolicy, compact_cache_policy: CachePolicy, diff --git a/consensus/src/model/stores/headers.rs b/consensus/src/model/stores/headers.rs index 85668f699..37c2fdc2a 100644 --- a/consensus/src/model/stores/headers.rs +++ b/consensus/src/model/stores/headers.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use kaspa_consensus_core::{header::Header, BlockHasher, BlockLevel}; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess}; -use kaspa_database::prelude::{CachePolicy, DB}; +use kaspa_database::prelude::{CachePolicy, RocksDB}; use kaspa_database::prelude::{StoreError, StoreResult}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -57,13 +57,13 @@ impl From<&Header> for CompactHeaderData { /// A DB + cache implementation of `HeaderStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbHeadersStore { - db: Arc, + db: Arc, compact_headers_access: CachedDbAccess, headers_access: CachedDbAccess, } impl DbHeadersStore { - pub fn new(db: Arc, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), compact_headers_access: CachedDbAccess::new( diff --git a/consensus/src/model/stores/headers_selected_tip.rs b/consensus/src/model/stores/headers_selected_tip.rs index 04a18566f..d5910b5f7 100644 --- a/consensus/src/model/stores/headers_selected_tip.rs +++ b/consensus/src/model/stores/headers_selected_tip.rs @@ -1,6 +1,6 @@ use crate::processes::ghostdag::ordering::SortableBlock; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreResult; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbItem, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use rocksdb::WriteBatch; @@ -18,12 +18,12 @@ pub trait HeadersSelectedTipStore: HeadersSelectedTipStoreReader { /// A DB + cache implementation of `HeadersSelectedTipStore` trait #[derive(Clone)] pub struct DbHeadersSelectedTipStore { - db: Arc, + db: Arc, access: CachedDbItem, } impl DbHeadersSelectedTipStore { - pub fn new(db: Arc) -> Self { + pub fn new(db: Arc) -> Self { Self { db: Arc::clone(&db), access: CachedDbItem::new(db, DatabaseStorePrefixes::HeadersSelectedTip.into()) } } diff --git a/consensus/src/model/stores/mod.rs b/consensus/src/model/stores/mod.rs index 9fda33296..f9b1baa64 100644 --- a/consensus/src/model/stores/mod.rs +++ b/consensus/src/model/stores/mod.rs @@ -21,7 +21,7 @@ pub mod utxo_set; pub mod virtual_state; pub use kaspa_database; -pub use kaspa_database::prelude::DB; +pub use kaspa_database::prelude::RocksDB; use std::fmt::Display; #[derive(PartialEq, Eq, Clone, Copy, Hash)] diff --git a/consensus/src/model/stores/past_pruning_points.rs 
b/consensus/src/model/stores/past_pruning_points.rs index b0bccdcf9..aaef48450 100644 --- a/consensus/src/model/stores/past_pruning_points.rs +++ b/consensus/src/model/stores/past_pruning_points.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; -use kaspa_database::prelude::{CachePolicy, DB}; +use kaspa_database::prelude::{CachePolicy, RocksDB}; use kaspa_database::prelude::{StoreError, StoreResult}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -22,12 +22,12 @@ pub trait PastPruningPointsStore: PastPruningPointsStoreReader { /// A DB + cache implementation of `PastPruningPointsStore` trait, with concurrency support. #[derive(Clone)] pub struct DbPastPruningPointsStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbPastPruningPointsStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::PastPruningPoints.into()) } } diff --git a/consensus/src/model/stores/pruning.rs b/consensus/src/model/stores/pruning.rs index 167a47001..50afe4c94 100644 --- a/consensus/src/model/stores/pruning.rs +++ b/consensus/src/model/stores/pruning.rs @@ -1,7 +1,7 @@ use std::sync::Arc; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreResult; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbItem, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -51,13 +51,13 @@ pub trait PruningStore: PruningStoreReader { /// A DB + cache implementation of `PruningStore` trait, with concurrent readers support. 
#[derive(Clone)] pub struct DbPruningStore { - db: Arc, + db: Arc, access: CachedDbItem, history_root_access: CachedDbItem, } impl DbPruningStore { - pub fn new(db: Arc) -> Self { + pub fn new(db: Arc) -> Self { Self { db: Arc::clone(&db), access: CachedDbItem::new(db.clone(), DatabaseStorePrefixes::PruningPoint.into()), diff --git a/consensus/src/model/stores/pruning_utxoset.rs b/consensus/src/model/stores/pruning_utxoset.rs index 116134514..e50a74aae 100644 --- a/consensus/src/model/stores/pruning_utxoset.rs +++ b/consensus/src/model/stores/pruning_utxoset.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreResult; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbItem}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -17,7 +17,7 @@ pub struct PruningUtxosetStores { } impl PruningUtxosetStores { - pub fn new(db: Arc, utxoset_cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, utxoset_cache_policy: CachePolicy) -> Self { Self { utxo_set: DbUtxoSetStore::new(db.clone(), utxoset_cache_policy, DatabaseStorePrefixes::PruningUtxoset.into()), utxoset_position_access: CachedDbItem::new(db, DatabaseStorePrefixes::PruningUtxosetPosition.into()), diff --git a/consensus/src/model/stores/reachability.rs b/consensus/src/model/stores/reachability.rs index 71b3d50d2..9a3ea9ea7 100644 --- a/consensus/src/model/stores/reachability.rs +++ b/consensus/src/model/stores/reachability.rs @@ -5,7 +5,8 @@ use kaspa_consensus_core::{ }; use kaspa_database::{ prelude::{ - BatchDbWriter, Cache, CachePolicy, CachedDbAccess, CachedDbItem, DbKey, DbSetAccess, DbWriter, DirectDbWriter, StoreError, DB, + BatchDbWriter, Cache, CachePolicy, CachedDbAccess, CachedDbItem, DbKey, DbSetAccess, DbWriter, DirectDbWriter, RocksDB, + StoreError, }, registry::{DatabaseStorePrefixes, SEPARATOR}, }; @@ -171,7 +172,7 @@ impl 
DbReachabilitySet { /// A DB + cache implementation of `ReachabilityStore` trait, with concurrent readers support. #[derive(Clone)] pub struct DbReachabilityStore { - db: Arc, + db: Arc, access: CachedDbAccess, // Main access children_access: DbReachabilitySet, // Tree children fcs_access: DbReachabilitySet, // Future Covering Set @@ -180,16 +181,16 @@ pub struct DbReachabilityStore { } impl DbReachabilityStore { - pub fn new(db: Arc, cache_policy: CachePolicy, sets_cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy, sets_cache_policy: CachePolicy) -> Self { Self::with_prefix_end(db, cache_policy, sets_cache_policy, DatabaseStorePrefixes::Separator.into()) } - pub fn with_block_level(db: Arc, cache_policy: CachePolicy, sets_cache_policy: CachePolicy, level: BlockLevel) -> Self { + pub fn with_block_level(db: Arc, cache_policy: CachePolicy, sets_cache_policy: CachePolicy, level: BlockLevel) -> Self { assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); Self::with_prefix_end(db, cache_policy, sets_cache_policy, level) } - fn with_prefix_end(db: Arc, cache_policy: CachePolicy, sets_cache_policy: CachePolicy, prefix_end: u8) -> Self { + fn with_prefix_end(db: Arc, cache_policy: CachePolicy, sets_cache_policy: CachePolicy, prefix_end: u8) -> Self { let store_prefix = DatabaseStorePrefixes::Reachability.into_iter().chain(once(prefix_end)).collect_vec(); let children_prefix = DatabaseStorePrefixes::ReachabilityTreeChildren.into_iter().chain(once(prefix_end)).collect_vec(); let fcs_prefix = DatabaseStorePrefixes::ReachabilityFutureCoveringSet.into_iter().chain(once(prefix_end)).collect_vec(); diff --git a/consensus/src/model/stores/relations.rs b/consensus/src/model/stores/relations.rs index 4734f099a..6552ada33 100644 --- a/consensus/src/model/stores/relations.rs +++ b/consensus/src/model/stores/relations.rs @@ -5,7 +5,7 @@ use kaspa_database::prelude::{BatchDbWriter, CachePolicy, DbWriter}; use 
kaspa_database::prelude::{CachedDbAccess, DbKey, DirectDbWriter}; use kaspa_database::prelude::{DirectWriter, MemoryWriter}; use kaspa_database::prelude::{ReadLock, StoreError}; -use kaspa_database::prelude::{StoreResult, DB}; +use kaspa_database::prelude::{RocksDB, StoreResult}; use kaspa_database::registry::{DatabaseStorePrefixes, SEPARATOR}; use kaspa_hashes::Hash; use rocksdb::WriteBatch; @@ -38,13 +38,13 @@ pub trait RelationsStore: RelationsStoreReader { /// A DB + cache implementation of `RelationsStore` trait, with concurrent readers support. #[derive(Clone)] pub struct DbRelationsStore { - db: Arc, + db: Arc, parents_access: CachedDbAccess>, BlockHasher>, children_store: DbChildrenStore, } impl DbRelationsStore { - pub fn new(db: Arc, level: BlockLevel, cache_policy: CachePolicy, children_cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, level: BlockLevel, cache_policy: CachePolicy, children_cache_policy: CachePolicy) -> Self { assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); let lvl_bytes = level.to_le_bytes(); let parents_prefix = DatabaseStorePrefixes::RelationsParents.into_iter().chain(lvl_bytes).collect_vec(); @@ -56,7 +56,7 @@ impl DbRelationsStore { } } - pub fn with_prefix(db: Arc, prefix: &[u8], cache_policy: CachePolicy, children_cache_policy: CachePolicy) -> Self { + pub fn with_prefix(db: Arc, prefix: &[u8], cache_policy: CachePolicy, children_cache_policy: CachePolicy) -> Self { let parents_prefix = prefix.iter().copied().chain(DatabaseStorePrefixes::RelationsParents).collect_vec(); Self { db: Arc::clone(&db), diff --git a/consensus/src/model/stores/selected_chain.rs b/consensus/src/model/stores/selected_chain.rs index 4b73d52c9..af844490e 100644 --- a/consensus/src/model/stores/selected_chain.rs +++ b/consensus/src/model/stores/selected_chain.rs @@ -7,7 +7,7 @@ use rocksdb::WriteBatch; use std::sync::Arc; use kaspa_database::prelude::{BatchDbWriter, CachePolicy, CachedDbAccess, DbWriter}; -use 
kaspa_database::prelude::{CachedDbItem, DB}; +use kaspa_database::prelude::{CachedDbItem, RocksDB}; use kaspa_database::prelude::{StoreError, StoreResult}; use kaspa_hashes::Hash; @@ -31,14 +31,14 @@ pub trait SelectedChainStore: SelectedChainStoreReader { /// A DB + cache implementation of `SelectedChainStore` trait, with concurrent readers support. #[derive(Clone)] pub struct DbSelectedChainStore { - db: Arc, + db: Arc, access_hash_by_index: CachedDbAccess, access_index_by_hash: CachedDbAccess, access_highest_index: CachedDbItem, } impl DbSelectedChainStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access_hash_by_index: CachedDbAccess::new(db.clone(), cache_policy, DatabaseStorePrefixes::ChainHashByIndex.into()), diff --git a/consensus/src/model/stores/statuses.rs b/consensus/src/model/stores/statuses.rs index 48b1d378d..a6f7c55d5 100644 --- a/consensus/src/model/stores/statuses.rs +++ b/consensus/src/model/stores/statuses.rs @@ -5,7 +5,7 @@ use rocksdb::WriteBatch; use std::sync::Arc; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; -use kaspa_database::prelude::{CachePolicy, DB}; +use kaspa_database::prelude::{CachePolicy, RocksDB}; use kaspa_database::prelude::{StoreError, StoreResult}; use kaspa_hashes::Hash; @@ -26,12 +26,12 @@ pub trait StatusesStore: StatusesStoreReader { /// A DB + cache implementation of `StatusesStore` trait, with concurrent readers support. 
#[derive(Clone)] pub struct DbStatusesStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbStatusesStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::Statuses.into()) } } diff --git a/consensus/src/model/stores/tips.rs b/consensus/src/model/stores/tips.rs index 06dd13b0c..811d8f95c 100644 --- a/consensus/src/model/stores/tips.rs +++ b/consensus/src/model/stores/tips.rs @@ -5,9 +5,9 @@ use kaspa_consensus_core::BlockHasher; use kaspa_database::prelude::CachedDbSetItem; use kaspa_database::prelude::DbWriter; use kaspa_database::prelude::ReadLock; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreResult; use kaspa_database::prelude::StoreResultExtensions; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -43,12 +43,12 @@ pub trait TipsStore: TipsStoreReader { /// A DB + cache implementation of `TipsStore` trait #[derive(Clone)] pub struct DbTipsStore { - db: Arc, + db: Arc, access: CachedDbSetItem, } impl DbTipsStore { - pub fn new(db: Arc) -> Self { + pub fn new(db: Arc) -> Self { Self { db: Arc::clone(&db), access: CachedDbSetItem::new(db, DatabaseStorePrefixes::Tips.into()) } } diff --git a/consensus/src/model/stores/utxo_diffs.rs b/consensus/src/model/stores/utxo_diffs.rs index 079f08ecb..326687ac6 100644 --- a/consensus/src/model/stores/utxo_diffs.rs +++ b/consensus/src/model/stores/utxo_diffs.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use kaspa_consensus_core::{utxo::utxo_diff::UtxoDiff, BlockHasher}; use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreError; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; use 
kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -27,12 +27,12 @@ pub trait UtxoDiffsStore: UtxoDiffsStoreReader { /// A DB + cache implementation of `UtxoDifferencesStore` trait, with concurrency support. #[derive(Clone)] pub struct DbUtxoDiffsStore { - db: Arc, + db: Arc, access: CachedDbAccess, BlockHasher>, } impl DbUtxoDiffsStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::UtxoDiffs.into()) } } diff --git a/consensus/src/model/stores/utxo_multisets.rs b/consensus/src/model/stores/utxo_multisets.rs index d60b41779..4bc31c056 100644 --- a/consensus/src/model/stores/utxo_multisets.rs +++ b/consensus/src/model/stores/utxo_multisets.rs @@ -1,7 +1,7 @@ use kaspa_consensus_core::BlockHasher; use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreError; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; @@ -22,12 +22,12 @@ pub trait UtxoMultisetsStore: UtxoMultisetsStoreReader { /// A DB + cache implementation of `DbUtxoMultisetsStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbUtxoMultisetsStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbUtxoMultisetsStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::UtxoMultisets.into()) } } diff --git a/consensus/src/model/stores/utxo_set.rs b/consensus/src/model/stores/utxo_set.rs index 03add0948..4429dfeea 100644 --- a/consensus/src/model/stores/utxo_set.rs +++ b/consensus/src/model/stores/utxo_set.rs @@ -5,8 +5,8 @@ use kaspa_consensus_core::{ utxo_view::UtxoView, }, }; +use kaspa_database::prelude::RocksDB; use kaspa_database::prelude::StoreResultExtensions; -use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; use kaspa_database::prelude::{CachePolicy, StoreError}; use kaspa_hashes::Hash; @@ -89,13 +89,13 @@ impl From for TransactionOutpoint { #[derive(Clone)] pub struct DbUtxoSetStore { - db: Arc, + db: Arc, prefix: Vec, access: CachedDbAccess>, } impl DbUtxoSetStore { - pub fn new(db: Arc, cache_policy: CachePolicy, prefix: Vec) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy, prefix: Vec) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, prefix.clone()), prefix } } diff --git a/consensus/src/model/stores/virtual_state.rs b/consensus/src/model/stores/virtual_state.rs index 62672a913..fb26725e9 100644 --- a/consensus/src/model/stores/virtual_state.rs +++ b/consensus/src/model/stores/virtual_state.rs @@ -9,7 +9,7 @@ use kaspa_consensus_core::{ }; use kaspa_database::prelude::{BatchDbWriter, CachedDbItem, DirectDbWriter, StoreResultExtensions}; use kaspa_database::prelude::{CachePolicy, StoreResult}; -use kaspa_database::prelude::{StoreError, DB}; +use kaspa_database::prelude::{RocksDB, StoreError}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; use 
kaspa_muhash::MuHash; @@ -134,7 +134,7 @@ pub struct VirtualStores { } impl VirtualStores { - pub fn new(db: Arc, lkg_virtual_state: LkgVirtualState, utxoset_cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, lkg_virtual_state: LkgVirtualState, utxoset_cache_policy: CachePolicy) -> Self { Self { state: DbVirtualStateStore::new(db.clone(), lkg_virtual_state), utxo_set: DbUtxoSetStore::new(db, utxoset_cache_policy, DatabaseStorePrefixes::VirtualUtxoset.into()), @@ -154,14 +154,14 @@ pub trait VirtualStateStore: VirtualStateStoreReader { /// A DB + cache implementation of `VirtualStateStore` trait #[derive(Clone)] pub struct DbVirtualStateStore { - db: Arc, + db: Arc, access: CachedDbItem>, /// The "last known good" virtual state lkg_virtual_state: LkgVirtualState, } impl DbVirtualStateStore { - pub fn new(db: Arc, lkg_virtual_state: LkgVirtualState) -> Self { + pub fn new(db: Arc, lkg_virtual_state: LkgVirtualState) -> Self { let access = CachedDbItem::new(db.clone(), DatabaseStorePrefixes::VirtualState.into()); // Init the LKG cache from DB store data lkg_virtual_state.store(access.read().unwrap_option().unwrap_or_default()); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 4191a01ce..b89ad45d9 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -10,7 +10,7 @@ use crate::{ reachability::DbReachabilityStore, statuses::{DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader}, tips::{DbTipsStore, TipsStore}, - DB, + RocksDB, }, }, pipeline::{ @@ -48,7 +48,7 @@ pub struct BlockBodyProcessor { pub(super) thread_pool: Arc, // DB - db: Arc, + db: Arc, // Config pub(super) max_block_mass: u64, @@ -91,7 +91,7 @@ impl BlockBodyProcessor { sender: Sender, thread_pool: Arc, - db: Arc, + db: Arc, statuses_store: Arc>, ghostdag_store: Arc, headers_store: Arc, diff --git 
a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 5d9bf3fed..4efe1f01c 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -20,7 +20,7 @@ use crate::{ reachability::{DbReachabilityStore, StagingReachabilityStore}, relations::{DbRelationsStore, RelationsStoreReader}, statuses::{DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader}, - DB, + RocksDB, }, }, params::Params, @@ -121,7 +121,7 @@ pub struct HeaderProcessor { pub(super) max_block_level: BlockLevel, // DB - db: Arc, + db: Arc, // Stores pub(super) relations_stores: Arc>>, @@ -162,7 +162,7 @@ impl HeaderProcessor { body_sender: Sender, thread_pool: Arc, params: &Params, - db: Arc, + db: Arc, storage: &Arc, services: &Arc, pruning_lock: SessionLock, diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 2de19c265..8ed8ca8e8 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -35,7 +35,7 @@ use kaspa_consensus_core::{ }; use kaspa_consensusmanager::SessionLock; use kaspa_core::{debug, info, warn}; -use kaspa_database::prelude::{BatchDbWriter, MemoryWriter, StoreResultExtensions, DB}; +use kaspa_database::prelude::{BatchDbWriter, MemoryWriter, RocksDB, StoreResultExtensions}; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_utils::iter::IterExtensions; @@ -62,7 +62,7 @@ pub struct PruningProcessor { receiver: CrossbeamReceiver, // DB - db: Arc, + db: Arc, // Storage storage: Arc, @@ -94,7 +94,7 @@ impl Deref for PruningProcessor { impl PruningProcessor { pub fn new( receiver: CrossbeamReceiver, - db: Arc, + db: Arc, storage: &Arc, services: &Arc, pruning_lock: SessionLock, diff --git a/consensus/src/pipeline/virtual_processor/processor.rs 
b/consensus/src/pipeline/virtual_processor/processor.rs index 5a3cf1682..f9cd2dd40 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -31,7 +31,7 @@ use crate::{ utxo_diffs::{DbUtxoDiffsStore, UtxoDiffsStoreReader}, utxo_multisets::{DbUtxoMultisetsStore, UtxoMultisetsStoreReader}, virtual_state::{LkgVirtualState, VirtualState, VirtualStateStoreReader, VirtualStores}, - DB, + RocksDB, }, }, params::Params, @@ -106,7 +106,7 @@ pub struct VirtualStateProcessor { pub(super) thread_pool: Arc, // DB - db: Arc, + db: Arc, // Config pub(super) genesis: GenesisBlock, @@ -170,7 +170,7 @@ impl VirtualStateProcessor { pruning_receiver: CrossbeamReceiver, thread_pool: Arc, params: &Params, - db: Arc, + db: Arc, storage: &Arc, services: &Arc, pruning_lock: SessionLock, diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8c059963a..7398f7820 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -59,7 +59,7 @@ use crate::{ selected_chain::{DbSelectedChainStore, SelectedChainStore}, tips::DbTipsStore, virtual_state::{VirtualState, VirtualStateStore, VirtualStateStoreReader, VirtualStores}, - DB, + RocksDB, }, }, processes::{ @@ -144,7 +144,7 @@ impl RelationsStoreReader for R } pub struct PruningProofManager { - db: Arc, + db: Arc, headers_store: Arc, reachability_store: Arc>, @@ -181,7 +181,7 @@ pub struct PruningProofManager { impl PruningProofManager { #[allow(clippy::too_many_arguments)] pub fn new( - db: Arc, + db: Arc, storage: &Arc, parents_manager: DbParentsManager, reachability_service: MTReachabilityService, @@ -874,7 +874,7 @@ impl PruningProofManager { level: BlockLevel, current_dag_level: BlockLevel, required_block: Option, - temp_db: Arc, + temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { let selected_tip_header = if pp_header.block_level >= level { 
pp_header.header.clone() @@ -993,7 +993,7 @@ impl PruningProofManager { fn calc_gd_for_all_levels( &self, pp_header: &HeaderWithBlockLevel, - temp_db: Arc, + temp_db: Arc, ) -> (Vec>, Vec, Vec) { let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; diff --git a/database/src/access.rs b/database/src/access.rs index ad82197db..cce536230 100644 --- a/database/src/access.rs +++ b/database/src/access.rs @@ -1,19 +1,19 @@ -use crate::{cache::CachePolicy, db::DB, errors::StoreError}; +use crate::{cache::CachePolicy, errors::StoreError}; use super::prelude::{Cache, DbKey, DbWriter}; use kaspa_utils::mem_size::MemSizeEstimator; -use rocksdb::{Direction, IterateBounds, IteratorMode, ReadOptions}; use serde::{de::DeserializeOwned, Serialize}; use std::{collections::hash_map::RandomState, error::Error, hash::BuildHasher, sync::Arc}; /// A concurrent DB store access with typed caching. #[derive(Clone)] -pub struct CachedDbAccess +pub struct CachedDbAccess> where TKey: Clone + std::hash::Hash + Eq + Send + Sync, TData: Clone + Send + Sync + MemSizeEstimator, + DB: DbAccess, { - db: Arc, + db: DB, // Cache cache: Cache, @@ -22,13 +22,27 @@ where prefix: Vec, } -impl CachedDbAccess +pub trait DbAccess { + fn has(&self, db_key: DbKey) -> Result; + fn read(&self, db_key: &DbKey) -> Result>, StoreError>; + fn iterator( + &self, + prefix: impl Into>, + seek_from: Option, + ) -> impl Iterator, impl AsRef<[u8]>), Box>> + '_; + fn write(&self, writer: &mut impl DbWriter, db_key: DbKey, data: Vec) -> Result<(), StoreError>; + fn delete(&self, writer: &mut impl DbWriter, db_key: DbKey) -> Result<(), StoreError>; + fn delete_range_by_prefix(&self, writer: &mut impl DbWriter, prefix: &[u8]) -> Result<(), StoreError>; +} + +impl CachedDbAccess where TKey: Clone + std::hash::Hash + Eq + Send + Sync, TData: Clone + Send + Sync + MemSizeEstimator, S: BuildHasher + Default, + DB: DbAccess, { - pub 
fn new(db: Arc, cache_policy: CachePolicy, prefix: Vec) -> Self { + pub fn new(db: DB, cache_policy: CachePolicy, prefix: Vec) -> Self { Self { db, cache: Cache::new(cache_policy), prefix } } @@ -43,7 +57,7 @@ where where TKey: Clone + AsRef<[u8]>, { - Ok(self.cache.contains_key(&key) || self.db.get_pinned(DbKey::new(&self.prefix, key))?.is_some()) + Ok(self.cache.contains_key(&key) || self.db.has(DbKey::new(&self.prefix, key))?) } pub fn read(&self, key: TKey) -> Result @@ -55,8 +69,8 @@ where Ok(data) } else { let db_key = DbKey::new(&self.prefix, key.clone()); - if let Some(slice) = self.db.get_pinned(&db_key)? { - let data: TData = bincode::deserialize(&slice)?; + if let Some(slice) = self.db.read(&db_key.clone())? { + let data: TData = bincode::deserialize(slice.as_ref())?; self.cache.insert(key, data.clone()); Ok(data) } else { @@ -70,17 +84,12 @@ where TKey: Clone + AsRef<[u8]>, TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned` has short lifetime { - let prefix_key = DbKey::prefix_only(&self.prefix); - let mut read_opts = ReadOptions::default(); - read_opts.set_iterate_range(rocksdb::PrefixRange(prefix_key.as_ref())); - self.db.iterator_opt(IteratorMode::From(prefix_key.as_ref(), Direction::Forward), read_opts).map(move |iter_result| { - match iter_result { - Ok((key, data_bytes)) => match bincode::deserialize(&data_bytes) { - Ok(data) => Ok((key[prefix_key.prefix_len()..].into(), data)), - Err(e) => Err(e.into()), - }, + self.db.iterator(self.prefix.to_vec(), None).map(move |iter_result| match iter_result { + Ok((key, data_bytes)) => match bincode::deserialize(data_bytes.as_ref()) { + Ok(data) => Ok((key.as_ref()[self.prefix.len()..].into(), data)), Err(e) => Err(e.into()), - } + }, + Err(e) => Err(e.into()), }) } @@ -91,7 +100,7 @@ where { let bin_data = bincode::serialize(&data)?; self.cache.insert(key.clone(), data); - writer.put(DbKey::new(&self.prefix, key), bin_data)?; + self.db.write(&mut writer, 
DbKey::new(&self.prefix, key), bin_data)?; Ok(()) } @@ -108,7 +117,7 @@ where self.cache.insert_many(iter); for (key, data) in iter_clone { let bin_data = bincode::serialize(&data)?; - writer.put(DbKey::new(&self.prefix, key.clone()), bin_data)?; + self.db.write(&mut writer, DbKey::new(&self.prefix, key), bin_data)?; } Ok(()) } @@ -125,7 +134,7 @@ where { for (key, data) in iter { let bin_data = bincode::serialize(&data)?; - writer.put(DbKey::new(&self.prefix, key), bin_data)?; + self.db.write(&mut writer, DbKey::new(&self.prefix, key), bin_data)?; } // We must clear the cache in order to avoid invalidated entries self.cache.remove_all(); @@ -137,7 +146,7 @@ where TKey: Clone + AsRef<[u8]>, { self.cache.remove(&key); - writer.delete(DbKey::new(&self.prefix, key))?; + self.db.delete(&mut writer, DbKey::new(&self.prefix, key))?; Ok(()) } @@ -148,7 +157,7 @@ where let key_iter_clone = key_iter.clone(); self.cache.remove_many(key_iter); for key in key_iter_clone { - writer.delete(DbKey::new(&self.prefix, key.clone()))?; + self.db.delete(&mut writer, DbKey::new(&self.prefix, key.clone()))?; } Ok(()) } @@ -160,8 +169,7 @@ where { self.cache.remove_all(); let db_key = DbKey::prefix_only(&self.prefix); - let (from, to) = rocksdb::PrefixRange(db_key.as_ref()).into_bounds(); - writer.delete_range(from.unwrap(), to.unwrap())?; + self.db.delete_range_by_prefix(&mut writer, db_key.as_ref())?; Ok(()) } @@ -186,16 +194,8 @@ where key }, ); - - let mut read_opts = ReadOptions::default(); - read_opts.set_iterate_range(rocksdb::PrefixRange(db_key.as_ref())); - - let mut db_iterator = match seek_from { - Some(seek_key) => { - self.db.iterator_opt(IteratorMode::From(DbKey::new(&self.prefix, seek_key).as_ref(), Direction::Forward), read_opts) - } - None => self.db.iterator_opt(IteratorMode::Start, read_opts), - }; + let db_key_prefix_len = db_key.prefix_len(); + let mut db_iterator = self.db.iterator(db_key, seek_from.map(|seek_key| DbKey::new(&self.prefix, seek_key))); if skip_first { 
db_iterator.next(); @@ -203,7 +203,7 @@ where db_iterator.take(limit).map(move |item| match item { Ok((key_bytes, value_bytes)) => match bincode::deserialize::(value_bytes.as_ref()) { - Ok(value) => Ok((key_bytes[db_key.prefix_len()..].into(), value)), + Ok(value) => Ok((key_bytes.as_ref()[db_key_prefix_len..].into(), value)), Err(err) => Err(err.into()), }, Err(err) => Err(err.into()), @@ -217,7 +217,9 @@ where #[cfg(test)] mod tests { - use super::*; + // use super::*; + use crate::access::CachedDbAccess; + use crate::cache::CachePolicy; use crate::{ create_temp_db, prelude::{BatchDbWriter, ConnBuilder, DirectDbWriter}, diff --git a/database/src/db.rs b/database/src/db.rs index b1d6bf24e..d16cdcb1d 100644 --- a/database/src/db.rs +++ b/database/src/db.rs @@ -1,31 +1,37 @@ -use rocksdb::{DBWithThreadMode, MultiThreaded}; -use std::ops::{Deref, DerefMut}; -use std::path::PathBuf; - +use crate::access::DbAccess; +use crate::errors::StoreError; +use crate::key::DbKey; +use crate::prelude::DbWriter; pub use conn_builder::ConnBuilder; +use itertools::Either; use kaspa_utils::fd_budget::FDGuard; +use rocksdb::{DBWithThreadMode, Direction, IterateBounds, IteratorMode, MultiThreaded, ReadOptions}; +use std::borrow::Borrow; +use std::error::Error; +use std::ops::{Deref, DerefMut}; +use std::path::PathBuf; mod conn_builder; /// The DB type used for Kaspad stores -pub struct DB { +pub struct RocksDB { inner: DBWithThreadMode, _fd_guard: FDGuard, } -impl DB { +impl RocksDB { pub fn new(inner: DBWithThreadMode, fd_guard: FDGuard) -> Self { Self { inner, _fd_guard: fd_guard } } } -impl DerefMut for DB { +impl DerefMut for RocksDB { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl Deref for DB { +impl Deref for RocksDB { type Target = DBWithThreadMode; fn deref(&self) -> &Self::Target { @@ -42,3 +48,48 @@ pub fn delete_db(db_dir: PathBuf) { let path = db_dir.to_str().unwrap(); >::destroy(&options, path).expect("DB is expected to be deletable"); } + +impl> 
DbAccess for T { + fn has(&self, db_key: DbKey) -> Result { + Ok(self.borrow().get_pinned(db_key)?.is_some()) + } + + fn read(&self, db_key: &DbKey) -> Result>, StoreError> { + Ok(self.borrow().get_pinned(&db_key)?) + } + + fn iterator( + &self, + prefix: impl Into>, + seek_from: Option, + ) -> impl Iterator, impl AsRef<[u8]>), Box>> + '_ { + let prefix = prefix.into(); + seek_from.as_ref().inspect(|seek_from| debug_assert!(seek_from.as_ref().starts_with(prefix.as_ref()))); + let mut read_opts = ReadOptions::default(); + read_opts.set_iterate_range(rocksdb::PrefixRange(prefix)); + Iterator::map( + { + if let Some(seek_from) = seek_from { + Either::Left(self.borrow().iterator_opt(IteratorMode::From(seek_from.as_ref(), Direction::Forward), read_opts)) + } else { + Either::Right(self.borrow().iterator_opt(IteratorMode::Start, read_opts)) + } + }, + |r| r.map_err(Into::into), + ) + } + + fn write(&self, writer: &mut impl DbWriter, db_key: DbKey, data: Vec) -> Result<(), StoreError> { + Ok(writer.put(db_key, data)?) + } + + fn delete(&self, writer: &mut impl DbWriter, db_key: DbKey) -> Result<(), StoreError> { + Ok(writer.delete(db_key)?) + } + + fn delete_range_by_prefix(&self, writer: &mut impl DbWriter, prefix: &[u8]) -> Result<(), StoreError> { + let (from, to) = rocksdb::PrefixRange(prefix).into_bounds(); + writer.delete_range(from.unwrap(), to.unwrap())?; + Ok(()) + } +} diff --git a/database/src/db/conn_builder.rs b/database/src/db/conn_builder.rs index 6de330f81..ebb21bb7d 100644 --- a/database/src/db/conn_builder.rs +++ b/database/src/db/conn_builder.rs @@ -1,4 +1,4 @@ -use crate::db::DB; +use crate::db::RocksDB; use rocksdb::{DBWithThreadMode, MultiThreaded}; use std::{path::PathBuf, sync::Arc}; @@ -112,29 +112,32 @@ macro_rules! 
default_opts { } impl ConnBuilder { - pub fn build(self) -> Result, kaspa_utils::fd_budget::Error> { + pub fn build(self) -> Result, kaspa_utils::fd_budget::Error> { let (opts, guard) = default_opts!(self)?; - let db = Arc::new(DB::new(>::open(&opts, self.db_path.to_str().unwrap()).unwrap(), guard)); + let db = + Arc::new(RocksDB::new(>::open(&opts, self.db_path.to_str().unwrap()).unwrap(), guard)); Ok(db) } } impl ConnBuilder { - pub fn build(self) -> Result, kaspa_utils::fd_budget::Error> { + pub fn build(self) -> Result, kaspa_utils::fd_budget::Error> { let (mut opts, guard) = default_opts!(self)?; opts.enable_statistics(); - let db = Arc::new(DB::new(>::open(&opts, self.db_path.to_str().unwrap()).unwrap(), guard)); + let db = + Arc::new(RocksDB::new(>::open(&opts, self.db_path.to_str().unwrap()).unwrap(), guard)); Ok(db) } } impl ConnBuilder { - pub fn build(self) -> Result, kaspa_utils::fd_budget::Error> { + pub fn build(self) -> Result, kaspa_utils::fd_budget::Error> { let (mut opts, guard) = default_opts!(self)?; opts.enable_statistics(); opts.set_report_bg_io_stats(true); opts.set_stats_dump_period_sec(self.stats_period); - let db = Arc::new(DB::new(>::open(&opts, self.db_path.to_str().unwrap()).unwrap(), guard)); + let db = + Arc::new(RocksDB::new(>::open(&opts, self.db_path.to_str().unwrap()).unwrap(), guard)); Ok(db) } } diff --git a/database/src/item.rs b/database/src/item.rs index bb14b40b7..66f4e6044 100644 --- a/database/src/item.rs +++ b/database/src/item.rs @@ -1,5 +1,5 @@ use crate::{ - db::DB, + db::RocksDB, errors::StoreError, prelude::{DbSetAccess, ReadLock}, }; @@ -16,13 +16,13 @@ use std::{ /// A cached DB item with concurrency support #[derive(Clone)] pub struct CachedDbItem { - db: Arc, + db: Arc, key: Vec, cached_item: Arc>>, } impl CachedDbItem { - pub fn new(db: Arc, key: Vec) -> Self { + pub fn new(db: Arc, key: Vec) -> Self { Self { db, key, cached_item: Arc::new(RwLock::new(None)) } } @@ -104,7 +104,7 @@ where T: Clone + 
std::hash::Hash + Eq + Send + Sync + DeserializeOwned + Serialize, S: BuildHasher + Default, { - pub fn new(db: Arc, key: Vec) -> Self { + pub fn new(db: Arc, key: Vec) -> Self { Self { access: DbSetAccess::new(db, key), cached_set: Arc::new(RwLock::new(None)) } } diff --git a/database/src/key.rs b/database/src/key.rs index 83fa8ebb2..338fd72e1 100644 --- a/database/src/key.rs +++ b/database/src/key.rs @@ -8,6 +8,12 @@ pub struct DbKey { prefix_len: usize, } +impl From for Vec { + fn from(value: DbKey) -> Self { + value.path.to_vec() + } +} + impl DbKey { pub fn new(prefix: &[u8], key: TKey) -> Self where diff --git a/database/src/lib.rs b/database/src/lib.rs index 5afc908c7..99d3667fe 100644 --- a/database/src/lib.rs +++ b/database/src/lib.rs @@ -19,6 +19,6 @@ pub mod prelude { pub use super::key::DbKey; pub use super::set_access::{CachedDbSetAccess, DbSetAccess, ReadLock}; pub use super::writer::{BatchDbWriter, DbWriter, DirectDbWriter, DirectWriter, MemoryWriter}; - pub use db::{delete_db, ConnBuilder, DB}; + pub use db::{delete_db, ConnBuilder, RocksDB}; pub use errors::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; } diff --git a/database/src/set_access.rs b/database/src/set_access.rs index 3b598081e..df8236bcb 100644 --- a/database/src/set_access.rs +++ b/database/src/set_access.rs @@ -1,4 +1,4 @@ -use crate::{cache::CachePolicy, db::DB, errors::StoreError}; +use crate::{cache::CachePolicy, db::RocksDB, errors::StoreError}; use super::prelude::{Cache, DbKey, DbWriter}; use parking_lot::{RwLock, RwLockReadGuard}; @@ -54,7 +54,7 @@ where S: BuildHasher + Default, W: BuildHasher + Default + Send + Sync, { - pub fn new(db: Arc, cache_policy: CachePolicy, prefix: Vec) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy, prefix: Vec) -> Self { Self { inner: DbSetAccess::new(db, prefix), cache: Cache::new(cache_policy) } } @@ -112,7 +112,7 @@ where TKey: Clone + std::hash::Hash + Eq + Send + Sync, TData: Clone + Send + Sync, { - db: 
Arc, + db: Arc, // DB bucket/path prefix: Vec, @@ -125,7 +125,7 @@ where TKey: Clone + std::hash::Hash + Eq + Send + Sync + AsRef<[u8]>, TData: Clone + std::hash::Hash + Eq + Send + Sync + DeserializeOwned + Serialize, { - pub fn new(db: Arc, prefix: Vec) -> Self { + pub fn new(db: Arc, prefix: Vec) -> Self { Self { db, prefix, _phantom: Default::default() } } diff --git a/database/src/utils.rs b/database/src/utils.rs index cb12d36df..7a40e3e52 100644 --- a/database/src/utils.rs +++ b/database/src/utils.rs @@ -1,21 +1,21 @@ -use crate::prelude::DB; +use crate::prelude::RocksDB; use std::sync::Weak; use tempfile::TempDir; #[derive(Default)] pub struct DbLifetime { - weak_db_ref: Weak, + weak_db_ref: Weak, optional_tempdir: Option, } impl DbLifetime { - pub fn new(tempdir: TempDir, weak_db_ref: Weak) -> Self { + pub fn new(tempdir: TempDir, weak_db_ref: Weak) -> Self { Self { optional_tempdir: Some(tempdir), weak_db_ref } } /// Tracks the DB reference and makes sure all strong refs are cleaned up /// but does not remove the DB from disk when dropped. 
- pub fn without_destroy(weak_db_ref: Weak) -> Self { + pub fn without_destroy(weak_db_ref: Weak) -> Self { Self { optional_tempdir: None, weak_db_ref } } } diff --git a/database/src/writer.rs b/database/src/writer.rs index 1599e6d77..17636ecac 100644 --- a/database/src/writer.rs +++ b/database/src/writer.rs @@ -1,7 +1,7 @@ use kaspa_utils::refs::Refs; use rocksdb::WriteBatch; -use crate::prelude::DB; +use crate::prelude::RocksDB; /// Abstraction over direct/batched DB writing pub trait DbWriter { @@ -20,15 +20,15 @@ pub trait DbWriter { pub trait DirectWriter: DbWriter {} pub struct DirectDbWriter<'a> { - db: Refs<'a, DB>, + db: Refs<'a, RocksDB>, } impl<'a> DirectDbWriter<'a> { - pub fn new(db: &'a DB) -> Self { + pub fn new(db: &'a RocksDB) -> Self { Self { db: db.into() } } - pub fn from_arc(db: std::sync::Arc) -> Self { + pub fn from_arc(db: std::sync::Arc) -> Self { Self { db: db.into() } } } diff --git a/indexes/utxoindex/src/index.rs b/indexes/utxoindex/src/index.rs index b71935afa..21481128c 100644 --- a/indexes/utxoindex/src/index.rs +++ b/indexes/utxoindex/src/index.rs @@ -9,7 +9,7 @@ use crate::{ use kaspa_consensus_core::{tx::ScriptPublicKeys, utxo::utxo_diff::UtxoDiff, BlockHashSet}; use kaspa_consensusmanager::{ConsensusManager, ConsensusResetHandler}; use kaspa_core::{info, trace}; -use kaspa_database::prelude::{StoreError, StoreResult, DB}; +use kaspa_database::prelude::{RocksDB, StoreError, StoreResult}; use kaspa_hashes::Hash; use kaspa_index_core::indexed_utxos::BalanceByScriptPublicKey; use kaspa_utils::arc::ArcExtensions; @@ -31,7 +31,7 @@ pub struct UtxoIndex { impl UtxoIndex { /// Creates a new [`UtxoIndex`] within a [`RwLock`] - pub fn new(consensus_manager: Arc, db: Arc) -> UtxoIndexResult>> { + pub fn new(consensus_manager: Arc, db: Arc) -> UtxoIndexResult>> { let mut utxoindex = Self { consensus_manager: consensus_manager.clone(), store: Store::new(db) }; if !utxoindex.is_synced()? 
{ utxoindex.resync()?; diff --git a/indexes/utxoindex/src/stores/indexed_utxos.rs b/indexes/utxoindex/src/stores/indexed_utxos.rs index c9bce2c71..0a3d89eb4 100644 --- a/indexes/utxoindex/src/stores/indexed_utxos.rs +++ b/indexes/utxoindex/src/stores/indexed_utxos.rs @@ -4,7 +4,7 @@ use kaspa_consensus_core::tx::{ ScriptPublicKey, ScriptPublicKeyVersion, ScriptPublicKeys, ScriptVec, TransactionIndexType, TransactionOutpoint, }; use kaspa_core::debug; -use kaspa_database::prelude::{CachePolicy, CachedDbAccess, DirectDbWriter, StoreResult, DB}; +use kaspa_database::prelude::{CachePolicy, CachedDbAccess, DirectDbWriter, RocksDB, StoreResult}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; use kaspa_index_core::indexed_utxos::BalanceByScriptPublicKey; @@ -142,12 +142,12 @@ pub trait UtxoSetByScriptPublicKeyStore: UtxoSetByScriptPublicKeyStoreReader { #[derive(Clone)] pub struct DbUtxoSetByScriptPublicKeyStore { - db: Arc, + db: Arc, access: CachedDbAccess, } impl DbUtxoSetByScriptPublicKeyStore { - pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::UtxoIndex.into()) } } } diff --git a/indexes/utxoindex/src/stores/store_manager.rs b/indexes/utxoindex/src/stores/store_manager.rs index e9e635e53..cb43d2ab3 100644 --- a/indexes/utxoindex/src/stores/store_manager.rs +++ b/indexes/utxoindex/src/stores/store_manager.rs @@ -5,7 +5,7 @@ use kaspa_consensus_core::{ BlockHashSet, }; use kaspa_core::trace; -use kaspa_database::prelude::{CachePolicy, StoreResult, DB}; +use kaspa_database::prelude::{CachePolicy, RocksDB, StoreResult}; use kaspa_index_core::indexed_utxos::BalanceByScriptPublicKey; use crate::{ @@ -26,7 +26,7 @@ pub struct Store { } impl Store { - pub fn new(db: Arc) -> Self { + pub fn new(db: Arc) -> Self { Self { utxoindex_tips_store: DbUtxoIndexTipsStore::new(db.clone()), 
circulating_supply_store: DbCirculatingSupplyStore::new(db.clone()), diff --git a/indexes/utxoindex/src/stores/supply.rs b/indexes/utxoindex/src/stores/supply.rs index de45864c0..0393451e3 100644 --- a/indexes/utxoindex/src/stores/supply.rs +++ b/indexes/utxoindex/src/stores/supply.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use kaspa_database::{ - prelude::{CachedDbItem, DirectDbWriter, StoreResult, DB}, + prelude::{CachedDbItem, DirectDbWriter, RocksDB, StoreResult}, registry::DatabaseStorePrefixes, }; @@ -21,12 +21,12 @@ pub trait CirculatingSupplyStore: CirculatingSupplyStoreReader { /// A DB + cache implementation of `UtxoIndexTipsStore` trait #[derive(Clone)] pub struct DbCirculatingSupplyStore { - db: Arc, + db: Arc, access: CachedDbItem, } impl DbCirculatingSupplyStore { - pub fn new(db: Arc) -> Self { + pub fn new(db: Arc) -> Self { Self { db: Arc::clone(&db), access: CachedDbItem::new(db, DatabaseStorePrefixes::CirculatingSupply.into()) } } } diff --git a/indexes/utxoindex/src/stores/tips.rs b/indexes/utxoindex/src/stores/tips.rs index 0c4c424ef..d0acb1638 100644 --- a/indexes/utxoindex/src/stores/tips.rs +++ b/indexes/utxoindex/src/stores/tips.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use kaspa_database::{ - prelude::{CachedDbItem, DirectDbWriter, StoreError, StoreResult, DB}, + prelude::{CachedDbItem, DirectDbWriter, RocksDB, StoreError, StoreResult}, registry::DatabaseStorePrefixes, }; @@ -20,12 +20,12 @@ pub trait UtxoIndexTipsStore: UtxoIndexTipsStoreReader { /// A DB + cache implementation of `UtxoIndexTipsStore` trait #[derive(Clone)] pub struct DbUtxoIndexTipsStore { - db: Arc, + db: Arc, access: CachedDbItem>, } impl DbUtxoIndexTipsStore { - pub fn new(db: Arc) -> Self { + pub fn new(db: Arc) -> Self { Self { db: Arc::clone(&db), access: CachedDbItem::new(db.clone(), DatabaseStorePrefixes::UtxoIndexTips.into()) } } } From 81998f50bc8f144b5ced4d316f31bf8922955d6d Mon Sep 17 00:00:00 2001 From: max143672 Date: Thu, 12 Sep 2024 00:03:14 +0300 Subject: 
[PATCH 54/58] introduce db trait to support different dbs --- Cargo.toml | 1 + database/src/access.rs | 16 +++++++--------- database/src/item.rs | 2 +- database/src/lib.rs | 6 +++--- database/src/{db.rs => rocksdb.rs} | 2 +- database/src/{db => rocksdb}/conn_builder.rs | 2 +- database/src/set_access.rs | 2 +- 7 files changed, 15 insertions(+), 16 deletions(-) rename database/src/{db.rs => rocksdb.rs} (98%) rename database/src/{db => rocksdb}/conn_builder.rs (99%) diff --git a/Cargo.toml b/Cargo.toml index 732279bd5..136d75653 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -217,6 +217,7 @@ rand_chacha = "0.3.1" rand_core = { version = "0.6.4", features = ["std"] } rand_distr = "0.4.3" rayon = "1.8.0" +redb = "2.1.2" regex = "1.10.2" ripemd = { version = "0.1.3", default-features = false } rlimit = "0.10.1" diff --git a/database/src/access.rs b/database/src/access.rs index cce536230..b729657d2 100644 --- a/database/src/access.rs +++ b/database/src/access.rs @@ -7,7 +7,7 @@ use std::{collections::hash_map::RandomState, error::Error, hash::BuildHasher, s /// A concurrent DB store access with typed caching. 
#[derive(Clone)] -pub struct CachedDbAccess> +pub struct CachedDbAccess> where TKey: Clone + std::hash::Hash + Eq + Send + Sync, TData: Clone + Send + Sync + MemSizeEstimator, @@ -84,12 +84,11 @@ where TKey: Clone + AsRef<[u8]>, TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned` has short lifetime { - self.db.iterator(self.prefix.to_vec(), None).map(move |iter_result| match iter_result { - Ok((key, data_bytes)) => match bincode::deserialize(data_bytes.as_ref()) { + self.db.iterator(self.prefix.to_vec(), None).map(move |iter_result| { + iter_result.and_then(|(key, data_bytes)| match bincode::deserialize(data_bytes.as_ref()) { Ok(data) => Ok((key.as_ref()[self.prefix.len()..].into(), data)), Err(e) => Err(e.into()), - }, - Err(e) => Err(e.into()), + }) }) } @@ -201,12 +200,11 @@ where db_iterator.next(); } - db_iterator.take(limit).map(move |item| match item { - Ok((key_bytes, value_bytes)) => match bincode::deserialize::(value_bytes.as_ref()) { + db_iterator.take(limit).map(move |item| { + item.and_then(|(key_bytes, value_bytes)| match bincode::deserialize::(value_bytes.as_ref()) { Ok(value) => Ok((key_bytes.as_ref()[db_key_prefix_len..].into(), value)), Err(err) => Err(err.into()), - }, - Err(err) => Err(err.into()), + }) }) } diff --git a/database/src/item.rs b/database/src/item.rs index 66f4e6044..afed2603e 100644 --- a/database/src/item.rs +++ b/database/src/item.rs @@ -1,5 +1,5 @@ use crate::{ - db::RocksDB, + rocksdb::RocksDB, errors::StoreError, prelude::{DbSetAccess, ReadLock}, }; diff --git a/database/src/lib.rs b/database/src/lib.rs index 99d3667fe..432ffb49a 100644 --- a/database/src/lib.rs +++ b/database/src/lib.rs @@ -1,6 +1,6 @@ mod access; mod cache; -mod db; +mod rocksdb; mod errors; mod item; mod key; @@ -11,7 +11,7 @@ mod set_access; pub mod utils; pub mod prelude { - use crate::{db, errors}; + use crate::{rocksdb, errors}; pub use super::access::CachedDbAccess; pub use super::cache::{Cache, 
CachePolicy}; @@ -19,6 +19,6 @@ pub mod prelude { pub use super::key::DbKey; pub use super::set_access::{CachedDbSetAccess, DbSetAccess, ReadLock}; pub use super::writer::{BatchDbWriter, DbWriter, DirectDbWriter, DirectWriter, MemoryWriter}; - pub use db::{delete_db, ConnBuilder, RocksDB}; + pub use rocksdb::{delete_db, ConnBuilder, RocksDB}; pub use errors::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; } diff --git a/database/src/db.rs b/database/src/rocksdb.rs similarity index 98% rename from database/src/db.rs rename to database/src/rocksdb.rs index d16cdcb1d..ffe70db5a 100644 --- a/database/src/db.rs +++ b/database/src/rocksdb.rs @@ -55,7 +55,7 @@ impl> DbAccess for T { } fn read(&self, db_key: &DbKey) -> Result>, StoreError> { - Ok(self.borrow().get_pinned(&db_key)?) + Ok(self.borrow().get_pinned(db_key)?) } fn iterator( diff --git a/database/src/db/conn_builder.rs b/database/src/rocksdb/conn_builder.rs similarity index 99% rename from database/src/db/conn_builder.rs rename to database/src/rocksdb/conn_builder.rs index ebb21bb7d..e824b2563 100644 --- a/database/src/db/conn_builder.rs +++ b/database/src/rocksdb/conn_builder.rs @@ -1,4 +1,4 @@ -use crate::db::RocksDB; +use crate::rocksdb::RocksDB; use rocksdb::{DBWithThreadMode, MultiThreaded}; use std::{path::PathBuf, sync::Arc}; diff --git a/database/src/set_access.rs b/database/src/set_access.rs index df8236bcb..c6647bba2 100644 --- a/database/src/set_access.rs +++ b/database/src/set_access.rs @@ -1,4 +1,4 @@ -use crate::{cache::CachePolicy, db::RocksDB, errors::StoreError}; +use crate::{cache::CachePolicy, rocksdb::RocksDB, errors::StoreError}; use super::prelude::{Cache, DbKey, DbWriter}; use parking_lot::{RwLock, RwLockReadGuard}; From f547bb66b309c8a7d889e17f037043d1c8bf3111 Mon Sep 17 00:00:00 2001 From: max143672 Date: Thu, 12 Sep 2024 10:06:38 +0300 Subject: [PATCH 55/58] tmp works --- Cargo.lock | 10 +++++ database/Cargo.toml | 1 + database/src/errors.rs | 4 ++ 
database/src/item.rs | 2 +- database/src/lib.rs | 7 ++-- database/src/redb.rs | 79 ++++++++++++++++++++++++++++++++++++++ database/src/set_access.rs | 2 +- 7 files changed, 100 insertions(+), 5 deletions(-) create mode 100644 database/src/redb.rs diff --git a/Cargo.lock b/Cargo.lock index 37f8960dd..ddf952565 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2756,6 +2756,7 @@ dependencies = [ "num_cpus", "parking_lot", "rand 0.8.5", + "redb", "rocksdb", "serde", "smallvec", @@ -5055,6 +5056,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "redb" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58323dc32ea52a8ae105ff94bc0460c5d906307533ba3401aa63db3cbe491fe5" +dependencies = [ + "libc", +] + [[package]] name = "redox_syscall" version = "0.5.3" diff --git a/database/Cargo.toml b/database/Cargo.toml index a5dacae8e..976be9023 100644 --- a/database/Cargo.toml +++ b/database/Cargo.toml @@ -21,6 +21,7 @@ num_cpus.workspace = true num-traits.workspace = true parking_lot.workspace = true rand.workspace = true +redb.workspace = true rocksdb.workspace = true serde.workspace = true smallvec.workspace = true diff --git a/database/src/errors.rs b/database/src/errors.rs index 8467c5e65..b75d51a48 100644 --- a/database/src/errors.rs +++ b/database/src/errors.rs @@ -1,5 +1,6 @@ use crate::prelude::DbKey; use kaspa_hashes::Hash; +use rocksdb::Transaction; use thiserror::Error; #[derive(Error, Debug)] @@ -23,6 +24,9 @@ pub enum StoreError { #[error("bincode error {0}")] DeserializationError(#[from] Box), + + #[error("redb error {0}")] + RedbError(#[from] redb::Error), } pub type StoreResult = std::result::Result; diff --git a/database/src/item.rs b/database/src/item.rs index afed2603e..0b8988ae7 100644 --- a/database/src/item.rs +++ b/database/src/item.rs @@ -1,7 +1,7 @@ use crate::{ - rocksdb::RocksDB, errors::StoreError, prelude::{DbSetAccess, ReadLock}, + rocksdb::RocksDB, }; use super::prelude::{DbKey, DbWriter}; diff 
--git a/database/src/lib.rs b/database/src/lib.rs index 432ffb49a..44fe4755a 100644 --- a/database/src/lib.rs +++ b/database/src/lib.rs @@ -1,9 +1,10 @@ mod access; mod cache; -mod rocksdb; mod errors; mod item; mod key; +mod redb; +mod rocksdb; mod writer; pub mod registry; @@ -11,7 +12,7 @@ mod set_access; pub mod utils; pub mod prelude { - use crate::{rocksdb, errors}; + use crate::{errors, rocksdb}; pub use super::access::CachedDbAccess; pub use super::cache::{Cache, CachePolicy}; @@ -19,6 +20,6 @@ pub mod prelude { pub use super::key::DbKey; pub use super::set_access::{CachedDbSetAccess, DbSetAccess, ReadLock}; pub use super::writer::{BatchDbWriter, DbWriter, DirectDbWriter, DirectWriter, MemoryWriter}; - pub use rocksdb::{delete_db, ConnBuilder, RocksDB}; pub use errors::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; + pub use rocksdb::{delete_db, ConnBuilder, RocksDB}; } diff --git a/database/src/redb.rs b/database/src/redb.rs new file mode 100644 index 000000000..27d0f7c06 --- /dev/null +++ b/database/src/redb.rs @@ -0,0 +1,79 @@ +use crate::access::DbAccess; +use crate::errors::StoreError; +use crate::key::DbKey; +use crate::prelude::DbWriter; +use itertools::Either; +use redb::{ReadableTable, TableDefinition}; +use std::error::Error; +use std::sync::atomic::{AtomicU64, Ordering}; + +const TABLE: TableDefinition<&[u8], Vec> = TableDefinition::new("0"); + +pub struct Redb { + db: redb::Database, + write_queue_count: AtomicU64, +} + +impl DbAccess for Redb { + fn has(&self, db_key: DbKey) -> Result { + Ok(|| -> Result<_, redb::Error> { Ok(self.db.begin_read()?.open_table(TABLE)?.get(db_key.as_ref())?.is_some()) }()?) + } + + fn read(&self, db_key: &DbKey) -> Result>, StoreError> { + Ok(|| -> Result<_, redb::Error> { + Ok(self.db.begin_read()?.open_table(TABLE)?.get(db_key.as_ref())?.map(|guard| guard.value())) + }()?) 
+ } + + fn iterator( + &self, + prefix: impl Into>, + seek_from: Option, + ) -> impl Iterator, impl AsRef<[u8]>), Box>> + '_ { + let prefix = prefix.into(); + seek_from.as_ref().inspect(|seek_from| debug_assert!(seek_from.as_ref().starts_with(prefix.as_ref()))); + + let table = self.db.begin_read().unwrap().open_table(TABLE).unwrap(); // todo change interface to support errors + Iterator::map( + { + if let Some(seek_from) = seek_from { + Either::Left(table.range(seek_from.as_ref()..).unwrap()) // todo change interface to support errors + } else { + Either::Right(table.range(prefix.as_slice()..).unwrap()) // todo change interface to support errors + } + } + .take_while(move |r| r.as_ref().is_ok_and(|(k, _)| k.value().starts_with(&prefix))), + |r| r.map(|(k, v)| (k.value().to_vec(), v.value())).map_err(Into::into), + ) + } + + fn write(&self, _writer: &mut impl DbWriter, db_key: DbKey, data: Vec) -> Result<(), StoreError> { + let process = || -> Result<_, redb::Error> { + let write_tx = self.db.begin_write()?; + let mut table = write_tx.open_table(TABLE)?; + table.insert(db_key.as_ref(), data)?; + Ok(()) + }; + self.write_queue_count.fetch_add(1, Ordering::Relaxed); + let res = process(); + self.write_queue_count.fetch_sub(1, Ordering::Relaxed); + Ok(res?) + } + + fn delete(&self, _writer: &mut impl DbWriter, db_key: DbKey) -> Result<(), StoreError> { + let process = || -> Result<_, redb::Error> { + let write_tx = self.db.begin_write()?; + let mut table = write_tx.open_table(TABLE)?; + table.remove(db_key.as_ref())?; + Ok(()) + }; + self.write_queue_count.fetch_add(1, Ordering::Relaxed); + let res = process(); + self.write_queue_count.fetch_sub(1, Ordering::Relaxed); + Ok(res?) 
+ } + + fn delete_range_by_prefix(&self, writer: &mut impl DbWriter, prefix: &[u8]) -> Result<(), StoreError> { + todo!() + } +} diff --git a/database/src/set_access.rs b/database/src/set_access.rs index c6647bba2..b95788d99 100644 --- a/database/src/set_access.rs +++ b/database/src/set_access.rs @@ -1,4 +1,4 @@ -use crate::{cache::CachePolicy, rocksdb::RocksDB, errors::StoreError}; +use crate::{cache::CachePolicy, errors::StoreError, rocksdb::RocksDB}; use super::prelude::{Cache, DbKey, DbWriter}; use parking_lot::{RwLock, RwLockReadGuard}; From 38b3d43223bde0d8f39d0abe8af89efe1645c0e3 Mon Sep 17 00:00:00 2001 From: max143672 Date: Thu, 12 Sep 2024 11:21:07 +0300 Subject: [PATCH 56/58] impl dbaccess for redb --- database/src/errors.rs | 1 - database/src/redb.rs | 36 ++++++++++++++++++++++++------------ 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/database/src/errors.rs b/database/src/errors.rs index b75d51a48..02c6188d2 100644 --- a/database/src/errors.rs +++ b/database/src/errors.rs @@ -1,6 +1,5 @@ use crate::prelude::DbKey; use kaspa_hashes::Hash; -use rocksdb::Transaction; use thiserror::Error; #[derive(Error, Debug)] diff --git a/database/src/redb.rs b/database/src/redb.rs index 27d0f7c06..9b50fe78f 100644 --- a/database/src/redb.rs +++ b/database/src/redb.rs @@ -3,7 +3,7 @@ use crate::errors::StoreError; use crate::key::DbKey; use crate::prelude::DbWriter; use itertools::Either; -use redb::{ReadableTable, TableDefinition}; +use redb::TableDefinition; use std::error::Error; use std::sync::atomic::{AtomicU64, Ordering}; @@ -32,21 +32,21 @@ impl DbAccess for Redb { ) -> impl Iterator, impl AsRef<[u8]>), Box>> + '_ { let prefix = prefix.into(); seek_from.as_ref().inspect(|seek_from| debug_assert!(seek_from.as_ref().starts_with(prefix.as_ref()))); - + let upper_bound = [&prefix[..prefix.len() - 1], &[prefix[prefix.len() - 1].saturating_add(1)]].concat(); let table = self.db.begin_read().unwrap().open_table(TABLE).unwrap(); // todo change 
interface to support errors Iterator::map( - { - if let Some(seek_from) = seek_from { - Either::Left(table.range(seek_from.as_ref()..).unwrap()) // todo change interface to support errors - } else { - Either::Right(table.range(prefix.as_slice()..).unwrap()) // todo change interface to support errors - } - } - .take_while(move |r| r.as_ref().is_ok_and(|(k, _)| k.value().starts_with(&prefix))), + if let Some(seek_from) = seek_from { + Either::Left(table.range(seek_from.as_ref()..upper_bound.as_ref()).unwrap()) + // todo change interface to support errors + } else { + Either::Right(table.range(prefix.as_slice()..upper_bound.as_ref()).unwrap()) + // todo change interface to support errors + }, |r| r.map(|(k, v)| (k.value().to_vec(), v.value())).map_err(Into::into), ) } + // todo writer fn write(&self, _writer: &mut impl DbWriter, db_key: DbKey, data: Vec) -> Result<(), StoreError> { let process = || -> Result<_, redb::Error> { let write_tx = self.db.begin_write()?; @@ -60,6 +60,7 @@ impl DbAccess for Redb { Ok(res?) } + // todo writer fn delete(&self, _writer: &mut impl DbWriter, db_key: DbKey) -> Result<(), StoreError> { let process = || -> Result<_, redb::Error> { let write_tx = self.db.begin_write()?; @@ -73,7 +74,18 @@ impl DbAccess for Redb { Ok(res?) } - fn delete_range_by_prefix(&self, writer: &mut impl DbWriter, prefix: &[u8]) -> Result<(), StoreError> { - todo!() + // todo writer + fn delete_range_by_prefix(&self, _writer: &mut impl DbWriter, prefix: &[u8]) -> Result<(), StoreError> { + let upper_bound = [&prefix[..prefix.len() - 1], &[prefix[prefix.len() - 1].saturating_add(1)]].concat(); + let process = || -> Result<_, redb::Error> { + let write_tx = self.db.begin_write()?; + let mut table = write_tx.open_table(TABLE)?; + table.retain_in(prefix..upper_bound.as_ref(), |_, _| false)?; + Ok(()) + }; + self.write_queue_count.fetch_add(1, Ordering::Relaxed); + let res = process(); + self.write_queue_count.fetch_sub(1, Ordering::Relaxed); + Ok(res?) 
} } From 5ffbf31165a848a09f40b823117749c06e15ca49 Mon Sep 17 00:00:00 2001 From: max143672 Date: Tue, 24 Dec 2024 20:22:25 +0400 Subject: [PATCH 57/58] extend vcc endpoint to pass sp blue score, group up transactions by merging blocks --- components/consensusmanager/src/session.rs | 8 +++- consensus/core/src/api/mod.rs | 16 +++++--- consensus/core/src/header.rs | 16 ++++++++ consensus/notify/src/notification.rs | 6 ++- consensus/src/consensus/mod.rs | 16 ++++++-- consensus/src/consensus/storage.rs | 3 +- consensus/src/model/stores/headers.rs | 17 +-------- .../pipeline/virtual_processor/processor.rs | 4 ++ consensus/src/processes/parents_builder.rs | 3 +- rpc/core/src/api/notifications.rs | 6 +-- rpc/core/src/convert/notification.rs | 28 +++++++------- rpc/core/src/model/message.rs | 31 ++++++++++------ rpc/core/src/model/tests.rs | 14 +++++-- rpc/core/src/model/tx.rs | 13 +++++-- rpc/grpc/core/proto/rpc.proto | 17 ++++++--- rpc/grpc/core/src/convert/message.rs | 4 +- rpc/grpc/core/src/convert/notification.rs | 4 +- rpc/grpc/core/src/convert/tx.rs | 37 +++++++++++++++---- rpc/service/src/converter/consensus.rs | 37 ++++++++++++------- .../src/daemon_integration_tests.rs | 8 ++-- 20 files changed, 188 insertions(+), 100 deletions(-) diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs index 8e0c6e933..edd9e5a96 100644 --- a/components/consensusmanager/src/session.rs +++ b/components/consensusmanager/src/session.rs @@ -18,9 +18,9 @@ use kaspa_consensus_core::{ use kaspa_utils::sync::rwlock::*; use std::{ops::Deref, sync::Arc}; -pub use tokio::task::spawn_blocking; - use crate::BlockProcessingBatch; +use kaspa_consensus_core::header::CompactHeaderData; +pub use tokio::task::spawn_blocking; #[allow(dead_code)] #[derive(Clone)] @@ -358,6 +358,10 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(move |c| c.get_block(hash)).await } + pub async fn async_get_compact_header_data(&self, hash: Hash) -> ConsensusResult { 
+ self.clone().spawn_blocking(move |c| c.get_compact_header_data(hash)).await + } + pub async fn async_get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult { self.clone().spawn_blocking(move |c| c.get_block_even_if_header_only(hash)).await } diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 7c244b914..da550afd1 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -1,7 +1,3 @@ -use futures_util::future::BoxFuture; -use kaspa_muhash::MuHash; -use std::sync::Arc; - use crate::{ acceptance_data::AcceptanceData, api::args::{TransactionValidationArgs, TransactionValidationBatchArgs}, @@ -22,7 +18,11 @@ use crate::{ tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, }; +use consensus_core::header::CompactHeaderData; +use futures_util::future::BoxFuture; use kaspa_hashes::Hash; +use kaspa_muhash::MuHash; +use std::sync::Arc; pub use self::stats::{BlockCount, ConsensusStats}; @@ -141,7 +141,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - /// source refers to the earliest block from which the current node has full header & block data + /// source refers to the earliest block from which the current node has full header & block data fn get_source(&self) -> Hash { unimplemented!() } @@ -159,7 +159,7 @@ pub trait ConsensusApi: Send + Sync { /// Gets the virtual chain paths from `low` to the `sink` hash, or until `chain_path_added_limit` is reached /// - /// Note: + /// Note: /// 1) `chain_path_added_limit` will populate removed fully, and then the added chain path, up to `chain_path_added_limit` amount of hashes. /// 1.1) use `None to impose no limit with optimized backward chain iteration, for better performance in cases where batching is not required. 
fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option) -> ConsensusResult { @@ -275,6 +275,10 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } + fn get_compact_header_data(&self, hash: Hash) -> ConsensusResult { + unimplemented!() + } + fn get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult { unimplemented!() } diff --git a/consensus/core/src/header.rs b/consensus/core/src/header.rs index e53de4425..832bce891 100644 --- a/consensus/core/src/header.rs +++ b/consensus/core/src/header.rs @@ -105,6 +105,22 @@ impl MemSizeEstimator for Header { } } +#[derive(Clone, Copy, Serialize, Deserialize)] +pub struct CompactHeaderData { + pub daa_score: u64, + pub timestamp: u64, + pub bits: u32, + pub blue_score: u64, +} + +impl MemSizeEstimator for CompactHeaderData {} + +impl From<&Header> for CompactHeaderData { + fn from(header: &Header) -> Self { + Self { daa_score: header.daa_score, timestamp: header.timestamp, bits: header.bits, blue_score: header.blue_score } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/notify/src/notification.rs b/consensus/notify/src/notification.rs index a9e758b0a..6a70c0db2 100644 --- a/consensus/notify/src/notification.rs +++ b/consensus/notify/src/notification.rs @@ -68,6 +68,7 @@ impl NotificationTrait for Notification { removed_chain_block_hashes: payload.removed_chain_block_hashes.clone(), added_chain_block_hashes: payload.added_chain_block_hashes.clone(), added_chain_blocks_acceptance_data: Arc::new(vec![]), + added_chain_block_blue_scores: Arc::new(vec![]), })); } } @@ -107,6 +108,8 @@ impl BlockAddedNotification { pub struct VirtualChainChangedNotification { pub added_chain_block_hashes: Arc>, pub removed_chain_block_hashes: Arc>, + pub added_chain_block_blue_scores: Arc>, + pub added_chain_blocks_acceptance_data: Arc>>, } impl VirtualChainChangedNotification { @@ -114,8 +117,9 @@ impl VirtualChainChangedNotification { added_chain_block_hashes: Arc>, 
removed_chain_block_hashes: Arc>, added_chain_blocks_acceptance_data: Arc>>, + added_chain_block_blue_scores: Arc>, ) -> Self { - Self { added_chain_block_hashes, removed_chain_block_hashes, added_chain_blocks_acceptance_data } + Self { added_chain_block_hashes, removed_chain_block_hashes, added_chain_block_blue_scores, added_chain_blocks_acceptance_data } } } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 99719d4ac..04d7c649c 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -17,7 +17,7 @@ use crate::{ acceptance_data::AcceptanceDataStoreReader, block_transactions::BlockTransactionsStoreReader, ghostdag::{GhostdagData, GhostdagStoreReader}, - headers::{CompactHeaderData, HeaderStoreReader}, + headers::HeaderStoreReader, headers_selected_tip::HeadersSelectedTipStoreReader, past_pruning_points::PastPruningPointsStoreReader, pruning::PruningStoreReader, @@ -60,14 +60,20 @@ use kaspa_consensus_core::{ pruning::PruningImportError, tx::TxResult, }, - header::Header, + header::{ + Header, + CompactHeaderData + }, merkle::calc_hash_merkle_root, muhash::MuHashExtensions, network::NetworkType, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, - BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher, + BlockHashSet, + BlueWorkType, + ChainPath, + HashMapCustomHasher, }; use kaspa_consensus_notify::root::ConsensusNotificationRoot; @@ -884,6 +890,10 @@ impl ConsensusApi for Consensus { }) } + fn get_compact_header_data(&self, hash: Hash) -> ConsensusResult { + self.headers_store.get_compact_header_data(hash).unwrap_option().ok_or(ConsensusError::BlockNotFound(hash)) + } + fn get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult { let Some(status) = self.statuses_store.read().get(hash).unwrap_option().filter(|&status| 
status.has_block_header()) else { return Err(ConsensusError::HeaderNotFound(hash)); diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 5e2ff8fcd..e88392a6c 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -7,7 +7,7 @@ use crate::{ daa::DbDaaStore, depth::DbDepthStore, ghostdag::{CompactGhostdagData, DbGhostdagStore}, - headers::{CompactHeaderData, DbHeadersStore}, + headers::DbHeadersStore, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::DbPastPruningPointsStore, pruning::DbPruningStore, @@ -27,6 +27,7 @@ use crate::{ use super::cache_policy_builder::CachePolicyBuilder as PolicyBuilder; use itertools::Itertools; +use kaspa_consensus_core::header::CompactHeaderData; use kaspa_consensus_core::{blockstatus::BlockStatus, BlockHashSet}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; diff --git a/consensus/src/model/stores/headers.rs b/consensus/src/model/stores/headers.rs index 85668f699..f78f1c709 100644 --- a/consensus/src/model/stores/headers.rs +++ b/consensus/src/model/stores/headers.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use kaspa_consensus_core::header::CompactHeaderData; use kaspa_consensus_core::{header::Header, BlockHasher, BlockLevel}; use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess}; use kaspa_database::prelude::{CachePolicy, DB}; @@ -38,22 +39,6 @@ pub trait HeaderStore: HeaderStoreReader { fn delete(&self, hash: Hash) -> Result<(), StoreError>; } -#[derive(Clone, Copy, Serialize, Deserialize)] -pub struct CompactHeaderData { - pub daa_score: u64, - pub timestamp: u64, - pub bits: u32, - pub blue_score: u64, -} - -impl MemSizeEstimator for CompactHeaderData {} - -impl From<&Header> for CompactHeaderData { - fn from(header: &Header) -> Self { - Self { daa_score: header.daa_score, timestamp: header.timestamp, bits: header.bits, blue_score: header.blue_score } - } -} - /// A DB + cache implementation of 
`HeaderStore` trait, with concurrency support. #[derive(Clone)] pub struct DbHeadersStore { diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 914e0a327..3233cb810 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -350,11 +350,15 @@ impl VirtualStateProcessor { // check for subscriptions before the heavy lifting let added_chain_blocks_acceptance_data = chain_path.added.iter().copied().map(|added| self.acceptance_data_store.get(added).unwrap()).collect_vec(); + + let added_chain_block_blue_scores = + chain_path.added.iter().copied().map(|added| self.headers_store.get_blue_score(added).unwrap()).collect_vec(); self.notification_root .notify(Notification::VirtualChainChanged(VirtualChainChangedNotification::new( chain_path.added.into(), chain_path.removed.into(), Arc::new(added_chain_blocks_acceptance_data), + Arc::new(added_chain_block_blue_scores), ))) .expect("expecting an open unbounded channel"); } diff --git a/consensus/src/processes/parents_builder.rs b/consensus/src/processes/parents_builder.rs index 14df3fcec..1ab9f2122 100644 --- a/consensus/src/processes/parents_builder.rs +++ b/consensus/src/processes/parents_builder.rs @@ -220,6 +220,7 @@ mod tests { use kaspa_database::prelude::{ReadLock, StoreError, StoreResult}; use kaspa_hashes::Hash; use parking_lot::RwLock; + use kaspa_consensus_core::header::CompactHeaderData; struct HeaderStoreMock { map: RwLock>, @@ -252,7 +253,7 @@ mod tests { fn get_compact_header_data( &self, hash: kaspa_hashes::Hash, - ) -> Result { + ) -> Result { unimplemented!() } diff --git a/rpc/core/src/api/notifications.rs b/rpc/core/src/api/notifications.rs index 503af0de8..12145ef80 100644 --- a/rpc/core/src/api/notifications.rs +++ b/rpc/core/src/api/notifications.rs @@ -25,7 +25,7 @@ pub enum Notification { #[display(fmt = "BlockAdded notification: block hash {}", 
"_0.block.header.hash")] BlockAdded(BlockAddedNotification), - #[display(fmt = "VirtualChainChanged notification: {} removed blocks, {} added blocks, {} accepted transactions", "_0.removed_chain_block_hashes.len()", "_0.added_chain_block_hashes.len()", "_0.accepted_transaction_ids.len()")] + #[display(fmt = "VirtualChainChanged notification: {} removed blocks, {} added blocks, {} accepted transactions", "_0.removed_chain_block_hashes.len()", "_0.added_chain_block_hashes.len()", "_0.added_acceptance_data.len()")] VirtualChainChanged(VirtualChainChangedNotification), #[display(fmt = "FinalityConflict notification: violating block hash {}", "_0.violating_block_hash")] @@ -84,11 +84,11 @@ impl NotificationTrait for Notification { match subscription.active() { true => { if let Notification::VirtualChainChanged(ref payload) = self { - if !subscription.include_accepted_transaction_ids() && !payload.accepted_transaction_ids.is_empty() { + if !subscription.include_accepted_transaction_ids() && !payload.added_acceptance_data.is_empty() { return Some(Notification::VirtualChainChanged(VirtualChainChangedNotification { removed_chain_block_hashes: payload.removed_chain_block_hashes.clone(), added_chain_block_hashes: payload.added_chain_block_hashes.clone(), - accepted_transaction_ids: Arc::new(vec![]), + added_acceptance_data: Arc::new(vec![]), })); } } diff --git a/rpc/core/src/convert/notification.rs b/rpc/core/src/convert/notification.rs index 6251cc1cd..25e39f97e 100644 --- a/rpc/core/src/convert/notification.rs +++ b/rpc/core/src/convert/notification.rs @@ -2,13 +2,14 @@ use crate::{ convert::utxo::utxo_set_into_rpc, BlockAddedNotification, FinalityConflictNotification, FinalityConflictResolvedNotification, - NewBlockTemplateNotification, Notification, PruningPointUtxoSetOverrideNotification, RpcAcceptedTransactionIds, - SinkBlueScoreChangedNotification, UtxosChangedNotification, VirtualChainChangedNotification, VirtualDaaScoreChangedNotification, + 
NewBlockTemplateNotification, Notification, PruningPointUtxoSetOverrideNotification, RpcAcceptanceData, + RpcMergesetBlockAcceptanceData, SinkBlueScoreChangedNotification, UtxosChangedNotification, VirtualChainChangedNotification, + VirtualDaaScoreChangedNotification, }; +use kaspa_consensus_core::acceptance_data::{AcceptedTxEntry, MergesetBlockAcceptanceData}; use kaspa_consensus_notify::notification as consensus_notify; use kaspa_index_core::notification as index_notify; use std::sync::Arc; - // ---------------------------------------------------------------------------- // consensus_core to rpc_core // ---------------------------------------------------------------------------- @@ -49,19 +50,20 @@ impl From<&consensus_notify::VirtualChainChangedNotification> for VirtualChainCh // If acceptance data array is empty, it means that the subscription was set to not // include accepted_transaction_ids. Otherwise, we expect acceptance data to correlate // with the added chain block hashes - accepted_transaction_ids: Arc::new(if item.added_chain_blocks_acceptance_data.is_empty() { + added_acceptance_data: Arc::new(if item.added_chain_blocks_acceptance_data.is_empty() { vec![] } else { - item.added_chain_block_hashes + item.added_chain_blocks_acceptance_data .iter() - .zip(item.added_chain_blocks_acceptance_data.iter()) - .map(|(hash, acceptance_data)| RpcAcceptedTransactionIds { - accepting_block_hash: hash.to_owned(), - // We collect accepted tx ids from all mergeset blocks - accepted_transaction_ids: acceptance_data - .iter() - .flat_map(|x| x.accepted_transactions.iter().map(|tx| tx.transaction_id)) - .collect(), + .zip(item.added_chain_block_blue_scores.iter()) + .map(|(acceptance_data, &accepting_blue_score)| RpcAcceptanceData { + accepting_blue_score, + mergeset_block_acceptance_data: acceptance_data.iter().map(|MergesetBlockAcceptanceData{ block_hash, accepted_transactions }| { + let mut accepted_transactions:Vec<_> = accepted_transactions.to_vec(); + 
accepted_transactions.sort_unstable_by_key(|entry| entry.index_within_block); + RpcMergesetBlockAcceptanceData{ merged_block_hash: *block_hash, accepted_transaction_ids: accepted_transactions.into_iter().map(|AcceptedTxEntry{ transaction_id, .. }| transaction_id).collect() } + }).collect() + }) .collect() }), diff --git a/rpc/core/src/model/message.rs b/rpc/core/src/model/message.rs index ba8d6abf7..8af88196d 100644 --- a/rpc/core/src/model/message.rs +++ b/rpc/core/src/model/message.rs @@ -10,6 +10,7 @@ use std::{ fmt::{Display, Formatter}, sync::Arc, }; +use std::io::ErrorKind; use workflow_serializer::prelude::*; pub type RpcExtraData = Vec; @@ -892,25 +893,25 @@ impl Deserializer for GetVirtualChainFromBlockRequest { pub struct GetVirtualChainFromBlockResponse { pub removed_chain_block_hashes: Vec, pub added_chain_block_hashes: Vec, - pub accepted_transaction_ids: Vec, + pub added_acceptance_data: Vec, } impl GetVirtualChainFromBlockResponse { pub fn new( removed_chain_block_hashes: Vec, added_chain_block_hashes: Vec, - accepted_transaction_ids: Vec, + added_acceptance_data: Vec, ) -> Self { - Self { removed_chain_block_hashes, added_chain_block_hashes, accepted_transaction_ids } + Self { removed_chain_block_hashes, added_chain_block_hashes, added_acceptance_data } } } impl Serializer for GetVirtualChainFromBlockResponse { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - store!(u16, &1, writer)?; + store!(u16, &254, writer)?; store!(Vec, &self.removed_chain_block_hashes, writer)?; store!(Vec, &self.added_chain_block_hashes, writer)?; - store!(Vec, &self.accepted_transaction_ids, writer)?; + store!(Vec, &self.added_acceptance_data, writer)?; Ok(()) } @@ -919,11 +920,14 @@ impl Serializer for GetVirtualChainFromBlockResponse { impl Deserializer for GetVirtualChainFromBlockResponse { fn deserialize(reader: &mut R) -> std::io::Result { let _version = load!(u16, reader)?; + if _version != 254 { + return 
Err(std::io::Error::new(ErrorKind::Other,"Expected 254-th version")) + } let removed_chain_block_hashes = load!(Vec, reader)?; let added_chain_block_hashes = load!(Vec, reader)?; - let accepted_transaction_ids = load!(Vec, reader)?; + let added_acceptance_data = load!(Vec, reader)?; - Ok(Self { removed_chain_block_hashes, added_chain_block_hashes, accepted_transaction_ids }) + Ok(Self { removed_chain_block_hashes, added_chain_block_hashes, added_acceptance_data }) } } @@ -2812,15 +2816,15 @@ impl Deserializer for NotifyVirtualChainChangedResponse { pub struct VirtualChainChangedNotification { pub removed_chain_block_hashes: Arc>, pub added_chain_block_hashes: Arc>, - pub accepted_transaction_ids: Arc>, + pub added_acceptance_data: Arc>, } impl Serializer for VirtualChainChangedNotification { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - store!(u16, &1, writer)?; + store!(u16, &254, writer)?; store!(Vec, &self.removed_chain_block_hashes, writer)?; store!(Vec, &self.added_chain_block_hashes, writer)?; - store!(Vec, &self.accepted_transaction_ids, writer)?; + store!(Vec, &self.added_acceptance_data, writer)?; Ok(()) } } @@ -2828,13 +2832,16 @@ impl Serializer for VirtualChainChangedNotification { impl Deserializer for VirtualChainChangedNotification { fn deserialize(reader: &mut R) -> std::io::Result { let _version = load!(u16, reader)?; + if _version != 254 { + return Err(std::io::Error::new(ErrorKind::Other, "expected 254-th version")); + } let removed_chain_block_hashes = load!(Vec, reader)?; let added_chain_block_hashes = load!(Vec, reader)?; - let accepted_transaction_ids = load!(Vec, reader)?; + let accepted_transaction_ids = load!(Vec, reader)?; Ok(Self { removed_chain_block_hashes: removed_chain_block_hashes.into(), added_chain_block_hashes: added_chain_block_hashes.into(), - accepted_transaction_ids: accepted_transaction_ids.into(), + added_acceptance_data: accepted_transaction_ids.into(), }) } } diff --git a/rpc/core/src/model/tests.rs 
b/rpc/core/src/model/tests.rs index d931f5ac2..11de11ff7 100644 --- a/rpc/core/src/model/tests.rs +++ b/rpc/core/src/model/tests.rs @@ -646,9 +646,15 @@ mod mockery { test!(GetVirtualChainFromBlockRequest); - impl Mock for RpcAcceptedTransactionIds { + impl Mock for RpcMergesetBlockAcceptanceData { fn mock() -> Self { - RpcAcceptedTransactionIds { accepting_block_hash: mock(), accepted_transaction_ids: mock() } + RpcMergesetBlockAcceptanceData{ merged_block_hash: mock(), accepted_transaction_ids: mock() } + } + } + + impl Mock for RpcAcceptanceData { + fn mock() -> Self { + RpcAcceptanceData { accepting_blue_score: mock(), mergeset_block_acceptance_data: mock() } } } @@ -657,7 +663,7 @@ mod mockery { GetVirtualChainFromBlockResponse { removed_chain_block_hashes: mock(), added_chain_block_hashes: mock(), - accepted_transaction_ids: mock(), + added_acceptance_data: mock(), } } } @@ -1109,7 +1115,7 @@ mod mockery { VirtualChainChangedNotification { removed_chain_block_hashes: mock(), added_chain_block_hashes: mock(), - accepted_transaction_ids: mock(), + added_acceptance_data: mock(), } } } diff --git a/rpc/core/src/model/tx.rs b/rpc/core/src/model/tx.rs index 0c17e26f5..9c3c6630b 100644 --- a/rpc/core/src/model/tx.rs +++ b/rpc/core/src/model/tx.rs @@ -394,7 +394,14 @@ impl Deserializer for RpcTransactionVerboseData { /// Represents accepted transaction ids #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] -pub struct RpcAcceptedTransactionIds { - pub accepting_block_hash: RpcHash, - pub accepted_transaction_ids: Vec, +pub struct RpcMergesetBlockAcceptanceData { + pub merged_block_hash: RpcHash, + pub accepted_transaction_ids: Vec, +} + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcAcceptanceData { + pub accepting_blue_score: u64, + pub mergeset_block_acceptance_data: Vec, } diff --git 
a/rpc/grpc/core/proto/rpc.proto b/rpc/grpc/core/proto/rpc.proto index e218681b6..d864a0e87 100644 --- a/rpc/grpc/core/proto/rpc.proto +++ b/rpc/grpc/core/proto/rpc.proto @@ -345,7 +345,7 @@ message VirtualChainChangedNotificationMessage{ repeated string addedChainBlockHashes = 3; // Will be filled only if `includeAcceptedTransactionIds = true` in the notify request. - repeated RpcAcceptedTransactionIds acceptedTransactionIds = 2; + repeated RpcAcceptanceData addedAcceptanceData = 4; } // GetBlockRequestMessage requests information about a specific block @@ -386,12 +386,17 @@ message GetVirtualChainFromBlockRequestMessage{ bool includeAcceptedTransactionIds = 2; } -message RpcAcceptedTransactionIds{ - string acceptingBlockHash = 1; - repeated string acceptedTransactionIds = 2; +message RpcMergesetBlockAcceptanceData { + string mergedBlockHash = 1; + repeated string acceptedTransactionIds = 2; // order according to order within block } -message GetVirtualChainFromBlockResponseMessage{ +message RpcAcceptanceData { + uint64 acceptingBlueScore = 1; + repeated RpcMergesetBlockAcceptanceData mergesetBlockAcceptanceData = 2; +} + +message GetVirtualChainFromBlockResponseMessage { // The chain blocks that were removed, in high-to-low order repeated string removedChainBlockHashes = 1; @@ -400,7 +405,7 @@ message GetVirtualChainFromBlockResponseMessage{ // The transactions accepted by each block in addedChainBlockHashes. // Will be filled only if `includeAcceptedTransactionIds = true` in the request. 
- repeated RpcAcceptedTransactionIds acceptedTransactionIds = 2; + repeated RpcAcceptanceData addedAcceptanceData = 4; RPCError error = 1000; } diff --git a/rpc/grpc/core/src/convert/message.rs b/rpc/grpc/core/src/convert/message.rs index 67ac60650..770c6a50d 100644 --- a/rpc/grpc/core/src/convert/message.rs +++ b/rpc/grpc/core/src/convert/message.rs @@ -272,8 +272,8 @@ from!(item: RpcResult<&kaspa_rpc_core::GetVirtualChainFromBlockResponse>, protow Self { removed_chain_block_hashes: item.removed_chain_block_hashes.iter().map(|x| x.to_string()).collect(), added_chain_block_hashes: item.added_chain_block_hashes.iter().map(|x| x.to_string()).collect(), - accepted_transaction_ids: item.accepted_transaction_ids.iter().map(|x| x.into()).collect(), error: None, + added_acceptance_data: item.added_acceptance_data.iter().map(|x| x.into()).collect(), } }); @@ -756,7 +756,7 @@ try_from!(item: &protowire::GetVirtualChainFromBlockResponseMessage, RpcResult, _>>()?, added_chain_block_hashes: item.added_chain_block_hashes.iter().map(|x| RpcHash::from_str(x)).collect::, _>>()?, - accepted_transaction_ids: item.accepted_transaction_ids.iter().map(|x| x.try_into()).collect::, _>>()?, + added_acceptance_data: item.added_acceptance_data.iter().map(|x| x.try_into()).collect::, _>>()?, } }); diff --git a/rpc/grpc/core/src/convert/notification.rs b/rpc/grpc/core/src/convert/notification.rs index 2f2273af1..032408aaa 100644 --- a/rpc/grpc/core/src/convert/notification.rs +++ b/rpc/grpc/core/src/convert/notification.rs @@ -45,7 +45,7 @@ from!(item: &kaspa_rpc_core::VirtualChainChangedNotification, VirtualChainChange Self { removed_chain_block_hashes: item.removed_chain_block_hashes.iter().map(|x| x.to_string()).collect(), added_chain_block_hashes: item.added_chain_block_hashes.iter().map(|x| x.to_string()).collect(), - accepted_transaction_ids: item.accepted_transaction_ids.iter().map(|x| x.into()).collect(), + added_acceptance_data: item.added_acceptance_data.iter().map(|x| 
x.into()).collect(), } }); @@ -142,7 +142,7 @@ try_from!(item: &VirtualChainChangedNotificationMessage, kaspa_rpc_core::Virtual added_chain_block_hashes: Arc::new( item.added_chain_block_hashes.iter().map(|x| RpcHash::from_str(x)).collect::, _>>()?, ), - accepted_transaction_ids: Arc::new(item.accepted_transaction_ids.iter().map(|x| x.try_into()).collect::, _>>()?), + added_acceptance_data: Arc::new(item.added_acceptance_data.iter().map(|x| x.try_into()).collect::, _>>()?), } }); diff --git a/rpc/grpc/core/src/convert/tx.rs b/rpc/grpc/core/src/convert/tx.rs index 7a75a0255..16a016608 100644 --- a/rpc/grpc/core/src/convert/tx.rs +++ b/rpc/grpc/core/src/convert/tx.rs @@ -1,8 +1,8 @@ use crate::protowire; use crate::{from, try_from}; -use kaspa_rpc_core::{FromRpcHex, RpcError, RpcHash, RpcResult, RpcScriptVec, ToRpcHex}; +use kaspa_rpc_core::{FromRpcHex, RpcError, RpcHash, RpcMergesetBlockAcceptanceData, RpcResult, RpcScriptVec, ToRpcHex}; use std::str::FromStr; - +use kaspa_consensus_core::tx::TransactionId; // ---------------------------------------------------------------------------- // rpc_core to protowire // ---------------------------------------------------------------------------- @@ -75,10 +75,19 @@ from!(item: &kaspa_rpc_core::RpcTransactionOutputVerboseData, protowire::RpcTran } }); -from!(item: &kaspa_rpc_core::RpcAcceptedTransactionIds, protowire::RpcAcceptedTransactionIds, { +from!(item: &kaspa_rpc_core::RpcAcceptanceData, protowire::RpcAcceptanceData, { Self { - accepting_block_hash: item.accepting_block_hash.to_string(), - accepted_transaction_ids: item.accepted_transaction_ids.iter().map(|x| x.to_string()).collect(), + accepting_blue_score: item.accepting_blue_score, + mergeset_block_acceptance_data: item.mergeset_block_acceptance_data.iter() + .map( + |RpcMergesetBlockAcceptanceData{ + merged_block_hash, + accepted_transaction_ids, + }| + protowire::RpcMergesetBlockAcceptanceData{ + merged_block_hash: merged_block_hash.to_string(), + 
accepted_transaction_ids: accepted_transaction_ids.iter().map(TransactionId::to_string).collect(), + }).collect(), } }); @@ -182,10 +191,22 @@ try_from!(item: &protowire::RpcTransactionOutputVerboseData, kaspa_rpc_core::Rpc } }); -try_from!(item: &protowire::RpcAcceptedTransactionIds, kaspa_rpc_core::RpcAcceptedTransactionIds, { +try_from!(item: &protowire::RpcAcceptanceData, kaspa_rpc_core::RpcAcceptanceData, { Self { - accepting_block_hash: RpcHash::from_str(&item.accepting_block_hash)?, - accepted_transaction_ids: item.accepted_transaction_ids.iter().map(|x| RpcHash::from_str(x)).collect::, _>>()?, + accepting_blue_score: item.accepting_blue_score, + mergeset_block_acceptance_data: item.mergeset_block_acceptance_data.iter() + .map( + |protowire::RpcMergesetBlockAcceptanceData{ + merged_block_hash, + accepted_transaction_ids, + }| kaspa_rpc_core::RpcResult::Ok(RpcMergesetBlockAcceptanceData{ + merged_block_hash: RpcHash::from_str(merged_block_hash)?, + accepted_transaction_ids: accepted_transaction_ids.iter() + .map(|txid| RpcHash::from_str(txid)) + .collect::, _>>()?, + }) + ) + .collect::, _>>()?, } }); diff --git a/rpc/service/src/converter/consensus.rs b/rpc/service/src/converter/consensus.rs index c744300e5..76393c9b7 100644 --- a/rpc/service/src/converter/consensus.rs +++ b/rpc/service/src/converter/consensus.rs @@ -1,12 +1,13 @@ use async_trait::async_trait; use kaspa_addresses::Address; use kaspa_consensus_core::{ + acceptance_data::MergesetBlockAcceptanceData, block::Block, config::Config, hashing::tx::hash, header::Header, tx::{MutableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, - ChainPath, + ChainPath }; use kaspa_consensus_notify::notification::{self as consensus_notify, Notification as ConsensusNotification}; use kaspa_consensusmanager::{ConsensusManager, ConsensusProxy}; @@ -14,9 +15,9 @@ use kaspa_math::Uint256; use kaspa_mining::model::{owner_txs::OwnerTransactions, TransactionIdSet}; use 
kaspa_notify::converter::Converter; use kaspa_rpc_core::{ - BlockAddedNotification, Notification, RpcAcceptedTransactionIds, RpcBlock, RpcBlockVerboseData, RpcHash, RpcMempoolEntry, - RpcMempoolEntryByAddress, RpcResult, RpcTransaction, RpcTransactionInput, RpcTransactionOutput, RpcTransactionOutputVerboseData, - RpcTransactionVerboseData, + BlockAddedNotification, Notification, RpcAcceptanceData, RpcBlock, RpcBlockVerboseData, RpcHash, RpcMempoolEntry, + RpcMempoolEntryByAddress, RpcMergesetBlockAcceptanceData, RpcResult, RpcTransaction, RpcTransactionInput, RpcTransactionOutput, + RpcTransactionOutputVerboseData, RpcTransactionVerboseData, }; use kaspa_txscript::{extract_script_pub_key_address, script_class::ScriptClass}; use std::{collections::HashMap, fmt::Debug, sync::Arc}; @@ -163,17 +164,27 @@ impl ConsensusConverter { consensus: &ConsensusProxy, chain_path: &ChainPath, merged_blocks_limit: Option, - ) -> RpcResult> { + ) -> RpcResult> { let acceptance_data = consensus.async_get_blocks_acceptance_data(chain_path.added.clone(), merged_blocks_limit).await.unwrap(); - Ok(chain_path - .added - .iter() - .zip(acceptance_data.iter()) - .map(|(hash, block_data)| RpcAcceptedTransactionIds { - accepting_block_hash: hash.to_owned(), - accepted_transaction_ids: block_data + let mut acceptance_daa_scores = Vec::with_capacity(chain_path.added.len()); + for hash in chain_path.added.iter() { + acceptance_daa_scores.push(consensus.async_get_compact_header_data(*hash).await?.blue_score); + } + Ok(acceptance_data.iter() + .zip(acceptance_daa_scores) + .map(|(block_data, accepting_blue_score)| RpcAcceptanceData { + accepting_blue_score, + mergeset_block_acceptance_data: block_data .iter() - .flat_map(|x| x.accepted_transactions.iter().map(|tx| tx.transaction_id)) + .map(|MergesetBlockAcceptanceData { block_hash, accepted_transactions }| { + let mut accepted_transactions = accepted_transactions.clone(); + accepted_transactions.sort_unstable_by_key(|entry| 
entry.index_within_block); + + RpcMergesetBlockAcceptanceData { + merged_block_hash: *block_hash, + accepted_transaction_ids: accepted_transactions.into_iter().map(|v| v.transaction_id).collect(), + } + }) .collect(), }) .collect()) diff --git a/testing/integration/src/daemon_integration_tests.rs b/testing/integration/src/daemon_integration_tests.rs index 460cf049c..dd2653848 100644 --- a/testing/integration/src/daemon_integration_tests.rs +++ b/testing/integration/src/daemon_integration_tests.rs @@ -115,8 +115,8 @@ async fn daemon_mining_test() { .unwrap(); assert_eq!(vc.removed_chain_block_hashes.len(), 0); assert_eq!(vc.added_chain_block_hashes.len(), 10); - assert_eq!(vc.accepted_transaction_ids.len(), 10); - for accepted_txs_pair in vc.accepted_transaction_ids { + assert_eq!(vc.added_acceptance_data.len(), 10); + for accepted_txs_pair in vc.added_acceptance_data.iter().flat_map(|v| &v.mergeset_block_acceptance_data) { assert_eq!(accepted_txs_pair.accepted_transaction_ids.len(), 1); } } @@ -234,8 +234,8 @@ async fn daemon_utxos_propagation_test() { let vc = rpc_client2.get_virtual_chain_from_block(kaspa_consensus::params::SIMNET_GENESIS.hash, true).await.unwrap(); assert_eq!(vc.removed_chain_block_hashes.len(), 0); assert_eq!(vc.added_chain_block_hashes.len() as u64, initial_blocks); - assert_eq!(vc.accepted_transaction_ids.len() as u64, initial_blocks); - for accepted_txs_pair in vc.accepted_transaction_ids { + assert_eq!(vc.added_acceptance_data.len() as u64, initial_blocks); + for accepted_txs_pair in vc.added_acceptance_data.iter().flat_map(|v| &v.mergeset_block_acceptance_data) { assert_eq!(accepted_txs_pair.accepted_transaction_ids.len(), 1); } From 52fa6b6fd85b045e6b886a015d0749fdcc36439b Mon Sep 17 00:00:00 2001 From: max143672 Date: Tue, 24 Dec 2024 20:30:34 +0400 Subject: [PATCH 58/58] style fmt --- consensus/notify/src/notification.rs | 7 ++++++- consensus/src/consensus/mod.rs | 10 ++-------- consensus/src/processes/parents_builder.rs | 7 ++----- 
rpc/core/src/convert/notification.rs | 20 ++++++++++++++------ rpc/core/src/model/message.rs | 4 ++-- rpc/core/src/model/tests.rs | 4 ++-- rpc/grpc/core/src/convert/tx.rs | 2 +- rpc/service/src/converter/consensus.rs | 5 +++-- 8 files changed, 32 insertions(+), 27 deletions(-) diff --git a/consensus/notify/src/notification.rs b/consensus/notify/src/notification.rs index 6a70c0db2..c17ded91c 100644 --- a/consensus/notify/src/notification.rs +++ b/consensus/notify/src/notification.rs @@ -119,7 +119,12 @@ impl VirtualChainChangedNotification { added_chain_blocks_acceptance_data: Arc>>, added_chain_block_blue_scores: Arc>, ) -> Self { - Self { added_chain_block_hashes, removed_chain_block_hashes, added_chain_block_blue_scores, added_chain_blocks_acceptance_data } + Self { + added_chain_block_hashes, + removed_chain_block_hashes, + added_chain_block_blue_scores, + added_chain_blocks_acceptance_data, + } } } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 04d7c649c..f2c0d8ff8 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -60,20 +60,14 @@ use kaspa_consensus_core::{ pruning::PruningImportError, tx::TxResult, }, - header::{ - Header, - CompactHeaderData - }, + header::{CompactHeaderData, Header}, merkle::calc_hash_merkle_root, muhash::MuHashExtensions, network::NetworkType, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, - BlockHashSet, - BlueWorkType, - ChainPath, - HashMapCustomHasher, + BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher, }; use kaspa_consensus_notify::root::ConsensusNotificationRoot; diff --git a/consensus/src/processes/parents_builder.rs b/consensus/src/processes/parents_builder.rs index 1ab9f2122..9e880d778 100644 --- a/consensus/src/processes/parents_builder.rs +++ 
b/consensus/src/processes/parents_builder.rs @@ -212,6 +212,7 @@ mod tests { use super::ParentsManager; use itertools::Itertools; + use kaspa_consensus_core::header::CompactHeaderData; use kaspa_consensus_core::{ blockhash::{BlockHashes, ORIGIN}, header::Header, @@ -220,7 +221,6 @@ mod tests { use kaspa_database::prelude::{ReadLock, StoreError, StoreResult}; use kaspa_hashes::Hash; use parking_lot::RwLock; - use kaspa_consensus_core::header::CompactHeaderData; struct HeaderStoreMock { map: RwLock>, @@ -250,10 +250,7 @@ mod tests { Ok(self.map.read().get(&hash).unwrap().header.clone()) } - fn get_compact_header_data( - &self, - hash: kaspa_hashes::Hash, - ) -> Result { + fn get_compact_header_data(&self, hash: kaspa_hashes::Hash) -> Result { unimplemented!() } diff --git a/rpc/core/src/convert/notification.rs b/rpc/core/src/convert/notification.rs index 25e39f97e..12bd5da83 100644 --- a/rpc/core/src/convert/notification.rs +++ b/rpc/core/src/convert/notification.rs @@ -58,12 +58,20 @@ impl From<&consensus_notify::VirtualChainChangedNotification> for VirtualChainCh .zip(item.added_chain_block_blue_scores.iter()) .map(|(acceptance_data, &accepting_blue_score)| RpcAcceptanceData { accepting_blue_score, - mergeset_block_acceptance_data: acceptance_data.iter().map(|MergesetBlockAcceptanceData{ block_hash, accepted_transactions }| { - let mut accepted_transactions:Vec<_> = accepted_transactions.to_vec(); - accepted_transactions.sort_unstable_by_key(|entry| entry.index_within_block); - RpcMergesetBlockAcceptanceData{ merged_block_hash: *block_hash, accepted_transaction_ids: accepted_transactions.into_iter().map(|AcceptedTxEntry{ transaction_id, .. 
}| transaction_id).collect() } - }).collect() - + mergeset_block_acceptance_data: acceptance_data + .iter() + .map(|MergesetBlockAcceptanceData { block_hash, accepted_transactions }| { + let mut accepted_transactions: Vec<_> = accepted_transactions.to_vec(); + accepted_transactions.sort_unstable_by_key(|entry| entry.index_within_block); + RpcMergesetBlockAcceptanceData { + merged_block_hash: *block_hash, + accepted_transaction_ids: accepted_transactions + .into_iter() + .map(|AcceptedTxEntry { transaction_id, .. }| transaction_id) + .collect(), + } + }) + .collect(), }) .collect() }), diff --git a/rpc/core/src/model/message.rs b/rpc/core/src/model/message.rs index 8af88196d..904a50c96 100644 --- a/rpc/core/src/model/message.rs +++ b/rpc/core/src/model/message.rs @@ -6,11 +6,11 @@ use kaspa_notify::subscription::{context::SubscriptionContext, single::UtxosChan use kaspa_utils::hex::ToHex; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use std::io::ErrorKind; use std::{ fmt::{Display, Formatter}, sync::Arc, }; -use std::io::ErrorKind; use workflow_serializer::prelude::*; pub type RpcExtraData = Vec; @@ -921,7 +921,7 @@ impl Deserializer for GetVirtualChainFromBlockResponse { fn deserialize(reader: &mut R) -> std::io::Result { let _version = load!(u16, reader)?; if _version != 254 { - return Err(std::io::Error::new(ErrorKind::Other,"Expected 254-th version")) + return Err(std::io::Error::new(ErrorKind::Other, "Expected 254-th version")); } let removed_chain_block_hashes = load!(Vec, reader)?; let added_chain_block_hashes = load!(Vec, reader)?; diff --git a/rpc/core/src/model/tests.rs b/rpc/core/src/model/tests.rs index 11de11ff7..73dc6cba5 100644 --- a/rpc/core/src/model/tests.rs +++ b/rpc/core/src/model/tests.rs @@ -648,13 +648,13 @@ mod mockery { impl Mock for RpcMergesetBlockAcceptanceData { fn mock() -> Self { - RpcMergesetBlockAcceptanceData{ merged_block_hash: mock(), accepted_transaction_ids: mock() } + RpcMergesetBlockAcceptanceData { 
merged_block_hash: mock(), accepted_transaction_ids: mock() } } } impl Mock for RpcAcceptanceData { fn mock() -> Self { - RpcAcceptanceData { accepting_blue_score: mock(), mergeset_block_acceptance_data: mock() } + RpcAcceptanceData { accepting_blue_score: mock(), mergeset_block_acceptance_data: mock() } } } diff --git a/rpc/grpc/core/src/convert/tx.rs b/rpc/grpc/core/src/convert/tx.rs index 16a016608..82c0507c9 100644 --- a/rpc/grpc/core/src/convert/tx.rs +++ b/rpc/grpc/core/src/convert/tx.rs @@ -1,8 +1,8 @@ use crate::protowire; use crate::{from, try_from}; +use kaspa_consensus_core::tx::TransactionId; use kaspa_rpc_core::{FromRpcHex, RpcError, RpcHash, RpcMergesetBlockAcceptanceData, RpcResult, RpcScriptVec, ToRpcHex}; use std::str::FromStr; -use kaspa_consensus_core::tx::TransactionId; // ---------------------------------------------------------------------------- // rpc_core to protowire // ---------------------------------------------------------------------------- diff --git a/rpc/service/src/converter/consensus.rs b/rpc/service/src/converter/consensus.rs index 76393c9b7..1e664a86f 100644 --- a/rpc/service/src/converter/consensus.rs +++ b/rpc/service/src/converter/consensus.rs @@ -7,7 +7,7 @@ use kaspa_consensus_core::{ hashing::tx::hash, header::Header, tx::{MutableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, - ChainPath + ChainPath, }; use kaspa_consensus_notify::notification::{self as consensus_notify, Notification as ConsensusNotification}; use kaspa_consensusmanager::{ConsensusManager, ConsensusProxy}; @@ -170,7 +170,8 @@ impl ConsensusConverter { for hash in chain_path.added.iter() { acceptance_daa_scores.push(consensus.async_get_compact_header_data(*hash).await?.blue_score); } - Ok(acceptance_data.iter() + Ok(acceptance_data + .iter() .zip(acceptance_daa_scores) .map(|(block_data, accepting_blue_score)| RpcAcceptanceData { accepting_blue_score,