Optimization for Contracts Trie calculation.
amosStarkware committed Jul 29, 2024
1 parent 6236ba9 commit 03b96a8
Showing 10 changed files with 300 additions and 137 deletions.
@@ -96,9 +96,12 @@ pub async fn tree_computation_flow(
     )
     .expect("Failed to create the updated skeleton tree");
 
-    StorageTrie::create::<TreeHashFunctionImpl>(updated_skeleton.into(), leaf_modifications)
-        .await
-        .expect("Failed to create the filled tree")
+    StorageTrie::create_no_additional_output::<TreeHashFunctionImpl>(
+        updated_skeleton.into(),
+        leaf_modifications,
+    )
+    .await
+    .expect("Failed to create the filled tree")
 }
 
 pub async fn single_tree_flow_test(
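For reference, a minimal, self-contained sketch of the `create` / `create_no_additional_output` split that the hunk above switches to. The types and signatures here are toy stand-ins, not the committer's actual API: the point is only that one constructor returns the filled tree together with optional additional output, while the other runs the same computation and returns just the tree.

```rust
use std::collections::HashMap;

// Toy filled tree; the real StorageTrie/FilledTree types are far richer.
struct FilledTreeSketch {
    root_hash: u64,
}

impl FilledTreeSketch {
    // Hypothetical: fill the tree and also return per-node "additional output".
    fn create(leaf_modifications: &HashMap<u64, u64>) -> (Self, Option<HashMap<u64, u64>>) {
        let root_hash = leaf_modifications.values().sum();
        (Self { root_hash }, Some(leaf_modifications.clone()))
    }

    // Hypothetical: callers that only need the tree, like the storage-trie flow
    // above, skip building the additional output entirely.
    fn create_no_additional_output(leaf_modifications: &HashMap<u64, u64>) -> Self {
        Self { root_hash: leaf_modifications.values().sum() }
    }
}

fn main() {
    let mods = HashMap::from([(1_u64, 10_u64), (2, 20)]);
    let tree = FilledTreeSketch::create_no_additional_output(&mods);
    let (_tree, output) = FilledTreeSketch::create(&mods);
    println!(
        "root hash (toy): {}, output entries: {}",
        tree.root_hash,
        output.unwrap().len()
    );
}
```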
@@ -13,10 +13,13 @@ pub enum FilledTreeError<L: Leaf> {
     #[error("Deleted leaf at index {0:?} appears in the updated skeleton tree.")]
     DeletedLeafInSkeleton(NodeIndex),
     #[error("Double update at node {index:?}. Existing value: {existing_value:?}.")]
-    DoubleUpdate {
+    DoubleOutputUpdate {
         index: NodeIndex,
         existing_value: Box<FilledNode<L>>,
     },
+    #[error("Double update at node {index:?}.")]
+    //TODO(Amos): Add the existing value to the error message.
+    DoubleAdditionalOutputUpdate { index: NodeIndex },
     #[error(transparent)]
     Leaf(#[from] LeafError),
     #[error("Missing node at index {0:?}.")]
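As a hedged illustration only (stub types; the detection site is not part of this hunk), here is one way a double update on an additional-output map could surface the new `DoubleAdditionalOutputUpdate` variant:

```rust
use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct NodeIndex(u128);

// Stripped-down stand-in for FilledTreeError; only the variant of interest.
#[derive(Debug)]
enum FilledTreeErrorSketch {
    // Per the TODO above, the existing value may be added to this variant later.
    DoubleAdditionalOutputUpdate { index: NodeIndex },
}

// Record a node's additional output, erroring if one was already recorded.
fn record_output(
    outputs: &mut HashMap<NodeIndex, u64>,
    index: NodeIndex,
    output: u64,
) -> Result<(), FilledTreeErrorSketch> {
    if outputs.insert(index, output).is_some() {
        return Err(FilledTreeErrorSketch::DoubleAdditionalOutputUpdate { index });
    }
    Ok(())
}

fn main() {
    let mut outputs = HashMap::new();
    record_output(&mut outputs, NodeIndex(3), 42).unwrap();
    let err = record_output(&mut outputs, NodeIndex(3), 43).unwrap_err();
    println!("{err:?}");
}
```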
122 changes: 74 additions & 48 deletions crates/committer/src/patricia_merkle_tree/filled_tree/forest.rs
@@ -7,7 +7,7 @@ use crate::patricia_merkle_tree::filled_tree::tree::FilledTree;
 use crate::patricia_merkle_tree::filled_tree::tree::{
     ClassesTrie, ContractsTrie, StorageTrie, StorageTrieMap,
 };
-use crate::patricia_merkle_tree::node_data::leaf::{ContractState, LeafModifications};
+use crate::patricia_merkle_tree::node_data::leaf::{ContractState, Leaf, LeafModifications};
 use crate::patricia_merkle_tree::types::NodeIndex;
 use crate::patricia_merkle_tree::updated_skeleton_tree::hash_function::ForestHashFunction;
 use crate::patricia_merkle_tree::updated_skeleton_tree::skeleton_forest::UpdatedSkeletonForest;
@@ -16,7 +16,6 @@ use crate::storage::storage_trait::Storage;
 
 use std::collections::HashMap;
 use std::sync::Arc;
-use tokio::task::JoinSet;
 
 pub struct FilledForest {
     pub storage_tries: StorageTrieMap,
@@ -48,74 +47,101 @@ impl FilledForest
     }
 
     pub(crate) async fn create<TH: ForestHashFunction + 'static>(
-        mut updated_forest: UpdatedSkeletonForest,
+        updated_forest: UpdatedSkeletonForest,
         storage_updates: HashMap<ContractAddress, LeafModifications<StarknetStorageValue>>,
         classes_updates: LeafModifications<CompiledClassHash>,
         original_contracts_trie_leaves: &HashMap<NodeIndex, ContractState>,
         address_to_class_hash: &HashMap<ContractAddress, ClassHash>,
         address_to_nonce: &HashMap<ContractAddress, Nonce>,
     ) -> ForestResult<Self> {
-        let classes_trie_task = tokio::spawn(ClassesTrie::create::<TH>(
+        let classes_trie = ClassesTrie::create_no_additional_output::<TH>(
             Arc::new(updated_forest.classes_trie),
             Arc::new(classes_updates),
-        ));
-        let mut contracts_trie_modifications = HashMap::new();
-        let mut filled_storage_tries = HashMap::new();
-        let mut contracts_state_tasks = JoinSet::new();
+        )
+        .await?;
 
-        for (address, inner_updates) in storage_updates {
-            let updated_storage_trie = updated_forest
-                .storage_tries
-                .remove(&address)
-                .ok_or(ForestError::MissingUpdatedSkeleton(address))?;
-
-            let original_contract_state = original_contracts_trie_leaves
-                .get(&NodeIndex::from_contract_address(&address))
-                .ok_or(ForestError::MissingContractCurrentState(address))?;
-            contracts_state_tasks.spawn(Self::new_contract_state::<TH>(
-                address,
-                *(address_to_nonce
-                    .get(&address)
-                    .unwrap_or(&original_contract_state.nonce)),
-                *(address_to_class_hash
-                    .get(&address)
-                    .unwrap_or(&original_contract_state.class_hash)),
-                updated_storage_trie,
-                inner_updates,
-            ));
-        }
-
-        while let Some(result) = contracts_state_tasks.join_next().await {
-            let (address, new_contract_state, filled_storage_trie) = result??;
-            contracts_trie_modifications.insert(
-                NodeIndex::from_contract_address(&address),
-                new_contract_state,
-            );
-            filled_storage_tries.insert(address, filled_storage_trie);
-        }
-
-        let contracts_trie_task = tokio::spawn(ContractsTrie::create::<TH>(
+        let (contracts_trie, storage_tries) = ContractsTrie::create::<TH>(
             Arc::new(updated_forest.contracts_trie),
-            Arc::new(contracts_trie_modifications),
-        ));
+            Arc::new(FilledForest::get_contracts_trie_leaf_input(
+                original_contracts_trie_leaves,
+                storage_updates,
+                updated_forest.storage_tries,
+                address_to_class_hash,
+                address_to_nonce,
+            )?),
+        )
+        .await?;
 
         Ok(Self {
-            storage_tries: filled_storage_tries,
-            contracts_trie: contracts_trie_task.await??,
-            classes_trie: classes_trie_task.await??,
+            storage_tries: storage_tries
+                .unwrap_or_else(|| panic!("Missing storage tries."))
+                .into_iter()
+                .map(|(node_index, storage_trie)| (node_index.to_contract_address(), storage_trie))
+                .collect(),
+            contracts_trie,
+            classes_trie,
         })
     }
 
+    // TODO(Amos, 1/8/2024): Can this be done more efficiently?
+    // should error be returned if keys are missing?
+    fn get_contracts_trie_leaf_input(
+        original_contracts_trie_leaves: &HashMap<NodeIndex, ContractState>,
+        mut storage_updates: HashMap<ContractAddress, LeafModifications<StarknetStorageValue>>,
+        mut storage_tries: HashMap<ContractAddress, UpdatedSkeletonTreeImpl>,
+        address_to_class_hash: &HashMap<ContractAddress, ClassHash>,
+        address_to_nonce: &HashMap<ContractAddress, Nonce>,
+    ) -> ForestResult<HashMap<NodeIndex, <ContractState as Leaf>::I>> {
+        let mut leaf_index_to_leaf_input = HashMap::new();
+        assert_eq!(storage_updates.len(), storage_tries.len());
+        // `storage_updates` includes all modified contracts, see
+        // StateDiff::actual_storage_updates().
+        for contract_address in storage_updates.keys().cloned().collect::<Vec<_>>() {
+            let original_contract_state = original_contracts_trie_leaves
+                .get(&NodeIndex::from_contract_address(&contract_address))
+                .ok_or(ForestError::MissingContractCurrentState(contract_address))?;
+            leaf_index_to_leaf_input.insert(
+                NodeIndex::from_contract_address(&contract_address),
+                (
+                    NodeIndex::from_contract_address(&contract_address),
+                    *(address_to_nonce
+                        .get(&contract_address)
+                        .unwrap_or(&original_contract_state.nonce)),
+                    *(address_to_class_hash
+                        .get(&contract_address)
+                        .unwrap_or(&original_contract_state.class_hash)),
+                    storage_tries.remove(&contract_address).unwrap_or_else(|| {
+                        panic!(
+                            "Missing update skeleton tree for contract {:?}",
+                            contract_address
+                        )
+                    }),
+                    storage_updates
+                        .remove(&contract_address)
+                        .unwrap_or_else(|| {
+                            panic!(
+                                "Missing storage updates for contract {:?}",
+                                contract_address
+                            )
+                        }),
+                ),
+            );
+        }
+        Ok(leaf_index_to_leaf_input)
+    }
+
     async fn new_contract_state<TH: ForestHashFunction + 'static>(
         contract_address: ContractAddress,
         new_nonce: Nonce,
         new_class_hash: ClassHash,
         updated_storage_trie: UpdatedSkeletonTreeImpl,
         inner_updates: LeafModifications<StarknetStorageValue>,
     ) -> ForestResult<(ContractAddress, ContractState, StorageTrie)> {
-        let filled_storage_trie =
-            StorageTrie::create::<TH>(Arc::new(updated_storage_trie), Arc::new(inner_updates))
-                .await?;
+        let filled_storage_trie = StorageTrie::create_no_additional_output::<TH>(
+            Arc::new(updated_storage_trie),
+            Arc::new(inner_updates),
+        )
+        .await?;
         let new_root_hash = filled_storage_trie.get_root_hash();
         Ok((
             contract_address,
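A hedged, self-contained sketch of the leaf-input assembly that the new `get_contracts_trie_leaf_input` performs: for every modified contract, take the new nonce and class hash when provided, fall back to the original contract state otherwise, and pair the result with that contract's storage updates. Toy integer types stand in for `ContractAddress`, `Nonce`, `ClassHash` and friends, the updated skeleton trie is omitted, and the function name `leaf_inputs` is illustrative only.

```rust
use std::collections::HashMap;

// Toy stand-in for ContractState: just the fields the fallback logic needs.
#[derive(Clone, Copy, Debug)]
struct ContractState {
    nonce: u64,
    class_hash: u64,
}

// address -> (nonce, class_hash, storage updates), keyed by a toy u64 address.
fn leaf_inputs(
    original: &HashMap<u64, ContractState>,
    mut storage_updates: HashMap<u64, Vec<(u64, u64)>>,
    new_nonces: &HashMap<u64, u64>,
    new_class_hashes: &HashMap<u64, u64>,
) -> HashMap<u64, (u64, u64, Vec<(u64, u64)>)> {
    let mut out = HashMap::new();
    for address in storage_updates.keys().copied().collect::<Vec<_>>() {
        // Fall back to the original state when no explicit update exists.
        let prev = original[&address];
        let nonce = *new_nonces.get(&address).unwrap_or(&prev.nonce);
        let class_hash = *new_class_hashes.get(&address).unwrap_or(&prev.class_hash);
        let updates = storage_updates.remove(&address).expect("key was just read");
        out.insert(address, (nonce, class_hash, updates));
    }
    out
}

fn main() {
    let original = HashMap::from([(0xA_u64, ContractState { nonce: 1, class_hash: 7 })]);
    let updates = HashMap::from([(0xA_u64, vec![(1_u64, 5_u64)])]);
    let inputs = leaf_inputs(&original, updates, &HashMap::new(), &HashMap::from([(0xA, 9)]));
    println!("{inputs:?}");
}
```

With the per-contract inputs assembled up front, a single `ContractsTrie::create` call can return both the contracts trie and the filled storage tries, which is what lets the commit drop the `JoinSet`-based per-contract tasks from `FilledForest::create`.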
