From b1a498c2ca82cf94fe0c57114d452f2cb9c5a607 Mon Sep 17 00:00:00 2001 From: Jason Zhu Date: Sat, 1 Feb 2025 00:09:19 +0000 Subject: [PATCH] Inline all the StableBtreeMap operations --- rs/backend/nns-dapp-exports-production.txt | 1 - rs/backend/nns-dapp-exports-test.txt | 1 - rs/backend/nns-dapp.did | 2 - rs/backend/src/accounts_store.rs | 152 +++----- rs/backend/src/accounts_store/schema.rs | 158 -------- .../accounts_in_unbounded_stable_btree_map.rs | 139 ------- .../schema/label_serialization.rs | 107 ------ .../schema/label_serialization/tests.rs | 49 --- rs/backend/src/accounts_store/schema/map.rs | 89 ----- rs/backend/src/accounts_store/schema/proxy.rs | 124 ------ .../schema/proxy/enum_boilerplate.rs | 78 ---- .../accounts_store/schema/proxy/migration.rs | 114 ------ rs/backend/src/accounts_store/schema/tests.rs | 357 ------------------ rs/backend/src/accounts_store/tests.rs | 2 - rs/backend/src/accounts_store/toy_data.rs | 7 +- rs/backend/src/main.rs | 41 +- rs/backend/src/state/tests.rs | 2 +- 17 files changed, 64 insertions(+), 1359 deletions(-) delete mode 100644 rs/backend/src/accounts_store/schema.rs delete mode 100644 rs/backend/src/accounts_store/schema/accounts_in_unbounded_stable_btree_map.rs delete mode 100644 rs/backend/src/accounts_store/schema/label_serialization.rs delete mode 100644 rs/backend/src/accounts_store/schema/label_serialization/tests.rs delete mode 100644 rs/backend/src/accounts_store/schema/map.rs delete mode 100644 rs/backend/src/accounts_store/schema/proxy.rs delete mode 100644 rs/backend/src/accounts_store/schema/proxy/enum_boilerplate.rs delete mode 100644 rs/backend/src/accounts_store/schema/proxy/migration.rs delete mode 100644 rs/backend/src/accounts_store/schema/tests.rs diff --git a/rs/backend/nns-dapp-exports-production.txt b/rs/backend/nns-dapp-exports-production.txt index 957b1b50964..c04cd85ff65 100644 --- a/rs/backend/nns-dapp-exports-production.txt +++ b/rs/backend/nns-dapp-exports-production.txt @@ -22,5 +22,4 @@ canister_update register_hardware_wallet canister_update rename_canister canister_update rename_sub_account canister_update set_imported_tokens -canister_update step_migration main diff --git a/rs/backend/nns-dapp-exports-test.txt b/rs/backend/nns-dapp-exports-test.txt index 57bd454574f..c8c6c9a6e2e 100644 --- a/rs/backend/nns-dapp-exports-test.txt +++ b/rs/backend/nns-dapp-exports-test.txt @@ -24,5 +24,4 @@ canister_update register_hardware_wallet canister_update rename_canister canister_update rename_sub_account canister_update set_imported_tokens -canister_update step_migration main diff --git a/rs/backend/nns-dapp.did b/rs/backend/nns-dapp.did index a8fcf09616f..45702ad395d 100644 --- a/rs/backend/nns-dapp.did +++ b/rs/backend/nns-dapp.did @@ -239,8 +239,6 @@ service: (opt Config) -> { http_request: (request: HttpRequest) -> (HttpResponse) query; add_stable_asset: (asset: blob) -> (); - step_migration: (nat32) -> (); - // Methods available in the test build only: get_toy_account: (nat64) -> (GetAccountResponse) query; } diff --git a/rs/backend/src/accounts_store.rs b/rs/backend/src/accounts_store.rs index 59fcef9e419..20e8a2e95b8 100644 --- a/rs/backend/src/accounts_store.rs +++ b/rs/backend/src/accounts_store.rs @@ -1,5 +1,4 @@ //! User accounts and transactions. 
-use crate::accounts_store::schema::accounts_in_unbounded_stable_btree_map::AccountsDbAsUnboundedStableBTreeMap; use crate::multi_part_transactions_processor::MultiPartTransactionsProcessor; use crate::state::{partitions::PartitionType, with_partitions, StableState}; use crate::stats::Stats; @@ -7,7 +6,9 @@ use candid::CandidType; use dfn_candid::Candid; use histogram::AccountsStoreHistogram; use ic_base_types::{CanisterId, PrincipalId}; -use ic_stable_structures::{storable::Bound, Storable}; +use ic_stable_structures::{ + memory_manager::VirtualMemory, storable::Bound, DefaultMemoryImpl, StableBTreeMap, Storable, +}; use icp_ledger::{AccountIdentifier, BlockIndex, Subaccount}; use itertools::Itertools; use on_wire::{FromWire, IntoWire}; @@ -16,14 +17,8 @@ use std::borrow::Cow; use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap, VecDeque}; use std::fmt; -use std::ops::RangeBounds; pub mod histogram; -pub mod schema; -use schema::{ - proxy::{AccountsDb, AccountsDbAsProxy}, - AccountsDbTrait, -}; // This limit is for DoS protection but should be increased if we get close to // the limit. @@ -35,61 +30,41 @@ const MAX_SUB_ACCOUNT_ID: u8 = u8::MAX - 1; // Can be revisited if users find this too restrictive. const MAX_IMPORTED_TOKENS: i32 = 20; -/// Accounts, transactions and related data. -/// -/// Note: Some monitoring fields are not included in the `Eq` and `PartialEq` implementations. Additionally, please note -/// that the `ic_stable_structures::BTreeMap` does not have an implementation of `Eq`, so special care needs to be taken when comparing -/// data backed by stable structures. -#[derive(Default)] -#[cfg_attr(test, derive(Eq, PartialEq))] +/// Accounts and related data. pub struct AccountsStore { // TODO(NNS1-720): Use AccountIdentifier directly as the key for this HashMap - accounts_db: schema::proxy::AccountsDbAsProxy, + accounts_db: StableBTreeMap, Account, VirtualMemory>, accounts_db_stats: AccountsDbStats, } +impl Default for AccountsStore { + fn default() -> Self { + Self::new() + } +} + impl fmt::Debug for AccountsStore { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, - "AccountsStore{{accounts_db: {:?}, accounts_db_stats: {:?}}}", - self.accounts_db, self.accounts_db_stats, + "AccountsStore{{StableBTreeMap{{.. {} entries}}, accounts_db_stats: {:?}}}", + self.accounts_db.len(), + self.accounts_db_stats, ) } } -impl AccountsDbTrait for AccountsStore { - fn db_insert_account(&mut self, account_key: &[u8], account: Account) { - self.accounts_db.db_insert_account(account_key, account); - } - fn db_contains_account(&self, account_key: &[u8]) -> bool { - self.accounts_db.db_contains_account(account_key) - } - fn db_get_account(&self, account_key: &[u8]) -> Option { - self.accounts_db.db_get_account(account_key) - } - fn db_remove_account(&mut self, account_key: &[u8]) { - self.accounts_db.db_remove_account(account_key); - } - fn db_accounts_len(&self) -> u64 { - self.accounts_db.db_accounts_len() - } - fn iter(&self) -> Box, Account)> + '_> { - self.accounts_db.iter() - } - fn first_key_value(&self) -> Option<(Vec, Account)> { - self.accounts_db.first_key_value() - } - fn last_key_value(&self) -> Option<(Vec, Account)> { - self.accounts_db.last_key_value() - } - fn values(&self) -> Box + '_> { - self.accounts_db.values() - } - fn range(&self, key_range: impl RangeBounds>) -> Box, Account)> + '_> { - self.accounts_db.range(key_range) +/// Check whether two account databases contain the same data. 
+/// +/// It should be possible to use this to confirm that data has been preserved during a migration. +#[cfg(test)] +impl PartialEq for AccountsStore { + fn eq(&self, other: &Self) -> bool { + self.accounts_db.range(..).eq(other.accounts_db.range(..)) && self.accounts_db_stats == other.accounts_db_stats } } +#[cfg(test)] +impl Eq for AccountsStore {} #[derive(Default, CandidType, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct AccountsDbStats { @@ -318,11 +293,10 @@ pub enum DetachCanisterResponse { impl AccountsStore { /// Creates a new `AccountsStore`. Should be called in `init`. + #[must_use] pub fn new() -> Self { let accounts_partition = with_partitions(|partitions| partitions.get(PartitionType::Accounts.memory_id())); - let accounts_db = AccountsDbAsProxy::from(AccountsDb::UnboundedStableBTreeMap( - AccountsDbAsUnboundedStableBTreeMap::new(accounts_partition), - )); + let accounts_db = StableBTreeMap::new(accounts_partition); let accounts_db_stats = AccountsDbStats::default(); Self { @@ -332,12 +306,11 @@ impl AccountsStore { } /// Recovers the state from stable memory. Should be called in `post_upgrade`. + #[must_use] pub fn new_restored(serializable_state: AccountsStoreSerializableState) -> Self { let AccountsStoreSerializableState { accounts_db_stats } = serializable_state; let accounts_partition = with_partitions(|partitions| partitions.get(PartitionType::Accounts.memory_id())); - let accounts_db = AccountsDbAsProxy::from(AccountsDb::UnboundedStableBTreeMap( - AccountsDbAsUnboundedStableBTreeMap::load(accounts_partition), - )); + let accounts_db = StableBTreeMap::load(accounts_partition); Self { accounts_db, @@ -345,22 +318,10 @@ impl AccountsStore { } } - /// Determines whether a migration is being performed. - #[must_use] - #[allow(dead_code)] - pub fn migration_in_progress(&self) -> bool { - self.accounts_db.migration_in_progress() - } - /// Advances the migration by one step. - /// - /// Note: This is a pass-through to the underlying `AccountsDb::step_migration`. Please see that for further details. - pub fn step_migration(&mut self, step_size: u32) { - self.accounts_db.step_migration(step_size); - } #[must_use] pub fn get_account(&self, caller: PrincipalId) -> Option { let account_identifier = AccountIdentifier::from(caller); - if let Some(account) = self.accounts_db.db_get_account(&account_identifier.to_vec()) { + if let Some(account) = self.accounts_db.get(&account_identifier.to_vec()) { // If the principal is empty, return None so that the browser will call add_account // which will allow us to set the principal. let principal = account.principal?; @@ -403,19 +364,17 @@ impl AccountsStore { pub fn add_account(&mut self, caller: PrincipalId) -> bool { self.assert_account_limit(); let account_identifier = AccountIdentifier::from(caller); - if let Some(account) = self.accounts_db.db_get_account(&account_identifier.to_vec()) { + if let Some(account) = self.accounts_db.get(&account_identifier.to_vec()) { if account.principal.is_none() { // This is an old account that needs a one-off fix to set the principal and update the transactions. 
let mut account = account.clone(); account.principal = Some(caller); - self.accounts_db - .db_insert_account(&account_identifier.to_vec(), account); + self.accounts_db.insert(account_identifier.to_vec(), account); } false } else { let new_account = Account::new(caller, account_identifier); - self.accounts_db - .db_insert_account(&account_identifier.to_vec(), new_account); + self.accounts_db.insert(account_identifier.to_vec(), new_account); true } @@ -430,7 +389,7 @@ impl AccountsStore { return CreateSubAccountResponse::NameTooLong; } - let Some(mut account) = self.accounts_db.db_get_account(&account_identifier.to_vec()) else { + let Some(mut account) = self.accounts_db.get(&account_identifier.to_vec()) else { return CreateSubAccountResponse::AccountNotFound; }; @@ -443,8 +402,7 @@ impl AccountsStore { let named_sub_account = NamedSubAccount::new(sub_account_name.clone(), sub_account_identifier); account.sub_accounts.insert(sub_account_id, named_sub_account); - self.accounts_db - .db_insert_account(&account_identifier.to_vec(), account); + self.accounts_db.insert(account_identifier.to_vec(), account); self.accounts_db_stats.sub_accounts_count += 1; @@ -464,14 +422,14 @@ impl AccountsStore { if !Self::validate_account_name(&request.new_name) { RenameSubAccountResponse::NameTooLong - } else if let Some(mut account) = self.accounts_db.db_get_account(&account_identifier) { + } else if let Some(mut account) = self.accounts_db.get(&account_identifier) { if let Some(sub_account) = account .sub_accounts .values_mut() .find(|sub_account| sub_account.account_identifier == request.account_identifier) { sub_account.name = request.new_name; - self.accounts_db.db_insert_account(&account_identifier, account); + self.accounts_db.insert(account_identifier, account); RenameSubAccountResponse::Ok } else { RenameSubAccountResponse::SubAccountNotFound @@ -490,7 +448,7 @@ impl AccountsStore { if !Self::validate_account_name(&request.name) { RegisterHardwareWalletResponse::NameTooLong - } else if let Some(mut account) = self.accounts_db.db_get_account(&account_identifier.to_vec()).clone() { + } else if let Some(mut account) = self.accounts_db.get(&account_identifier.to_vec()).clone() { if account.hardware_wallet_accounts.len() == (u8::MAX as usize) { RegisterHardwareWalletResponse::HardwareWalletLimitExceeded } else if account @@ -507,8 +465,7 @@ impl AccountsStore { account .hardware_wallet_accounts .sort_unstable_by_key(|hw| hw.name.clone()); - self.accounts_db - .db_insert_account(&account_identifier.to_vec(), account); + self.accounts_db.insert(account_identifier.to_vec(), account); self.accounts_db_stats.hardware_wallet_accounts_count += 1; RegisterHardwareWalletResponse::Ok @@ -534,7 +491,7 @@ impl AccountsStore { let account_identifier = AccountIdentifier::from(caller).to_vec(); - let Some(mut account) = self.accounts_db.db_get_account(&account_identifier) else { + let Some(mut account) = self.accounts_db.get(&account_identifier) else { return AttachCanisterResponse::AccountNotFound; }; @@ -598,7 +555,7 @@ impl AccountsStore { account.canisters.push(new_canister); account.canisters.sort(); - self.accounts_db.db_insert_account(&account_identifier, account); + self.accounts_db.insert(account_identifier, account); AttachCanisterResponse::Ok } @@ -607,7 +564,7 @@ impl AccountsStore { if Self::validate_canister_name(&request.name) { let account_identifier = AccountIdentifier::from(caller).to_vec(); - if let Some(mut account) = self.accounts_db.db_get_account(&account_identifier) { + if let Some(mut account) = 
self.accounts_db.get(&account_identifier) { if !request.name.is_empty() && account.canisters.iter().any(|c| c.name == request.name) { return RenameCanisterResponse::NameAlreadyTaken; } @@ -619,7 +576,7 @@ impl AccountsStore { ..existing_canister }); account.canisters.sort(); - self.accounts_db.db_insert_account(&account_identifier, account); + self.accounts_db.insert(account_identifier, account); RenameCanisterResponse::Ok } else { RenameCanisterResponse::CanisterNotFound @@ -636,10 +593,10 @@ impl AccountsStore { pub fn detach_canister(&mut self, caller: PrincipalId, request: DetachCanisterRequest) -> DetachCanisterResponse { let account_identifier = AccountIdentifier::from(caller).to_vec(); - if let Some(mut account) = self.accounts_db.db_get_account(&account_identifier) { + if let Some(mut account) = self.accounts_db.get(&account_identifier) { if let Some(index) = Self::find_canister_index(&account, request.canister_id) { account.canisters.remove(index); - self.accounts_db.db_insert_account(&account_identifier, account); + self.accounts_db.insert(account_identifier, account); DetachCanisterResponse::Ok } else { DetachCanisterResponse::CanisterNotFound @@ -652,7 +609,7 @@ impl AccountsStore { #[must_use] pub fn get_canisters(&self, caller: PrincipalId) -> Vec { let account_identifier = AccountIdentifier::from(caller); - if let Some(account) = self.accounts_db.db_get_account(&account_identifier.to_vec()) { + if let Some(account) = self.accounts_db.get(&account_identifier.to_vec()) { account.canisters.clone() } else { Vec::new() @@ -670,19 +627,19 @@ impl AccountsStore { }; } let account_identifier = AccountIdentifier::from(caller).to_vec(); - let Some(mut account) = self.accounts_db.db_get_account(&account_identifier) else { + let Some(mut account) = self.accounts_db.get(&account_identifier) else { return SetImportedTokensResponse::AccountNotFound; }; account.imported_tokens = Some(new_imported_tokens); - self.accounts_db.db_insert_account(&account_identifier, account); + self.accounts_db.insert(account_identifier, account); SetImportedTokensResponse::Ok } pub fn get_imported_tokens(&mut self, caller: PrincipalId) -> GetImportedTokensResponse { let account_identifier = AccountIdentifier::from(caller).to_vec(); - let Some(account) = self.accounts_db.db_get_account(&account_identifier) else { + let Some(account) = self.accounts_db.get(&account_identifier) else { return GetImportedTokensResponse::AccountNotFound; }; @@ -690,10 +647,10 @@ impl AccountsStore { } pub fn get_stats(&self, stats: &mut Stats) { - stats.accounts_count = self.accounts_db.db_accounts_len(); + stats.accounts_count = self.accounts_db.len(); stats.sub_accounts_count = self.accounts_db_stats.sub_accounts_count; stats.hardware_wallet_accounts_count = self.accounts_db_stats.hardware_wallet_accounts_count; - stats.migration_countdown = Some(self.accounts_db.migration_countdown()); + stats.migration_countdown = Some(0); } #[must_use] @@ -718,12 +675,21 @@ impl AccountsStore { } fn assert_account_limit(&self) { - let db_accounts_len = self.accounts_db.db_accounts_len(); + let db_accounts_len = self.accounts_db.len(); assert!( db_accounts_len < ACCOUNT_LIMIT, "Pre migration account limit exceeded {db_accounts_len}" ); } + + #[cfg(test)] + pub(crate) fn range( + &self, + key_range: impl std::ops::RangeBounds>, + ) -> Box, Account)> + '_> { + let iterator = self.accounts_db.range(key_range); + Box::new(iterator) + } } /// The component(s) of the `AccountsStore` that are serialized and de-serialized during upgrades. 
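Note on the pattern above: the patch removes the `AccountsDbTrait` indirection (`db_get_account`, `db_insert_account`, and friends) in favour of calling the `StableBTreeMap` directly. Below is a minimal, self-contained sketch of that read-modify-write pattern; it substitutes `DefaultMemoryImpl` and `String` values for the canister's `VirtualMemory` partition and `Account` type, so the key/value choices and the standalone `main` are illustrative assumptions rather than code from this repository.

use ic_stable_structures::{DefaultMemoryImpl, StableBTreeMap};

fn main() {
    // The canister keys its map by AccountIdentifier bytes and stores `Account`
    // values in a memory-manager partition; a plain `DefaultMemoryImpl` and
    // `String` values keep this sketch self-contained.
    let mut accounts: StableBTreeMap<Vec<u8>, String, DefaultMemoryImpl> =
        StableBTreeMap::new(DefaultMemoryImpl::default());

    let key = vec![1u8, 2, 3];
    accounts.insert(key.clone(), "account for user 1".to_string());

    // Inlined read-modify-write: `get` returns an owned copy, so every mutation
    // is written back with an explicit `insert`, as in the call sites rewritten
    // by this patch.
    if let Some(mut account) = accounts.get(&key) {
        account.push_str(" (renamed)");
        accounts.insert(key.clone(), account);
    }

    assert_eq!(accounts.len(), 1);
    assert_eq!(accounts.get(&key).as_deref(), Some("account for user 1 (renamed)"));
}

Because `get` hands back an owned value rather than a reference into stable memory, each mutation ends in an explicit `insert`; that is the shape every rewritten call site above takes.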
diff --git a/rs/backend/src/accounts_store/schema.rs b/rs/backend/src/accounts_store/schema.rs deleted file mode 100644 index aecca1399fc..00000000000 --- a/rs/backend/src/accounts_store/schema.rs +++ /dev/null @@ -1,158 +0,0 @@ -//! Data storage schemas. - -// Schemas -pub mod accounts_in_unbounded_stable_btree_map; -pub mod map; -pub mod proxy; - -// Mechanics -use crate::accounts_store::Account; -use candid::{CandidType, Deserialize}; -use core::ops::RangeBounds; -use serde::Serialize; -use strum_macros::EnumIter; -mod label_serialization; -#[cfg(test)] -pub mod tests; - -/// API methods that must be implemented by any account store. -/// -/// # Example -/// -/// ``` -/// use nns_dapp::accounts_store::schema::map::AccountsDbAsMap; -/// use nns_dapp::accounts_store::Account; -/// use icp_ledger::{AccountIdentifier}; -/// use ic_base_types::{CanisterId, PrincipalId}; -/// use crate::nns_dapp::accounts_store::schema::AccountsDbTrait; -/// -/// let mut mock = AccountsDbAsMap::default(); -/// let caller = PrincipalId::new_user_test_id(1); // Typically a user making an API call. -/// let account_identifier = AccountIdentifier::from(caller); -/// let new_account = Account::new(caller, account_identifier); -/// mock.db_insert_account(&account_identifier.to_vec(), new_account.clone()); -/// assert!(mock.db_contains_account(&account_identifier.to_vec())); -/// assert_eq!(mock.db_accounts_len(), 1); -/// assert_eq!(mock.db_get_account(&account_identifier.to_vec()), Some(new_account.clone())); -/// mock.db_remove_account(&account_identifier.to_vec()); -/// assert!(!mock.db_contains_account(&account_identifier.to_vec())); -/// assert_eq!(mock.db_accounts_len(), 0); -/// ``` -/// -/// Note: The key is `&[u8]` for historical reasons. It _may_ be possible -/// to change this to `AccountIdentifier`. -pub trait AccountsDbTrait { - // Basic CRUD - - /// Inserts an account into the data store. - fn db_insert_account(&mut self, account_key: &[u8], account: Account); - /// Checks if an account is in the data store. - fn db_contains_account(&self, account_key: &[u8]) -> bool; - /// Gets an account from the data store. - fn db_get_account(&self, account_key: &[u8]) -> Option; - /// Removes an account from the data store. - fn db_remove_account(&mut self, account_key: &[u8]); - - // Statistics - - /// Returns the number of accounts in the data store. - /// - /// Note: This is purely for statistical purposes and is the only statistic - /// currently measured for accounts. More statistics _may_ be added in future. - fn db_accounts_len(&self) -> u64; - - // Utilities - - /// Tries to modify an account, if it exists, with the given function. The modified account is - /// saved only if the function returns a successful result. - /// - /// Warning: This does NOT guarantee any locking mechanism. IC canisters can execute only one - /// update function at a time, so there is no issue of several functions executing at the same - /// time. As such, protection against concurrent access is not a priority. - /// - /// # Arguments - /// - `account_key`: the account lookup key, typically `account_identifier.to_vec()`. - /// - `f`: a function that takes a mutable reference to the account as an argument and returns - /// a result. - /// - /// # Returns - /// - `None`, if the account does not exist. - /// - `Some(result)`, where `result` is the value returned by `f`, if the account exists. 
- #[cfg(test)] - fn db_try_with_account(&mut self, account_key: &[u8], f: F) -> Option> - where - // The closure takes an account as an argument. It may return any type. - F: Fn(&mut Account) -> Result, - { - if let Some(mut account) = self.db_get_account(account_key) { - let ans = f(&mut account); - if ans.is_ok() { - self.db_insert_account(account_key, account); - } - Some(ans) - } else { - None - } - } - - /// Iterates over entries in the data store. - fn iter(&self) -> Box, Account)> + '_>; - - /// Returns the first key-value pair in the map. The key in this pair is the maximum key in the map. - fn first_key_value(&self) -> Option<(Vec, Account)>; - - /// Returns the last key-value pair in the map. The key in this pair is the maximum key in the map. - fn last_key_value(&self) -> Option<(Vec, Account)>; - - /// Iterates over accounts in the data store. - fn values(&self) -> Box + '_> { - let iterator = self.iter().map(|(_key, value)| value); - Box::new(iterator) - } - - /// Returns an iterator over the entries in the map where keys belong to the specified range. - fn range(&self, key_range: impl RangeBounds>) -> Box, Account)> + '_>; -} - -/// A label to identify the schema. -/// -/// Note: The numeric representations of these labels are guaranteed to be stable. -#[repr(u32)] -#[derive(Clone, Copy, Debug, EnumIter, PartialEq, Eq, CandidType, Serialize, Deserialize)] -pub enum SchemaLabel { - /// Data is stored on the heap in a `BTreeMap` and serialized to stable memory on upgrade. - /// Implemented by: [`map::AccountsDbAsMap`] - Map = 0, - /// Every account is serialized separately and stored in a `StableBTreeMap`. The remaining - /// data, mostly consisting of transactions, is serialized into a single large blob in the - /// `pre_upgrade` hook. - AccountsInStableMemory = 1, -} - -impl Default for SchemaLabel { - fn default() -> Self { - Self::AccountsInStableMemory - } -} - -/// Schema Label as written to stable memory. -pub type SchemaLabelBytes = [u8; SchemaLabel::MAX_BYTES]; - -/// Errors that can occur when de-serializing a schema label. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum SchemaLabelError { - InvalidChecksum, - InvalidLabel(u32), - InsufficientBytes, -} - -/// A trait for data stores that support `BTreeMap` for account storage. -#[cfg(test)] -pub trait AccountsDbBTreeMapTrait { - /// Creates a database from a map of accounts. - #[cfg(test)] - fn from_map(map: std::collections::BTreeMap, Account>) -> Self; - /// Provides the accounts as a map. - #[cfg(test)] - fn as_map(&self) -> &std::collections::BTreeMap, Account>; -} diff --git a/rs/backend/src/accounts_store/schema/accounts_in_unbounded_stable_btree_map.rs b/rs/backend/src/accounts_store/schema/accounts_in_unbounded_stable_btree_map.rs deleted file mode 100644 index e30d28513ea..00000000000 --- a/rs/backend/src/accounts_store/schema/accounts_in_unbounded_stable_btree_map.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! Data storage schema `AccountsInStableMemory`: Accounts data is stored in a `StableBTreeMap`, -//! other data is on the heap and serialized in `pre_upgrade` hooks. -//! -//! Data is stored in a [`ic_stable_structures::btreemap::BTreeMap`](https://docs.rs/ic-stable-structures/0.6.0/ic_stable_structures/btreemap/struct.BTreeMap.html) -//! with values that are [`ic_stable_structures::storable::Bound::Unbounded`](https://docs.rs/ic-stable-structures/0.6.0/ic_stable_structures/storable/enum.Bound.html#variant.Unbounded) -//! 
as described on the [dfinity forum](https://forum.dfinity.org/t/stable-structures-removing-the-bounded-size-requirement/21167). - -use super::{Account, AccountsDbTrait}; -use core::ops::RangeBounds; -use ic_stable_structures::memory_manager::VirtualMemory; -use ic_stable_structures::DefaultMemoryImpl; -use ic_stable_structures::{btreemap::BTreeMap as StableBTreeMap, Memory}; -#[cfg(test)] -use std::collections::BTreeMap as StdBTreeMap; -use std::fmt; - -pub type ProductionMemoryType = VirtualMemory; - -pub struct AccountsDbAsUnboundedStableBTreeMap -where - M: Memory, -{ - accounts: StableBTreeMap, Account, M>, -} - -impl AccountsDbAsUnboundedStableBTreeMap -where - M: Memory, -{ - /// Creates a new, empty database. - pub fn new(memory: M) -> Self { - Self { - accounts: StableBTreeMap::new(memory), - } - } - /// Loads a database. - pub fn load(memory: M) -> Self { - Self { - accounts: StableBTreeMap::load(memory), - } - } -} - -#[cfg(test)] -impl Default for AccountsDbAsUnboundedStableBTreeMap { - fn default() -> Self { - Self::new(DefaultMemoryImpl::default()) - } -} - -impl AccountsDbTrait for AccountsDbAsUnboundedStableBTreeMap -where - M: Memory, -{ - fn db_insert_account(&mut self, account_key: &[u8], account: Account) { - self.accounts.insert(account_key.to_vec(), account); - } - fn db_contains_account(&self, account_key: &[u8]) -> bool { - self.accounts.contains_key(&account_key.to_vec()) - } - fn db_get_account(&self, account_key: &[u8]) -> Option { - self.accounts.get(&account_key.to_vec()) // TODO: Change the trait to &Vec. - } - fn db_remove_account(&mut self, account_key: &[u8]) { - self.accounts.remove(&account_key.to_vec()); - } - fn db_accounts_len(&self) -> u64 { - self.accounts.len() - } - fn iter(&self) -> Box, Account)> + '_> { - let iterator = self.accounts.iter(); - Box::new(iterator) - } - fn first_key_value(&self) -> Option<(Vec, Account)> { - self.accounts.first_key_value() - } - fn last_key_value(&self) -> Option<(Vec, Account)> { - self.accounts.last_key_value() - } - fn values(&self) -> Box + '_> { - let iterator = self.accounts.iter().map(|(_key, value)| value); - Box::new(iterator) - } - fn range(&self, key_range: impl RangeBounds>) -> Box, Account)> + '_> { - let iterator = self.accounts.range(key_range); - Box::new(iterator) - } -} - -impl fmt::Debug for AccountsDbAsUnboundedStableBTreeMap -where - M: Memory, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - write!( - f, - "AccountsDbAsUnboundedStableBTreeMap {{ accounts: StableBTreeMap{{.. {} entries}} }}", - self.accounts.len() - ) - } -} - -#[cfg(test)] -mod tests { - use super::super::tests::test_accounts_db; - use super::AccountsDbAsUnboundedStableBTreeMap; - use super::*; - use crate::accounts_store::schema::tests::toy_account; - use crate::accounts_store::schema::AccountsDbTrait; - use ic_stable_structures::memory_manager::{MemoryId, MemoryManager}; - - // Test that the AccountsDbTrait implementation works. - test_accounts_db!(AccountsDbAsUnboundedStableBTreeMap::default()); - - #[test] - fn should_be_able_to_load_existing_database() { - // Prepare a virtual memory, as in production. - let raw_memory = DefaultMemoryImpl::default(); - raw_memory.grow(5); - let memory_manager = MemoryManager::init(raw_memory); - let random_memory_id = MemoryId::new(9); - // ... and some accounts to store. - let accounts: StdBTreeMap<_, _> = vec![(b"key"[..].to_owned(), toy_account(1, 2))].into_iter().collect(); - // Store the accounts in a new database. 
- let mut new_db = AccountsDbAsUnboundedStableBTreeMap::new(memory_manager.get(random_memory_id)); - for (key, account) in accounts.iter() { - new_db.db_insert_account(&key, account.clone()); - } - let new_accounts: StdBTreeMap<_, _> = new_db.range(..).collect(); - assert_eq!(accounts, new_accounts, "Failed to store accounts in new database."); - // Load the accounts from a new database using the same memory. - let loaded_db = AccountsDbAsUnboundedStableBTreeMap::load(memory_manager.get(random_memory_id)); - let loaded_accounts: StdBTreeMap<_, _> = loaded_db.range(..).collect(); - assert_eq!( - new_accounts, loaded_accounts, - "Failed to load accounts from existing stable memory." - ); - } -} diff --git a/rs/backend/src/accounts_store/schema/label_serialization.rs b/rs/backend/src/accounts_store/schema/label_serialization.rs deleted file mode 100644 index 01b9adc8053..00000000000 --- a/rs/backend/src/accounts_store/schema/label_serialization.rs +++ /dev/null @@ -1,107 +0,0 @@ -//! Methods for serializing and deserializing schema labels. -use super::{SchemaLabel, SchemaLabelBytes, SchemaLabelError}; -use ic_crypto_sha2::Sha256; -use std::convert::{TryFrom, TryInto}; - -#[cfg(test)] -mod tests; - -/// Internal type for just the serialized schema label without a checksum. -type SchemaBytesWithoutChecksum = [u8; SchemaLabel::LABEL_BYTES]; - -impl SchemaLabel { - /// When serialized, the offset of the bytes containing the label. - pub const LABEL_OFFSET: usize = 0; - /// The number of bytes needed to store just the schema label. - pub const LABEL_BYTES: usize = 4; - /// When serialized, the offset of the bytes containing the checksum, if included. - pub const CHECKSUM_OFFSET: usize = Self::LABEL_BYTES; - /// The length of the SHA256 checksum of the schema label. - pub const CHECKSUM_BYTES: usize = 32; - /// The number of bytes needed to store the schema label and its checksum. - pub const MAX_BYTES: usize = Self::CHECKSUM_OFFSET + Self::CHECKSUM_BYTES; - /// String used to distinguish this checksum from checksums for other purposes. - pub const DOMAIN_SEPARATOR: &'static [u8; 12] = b"schema-label"; - - /// Checksum of the label. - fn checksum(self_bytes: [u8; Self::LABEL_BYTES]) -> [u8; Self::CHECKSUM_BYTES] { - let mut state = Sha256::new(); - state.write(Self::DOMAIN_SEPARATOR); - state.write(&self_bytes); - state.finish() - } - - /// Adds a checksum to the label bytes. 
- fn with_checksum(label_bytes: SchemaBytesWithoutChecksum) -> SchemaLabelBytes { - let mut bytes = [0u8; SchemaLabel::MAX_BYTES]; - bytes[SchemaLabel::LABEL_OFFSET..SchemaLabel::LABEL_OFFSET + SchemaLabel::LABEL_BYTES] - .copy_from_slice(&label_bytes); - let checksum = SchemaLabel::checksum(label_bytes); - bytes[SchemaLabel::CHECKSUM_OFFSET..SchemaLabel::CHECKSUM_OFFSET + SchemaLabel::CHECKSUM_BYTES] - .copy_from_slice(&checksum); - bytes - } -} - -impl TryFrom for SchemaLabel { - type Error = SchemaLabelError; - fn try_from(value: u32) -> Result { - match value { - 0 => Ok(Self::Map), - 1 => Ok(Self::AccountsInStableMemory), - other => Err(SchemaLabelError::InvalidLabel(other)), - } - } -} - -impl From for SchemaBytesWithoutChecksum { - fn from(label: SchemaLabel) -> [u8; SchemaLabel::LABEL_BYTES] { - (label as u32).to_le_bytes() - } -} - -impl TryFrom<&SchemaBytesWithoutChecksum> for SchemaLabel { - type Error = SchemaLabelError; - fn try_from(value: &[u8; SchemaLabel::LABEL_BYTES]) -> Result { - let label_num = u32::from_le_bytes(*value); - Self::try_from(label_num) - } -} - -impl From for SchemaLabelBytes { - fn from(label: SchemaLabel) -> [u8; SchemaLabel::MAX_BYTES] { - let label_bytes = SchemaBytesWithoutChecksum::from(label); - SchemaLabel::with_checksum(label_bytes) - } -} - -impl TryFrom<&SchemaLabelBytes> for SchemaLabel { - type Error = SchemaLabelError; - fn try_from(value: &[u8; SchemaLabel::MAX_BYTES]) -> Result { - let label_bytes: [u8; SchemaLabel::LABEL_BYTES] = value - [SchemaLabel::LABEL_OFFSET..SchemaLabel::LABEL_OFFSET + SchemaLabel::LABEL_BYTES] - .try_into() - .map_err(|_| SchemaLabelError::InsufficientBytes)?; - let actual_checksum = - &value[SchemaLabel::CHECKSUM_OFFSET..SchemaLabel::CHECKSUM_OFFSET + SchemaLabel::CHECKSUM_BYTES]; - let expected_checksum = Self::checksum(label_bytes); - if expected_checksum == *actual_checksum { - Self::try_from(&label_bytes) - } else { - Err(SchemaLabelError::InvalidChecksum) - } - } -} - -impl TryFrom<&[u8]> for SchemaLabel { - type Error = SchemaLabelError; - fn try_from(value: &[u8]) -> Result { - let bytes: &SchemaLabelBytes = value - .chunks(SchemaLabel::MAX_BYTES) - .next() - .ok_or(SchemaLabelError::InsufficientBytes)? // Zero bytes lead to no chunks. - .try_into() - .map_err(|_| SchemaLabelError::InsufficientBytes)?; // There are some bytes but not enough to make a full chunk. - Self::try_from(bytes) - } -} diff --git a/rs/backend/src/accounts_store/schema/label_serialization/tests.rs b/rs/backend/src/accounts_store/schema/label_serialization/tests.rs deleted file mode 100644 index 877b26b47aa..00000000000 --- a/rs/backend/src/accounts_store/schema/label_serialization/tests.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Tests for schema label serilaization. 
- -use super::*; -use crate::accounts_store::schema::SchemaLabel; -use pretty_assertions::assert_eq; - -#[test] -fn valid_label_with_invalid_checksum_should_fail() { - let label_bytes = SchemaLabelBytes::from(SchemaLabel::Map); - let mut label_bytes_with_invalid_checksum = label_bytes; - label_bytes_with_invalid_checksum[SchemaLabel::CHECKSUM_OFFSET] ^= 1; - assert_eq!( - Err(SchemaLabelError::InvalidChecksum), - SchemaLabel::try_from(&label_bytes_with_invalid_checksum) - ); -} - -#[test] -fn empty_slice_should_fail_with_length_error() { - let empty_slice: &[u8] = &[]; - assert_eq!( - Err(SchemaLabelError::InsufficientBytes), - SchemaLabel::try_from(empty_slice) - ); -} - -#[test] -fn short_slice_should_fail_with_length_error() { - let short_slice: &[u8] = &[8u8; SchemaLabel::MAX_BYTES - 1]; - assert_eq!( - Err(SchemaLabelError::InsufficientBytes), - SchemaLabel::try_from(short_slice) - ); -} - -#[test] -fn adequate_slice_should_succeed() { - let valid_label_bytes = SchemaLabelBytes::from(SchemaLabel::Map); - for surplus in [0, 100] { - let mut valid_label_bytes_with_surplus = vec![9; SchemaLabel::MAX_BYTES + surplus]; - valid_label_bytes_with_surplus[..SchemaLabel::MAX_BYTES].copy_from_slice(&valid_label_bytes); - assert_eq!( - Ok(SchemaLabel::Map), - SchemaLabel::try_from(&valid_label_bytes_with_surplus[..]), - "Failed to parse with a surplus of {} bytes", - surplus - ); - } -} diff --git a/rs/backend/src/accounts_store/schema/map.rs b/rs/backend/src/accounts_store/schema/map.rs deleted file mode 100644 index 49f63c442ae..00000000000 --- a/rs/backend/src/accounts_store/schema/map.rs +++ /dev/null @@ -1,89 +0,0 @@ -//! An accounts DB implemented as a hash map. - -#[cfg(test)] -use super::AccountsDbBTreeMapTrait; -use super::{Account, AccountsDbTrait}; -use core::fmt; -use core::ops::RangeBounds; -use std::collections::BTreeMap; - -#[derive(Default)] -#[cfg_attr(test, derive(Eq, PartialEq))] -pub struct AccountsDbAsMap { - accounts: BTreeMap, Account>, -} - -impl AccountsDbTrait for AccountsDbAsMap { - fn db_insert_account(&mut self, account_key: &[u8], account: Account) { - self.accounts.insert(account_key.to_vec(), account); - } - fn db_contains_account(&self, account_key: &[u8]) -> bool { - self.accounts.contains_key(account_key) - } - fn db_get_account(&self, account_key: &[u8]) -> Option { - self.accounts.get(account_key).cloned() - } - fn db_remove_account(&mut self, account_key: &[u8]) { - self.accounts.remove(account_key); - } - fn db_accounts_len(&self) -> u64 { - self.accounts.len() as u64 - } - fn iter(&self) -> Box, Account)> + '_> { - let iterator = self.accounts.iter().map(|(key, val)| (key.clone(), val.clone())); - Box::new(iterator) - } - fn first_key_value(&self) -> Option<(Vec, Account)> { - self.accounts - .first_key_value() - .map(|(key, val)| (key.clone(), val.clone())) - } - fn last_key_value(&self) -> Option<(Vec, Account)> { - self.accounts - .last_key_value() - .map(|(key, val)| (key.clone(), val.clone())) - } - fn values(&self) -> Box + '_> { - let iterator = self.accounts.values().cloned(); - Box::new(iterator) - } - fn range(&self, key_range: impl RangeBounds>) -> Box, Account)> + '_> { - let iterator = self - .accounts - .range(key_range) - .map(|(key, val)| (key.clone(), val.clone())); - Box::new(iterator) - } -} - -#[cfg(test)] -impl AccountsDbBTreeMapTrait for AccountsDbAsMap { - #[cfg(test)] - fn from_map(map: BTreeMap, Account>) -> Self { - Self { accounts: map } - } - #[cfg(test)] - fn as_map(&self) -> &BTreeMap, Account> { - &self.accounts - } -} - -impl 
fmt::Debug for AccountsDbAsMap { - /// Summarizes the accounts DB contents for debug printouts. - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "AccountsDbAsMap{{... {} entries}}", self.db_accounts_len()) - } -} - -#[cfg(test)] -mod tests { - use super::super::tests::{assert_map_conversions_work, test_accounts_db}; - use super::AccountsDbAsMap; - - test_accounts_db!(AccountsDbAsMap::default()); - - #[test] - fn map_conversions_should_work() { - assert_map_conversions_work::(); - } -} diff --git a/rs/backend/src/accounts_store/schema/proxy.rs b/rs/backend/src/accounts_store/schema/proxy.rs deleted file mode 100644 index 4a4b26044c7..00000000000 --- a/rs/backend/src/accounts_store/schema/proxy.rs +++ /dev/null @@ -1,124 +0,0 @@ -//! Accounts DB that delegates API calls to underlying implementations. -//! -//! The proxy manages migrations from one implementation to another. -use super::accounts_in_unbounded_stable_btree_map::{AccountsDbAsUnboundedStableBTreeMap, ProductionMemoryType}; -use super::{map::AccountsDbAsMap, Account, AccountsDbTrait}; -use core::fmt; -use core::ops::RangeBounds; - -mod enum_boilerplate; -mod migration; - -/// An accounts database delegates API calls to underlying implementations. -/// -/// Notes: -/// - The proxy manages migrations from one implementation to another, if applicable. -/// - The proxy code itself will specify which databases are currently in -/// use and how to migrate from one database to another. -/// - It is the responsibility of the post-install hook to look at any -/// version information and set up the db accordingly. -/// -/// # Current data storage -/// - Accounts are stored as a map. No migrations are undertaken. -#[derive(Debug)] -pub struct AccountsDbAsProxy { - authoritative_db: AccountsDb, - migration: Option, -} - -impl Default for AccountsDbAsProxy { - fn default() -> Self { - AccountsDb::Map(AccountsDbAsMap::default()).into() - } -} - -struct Migration { - /// The database being migrated to - db: AccountsDb, - /// The next account to migrate. - next_to_migrate: Option>, -} - -impl fmt::Debug for Migration { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Note: The `next_to_migrate` field is rarely interesting so is omitted. - // The database type and number of entries suffices. - self.db.fmt(f) - } -} - -impl From for AccountsDbAsProxy { - fn from(db: AccountsDb) -> Self { - AccountsDbAsProxy { - authoritative_db: db, - migration: None, - } - } -} - -#[derive(Debug)] -pub enum AccountsDb { - Map(AccountsDbAsMap), - UnboundedStableBTreeMap(AccountsDbAsUnboundedStableBTreeMap), -} - -impl AccountsDbTrait for AccountsDbAsProxy { - /// Inserts into all the underlying databases. - fn db_insert_account(&mut self, account_key: &[u8], account: Account) { - self.authoritative_db.db_insert_account(account_key, account.clone()); - if let Some(migration) = &mut self.migration { - migration.db.db_insert_account(account_key, account); - } - } - /// Checks the authoritative database. - fn db_contains_account(&self, account_key: &[u8]) -> bool { - self.authoritative_db.db_contains_account(account_key) - } - /// Gets an account from the authoritative database. - fn db_get_account(&self, account_key: &[u8]) -> Option { - self.authoritative_db.db_get_account(account_key) - } - /// Removes an account from all underlying databases. 
- fn db_remove_account(&mut self, account_key: &[u8]) { - self.authoritative_db.db_remove_account(account_key); - if let Some(migration) = self.migration.as_mut() { - migration.db.db_remove_account(account_key); - } - } - /// Gets the length from the authoritative database. - fn db_accounts_len(&self) -> u64 { - self.authoritative_db.db_accounts_len() - } - /// Iterates over the entries of the authoritative database. - fn iter(&self) -> Box, Account)> + '_> { - self.authoritative_db.iter() - } - /// Gets the first key-value pair in the authoritative database. - fn first_key_value(&self) -> Option<(Vec, Account)> { - self.authoritative_db.first_key_value() - } - /// Gets the last key-value pair in the authoritative database. - fn last_key_value(&self) -> Option<(Vec, Account)> { - self.authoritative_db.last_key_value() - } - /// Iterates over the values of the authoritative database. - fn values(&self) -> Box + '_> { - self.authoritative_db.values() - } - /// Iterates over a range of accounts in the authoritative db. - fn range(&self, key_range: impl RangeBounds>) -> Box, Account)> + '_> { - self.authoritative_db.range(key_range) - } -} - -/// Check whether two account databases contain the same data. -/// -/// It should be possible to use this to confirm that data has been preserved during a migration. -#[cfg(test)] -impl PartialEq for AccountsDbAsProxy { - fn eq(&self, other: &Self) -> bool { - self.authoritative_db.range(..).eq(other.authoritative_db.range(..)) - } -} -#[cfg(test)] -impl Eq for AccountsDbAsProxy {} diff --git a/rs/backend/src/accounts_store/schema/proxy/enum_boilerplate.rs b/rs/backend/src/accounts_store/schema/proxy/enum_boilerplate.rs deleted file mode 100644 index da021a9dc15..00000000000 --- a/rs/backend/src/accounts_store/schema/proxy/enum_boilerplate.rs +++ /dev/null @@ -1,78 +0,0 @@ -//! Boilerplate for implementing traits for the `AccountsDb` enum. -//! -//! Each function is implemented by calling the same function on the applicable variant. There is probably a macro for this. -use super::{Account, AccountsDb, AccountsDbTrait, RangeBounds}; - -// TODO: This is boilerplate. can it be eliminated with a macro? 
-impl AccountsDbTrait for AccountsDb { - fn db_insert_account(&mut self, account_key: &[u8], account: Account) { - match self { - AccountsDb::Map(map_db) => map_db.db_insert_account(account_key, account), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.db_insert_account(account_key, account); - } - } - } - fn db_contains_account(&self, account_key: &[u8]) -> bool { - match self { - AccountsDb::Map(map_db) => map_db.db_contains_account(account_key), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.db_contains_account(account_key) - } - } - } - fn db_get_account(&self, account_key: &[u8]) -> Option { - match self { - AccountsDb::Map(map_db) => map_db.db_get_account(account_key), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.db_get_account(account_key) - } - } - } - fn db_remove_account(&mut self, account_key: &[u8]) { - match self { - AccountsDb::Map(map_db) => map_db.db_remove_account(account_key), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.db_remove_account(account_key); - } - } - } - fn db_accounts_len(&self) -> u64 { - match self { - AccountsDb::Map(map_db) => map_db.db_accounts_len(), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.db_accounts_len() - } - } - } - fn iter(&self) -> Box, Account)> + '_> { - match self { - AccountsDb::Map(map_db) => map_db.iter(), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => unbounded_stable_btree_map_db.iter(), - } - } - fn first_key_value(&self) -> Option<(Vec, Account)> { - match self { - AccountsDb::Map(map_db) => map_db.first_key_value(), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.first_key_value() - } - } - } - fn last_key_value(&self) -> Option<(Vec, Account)> { - match self { - AccountsDb::Map(map_db) => map_db.last_key_value(), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.last_key_value() - } - } - } - fn range(&self, key_range: impl RangeBounds>) -> Box, Account)> + '_> { - match self { - AccountsDb::Map(map_db) => map_db.range(key_range), - AccountsDb::UnboundedStableBTreeMap(unbounded_stable_btree_map_db) => { - unbounded_stable_btree_map_db.range(key_range) - } - } - } -} diff --git a/rs/backend/src/accounts_store/schema/proxy/migration.rs b/rs/backend/src/accounts_store/schema/proxy/migration.rs deleted file mode 100644 index 7e5df9eda15..00000000000 --- a/rs/backend/src/accounts_store/schema/proxy/migration.rs +++ /dev/null @@ -1,114 +0,0 @@ -//! Code for migration from the authoritative database to a new database. -use super::{AccountsDbAsProxy, AccountsDbTrait}; -use ic_cdk::{eprintln, println}; - -impl AccountsDbAsProxy { - /// The default number of accounts to move in a migration step. - pub const MIGRATION_STEP_SIZE: u32 = 20; - /// The maximum number of accounts to move in a migration step. - pub const MIGRATION_STEP_SIZE_MAX: u32 = 1000; - /// The progress meter count reserved for finalizing a migration. - pub const MIGRATION_FINALIZATION_BLOCKS: u32 = 1; - - /// Determines whether a migration is in progress. 
- #[must_use] - #[allow(dead_code)] - pub fn migration_in_progress(&self) -> bool { - self.migration.is_some() - } - - /// Migration countdown; when it reaches zero, the migration is complete. - /// - /// Note: This is a rough estimate of the number of blocks needed to complete the migration. - /// The approximation is caused by the following: - /// - If an entry is added or modified ahead of the "next to migrate" entry, the element will still be migrated but the delta between the size of the authoritative db and the new db will reduce by one. - /// This approximation is unlikely to be an issue but if it is, it can be corrected for with a small increase in complexity: - /// When performing CRUD, apply the operation to the new database ONLY if either: - /// - `next_to_migrate` is `None` (i.e. the migration is complete) - /// - The key is strictly less than `next_to_migrate`. - /// - If the migration encounters a batch of extremely large accounts, migration slows down. - #[must_use] - pub fn migration_countdown(&self) -> u32 { - self.migration.as_ref().map_or(0, |migration| { - let accounts_to_migrate = self - .authoritative_db - .db_accounts_len() - .saturating_sub(migration.db.db_accounts_len()); - let blocks_for_migration = accounts_to_migrate.div_ceil(u64::from(Self::MIGRATION_STEP_SIZE)); - let blocks_for_migration = u32::try_from(blocks_for_migration).unwrap_or(u32::MAX); - blocks_for_migration.saturating_add(Self::MIGRATION_FINALIZATION_BLOCKS) - }) - } - - /// Advances the migration by one step. - /// - /// # Arguments - /// - `step_size`: The maximum number of accounts to migrate on this step. - /// - This may be no larger than `Self::MIGRATION_STEP_SIZE_MAX`. If it is larger, it will be reduced. - pub fn step_migration(&mut self, step_size: u32) { - // Ensure that the step size is modest: - let step_size = step_size.clamp(1, Self::MIGRATION_STEP_SIZE_MAX); - if let Some(migration) = &mut self.migration { - if let Some(next_to_migrate) = &migration.next_to_migrate { - println!("Stepping migration: {:?} -> {:?}", self.authoritative_db, migration.db); - let mut range = self.authoritative_db.range(next_to_migrate.clone()..); - for (key, account) in (&mut range).take(usize::try_from(step_size).unwrap_or(usize::MAX)) { - migration.db.db_insert_account(&key, account); - } - migration.next_to_migrate = range.next().map(|(key, _account)| key.clone()); - } else { - self.complete_migration(); - } - } - } - - /// Completes any migration in progress. - /// - /// The migration will be cancelled, instead of completed, if an invariant check fails: - /// - The old and new databases have different lengths. - /// - (More checks MAY be added in future.) - pub fn complete_migration(&mut self) { - if let Some(migration) = self.migration.take() { - // Sanity check before calling migration complete: - { - // Number of accounts should be the same: - let old = self.authoritative_db.db_accounts_len(); - let new = migration.db.db_accounts_len(); - if old != new { - eprintln!("MIGRATION ERROR: Account migration failed: Old and new account databases have different lengths: {old} -> {new}\n Migration will be aborted."); - return; - } - } - { - // The first account in the BTreeMap should be the same. - // Given that keys are random this effectively a random account. - // - // Note: IF the migration intentionally modifies accounts, this check will have to be adjusted to compare just the invariant aspects of the account. 
- let old = self.authoritative_db.first_key_value(); - let new = migration.db.first_key_value(); - if old != new { - eprintln!("MIGRATION ERROR: Old and new account databases have different first entries: {old:?} -> {new:?}\n Migration will be aborted."); - return; - } - } - { - // The last account in the BTreeMap should be the same. - // Given that keys are random this effectively a random account. - // - // Note: IF the migration intentionally modifies accounts, this check will have to be adjusted to compare just the invariant aspects of the account. - let old = self.authoritative_db.last_key_value(); - let new = migration.db.last_key_value(); - if old != new { - eprintln!("MIGRATION ERROR: Old and new account databases have different last entries: {old:?} -> {new:?}\n Migration will be aborted."); - return; - } - } - // Sanity checks passed. Make the new database authoritative: - println!( - "Account migration complete: {:?} -> {:?}", - self.authoritative_db, migration.db - ); - self.authoritative_db = migration.db; - } - } -} diff --git a/rs/backend/src/accounts_store/schema/tests.rs b/rs/backend/src/accounts_store/schema/tests.rs deleted file mode 100644 index 6fbddf405a6..00000000000 --- a/rs/backend/src/accounts_store/schema/tests.rs +++ /dev/null @@ -1,357 +0,0 @@ -//! Generic tests for account storage. - -use super::super::{AccountIdentifier, CanisterId, NamedCanister, PrincipalId}; -use super::*; -use pretty_assertions::assert_eq; -use std::collections::{BTreeMap, BTreeSet, HashMap}; - -/// Creates a toy canister. -pub fn toy_canister(account_index: u64, canister_index: u64) -> NamedCanister { - let canister_id = CanisterId::from(canister_index); - NamedCanister { - name: format!("canister_{account_index}_{canister_index}"), - canister_id, - block_index: Some(123), - } -} - -/// Creates a toy account. The contents do not need to be meaningful; do need to have size. -pub fn toy_account(account_index: u64, num_canisters: u64) -> Account { - let principal = PrincipalId::new_user_test_id(account_index); - let account_identifier = AccountIdentifier::from(principal); - let mut account = Account { - principal: Some(principal), - account_identifier, - sub_accounts: HashMap::new(), - hardware_wallet_accounts: Vec::new(), - canisters: Vec::new(), - imported_tokens: None, - }; - // Attaches canisters to the account. - for canister_index in 0..num_canisters { - account.canisters.push(toy_canister(account_index, canister_index)); - } - // FIN - account -} - -/// Verifies that an arbitrary `AccountsDbTrait` implementation does basic crud correctly. -/// -/// Individual implementations are expected to perform their own tests for error conditions -/// relevant to them. 
-pub fn assert_basic_crud_works(mut storage: D) -where - D: AccountsDbTrait, -{ - let account_key = vec![1, 2, 3]; - let account = toy_account(1, 5); - // Create: - storage.db_insert_account(&account_key, account.clone()); - // Read: - assert!(storage.db_contains_account(&account_key)); - assert_eq!(storage.db_get_account(&account_key), Some(account.clone())); - // Update: - let updated_account = toy_account(1, 1000); - storage.db_insert_account(&account_key, updated_account.clone()); - assert!(storage.db_contains_account(&account_key)); - assert_eq!(storage.db_get_account(&account_key), Some(updated_account.clone())); - // Delete: - storage.db_remove_account(&account_key); - assert!(!storage.db_contains_account(&account_key)); - assert_eq!(storage.db_get_account(&account_key), None); -} - -/// Verifies that the update function `db_try_with_account()` works correctly. -pub fn assert_update_with_happy_path_works(mut storage: D) -where - D: AccountsDbTrait, -{ - let account_key = vec![1, 2, 3]; - let account = toy_account(1, 5); - // Create: - storage.db_insert_account(&account_key, account.clone()); - assert_eq!(storage.db_get_account(&account_key), Some(account.clone())); - // Update: - // Modify by adding a canister - { - // We will add a new canister: - let canister = toy_canister(3, 14159265359); - let expected_last_canister = canister.clone(); - // Verify that it is not already the last... - assert_ne!( - storage - .db_get_account(&account_key) - .expect("Failed to get account") - .canisters - .last() - .expect("Account should have had canisters"), - &expected_last_canister - ); - // The function return value; it should be passed through. - let return_value: Result = Ok(42); - // Update the account: - let actual_return_value = storage.db_try_with_account(&account_key, move |account| { - account.canisters.push(canister.clone()); - // The return value should be passed through. - return_value - }); - assert_eq!(Some(return_value), actual_return_value); - // Verify that the new canister is now the last. - assert_eq!( - storage - .db_get_account(&account_key) - .expect("Failed to get account") - .canisters - .last() - .expect("Account should have had canisters"), - &expected_last_canister - ); - } -} - -/// Verifies that the update function `db_try_with_account()` does NOT save changes if -/// the modifying function returns an error. -pub fn assert_update_not_saved_on_error(mut storage: D) -where - D: AccountsDbTrait, -{ - let account_key = vec![1, 2, 3]; - let account_initial_value = toy_account(1, 5); - // Create: - storage.db_insert_account(&account_key, account_initial_value.clone()); - assert_eq!( - storage.db_get_account(&account_key), - Some(account_initial_value.clone()) - ); - // Update: - // Modify by adding a canister but then return an error. - { - // We will add a new canister: - let canister = toy_canister(3, 14159265359); - let expected_last_canister = canister.clone(); - // Verify that it is not already the last... - assert_ne!( - storage - .db_get_account(&account_key) - .expect("Failed to get account") - .canisters - .last() - .expect("Account should have had canisters"), - &expected_last_canister - ); - // The function return value; it should be passed through. - let return_value: Result = Err(42); - // Update the account: - let actual_return_value = storage.db_try_with_account(&account_key, move |account| { - account.canisters.push(canister.clone()); - // The return value should be passed through. 
- return_value - }); - assert_eq!(Some(return_value), actual_return_value); - // Verify that the account has not changed. - assert_eq!( - storage.db_get_account(&account_key).expect("Failed to get account"), - account_initial_value - ); - } -} - -/// Verifies that the update function `db_try_with_account()` returns None if there is no account -/// for the given key. -pub fn assert_update_with_missing_key_returns_none(mut storage: D) -where - D: AccountsDbTrait, -{ - let account_key = vec![1, 2, 3]; - let account_key_2 = vec![1, 2, 3, 4]; - let account = toy_account(1, 5); - // Create: - storage.db_insert_account(&account_key, account.clone()); - assert_eq!(storage.db_get_account(&account_key), Some(account.clone())); - // Updates: - // Modifies by adding a canister - { - // Updates the account: - let actual_return_value: Option> = storage - .db_try_with_account(&account_key_2, move |_account| { - panic!("If the requested account is not found, the update function should not be called.") - }); - assert_eq!(None, actual_return_value); - // Verifies that the one account we created before the update call is unchanged. - assert_eq!( - storage.db_get_account(&account_key).expect("Failed to get account"), - account - ); - } -} - -/// Verifies that the account count is correct. -pub fn assert_account_count_is_correct(mut storage: D) -where - D: AccountsDbTrait, -{ - // We will generate this many accounts in this test - const NUM_TEST_ACCOUNTS: u64 = 10; - - // Loookup key for a test account - fn account_key(account_index: u64) -> [u8; 1] { - [account_index as u8 + 5] - } - - // Local toy account, making sure that the index and canister ID are not the same - fn test_account(account_index: u64) -> Account { - let account_id = 100 - account_index; - let num_canisters = 10 * account_index * account_index; // This takes up to 810 canisters, which should be enough to stress any storage backend. - toy_account(account_id, num_canisters) - } - - // Verify that the provided database is empty. - assert_eq!( - storage.db_accounts_len(), - 0, - "The account database should be empty before we start testing." - ); - for account_index in 0..NUM_TEST_ACCOUNTS { - storage.db_insert_account(&account_key(account_index), test_account(account_index)); - assert_eq!( - account_index + 1, - storage.db_accounts_len(), - "Number of canisters does not correspond to the number of canisters inserted." - ); - } - // Modifying accounts should not change the length. - assert_eq!( - NUM_TEST_ACCOUNTS, - storage.db_accounts_len(), - "Expected to have all the canisters by now." - ); - let response: Result<(), ()> = Ok(()); - storage.db_try_with_account(&[0], move |_| response); - assert_eq!( - NUM_TEST_ACCOUNTS, - storage.db_accounts_len(), - "Modifying a canister should not change the count." - ); - - // Deleting accounts should reduce the length. - // To test, we will delete the first, one in the middle and the last. - storage.db_remove_account(&account_key(0)); - assert_eq!( - NUM_TEST_ACCOUNTS - 1, - storage.db_accounts_len(), - "Deleting one account should reduce the account count by one." - ); - storage.db_remove_account(&account_key(5)); - assert_eq!( - NUM_TEST_ACCOUNTS - 2, - storage.db_accounts_len(), - "Deleting two accounts should reduce the account count by two." - ); - storage.db_remove_account(&account_key(NUM_TEST_ACCOUNTS - 1)); - assert_eq!( - NUM_TEST_ACCOUNTS - 3, - storage.db_accounts_len(), - "Deleting three accounts should reduce the account count by three." 
-    );
-    // Deleting a non-existent account, or an account that has already been deleted, should change nothing.
-    storage.db_remove_account(&account_key(NUM_TEST_ACCOUNTS - 1));
-    assert_eq!(
-        NUM_TEST_ACCOUNTS - 3,
-        storage.db_accounts_len(),
-        "Deleting an account again should not affect the count."
-    );
-    storage.db_remove_account(&account_key(NUM_TEST_ACCOUNTS + 1));
-    assert_eq!(
-        NUM_TEST_ACCOUNTS - 3,
-        storage.db_accounts_len(),
-        "Deleting a non-existent account should not affect the count."
-    );
-}
-
-/// Verifies that the `values()` iterator works correctly.
-pub fn assert_iterates_over_values<D>(mut storage: D)
-where
-    D: AccountsDbTrait,
-{
-    // To store accounts in, say, a BTreeMap requires adding comparison operators to accounts.
-    // These are not generally needed or wanted. What we will do instead is to compare
-    // account IDs.
-    let mut expected_values = BTreeSet::new();
-    for account_id in 0..10 {
-        let account_key = vec![account_id as u8];
-        let account = toy_account(account_id, 5);
-        expected_values.insert(account.principal.unwrap());
-        storage.db_insert_account(&account_key, account.clone());
-    }
-    let actual_values = storage
-        .values()
-        .map(|account| account.principal.unwrap())
-        .collect::<BTreeSet<_>>();
-    assert_eq!(expected_values, actual_values);
-}
-
-/// Verifies that a database, created from a map, returns that same map.
-///
-/// Note: This applies only to some implementations; it is not generally expected of all db
-/// implementations.
-pub fn assert_map_conversions_work<D>()
-where
-    D: AccountsDbTrait + AccountsDbBTreeMapTrait,
-{
-    // Prepare a map of accounts.
-    let accounts = (0..5).fold(BTreeMap::new(), |mut accounts, index| {
-        let account_key = vec![index as u8, 9, 1, 4];
-        let account = toy_account(index, 1);
-        accounts.insert(account_key, account);
-        accounts
-    });
-    // Create a database from the map of accounts.
-    let accounts_db = D::from_map(accounts.clone());
-    // Verify that the database contains all the accounts and no more.
-    for (account_key, account) in &accounts {
-        assert_eq!(accounts_db.db_get_account(account_key), Some(account.clone()));
-    }
-    assert_eq!(accounts_db.db_accounts_len(), accounts.len() as u64);
-    // Verify that the database returns the same map.
-    assert_eq!(accounts_db.as_map(), &accounts);
-}
-
-/// Tests common functionality of `AccountsDbTrait` implementations.
-///
-/// # Arguments
-/// `$implementation`: An expression that evaluates to an empty `AccountsDbTrait` implementation.
-/// Note: The expression will be called repeatedly.
-macro_rules! test_accounts_db {
-    ($implementation:expr) => {
-        #[test]
-        fn map_accounts_db_should_crud() {
-            crate::accounts_store::schema::tests::assert_basic_crud_works($implementation);
-        }
-
-        #[test]
-        fn map_accounts_update_with_happy_path_should_update_account() {
-            crate::accounts_store::schema::tests::assert_update_with_happy_path_works($implementation);
-        }
-
-        #[test]
-        fn map_accounts_update_with_error_path_should_not_change_account() {
-            crate::accounts_store::schema::tests::assert_update_not_saved_on_error($implementation);
-        }
-
-        #[test]
-        fn map_update_with_missing_key_should_return_none() {
-            crate::accounts_store::schema::tests::assert_update_with_missing_key_returns_none($implementation);
-        }
-
-        #[test]
-        fn map_account_counts_should_be_correct() {
-            crate::accounts_store::schema::tests::assert_account_count_is_correct($implementation);
-        }
-
-        #[test]
-        fn map_accounts_db_should_iterate_over_values() {
-            crate::accounts_store::schema::tests::assert_iterates_over_values($implementation);
-        }
-    };
-}
-pub(crate) use test_accounts_db;
diff --git a/rs/backend/src/accounts_store/tests.rs b/rs/backend/src/accounts_store/tests.rs
index 6829de12b58..d841cbec60f 100644
--- a/rs/backend/src/accounts_store/tests.rs
+++ b/rs/backend/src/accounts_store/tests.rs
@@ -1337,5 +1337,3 @@ fn accounts_should_implement_storable() {
     let parsed = Account::from_bytes(bytes);
     assert_eq!(account, parsed);
 }
-
-crate::accounts_store::schema::tests::test_accounts_db!(AccountsStore::default());
diff --git a/rs/backend/src/accounts_store/toy_data.rs b/rs/backend/src/accounts_store/toy_data.rs
index 12c2846723a..cc49f248702 100644
--- a/rs/backend/src/accounts_store/toy_data.rs
+++ b/rs/backend/src/accounts_store/toy_data.rs
@@ -1,8 +1,7 @@
 //! Test data for unit tests and test networks.
 use crate::accounts_store::{
-    schema::AccountsDbTrait, Account, AccountsStore, AttachCanisterRequest, CanisterId, PrincipalId,
-    RegisterHardwareWalletRequest,
+    Account, AccountsStore, AttachCanisterRequest, CanisterId, PrincipalId, RegisterHardwareWalletRequest,
 };

 #[cfg(test)]
@@ -124,7 +123,7 @@ impl AccountsStore {
     /// - The index of the first account created by this call. The account indices are `first...first+num_accounts-1`.
     pub fn create_toy_accounts(&mut self, num_accounts: u64) -> u64 {
         // If we call this function twice, we don't want to create the same accounts again, so we index from the number of existing accounts.
- let num_existing_accounts = self.accounts_db.db_accounts_len(); + let num_existing_accounts = self.accounts_db.len(); let (index_range_start, index_range_end) = (num_existing_accounts, (num_existing_accounts + num_accounts)); // Creates accounts: for toy_account_index in index_range_start..index_range_end { @@ -174,5 +173,5 @@ impl AccountsStore { fn should_be_able_to_create_large_accounts_store() { let num_accounts = 10_000; let accounts_store = AccountsStore::with_toy_accounts(num_accounts); - assert_eq!(num_accounts, accounts_store.accounts_db.db_accounts_len()); + assert_eq!(num_accounts, accounts_store.accounts_db.len()); } diff --git a/rs/backend/src/main.rs b/rs/backend/src/main.rs index 2cdfcde16bd..69398ccc2a9 100644 --- a/rs/backend/src/main.rs +++ b/rs/backend/src/main.rs @@ -12,12 +12,10 @@ use crate::state::{init_state, restore_state, save_state, with_state, with_state use crate::tvl::TvlResponse; use candid::candid_method; -use accounts_store::schema::proxy::AccountsDbAsProxy; pub use candid::{CandidType, Deserialize}; use dfn_candid::{candid, candid_one}; use dfn_core::{over, over_async}; -use ic_cdk::api::call::RejectionCode; -use ic_cdk::{eprintln, println}; +use ic_cdk::println; use ic_cdk_macros::{init, post_upgrade, pre_upgrade}; use icp_ledger::AccountIdentifier; pub use serde::Serialize; @@ -304,43 +302,6 @@ pub fn get_histogram_impl() -> AccountsStoreHistogram { with_state(|state| state.accounts_store.get_histogram()) } -/// Steps the migration. -#[export_name = "canister_update step_migration"] -pub fn step_migration() { - over(candid_one, step_migration_impl); -} - -#[candid_method(update, rename = "step_migration")] -fn step_migration_impl(step_size: u32) { - let caller = ic_cdk::caller(); - let own_canister_id = ic_cdk::api::id(); - if caller != own_canister_id { - dfn_core::api::trap_with("Only the canister itself may call step_migration"); - } - with_state_mut(|s| { - s.accounts_store.step_migration(step_size); - }); -} - -/// Calls `step_migration()` without panicking and rolling back if anything goes wrong. -#[allow(dead_code)] -async fn call_step_migration(step_size: u32) -> Result<(), (RejectionCode, String)> { - ic_cdk::api::call::call(ic_cdk::id(), "step_migration", (step_size,)).await -} - -/// Calls step migration, dropping the step size to 1 on failure. -#[allow(dead_code)] -async fn call_step_migration_with_retries() { - for step_size in [AccountsDbAsProxy::MIGRATION_STEP_SIZE, 1] { - if let Err((code, msg)) = call_step_migration(step_size).await { - println!("WARNING: step_migration failed with step size {step_size}: {code:?} {msg}"); - } else { - return; - } - } - eprintln!("ERROR: step_migration failed."); -} - /// Add an asset to be served by the canister. /// /// Only a whitelist of assets are accepted. diff --git a/rs/backend/src/state/tests.rs b/rs/backend/src/state/tests.rs index 4bbb9763df2..59243be7783 100644 --- a/rs/backend/src/state/tests.rs +++ b/rs/backend/src/state/tests.rs @@ -1,5 +1,5 @@ use crate::{ - accounts_store::{schema::AccountsDbTrait, RegisterHardwareWalletRequest}, + accounts_store::RegisterHardwareWalletRequest, assets::{insert_asset_into_state, Asset}, state::{reset_partitions, PerformanceCounts, State}, stats::Stats,