diff --git a/src/analytics/enrich_rms.rs b/src/analytics/enrich_rms.rs
index 4576f03..73d426e 100644
--- a/src/analytics/enrich_rms.rs
+++ b/src/analytics/enrich_rms.rs
@@ -245,5 +245,4 @@ fn test_rms_pipeline() {
     assert!((s3.rms_24hour > 57.0) && (s3.rms_24hour < 58.0));

     process_shill(&mut swaps);
-    // dbg!(&swaps);
 }
diff --git a/src/analytics/offline_matching.rs b/src/analytics/offline_matching.rs
index 0ee44f7..53c582a 100644
--- a/src/analytics/offline_matching.rs
+++ b/src/analytics/offline_matching.rs
@@ -89,78 +89,10 @@ pub async fn get_date_range_deposits_alt(
             deposited,
         };
         top_deposits.push(d);
-        // dbg!(&d);
     }
     Ok(top_deposits)
 }
-
-// pub async fn get_date_range_deposits(
-//     pool: &Graph,
-//     top_n: u64,
-//     start: DateTime<Utc>,
-//     end: DateTime<Utc>,
-// ) -> Result<Vec<Deposit>> {
-//     let mut top_deposits = vec![];
-
-//     let q = format!(
-//         // r#"
-//         // WITH "0xf57d3968d0bfd5b3120fda88f34310c70bd72033f77422f4407fbbef7c24557a" AS olswap_deposit
-
-//         // // Step 1: Get the list of all depositors
-//         // MATCH (depositor:Account)-[tx:Tx]->(onboard:Account {{address: olswap_deposit}})
-//         // WITH COLLECT(DISTINCT depositor) AS all_depositors, olswap_deposit, tx
-
-//         // // Step 2: Match depositors and amounts within the date range
-
-//         // UNWIND all_depositors AS depositor
-
-//         // OPTIONAL MATCH (depositor)-[tx2:Tx]->(onboard:Account {{address: olswap_deposit}})
-//         // WHERE tx2.block_datetime >= datetime('{}') AND tx2.block_datetime <= datetime('{}')
-
-//         // WITH
-//         // depositor.address AS account,
-//         // COALESCE(SUM(tx2.V7_OlAccountTransfer_amount), 0)/1000000 AS deposit_amount
-//         // RETURN account, toFloat(deposit_amount) as deposited
-//         // ORDER BY deposited DESC
-
-//         // "#,
-//         r#"
-//         WITH "0xf57d3968d0bfd5b3120fda88f34310c70bd72033f77422f4407fbbef7c24557a" as exchange_deposit
-//         MATCH
-//             (u:Account)-[tx:Tx]->(onboard:Account {{address: exchange_deposit}})
-//         WHERE
-//             tx.`block_datetime` > datetime("{}")
-//             AND tx.`block_datetime` < datetime("{}")
-//         WITH
-//             DISTINCT(u),
-//             SUM(tx.V7_OlAccountTransfer_amount) AS totalTxAmount
-//         ORDER BY totalTxAmount DESCENDING
-//         RETURN u.address AS account, toFloat(totalTxAmount) / 1000000 AS deposited
-
-//         "#,
-//         start.to_rfc3339(),
-//         end.to_rfc3339(),
-//         // top_n,
-//     );
-//     let cypher_query = neo4rs::query(&q);
-
-//     // Execute the query
-//     let mut result = pool.execute(cypher_query).await?;
-
-//     // Fetch the first row only
-//     while let Some(r) = result.next().await? {
-//         let account_str = r.get::<String>("account").unwrap_or("unknown".to_string());
-//         let deposited = r.get::<f64>("deposited").unwrap_or(0.0);
-//         let d = Deposit {
-//             account: account_str.parse().unwrap_or(AccountAddress::ZERO),
-//             deposited,
-//         };
-//         top_deposits.push(d);
-//         // dbg!(&d);
-//     }
-//     Ok(top_deposits)
-// }

 pub async fn get_exchange_users(
     pool: &Graph,
     top_n: u64,
@@ -194,7 +126,6 @@ pub async fn get_exchange_users(
         let funded = r.get::<f64>("funded").unwrap_or(0.0);
         let d = MinFunding { user_id, funded };
         min_funding.push(d);
-        // dbg!(&d);
     }
     Ok(min_funding)
 }
@@ -226,7 +157,6 @@ pub async fn get_exchange_users_only_outflows(pool: &Graph) -> Result<Vec<MinFunding>> {
         let funded = r.get::<f64>("funded").unwrap_or(0.0);
         let d = MinFunding { user_id, funded };
         min_funding.push(d);
-        // dbg!(&d);
     }
     Ok(min_funding)
 }
@@ -311,11 +240,6 @@ impl Matching {
             .map(|el| el.user_id)
             .collect();

-        // dbg!(&ids);
-        // let user_ledger = funded.iter().find(|el| {
-        //     // check if we have already identified it
-        //     self.definite.0.get(el.user_id).none()
-        // });
         Ok((*ids.first().unwrap(), *ids.get(1).unwrap()))
     }

@@ -460,7 +384,6 @@ impl Matching {

         let mut eval: Vec<AccountAddress> = vec![];
         deposits.iter().for_each(|el| {
-            // dbg!(&el);
            if el.deposited >= user.funded &&
            // must not already have been tagged impossible
            !pending.impossible.contains(&el.account) &&
diff --git a/src/cypher_templates.rs b/src/cypher_templates.rs
index df43767..93caad6 100644
--- a/src/cypher_templates.rs
+++ b/src/cypher_templates.rs
@@ -230,7 +230,6 @@ use serde_json::Value;
 pub fn to_cypher_object<T: Serialize>(object: &T) -> Result<String> {
     // Serialize the struct to a JSON value
     let serialized_value = serde_json::to_value(object).expect("Failed to serialize");
-    // dbg!(&serialized_value);

     let flattener = smooth_json::Flattener {
         separator: "_",
diff --git a/src/enrich_exchange_onboarding.rs b/src/enrich_exchange_onboarding.rs
index 214d3ea..b7f806e 100644
--- a/src/enrich_exchange_onboarding.rs
+++ b/src/enrich_exchange_onboarding.rs
@@ -105,7 +105,6 @@ pub async fn impl_batch_tx_insert(pool: &Graph, batch_txs: &[ExchangeOnRamp]) ->
     // cypher queries makes it annoying to do a single insert of users and
     // txs
     let cypher_string = ExchangeOnRamp::cypher_batch_link_owner(&list_str);
-    // dbg!(&cypher_string);

     // Execute the query
     let cypher_query = neo4rs::query(&cypher_string);
diff --git a/src/extract_transactions.rs b/src/extract_transactions.rs
index 6e4d177..6055ce2 100644
--- a/src/extract_transactions.rs
+++ b/src/extract_transactions.rs
@@ -42,20 +42,20 @@ pub async fn extract_current_transactions(

         // first increment the block metadata. This assumes the vector is sequential.
         if let Some(block) = tx.try_as_block_metadata() {
-            // check the epochs are incrementing or not
-            if epoch > block.epoch()
-                && round > block.round()
-                && timestamp > block.timestamp_usecs()
-            {
-                dbg!(
-                    epoch,
-                    block.epoch(),
-                    round,
-                    block.round(),
-                    timestamp,
-                    block.timestamp_usecs()
-                );
-            }
+            // // check the epochs are incrementing or not
+            // if epoch > block.epoch()
+            //     && round > block.round()
+            //     && timestamp > block.timestamp_usecs()
+            // {
+            //     dbg!(
+            //         epoch,
+            //         block.epoch(),
+            //         round,
+            //         block.round(),
+            //         timestamp,
+            //         block.timestamp_usecs()
+            //     );
+            // }

             epoch = block.epoch();
             round = block.round();
diff --git a/src/load.rs b/src/load.rs
index 5b85478..5cd6c0c 100644
--- a/src/load.rs
+++ b/src/load.rs
@@ -42,7 +42,13 @@ pub async fn ingest_all(
     let pending = queue::get_queued(pool).await?;
     info!("pending archives: {}", pending.len());

+    // This manifest may be for a .gz file; we should handle that here as well.
     for (_p, m) in archive_map.0.iter() {
+        info!("checking if we need to decompress");
+        let (new_unzip_path, temp) = unzip_temp::maybe_handle_gz(&m.archive_dir)?;
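+        // re-read the manifest info from the (possibly decompressed) path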
+        let mut better_man = ManifestInfo::new(&new_unzip_path);
+        better_man.set_info()?;
+
         println!(
             "\nProcessing: {:?} with archive: {}",
             m.contents,
@@ -60,6 +66,7 @@ pub async fn ingest_all(
                 m.archive_dir.display()
             );
         }
+        drop(temp);
     }
     Ok(())
 }
@@ -70,9 +77,6 @@ pub async fn try_load_one_archive(
     pool: &Graph,
     batch_size: usize,
 ) -> Result<BatchTxReturn> {
-    info!("checking if we need to decompress");
-    let (archive_path, temp) = unzip_temp::maybe_handle_gz(&man.archive_dir)?;
-
     let mut all_results = BatchTxReturn::new();
     match man.contents {
         crate::scan::BundleContent::Unknown => todo!(),
@@ -82,24 +86,23 @@ pub async fn try_load_one_archive(
                     error!("no framework version detected");
                     bail!("could not load archive from manifest");
                 }
-                crate::scan::FrameworkVersion::V5 => extract_v5_snapshot(&archive_path).await?,
+                crate::scan::FrameworkVersion::V5 => extract_v5_snapshot(&man.archive_dir).await?,
                 crate::scan::FrameworkVersion::V6 => {
-                    extract_current_snapshot(&archive_path).await?
+                    extract_current_snapshot(&man.archive_dir).await?
                 }
                 crate::scan::FrameworkVersion::V7 => {
-                    extract_current_snapshot(&archive_path).await?
+                    extract_current_snapshot(&man.archive_dir).await?
                 }
             };
             snapshot_batch(&snaps, pool, batch_size, &man.archive_id).await?;
         }
         crate::scan::BundleContent::Transaction => {
-            let (txs, _) = extract_current_transactions(&archive_path, &man.version).await?;
+            let (txs, _) = extract_current_transactions(&man.archive_dir, &man.version).await?;
             let batch_res =
                 load_tx_cypher::tx_batch(&txs, pool, batch_size, &man.archive_id).await?;
             all_results.increment(&batch_res);
         }
         crate::scan::BundleContent::EpochEnding => todo!(),
     }
-    drop(temp);
     Ok(all_results)
 }
diff --git a/src/load_account_state.rs b/src/load_account_state.rs
index 361aeba..f8ee33c 100644
--- a/src/load_account_state.rs
+++ b/src/load_account_state.rs
@@ -45,9 +45,7 @@ pub async fn snapshot_batch(

         match impl_batch_snapshot_insert(pool, c).await {
             Ok(batch) => {
-                // dbg!(&batch);
                 all_results.increment(&batch);
-                // dbg!(&all_results);
                 queue::update_task(pool, archive_id, true, i).await?;
                 info!("...success");
             }
diff --git a/src/scan.rs b/src/scan.rs
index 14d48d8..9a51d61 100644
--- a/src/scan.rs
+++ b/src/scan.rs
@@ -33,6 +33,44 @@ pub struct ManifestInfo {
 }

 impl ManifestInfo {
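+    /// Build a ManifestInfo for an archive directory; the directory name
+    /// becomes the archive_id, and version/contents start as Unknown.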
+    pub fn new(archive_dir: &Path) -> Self {
+        let archive_id = archive_dir
+            .file_name()
+            .unwrap()
+            .to_str()
+            .unwrap()
+            .to_owned();
+        ManifestInfo {
+            archive_dir: archive_dir.to_path_buf(),
+            archive_id,
+            version: FrameworkVersion::Unknown,
+            contents: BundleContent::Unknown,
+            processed: false,
+        }
+    }
+
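+    /// Fill in the bundle contents and framework version by inspecting
+    /// the manifest files on disk.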
+    pub fn set_info(&mut self) -> Result<()> {
+        self.set_contents()?;
+        self.try_set_framework_version();
+        Ok(())
+    }
+
+    /// find out the type of content in the manifest
+    pub fn set_contents(&mut self) -> Result<()> {
+        // filenames may be in .gz format
+        let pattern = format!(
+            "{}/*.manifest*", // also try .gz
+            self.archive_dir
+                .to_str()
+                .context("cannot parse starting dir")?
+        );
+
+        if let Some(man_file) = glob(&pattern)?.flatten().next() {
+            self.contents = BundleContent::new_from_man_file(&man_file);
+        }
+        Ok(())
+    }
+
     pub fn try_set_framework_version(&mut self) -> FrameworkVersion {
         match self.contents {
             BundleContent::Unknown => return FrameworkVersion::Unknown,
@@ -41,9 +79,8 @@ impl ManifestInfo {
             // first check if the v7 manifest will parse
             if let Ok(_bak) = load_snapshot_manifest(&man_path) {
                 self.version = FrameworkVersion::V7;
-            };
-
-            if v5_read_from_snapshot_manifest(&self.archive_dir.join("state.manifest")).is_ok()
+            } else if v5_read_from_snapshot_manifest(&self.archive_dir.join("state.manifest"))
+                .is_ok()
             {
                 self.version = FrameworkVersion::V5;
             }
@@ -83,6 +120,19 @@ pub enum BundleContent {
     EpochEnding,
 }
 impl BundleContent {
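+    /// Infer the bundle type from the manifest file name.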
+    pub fn new_from_man_file(man_file: &Path) -> Self {
+        let s = man_file.to_str().expect("invalid path");
+        if s.contains("transaction.manifest") {
+            return BundleContent::Transaction;
+        };
+        if s.contains("epoch_ending.manifest") {
+            return BundleContent::EpochEnding;
+        };
+        if s.contains("state.manifest") {
+            return BundleContent::StateSnapshot;
+        };
+        BundleContent::Unknown
+    }
     pub fn filename(&self) -> String {
         match self {
             BundleContent::Unknown => "*.manifest".to_string(),
@@ -110,44 +160,29 @@ pub fn scan_dir_archive(

     let mut archive = BTreeMap::new();

-    for entry in glob(&pattern)? {
-        match entry {
-            Ok(manifest_path) => {
-                let dir = manifest_path
-                    .parent()
-                    .context("no parent dir found")?
-                    .to_owned();
-                let contents = test_content(&manifest_path);
-                let archive_id = dir.file_name().unwrap().to_str().unwrap().to_owned();
-                let mut m = ManifestInfo {
-                    archive_dir: dir.clone(),
-                    archive_id,
-                    version: FrameworkVersion::Unknown,
-                    contents,
-                    processed: false,
-                };
-                m.try_set_framework_version();
-
-                archive.insert(manifest_path.clone(), m);
-            }
-            Err(e) => println!("{:?}", e),
-        }
+    for manifest_path in glob(&pattern)?.flatten() {
+        let archive_dir = manifest_path
+            .parent()
+            .expect("can't find manifest dir, weird");
+        let mut man = ManifestInfo::new(archive_dir);
+        man.set_info()?;
+        archive.insert(archive_dir.to_path_buf(), man);
     }
     Ok(ArchiveMap(archive))
 }

-/// find out the type of content in the manifest
-fn test_content(manifest_path: &Path) -> BundleContent {
-    let s = manifest_path.to_str().expect("path invalid");
-    if s.contains("transaction.manifest") {
-        return BundleContent::Transaction;
-    };
-    if s.contains("epoch_ending.manifest") {
-        return BundleContent::EpochEnding;
-    };
-    if s.contains("state.manifest") {
-        return BundleContent::StateSnapshot;
-    };
-
-    BundleContent::Unknown
-}
+// /// find out the type of content in the manifest
+// fn test_content(manifest_path: &Path) -> BundleContent {
+//     let s = manifest_path.to_str().expect("path invalid");
+//     if s.contains("transaction.manifest") {
+//         return BundleContent::Transaction;
+//     };
+//     if s.contains("epoch_ending.manifest") {
+//         return BundleContent::EpochEnding;
+//     };
+//     if s.contains("state.manifest") {
+//         return BundleContent::StateSnapshot;
+//     };
+
+//     BundleContent::Unknown
+// }
diff --git a/src/unzip_temp.rs b/src/unzip_temp.rs
index 194d59b..1d1c194 100644
--- a/src/unzip_temp.rs
+++ b/src/unzip_temp.rs
@@ -2,8 +2,8 @@ use anyhow::{Context, Result};
 use diem_temppath::TempPath;
 use flate2::read::GzDecoder;
 use glob::glob;
-use libra_storage::read_tx_chunk::load_tx_chunk_manifest;
-use log::{debug, info, warn};
+// use libra_storage::read_tx_chunk::load_tx_chunk_manifest;
+use log::{info, warn};
 use std::{
     fs::File,
     io::copy,
@@ -94,18 +94,19 @@ pub fn decompress_all_gz(parent_dir: &Path, dst_dir: &Path) -> Result<()> {
 fn maybe_fix_manifest(archive_path: &Path) -> Result<()> {
     let pattern = format!("{}/**/*.manifest", archive_path.display());
     for manifest_path in glob(&pattern)?.flatten() {
-        let mut manifest = load_tx_chunk_manifest(&manifest_path)?;
-        debug!("old manifest:\n{:#}", &serde_json::to_string(&manifest)?);
-
-        manifest.chunks.iter_mut().for_each(|e| {
-            if e.proof.contains(".gz") {
-                e.proof = e.proof.trim_end_matches(".gz").to_string();
-            }
-            if e.transactions.contains(".gz") {
-                e.transactions = e.transactions.trim_end_matches(".gz").to_string();
-            }
-        });
-        let literal = serde_json::to_string(&manifest)?;
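+        // NOTE: blunt textual fix; strips every ".gz" occurrence, assuming
+        // the manifest only mentions .gz in chunk file names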
+        let literal = std::fs::read_to_string(&manifest_path)?.replace(".gz", "");
+        // let mut manifest = load_tx_chunk_manifest(&manifest_path)?;
+        // debug!("old manifest:\n{:#}", &serde_json::to_string(&manifest)?);
+
+        // manifest.chunks.iter_mut().for_each(|e| {
+        //     if e.proof.contains(".gz") {
+        //         e.proof = e.proof.trim_end_matches(".gz").to_string();
+        //     }
+        //     if e.transactions.contains(".gz") {
+        //         e.transactions = e.transactions.trim_end_matches(".gz").to_string();
+        //     }
+        // });
+        // let literal = serde_json::to_string(&manifest)?;

         warn!(
             "rewriting .manifest file to remove .gz paths, {}, {:#}",
@@ -124,12 +125,16 @@ pub fn maybe_handle_gz(archive_path: &Path) -> Result<(PathBuf, Option<TempPath>)> {
     // maybe stuff isn't unzipped yet
     let pattern = format!("{}/*.*.gz", archive_path.display());
     if glob(&pattern)?.count() > 0 {
-        info!("Decompressing a temp folder. If you do not want to decompress files on the fly (which are not saved), then you workflow to do a `gunzip -r` before starting this.");
-        let temp_dir = TempPath::new();
+        let mut temp_dir = TempPath::new();
         temp_dir.create_as_dir()?;
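+        // persist() keeps the decompressed files on disk after the TempPath handle drops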
+        temp_dir.persist();
+
         // need to preserve the parent dir name in temp, since the manifest files reference it.
         let dir_name = archive_path.file_name().unwrap().to_str().unwrap();
         let new_archive_path = temp_dir.path().join(dir_name);
+
+        info!("Decompressing to a temp folder. If you do not want to decompress files on the fly (they are not saved), adjust your workflow to run `gunzip -r` before starting this. Temp folder: {}", &new_archive_path.display());
+
         std::fs::create_dir_all(&new_archive_path)?;
         decompress_all_gz(archive_path, &new_archive_path)?;
         // fix the manifest in the TEMP path
diff --git a/src/v5_rpc_to_raw.rs b/src/v5_rpc_to_raw.rs
index cefed4a..9948664 100644
--- a/src/v5_rpc_to_raw.rs
+++ b/src/v5_rpc_to_raw.rs
@@ -5,10 +5,11 @@ use libra_backwards_compatibility::version_five::transaction_view_v5::BytesView;
 pub fn bytesview_to_transaction_v5(b: &BytesView) {
     let tx: TransactionV5 = bcs::from_bytes(b.inner()).unwrap();
     if let TransactionV5::UserTransaction(signed_transaction) = tx {
-        dbg!(&signed_transaction);
+        println!("{:?}", &signed_transaction);
     }
 }

+#[ignore]
 #[test]
 fn test_bytesview_cast() {
     let bytes = hex::decode("00c8336044cdf1878d9738ed0a041b235e0000000000000000030000000000000000000000000000000111546f7765725374617465536372697074731d6d696e657273746174655f636f6d6d69745f62795f6f70657261746f720005107ec16859c24200d8e074809d252ac740212022229f389e88b56c48527f456f68cb765cdb792c4b5d2cac46d489364f61b106ec0aea0a003d17e04e050af8605fbb3e316407ea8fe2ccdf237e1b5a9ef8bf9ec3558e7ae8ffca3468d88e96e8121c3749f6681863b172206320934f8cc394fa5041d8d2d1490021bfdba4840f2f374415b5fdc52e25d79e873558fe090d7e627cc0bfac3594a2001f836d03ba301a45c2ce647f4d072542399a6b1f5ceeabead77905b93ed77d910079946e47b3bce5747328e9594f30f8ff6825a0564cb2468cbc50ac81f0e1990eff89b7eb81b0348288e91fd5a3452d9a0b275d1ecef2c2bab7defbd352472b39f3007e42b3f04a6063b8f7ac7900f01e1bdf325e0bdbac6ea8baf6b1a1dae6f248c3ff919dfca3ec7f26c93ed4d5513d81432a5b9e167fefde6641eb601947c3517dd90066dc5742273263641a3417e83a95eb999a93d089d97686c6b28e1f3f1f38daa4006575fe0882b10e96354ff2bb50b0d9e85dcca13c53763b6d5020b78e81fce36100387f52eb5d9a0ac573d86cf1e762a8d7ccd07af875e3a80ca65f662cb6cd85e4000ea581fa8a4e5a1b68cd7bc4c3605fd792c990a4a735183d728645cb1150fd770021f15ce84fe56c6be017fc566200303d5d7b75a1480d013a744de6725631cb76001cca7768b321c438013c02623241b3864239115e447f81db09769b3c36bfa3e3003b3cda85c084e072959c9d833e88525c9f1259e446db3918ef684434845fb9ccffcca6de724a82fea06b29102ac6fcc97ab857873a92437e21dff917dbfc4e1d69002b06376d6e656eb727b32715fcb9efce084e7a7260dbfe6d438464936796afbd0026de3ddd474aead0ff49f08a311c4117b5ae88886da407bc4e3b9716056cea4500084363abf26dd9181af9418bcbcadaec3625941c6e0a0afbc8df586a8a1d744e0001d8a59df8053336b547b2eabea4faf8eb00e7e6472b026046f719ca5b8fd9a5005d303e6c14375e109c37459f36b72d972b29ede44464a48f1702a9f82e8bd0d8ffc089001820dc51c3e1a1d1a205871a802fc6288b0256d85a5827f70c93f5a179006974fb2130ec2d74c7e8281f1b5e8847a4cdaba0c2abc54ee336ebea6b97febcffeca2e1b10eea280405f4c01de7687bc09d010c5b15b7e75ef8a837d5f412b14100546d651b59c0e50dcce208e1b3a39bc5625ef2797886435928ef79b4a45f77a100081ec5c722fe0611f9428fb92ad267637d163336b2b8db661f1921864392c92f001ce3095ad551c70930e8ab25ce2fa033fe3713e610aa1e694823bec8218d622200110e180b55d3b51dce5cf5c3e7469e41c946e1a5d84b3ae4fd4f4911b9a236a90008e18492ddcbeb44330a6c336d849a420660c1f3f2e812d7a6a76b762aa6508f00011520920a153f325441da1cc1122afaddd742a346dbaaa9347ea7e621252f97001f8caf5edf79baf850632ecc4ce4b0b1b712299917f5408beee17245471ad0b4ffe10f1cd147c03cd217a60b54a40048d3a217328cc8ae90a465b219faa8a69667007a880a6081666b909df985c32efbc00ec53372d6960a8a5be11315d03eda1a2dffc89bc7cbd2c2e9cafee92e76decfa1dd79883b47449d2591f96ce12c08dfc9dd00716971162dd4d476a66c0dba6b210d931a3cfbff80412d60d7a41800f9975f62ffa0e8f4b7d55ef173ef66737c3c2138d51ac50cf0fc45d0e7f00ad66bd214359f005e6447c8afb5f62dd613bf070036d10ffdc69586e21f46e710d88995c4990c4effcdaa9ceb2104a956e8275dad8ab09585bce2aae2f55d73685640df82bc2d85cd0011bf5c5e51a8ff3ea9f8612456f31cc52b32b6bfe8d35fb22ab385f75a8fe2900002fde26ce3f1f2b2d1b2aec2202450106b5fd301029321f7e8f6daefb643fb890005f2a69bbcc02994872f6ca6e255660785b439bc4eb82e466863a070f250cc5300010cd0475df7f2f890703f8a0d08316951b1de551e83a718b5e083d3b63c9ec108000e27070000000008000200000000000010270000000000000100000000000000034741530486796100000000010020d78ee09ad0cfd2d7da6c2cc5fda1d035542d0177cf7aefc34aae63bf39ec127b40efcaeb1d04e32ecfa6845a78825dd878e7b7367853643b412485c5d4cc775ba20b3b4eecf831f3b59273bde833d134763d6f3a2aaebe037112868faedb3f6508").unwrap();
diff --git a/src/warehouse_cli.rs b/src/warehouse_cli.rs
index 7c27f5d..9c82686 100644
--- a/src/warehouse_cli.rs
+++ b/src/warehouse_cli.rs
@@ -1,6 +1,6 @@
 use anyhow::{bail, Result};
 use clap::{Parser, Subcommand};
-use log::{debug, error, info, warn};
+use log::{error, info, warn};
 use neo4rs::Graph;
 use serde_json::json;
 use std::path::PathBuf;
@@ -13,8 +13,8 @@ use crate::{
     load::{ingest_all, try_load_one_archive},
     load_exchange_orders,
     neo4j_init::{self, get_credentials_from_env, PASS_ENV, URI_ENV, USER_ENV},
-    scan::{scan_dir_archive, BundleContent},
-    util,
+    scan::{scan_dir_archive, BundleContent, ManifestInfo},
+    unzip_temp, util,
 };

 #[derive(Parser)]
@@ -156,17 +156,15 @@ impl WarehouseCli {
                 archive_dir,
                 batch_size,
             } => {
-                let am = scan_dir_archive(archive_dir, None)?;
-                debug!("archive map: {:?}", &am);
-                if am.0.is_empty() {
-                    error!("cannot find .manifest file under {}", archive_dir.display());
-                }
-                for (_p, man) in am.0 {
-                    let pool = try_db_connection_pool(self).await?;
-                    neo4j_init::maybe_create_indexes(&pool).await?;
+                info!("checking if we need to decompress");
+                let (archive_dir, temp) = unzip_temp::maybe_handle_gz(archive_dir)?;
+                let mut man = ManifestInfo::new(&archive_dir);
+                man.set_info()?;
+                let pool = try_db_connection_pool(self).await?;
+                neo4j_init::maybe_create_indexes(&pool).await?;

-                    try_load_one_archive(&man, &pool, batch_size.unwrap_or(250)).await?;
-                }
+                try_load_one_archive(&man, &pool, batch_size.unwrap_or(250)).await?;
+                drop(temp);
             }
             Sub::Check { archive_dir } => {
                 let am = scan_dir_archive(archive_dir, None)?;
diff --git a/tests/experimental/load_coin.rs b/tests/experimental/load_coin.rs
index 1fabbe0..517d119 100644
--- a/tests/experimental/load_coin.rs
+++ b/tests/experimental/load_coin.rs
@@ -95,9 +95,7 @@ fn test_format() {
         time: WarehouseTime::default(),
         balance: Some(WarehouseBalance {
             balance: 10,
-            // legacy_balance: Some(10),
         }),
     };
     let s = increment_balance_template(&record);
-    // dbg!(&s);
 }
diff --git a/tests/experimental/query_balance.rs b/tests/experimental/query_balance.rs
index 37d382d..c01e5d5 100644
--- a/tests/experimental/query_balance.rs
+++ b/tests/experimental/query_balance.rs
@@ -22,7 +22,6 @@ pub async fn query_last_balance(
     );

     let row = sqlx::query(&query_template).fetch_one(pool).await?;
-    // dbg!(&row);

     let dummy = WarehouseBalance { balance: 0 };

diff --git a/tests/test_analytics.rs b/tests/test_analytics.rs
index fd8d87f..786bdbc 100644
--- a/tests/test_analytics.rs
+++ b/tests/test_analytics.rs
@@ -133,7 +133,7 @@ async fn test_submit_exchange_ledger() -> Result<()> {

     let mut tracker = BalanceTracker::new();
     tracker.replay_transactions(&mut orders)?;
-    dbg!(&tracker.0.len());
+    let days_records = tracker.0.len();

     assert!(days_records == 47);

@@ -257,8 +257,6 @@ async fn test_offline_analytics_matching() -> Result<()> {
     )
     .await;

-    dbg!(&m.definite);
-
     Ok(())
 }

@@ -273,7 +271,6 @@ async fn test_easy_sellers() -> Result<()> {
     let mut user_list = offline_matching::get_exchange_users_only_outflows(&pool).await?;
     user_list
         .sort_by(|a, b: &offline_matching::MinFunding| b.funded.partial_cmp(&a.funded).unwrap());
-    dbg!(&user_list.len());

     let deposits = offline_matching::get_date_range_deposits_alt(
@@ -290,21 +287,7 @@ async fn test_easy_sellers() -> Result<()> {

     m.match_exact_sellers(&user_list, &deposits, 1.05);

-    dbg!(&m.definite.len());
-
-    dbg!(&m.definite);
     m.write_cache_to_file(&dir)?;
-
-    // let _ = m
-    //     .depth_search_by_top_n_accounts(
-    //         &pool,
-    //         parse_date("2024-01-07"),
-    //         parse_date("2024-03-15"),
-    //         101,
-    //         Some(dir),
-    //     )
-    //     .await;
-    // dbg!(&m.definite.len());
-
     Ok(())
 }
diff --git a/tests/test_json_rescue_v5_load.rs b/tests/test_json_rescue_v5_load.rs
index 66661ca..b2f0472 100644
--- a/tests/test_json_rescue_v5_load.rs
+++ b/tests/test_json_rescue_v5_load.rs
@@ -67,7 +67,7 @@ async fn test_load_queue() -> anyhow::Result<()> {
     let path = fixtures::v5_json_tx_path();

     let tx_count = json_rescue_v5_load::rip_concurrent_limited(&path, &pool, None).await?;
-    dbg!(&tx_count);
+
     assert!(tx_count == 13);

     let tx_count = json_rescue_v5_load::rip_concurrent_limited(&path, &pool, None).await?;
@@ -85,7 +85,6 @@ async fn test_rescue_v5_parse_set_wallet_tx() -> anyhow::Result<()> {
     let path = fixtures::v5_json_tx_path().join("example_set_wallet_type.json");

     let (vec_tx, _, _) = extract_v5_json_rescue(&path)?;
-    dbg!(&vec_tx);

     let c = start_neo4j_container();
     let port = c.get_host_port_ipv4(7687);
@@ -97,7 +96,6 @@ async fn test_rescue_v5_parse_set_wallet_tx() -> anyhow::Result<()> {
         .expect("could start index");

     let res = tx_batch(&vec_tx, &pool, 100, "test-set-wallet").await?;
-    dbg!(&res);

     assert!(res.created_tx > 0);

@@ -114,9 +112,7 @@ async fn test_rescue_v5_parse_set_wallet_tx() -> anyhow::Result<()> {
     let mut result = pool.execute(cypher_query).await?;

     // Fetch the first row only
-    let row = result.next().await?;
-    // let total_tx_count: i64 = row.get("total_tx_count").unwrap();
-    dbg!(&row);
+    let _row = result.next().await?;

     Ok(())
 }
diff --git a/tests/test_json_rescue_v5_parse.rs b/tests/test_json_rescue_v5_parse.rs
index b060daf..efb86b5 100644
--- a/tests/test_json_rescue_v5_parse.rs
+++ b/tests/test_json_rescue_v5_parse.rs
@@ -57,9 +57,7 @@ fn test_rescue_v5_parse_miner_tx() -> anyhow::Result<()> {
     if let TransactionV5::UserTransaction(u) = &t {
         if let TransactionPayload::ScriptFunction(_) = &u.raw_txn.payload {
             println!("ScriptFunction");
-            dbg!(&u.raw_txn.payload);
-            let sf = ScriptFunctionCall::decode(&u.raw_txn.payload);
-            dbg!(&sf);
+            let _sf = ScriptFunctionCall::decode(&u.raw_txn.payload);
         }
     }

@@ -71,7 +69,6 @@ fn test_json_format_example() -> anyhow::Result<()> {
     let p = fixtures::v5_json_tx_path().join("example_create_user.json");

     let (tx, _, _) = extract_v5_json_rescue(&p)?;
-    dbg!(&tx);

     let first = tx.first().unwrap();
     assert!(first.sender.to_hex_literal() == *"0xecaf65add1b785b0495e3099f4045ec0");
@@ -84,7 +81,6 @@ fn test_json_full_file() -> anyhow::Result<()> {
     let p = fixtures::v5_json_tx_path().join("10000-10999.json");
     let (tx, _, _) = extract_v5_json_rescue(&p)?;
-    dbg!(&tx);

     assert!(tx.len() == 4);

     let first = tx.first().unwrap();
diff --git a/tests/test_load_state.rs b/tests/test_load_state.rs
index b83b2ea..ac31fb0 100644
--- a/tests/test_load_state.rs
+++ b/tests/test_load_state.rs
@@ -142,7 +142,6 @@ async fn test_snapshot_entrypoint() -> anyhow::Result<()> {
     // Fetch the first row only
     let row = result.next().await?.unwrap();
     let count: i64 = row.get("count_state_edges").unwrap();
-    dbg!(&count);
     assert!(count == 17338i64);

     Ok(())
diff --git a/tests/test_neo4j_meta.rs b/tests/test_neo4j_meta.rs
index c214e20..e72041d 100644
--- a/tests/test_neo4j_meta.rs
+++ b/tests/test_neo4j_meta.rs
@@ -68,7 +68,6 @@ async fn test_tx_insert() -> Result<()> {
     while let Ok(Some(row)) = result.next().await {
         let node: Node = row.get("p").unwrap();
         let id: String = node.get("address").unwrap();
-        dbg!(&id);
         assert!(id == *"0xa11ce");
     }

@@ -168,8 +167,7 @@ async fn get_remote_neo4j() -> Result<()> {
     let mut rows = g
         .execute("CREATE (p: Account {name: 'hi'})\n RETURN p".into())
         .await?;
-    let r = rows.next().await?;
-    dbg!(&r);
+    let _r = rows.next().await?;

     Ok(())
 }
diff --git a/tests/test_scan_dirs.rs b/tests/test_scan_dirs.rs
index 89845c8..2b352f9 100644
--- a/tests/test_scan_dirs.rs
+++ b/tests/test_scan_dirs.rs
@@ -14,7 +14,6 @@ fn test_scan_dir_for_v5_manifests() -> Result<()> {

     let s = scan_dir_archive(&start_here, None)?;

-    dbg!(&s);
     assert!(s.0.len() == 1);
     let (_k, v) = s.0.first_key_value().unwrap();
     assert!(v.version == FrameworkVersion::V5);
@@ -22,18 +21,6 @@ fn test_scan_dir_for_v5_manifests() -> Result<()> {
     Ok(())
 }

-// #[test]
-
-// fn test_scan_dir_for_v5_final() -> Result<()> {
-//     let start_here = fixtures::v5_fixtures_path();
-//     let s = start_here.parent().unwrap().join("v5_final_epoch/state_ver_141722729.0ab2");
-//     let s = scan_dir_archive(&s, None)?;
-
-//     dbg!(&s);
-
-//     assert!(s.0.len() == 1);
-//     Ok(())
-// }

 #[test]
 fn test_scan_dir_for_v7_manifests() -> Result<()> {
@@ -42,8 +29,7 @@ fn test_scan_dir_for_v7_manifests() -> Result<()> {
     let s = scan_dir_archive(&start_here, None)?;
     let archives = s.0;

-    // TODO: clean up test fixtures. There are cases of .gz and decompressed data.
-    assert!(archives.len() == 7);
+    assert!(archives.len() == 5);

     Ok(())
 }