From 075a3cd7a82f0bb4f054b6e5ba3418c661ca64a0 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 13 Feb 2025 15:43:32 +0100 Subject: [PATCH 01/69] feat(antctl): enable `upnp` by default & replace `--upnp` with `--no-upnp` --- ant-node-manager/src/add_services/config.rs | 2 +- ant-node-manager/src/add_services/mod.rs | 12 ++++++------ ant-node-manager/src/bin/cli/main.rs | 10 +++++----- ant-node-manager/src/cmd/node.rs | 4 ++-- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/ant-node-manager/src/add_services/config.rs b/ant-node-manager/src/add_services/config.rs index b2037bbc86..c0e74a7b44 100644 --- a/ant-node-manager/src/add_services/config.rs +++ b/ant-node-manager/src/add_services/config.rs @@ -194,7 +194,7 @@ pub struct AddNodeServiceOptions { pub rpc_port: Option, pub service_data_dir_path: PathBuf, pub service_log_dir_path: PathBuf, - pub upnp: bool, + pub no_upnp: bool, pub user: Option, pub user_mode: bool, pub version: String, diff --git a/ant-node-manager/src/add_services/mod.rs b/ant-node-manager/src/add_services/mod.rs index 842040b49c..ee4d923ef8 100644 --- a/ant-node-manager/src/add_services/mod.rs +++ b/ant-node-manager/src/add_services/mod.rs @@ -170,21 +170,21 @@ pub async fn add_node( match nat_status { NatDetectionStatus::Public => { - options.upnp = false; + options.no_upnp = true; // UPnP not needed options.home_network = false; } NatDetectionStatus::UPnP => { - options.upnp = true; + options.no_upnp = false; options.home_network = false; } NatDetectionStatus::Private => { - options.upnp = false; + options.no_upnp = true; options.home_network = true; } } debug!( "Auto-setting NAT flags: upnp={}, home_network={}", - options.upnp, options.home_network + !options.no_upnp, options.home_network ); } @@ -208,7 +208,7 @@ pub async fn add_node( rpc_socket_addr, antnode_path: service_antnode_path.clone(), service_user: options.user.clone(), - upnp: options.upnp, + upnp: !options.no_upnp, } .build()?; @@ -248,7 +248,7 @@ pub async fn add_node( pid: None, service_name, status: ServiceStatus::Added, - upnp: options.upnp, + upnp: !options.no_upnp, user: options.user.clone(), user_mode: options.user_mode, version: options.version.clone(), diff --git a/ant-node-manager/src/bin/cli/main.rs b/ant-node-manager/src/bin/cli/main.rs index 5da5e99071..aaff4b0762 100644 --- a/ant-node-manager/src/bin/cli/main.rs +++ b/ant-node-manager/src/bin/cli/main.rs @@ -217,11 +217,11 @@ pub enum SubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. #[clap(long, value_parser = PortRange::parse)] rpc_port: Option, - /// Try to use UPnP to open a port in the home router and allow incoming connections. + /// Disables UPnP. /// - /// This requires a antnode binary built with the 'upnp' feature. + /// By default, antnode will try to use UPnP if available. Use this flag to disable UPnP. #[clap(long, default_value_t = false)] - upnp: bool, + no_upnp: bool, /// Provide a antnode binary using a URL. /// /// The binary must be inside a zip or gzipped tar archive. 
@@ -947,7 +947,7 @@ async fn main() -> Result<()> { rpc_address, rpc_port, url, - upnp, + no_upnp, user, version, }) => { @@ -973,7 +973,7 @@ async fn main() -> Result<()> { rpc_address, rpc_port, path, - upnp, + no_upnp, url, user, version, diff --git a/ant-node-manager/src/cmd/node.rs b/ant-node-manager/src/cmd/node.rs index 3812834811..531894cce3 100644 --- a/ant-node-manager/src/cmd/node.rs +++ b/ant-node-manager/src/cmd/node.rs @@ -57,7 +57,7 @@ pub async fn add( rpc_address: Option, rpc_port: Option, src_path: Option, - upnp: bool, + no_upnp: bool, url: Option, user: Option, version: Option, @@ -136,7 +136,7 @@ pub async fn add( antnode_dir_path: service_data_dir_path.clone(), service_data_dir_path, service_log_dir_path, - upnp, + no_upnp, user: service_user, user_mode, version, From c99b45c19d987d6494fa0e010732a49b57fc39b3 Mon Sep 17 00:00:00 2001 From: grumbach Date: Mon, 17 Feb 2025 12:45:27 +0900 Subject: [PATCH 02/69] feat!: improve names in fs api, make all use payment option --- ant-cli/src/commands/file.rs | 6 +- autonomi/README.md | 2 +- autonomi/examples/put_and_dir_upload.rs | 2 +- .../high_level/files/archive_private.rs | 5 ++ .../client/high_level/files/archive_public.rs | 15 ++-- .../src/client/high_level/files/fs_private.rs | 33 +++++---- .../src/client/high_level/files/fs_public.rs | 32 +++++---- autonomi/src/lib.rs | 6 +- autonomi/src/python.rs | 30 ++++---- autonomi/tests/{fs.rs => files.rs} | 70 ++++++++++++++++++- 10 files changed, 146 insertions(+), 55 deletions(-) rename autonomi/tests/{fs.rs => files.rs} (59%) diff --git a/ant-cli/src/commands/file.rs b/ant-cli/src/commands/file.rs index d9f52c6a13..ecd2640d6a 100644 --- a/ant-cli/src/commands/file.rs +++ b/ant-cli/src/commands/file.rs @@ -10,6 +10,7 @@ use crate::network::NetworkPeers; use crate::utils::collect_upload_summary; use crate::wallet::load_wallet; use autonomi::client::address::addr_to_str; +use autonomi::client::payment::PaymentOption; use autonomi::ClientOperatingStrategy; use autonomi::ResponseQuorum; use color_eyre::eyre::Context; @@ -46,6 +47,7 @@ pub async fn upload( let mut client = crate::actions::connect_to_network_with_config(peers, config).await?; let wallet = load_wallet(client.evm_network())?; + let payment = PaymentOption::Wallet(wallet); let event_receiver = client.enable_client_events(); let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); @@ -65,14 +67,14 @@ pub async fn upload( let local_addr; let archive = if public { let (_cost, xor_name) = client - .dir_and_archive_upload_public(dir_path, &wallet) + .dir_upload_public(dir_path, payment.clone()) .await .wrap_err("Failed to upload file")?; local_addr = addr_to_str(xor_name); local_addr.clone() } else { let (_cost, private_data_access) = client - .dir_and_archive_upload(dir_path, &wallet) + .dir_upload(dir_path, payment) .await .wrap_err("Failed to upload dir and archive")?; diff --git a/autonomi/README.md b/autonomi/README.md index 82ce268378..e539d48255 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -33,7 +33,7 @@ async fn main() -> Result<(), Box> { let _data_fetched = client.data_get_public(&data_addr).await?; // Put and fetch directory from local file system. 
- let dir_addr = client.dir_and_archive_upload_public("files/to/upload".into(), &wallet).await?; + let dir_addr = client.dir_upload_public("files/to/upload".into(), &wallet).await?; client .dir_download_public(dir_addr, "files/downloaded".into()) .await?; diff --git a/autonomi/examples/put_and_dir_upload.rs b/autonomi/examples/put_and_dir_upload.rs index 55ede2d89a..1301285bd8 100644 --- a/autonomi/examples/put_and_dir_upload.rs +++ b/autonomi/examples/put_and_dir_upload.rs @@ -20,7 +20,7 @@ async fn main() -> Result<(), Box> { // Put and fetch directory from local file system. let (_cost, dir_addr) = client - .dir_and_archive_upload_public("files/to/upload".into(), &wallet) + .dir_upload_public("files/to/upload".into(), wallet.into()) .await?; client .dir_download_public(&dir_addr, "files/downloaded".into()) diff --git a/autonomi/src/client/high_level/files/archive_private.rs b/autonomi/src/client/high_level/files/archive_private.rs index 45a2872c69..b30ca1b5c9 100644 --- a/autonomi/src/client/high_level/files/archive_private.rs +++ b/autonomi/src/client/high_level/files/archive_private.rs @@ -34,6 +34,11 @@ pub type PrivateArchiveAccess = DataMapChunk; /// The data maps are stored within this structure instead of uploading them to the network, keeping the data private. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] pub struct PrivateArchive { + /// Path of the file in the directory + /// | DataMap of the chunks of this file + /// | | Metadata of the file + /// | | | + /// V V V map: BTreeMap, } diff --git a/autonomi/src/client/high_level/files/archive_public.rs b/autonomi/src/client/high_level/files/archive_public.rs index a879f7e7f2..ce2f226ee7 100644 --- a/autonomi/src/client/high_level/files/archive_public.rs +++ b/autonomi/src/client/high_level/files/archive_public.rs @@ -13,7 +13,7 @@ use std::{ use ant_networking::time::{Duration, SystemTime, UNIX_EPOCH}; -use crate::{AttoTokens, Wallet}; +use crate::{client::payment::PaymentOption, AttoTokens}; use bytes::Bytes; use serde::{Deserialize, Serialize}; use xor_name::XorName; @@ -36,6 +36,11 @@ pub type ArchiveAddr = XorName; /// to the network, of which the addresses are stored in this archive. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] pub struct PublicArchive { + /// Path of the file in the directory + /// | Data address of the content of the file (points to a DataMap) + /// | | Metadata of the file + /// | | | + /// V V V map: BTreeMap, } @@ -152,21 +157,23 @@ impl Client { /// /// ```no_run /// # use autonomi::{Client, client::{data::DataAddr, files::{Metadata, archive_public::{PublicArchive, ArchiveAddr}}}}; + /// # use autonomi::client::payment::PaymentOption; /// # use std::path::PathBuf; /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// # let client = Client::init().await?; /// # let wallet = todo!(); + /// # let payment = PaymentOption::Wallet(wallet); /// let mut archive = PublicArchive::new(); /// archive.add_file(PathBuf::from("file.txt"), DataAddr::random(&mut rand::thread_rng()), Metadata::new_with_size(0)); - /// let (cost, address) = client.archive_put_public(&archive, &wallet).await?; + /// let (cost, address) = client.archive_put_public(&archive, payment).await?; /// # Ok(()) /// # } /// ``` pub async fn archive_put_public( &self, archive: &PublicArchive, - wallet: &Wallet, + payment_option: PaymentOption, ) -> Result<(AttoTokens, ArchiveAddr), PutError> { let bytes = archive .to_bytes() @@ -178,7 +185,7 @@ impl Client { archive.map().len() ); - let result = self.data_put_public(bytes, wallet.into()).await; + let result = self.data_put_public(bytes, payment_option).await; debug!("Uploaded archive {archive:?} to the network and the address is {result:?}"); result } diff --git a/autonomi/src/client/high_level/files/fs_private.rs b/autonomi/src/client/high_level/files/fs_private.rs index 88998e64be..2ac45d2d3d 100644 --- a/autonomi/src/client/high_level/files/fs_private.rs +++ b/autonomi/src/client/high_level/files/fs_private.rs @@ -18,10 +18,11 @@ use super::archive_private::{PrivateArchive, PrivateArchiveAccess}; use super::{get_relative_file_path_from_abs_file_and_folder_path, FILE_UPLOAD_BATCH_SIZE}; use super::{DownloadError, UploadError}; +use crate::client::payment::PaymentOption; use crate::client::PutError; use crate::client::{data_types::chunk::DataMapChunk, utils::process_tasks_with_max_concurrency}; use crate::self_encryption::encrypt; -use crate::{AttoTokens, Client, Wallet}; +use crate::{AttoTokens, Client}; use ant_protocol::storage::{Chunk, DataTypes}; use bytes::Bytes; use std::path::PathBuf; @@ -58,12 +59,14 @@ impl Client { Ok(()) } - /// Upload a directory to the network. The directory is recursively walked and each file is uploaded to the network. + /// Upload the content of all files in a directory to the network. + /// The directory is recursively walked and each file is uploaded to the network. + /// /// The data maps of these (private) files are not uploaded but returned within the [`PrivateArchive`] return type. - pub async fn dir_upload( + pub async fn dir_content_upload( &self, dir_path: PathBuf, - wallet: &Wallet, + payment_option: PaymentOption, ) -> Result<(AttoTokens, PrivateArchive), UploadError> { info!("Uploading directory as private: {dir_path:?}"); let start = tokio::time::Instant::now(); @@ -155,7 +158,7 @@ impl Client { .pay_for_content_addrs( DataTypes::Chunk, combined_xor_names.into_iter(), - wallet.into(), + payment_option, ) .await .inspect_err(|err| error!("Error paying for data: {err:?}")) @@ -224,16 +227,18 @@ impl Client { Ok((total_cost, private_archive)) } - /// Same as [`Client::dir_upload`] but also uploads the archive (privately) to the network. 
+ /// Same as [`Client::dir_content_upload`] but also uploads the archive (privately) to the network. /// /// Returns the [`PrivateArchiveAccess`] allowing the private archive to be downloaded from the network. - pub async fn dir_and_archive_upload( + pub async fn dir_upload( &self, dir_path: PathBuf, - wallet: &Wallet, + payment_option: PaymentOption, ) -> Result<(AttoTokens, PrivateArchiveAccess), UploadError> { - let (cost1, archive) = self.dir_upload(dir_path, wallet).await?; - let (cost2, archive_addr) = self.archive_put(&archive, wallet.into()).await?; + let (cost1, archive) = self + .dir_content_upload(dir_path, payment_option.clone()) + .await?; + let (cost2, archive_addr) = self.archive_put(&archive, payment_option).await?; let total_cost = cost1.checked_add(cost2).unwrap_or_else(|| { error!("Total cost overflowed: {cost1:?} + {cost2:?}"); cost1 @@ -241,12 +246,12 @@ impl Client { Ok((total_cost, archive_addr)) } - /// Upload a private file to the network. + /// Upload the content of a private file to the network. /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns [`DataMapChunk`] (pointing to the datamap) - pub async fn file_upload( + pub async fn file_content_upload( &self, path: PathBuf, - wallet: &Wallet, + payment_option: PaymentOption, ) -> Result<(AttoTokens, DataMapChunk), UploadError> { info!("Uploading file: {path:?}"); #[cfg(feature = "loud")] @@ -254,7 +259,7 @@ impl Client { let data = tokio::fs::read(path).await?; let data = Bytes::from(data); - let (total_cost, addr) = self.data_put(data, wallet.into()).await?; + let (total_cost, addr) = self.data_put(data, payment_option).await?; debug!("Uploaded file successfully in the privateAchive: {addr:?}"); Ok((total_cost, addr)) } diff --git a/autonomi/src/client/high_level/files/fs_public.rs b/autonomi/src/client/high_level/files/fs_public.rs index f4c861516f..d23da1f6bc 100644 --- a/autonomi/src/client/high_level/files/fs_public.rs +++ b/autonomi/src/client/high_level/files/fs_public.rs @@ -11,10 +11,11 @@ use super::{DownloadError, FileCostError, Metadata, UploadError}; use crate::client::high_level::files::{ get_relative_file_path_from_abs_file_and_folder_path, FILE_UPLOAD_BATCH_SIZE, }; +use crate::client::payment::PaymentOption; use crate::client::{high_level::data::DataAddr, utils::process_tasks_with_max_concurrency}; use crate::client::{Client, PutError}; use crate::self_encryption::encrypt; -use crate::{Amount, AttoTokens, Wallet}; +use crate::{Amount, AttoTokens}; use ant_networking::time::{Duration, SystemTime}; use ant_protocol::storage::{Chunk, DataTypes}; use bytes::Bytes; @@ -57,15 +58,16 @@ impl Client { Ok(()) } - /// Upload a directory to the network. The directory is recursively walked and each file is uploaded to the network. + /// Upload the content of all files in a directory to the network. + /// The directory is recursively walked and each file is uploaded to the network. /// /// The data maps of these files are uploaded on the network, making the individual files publicly available. /// /// This returns, but does not upload (!),the [`PublicArchive`] containing the data maps of the uploaded files. 
- pub async fn dir_upload_public( + pub async fn dir_content_upload_public( &self, dir_path: PathBuf, - wallet: &Wallet, + payment_option: PaymentOption, ) -> Result<(AttoTokens, PublicArchive), UploadError> { info!("Uploading directory: {dir_path:?}"); let start = tokio::time::Instant::now(); @@ -161,7 +163,7 @@ impl Client { .pay_for_content_addrs( DataTypes::Chunk, combined_xor_names.into_iter(), - wallet.into(), + payment_option, ) .await .inspect_err(|err| error!("Error paying for data: {err:?}")) @@ -238,16 +240,18 @@ impl Client { Ok((total_cost, public_archive)) } - /// Same as [`Client::dir_upload_public`] but also uploads the archive to the network. + /// Same as [`Client::dir_content_upload_public`] but also uploads the archive to the network. /// /// Returns the [`ArchiveAddr`] of the uploaded archive. - pub async fn dir_and_archive_upload_public( + pub async fn dir_upload_public( &self, dir_path: PathBuf, - wallet: &Wallet, + payment_option: PaymentOption, ) -> Result<(AttoTokens, ArchiveAddr), UploadError> { - let (cost1, archive) = self.dir_upload_public(dir_path, wallet).await?; - let (cost2, archive_addr) = self.archive_put_public(&archive, wallet).await?; + let (cost1, archive) = self + .dir_content_upload_public(dir_path, payment_option.clone()) + .await?; + let (cost2, archive_addr) = self.archive_put_public(&archive, payment_option).await?; let total_cost = cost1.checked_add(cost2).unwrap_or_else(|| { error!("Total cost overflowed: {cost1:?} + {cost2:?}"); cost1 @@ -255,12 +259,12 @@ impl Client { Ok((total_cost, archive_addr)) } - /// Upload a file to the network. + /// Upload the content of a file to the network. /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns DataAddr (pointing to the datamap) - pub async fn file_upload_public( + pub async fn file_content_upload_public( &self, path: PathBuf, - wallet: &Wallet, + payment_option: PaymentOption, ) -> Result<(AttoTokens, DataAddr), UploadError> { info!("Uploading file: {path:?}"); #[cfg(feature = "loud")] @@ -268,7 +272,7 @@ impl Client { let data = tokio::fs::read(path.clone()).await?; let data = Bytes::from(data); - let (cost, addr) = self.data_put_public(data, wallet.into()).await?; + let (cost, addr) = self.data_put_public(data, payment_option).await?; debug!("File {path:?} uploaded to the network at {addr:?}"); Ok((cost, addr)) } diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 50a11d1460..6866ead0f3 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -12,6 +12,7 @@ //! //! ```no_run //! use autonomi::{Bytes, Client, Wallet}; +//! use autonomi::client::payment::PaymentOption; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -20,13 +21,14 @@ //! // Default wallet of testnet. //! let key = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; //! let wallet = Wallet::new_from_private_key(Default::default(), key)?; +//! let payment = PaymentOption::Wallet(wallet); //! //! // Put and fetch data. -//! let (cost, data_addr) = client.data_put_public(Bytes::from("Hello, World"), (&wallet).into()).await?; +//! let (cost, data_addr) = client.data_put_public(Bytes::from("Hello, World"), payment.clone()).await?; //! let _data_fetched = client.data_get_public(&data_addr).await?; //! //! // Put and fetch directory from local file system. -//! let (cost, dir_addr) = client.dir_and_archive_upload_public("files/to/upload".into(), &wallet).await?; +//! let (cost, dir_addr) = client.dir_upload_public("files/to/upload".into(), payment).await?; //! 
client.dir_download_public(&dir_addr, "files/downloaded".into()).await?; //! //! Ok(()) diff --git a/autonomi/src/python.rs b/autonomi/src/python.rs index f313ec7e3e..fd9333dad0 100644 --- a/autonomi/src/python.rs +++ b/autonomi/src/python.rs @@ -423,13 +423,13 @@ impl PyClient { &self, py: Python<'a>, archive: PyPublicArchive, - wallet: PyWallet, + payment: PyPaymentOption, ) -> PyResult> { let client = self.inner.clone(); future_into_py(py, async move { let (cost, addr) = client - .archive_put_public(&archive.inner, &wallet.inner) + .archive_put_public(&archive.inner, payment.inner) .await .map_err(|e| { PyRuntimeError::new_err(format!("Failed to put public archive: {e}")) @@ -494,17 +494,17 @@ impl PyClient { /// Upload a directory to the network. The directory is recursively walked and each file is uploaded to the network. /// The data maps of these (private) files are not uploaded but returned within the PrivateArchive return type. - fn dir_upload<'a>( + fn dir_content_upload<'a>( &self, py: Python<'a>, dir_path: PathBuf, - wallet: PyWallet, + payment: PyPaymentOption, ) -> PyResult> { let client = self.inner.clone(); future_into_py(py, async move { let (cost, archive) = client - .dir_upload(dir_path, &wallet.inner) + .dir_content_upload(dir_path, payment.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to upload directory: {e}")))?; Ok((cost.to_string(), PyPrivateArchive { inner: archive })) @@ -535,17 +535,17 @@ impl PyClient { /// Same as `dir_upload` but also uploads the archive (privately) to the network. /// /// Returns the data map allowing the private archive to be downloaded from the network. - fn dir_and_archive_upload<'a>( + fn dir_upload<'a>( &self, py: Python<'a>, dir_path: PathBuf, - wallet: PyWallet, + payment: PyPaymentOption, ) -> PyResult> { let client = self.inner.clone(); future_into_py(py, async move { let (cost, data_map) = client - .dir_and_archive_upload(dir_path, &wallet.inner) + .dir_upload(dir_path, payment.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to upload directory: {e}")))?; Ok((cost.to_string(), PyDataMapChunk { inner: data_map })) @@ -642,18 +642,18 @@ impl PyClient { /// Upload a directory as a public archive to the network. /// Returns the network address where the archive is stored. - fn dir_and_archive_upload_public<'a>( + fn dir_upload_public<'a>( &self, py: Python<'a>, dir_path: PathBuf, - wallet: &PyWallet, + payment: &PyPaymentOption, ) -> PyResult> { let client = self.inner.clone(); - let wallet = wallet.inner.clone(); + let payment = payment.inner.clone(); future_into_py(py, async move { let (cost, addr) = client - .dir_and_archive_upload_public(dir_path, &wallet) + .dir_upload_public(dir_path, payment) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to upload directory: {e}")))?; Ok((cost.to_string(), crate::client::address::addr_to_str(addr))) @@ -685,17 +685,17 @@ impl PyClient { /// The data maps of these files are uploaded on the network, making the individual files publicly available. /// /// This returns, but does not upload (!),the `PublicArchive` containing the data maps of the uploaded files. 
- fn dir_upload_public<'a>( + fn dir_content_upload_public<'a>( &self, py: Python<'a>, dir_path: PathBuf, - wallet: PyWallet, + payment: PyPaymentOption, ) -> PyResult> { let client = self.inner.clone(); future_into_py(py, async move { let (cost, archive) = client - .dir_upload_public(dir_path, &wallet.inner) + .dir_content_upload_public(dir_path, payment.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to upload directory: {e}")))?; Ok((cost.to_string(), PyPublicArchive { inner: archive })) diff --git a/autonomi/tests/fs.rs b/autonomi/tests/files.rs similarity index 59% rename from autonomi/tests/fs.rs rename to autonomi/tests/files.rs index ebeba12f7a..202e7c5aac 100644 --- a/autonomi/tests/fs.rs +++ b/autonomi/tests/files.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use ant_logging::LogBuilder; +use autonomi::client::payment::PaymentOption; use autonomi::Client; use eyre::Result; use serial_test::serial; @@ -30,7 +31,7 @@ async fn dir_upload_download() -> Result<()> { let wallet = get_funded_wallet(); let (_cost, addr) = client - .dir_and_archive_upload_public("tests/file/test_dir".into(), &wallet) + .dir_upload_public("tests/file/test_dir".into(), wallet.into()) .await?; sleep(Duration::from_secs(10)).await; @@ -86,7 +87,7 @@ async fn file_into_vault() -> Result<()> { let client_sk = bls::SecretKey::random(); let (_cost, addr) = client - .dir_and_archive_upload_public("tests/file/test_dir".into(), &wallet) + .dir_upload_public("tests/file/test_dir".into(), wallet.clone().into()) .await?; sleep(Duration::from_secs(2)).await; @@ -111,3 +112,68 @@ async fn file_into_vault() -> Result<()> { Ok(()) } + +#[tokio::test] +#[serial] +async fn file_advanced_use() -> Result<()> { + let _log_appender_guard = + LogBuilder::init_single_threaded_tokio_test("file_advanced_use", false); + + let client = Client::init_local().await?; + let wallet = get_funded_wallet(); + let payment_option = PaymentOption::Wallet(wallet); + + // upload a directory + let (cost, mut archive) = client + .dir_content_upload("tests/file/test_dir/dir_a".into(), payment_option.clone()) + .await?; + println!("cost to upload private directory: {cost:?}"); + println!("archive: {archive:#?}"); + + // upload an additional file separately + let (cost, file_datamap) = client + .file_content_upload( + "tests/file/test_dir/example_file_b".into(), + payment_option.clone(), + ) + .await?; + println!("cost to upload additional file: {cost:?}"); + + // add that file to the archive with custom metadata + let custom_metadata = autonomi::client::files::Metadata { + created: 42, + modified: 84, + size: 126, + extra: Some("custom metadata".to_string()), + }; + archive.add_file("example_file_b".into(), file_datamap, custom_metadata); + + // upload an additional file separately + let (cost, file_datamap) = client + .file_content_upload( + "tests/file/test_dir/example_file_a".into(), + payment_option.clone(), + ) + .await?; + println!("cost to upload additional file: {cost:?}"); + + // add that file to the archive with custom metadata + let custom_metadata = autonomi::client::files::Metadata::new_with_size(126); + archive.add_file("example_file_a".into(), file_datamap, custom_metadata); + + // upload the archive + let (cost, archive_datamap) = client.archive_put(&archive, payment_option.clone()).await?; + println!("cost to upload archive: {cost:?}"); + + // download the entire directory + let dest = "tests/file/test_dir_fetched2"; + client.dir_download(&archive_datamap, 
dest.into()).await?; + + // compare the two directories + assert_eq!( + compute_dir_sha256("tests/file/test_dir")?, + compute_dir_sha256(dest)?, + ); + + Ok(()) +} From aede5aa640fbb5c99218b1c63d1a700acce3089c Mon Sep 17 00:00:00 2001 From: qima Date: Mon, 17 Feb 2025 22:59:00 +0800 Subject: [PATCH 03/69] chore(CI): remove un-necessary duplicated CI tests --- .github/workflows/merge.yml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 9e766cb84e..69cdc9f4d4 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -152,13 +152,17 @@ jobs: timeout-minutes: 5 run: cargo test --release --package ant-networking --features="open-metrics" can_store_after_restart - - name: Run network tests - timeout-minutes: 25 - run: cargo test --release --package ant-networking --features="open-metrics" -- --skip can_store_after_restart - - - name: Run network tests - timeout-minutes: 5 - run: cargo test --release --package ant-networking --features="open-metrics" can_store_after_restart + # Same set of tests shall be executed with `encrypt-records` flag enabled. + # With now changed to `always carry out encryption`, no need to re-run the same set again. + # Re-enable the following block once default behaviour changed. + # + # - name: Run network tests + # timeout-minutes: 25 + # run: cargo test --release --package ant-networking --features="open-metrics, encrypt-records" -- --skip can_store_after_restart + + # - name: Run network tests + # timeout-minutes: 5 + # run: cargo test --release --package ant-networking --features="open-metrics, encrypt-records" can_store_after_restart - name: Run protocol tests timeout-minutes: 25 From c40223d0f4f94f5570f708934b2d7a961fc6ed3e Mon Sep 17 00:00:00 2001 From: qima Date: Mon, 17 Feb 2025 20:59:33 +0800 Subject: [PATCH 04/69] fix(node): avoid dead-lock on record_store cache access --- ant-networking/src/event/mod.rs | 9 ++++++++- ant-networking/src/record_store.rs | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index 2ad32c079f..0560a8d28a 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -195,7 +195,14 @@ impl Debug for NetworkEvent { write!(f, "NetworkEvent::TerminateNode({reason:?})") } NetworkEvent::FailedToFetchHolders(bad_nodes) => { - write!(f, "NetworkEvent::FailedToFetchHolders({bad_nodes:?})") + let pretty_log: Vec<_> = bad_nodes + .iter() + .map(|(peer_id, record_key)| { + let pretty_key = PrettyPrintRecordKey::from(record_key); + (peer_id, pretty_key) + }) + .collect(); + write!(f, "NetworkEvent::FailedToFetchHolders({pretty_log:?})") } NetworkEvent::QuoteVerification { quotes } => { write!( diff --git a/ant-networking/src/record_store.rs b/ant-networking/src/record_store.rs index ea9222eb73..f448c44dce 100644 --- a/ant-networking/src/record_store.rs +++ b/ant-networking/src/record_store.rs @@ -851,7 +851,7 @@ impl RecordStore for NodeRecordStore { let cached_record = self.records_cache.get(k); // first return from FIFO cache if existing there if let Some((record, _timestamp)) = cached_record { - return Some(Cow::Borrowed(record)); + return Some(Cow::Owned(record.clone())); } if !self.records.contains_key(k) { From ea2b744fd9aeca6d89b679752cc35babdaf02c7f Mon Sep 17 00:00:00 2001 From: qima Date: Mon, 17 Feb 2025 21:27:23 +0800 Subject: [PATCH 05/69] chore(node): not fetch from network when replicate fetch failed --- 
ant-node/src/replication.rs | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/ant-node/src/replication.rs b/ant-node/src/replication.rs index 3d64c894cf..534dbd947e 100644 --- a/ant-node/src/replication.rs +++ b/ant-node/src/replication.rs @@ -8,11 +8,10 @@ use crate::{error::Result, node::Node}; use ant_evm::ProofOfPayment; -use ant_networking::{GetRecordCfg, Network, ResponseQuorum}; -use ant_protocol::storage::DataTypes; +use ant_networking::Network; use ant_protocol::{ messages::{Query, QueryResponse, Request, Response}, - storage::ValidationType, + storage::{DataTypes, ValidationType}, NetworkAddress, PrettyPrintRecordKey, }; use libp2p::{ @@ -69,22 +68,8 @@ impl Node { let record = if let Some(record_content) = record_opt { Record::new(key, record_content.to_vec()) } else { - debug!( - "Can not fetch record {pretty_key:?} from node {holder:?}, fetching from the network" - ); - let get_cfg = GetRecordCfg { - get_quorum: ResponseQuorum::One, - retry_strategy: Default::default(), - target_record: None, - expected_holders: Default::default(), - }; - match node.network().get_record_from_network(key, &get_cfg).await { - Ok(record) => record, - Err(err) => { - error!("During replication fetch of {pretty_key:?}, failed in re-attempt of get from network {err:?}"); - return; - } - } + debug!("Can not fetch record {pretty_key:?} from node {holder:?}"); + return; }; debug!( From 53e87cf9242d91df46e2fe0b16169d52b8989a31 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 19 Feb 2025 05:19:27 +0800 Subject: [PATCH 06/69] chore(node): improve fetch replicate record procedure logging --- ant-node/src/replication.rs | 48 ++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/ant-node/src/replication.rs b/ant-node/src/replication.rs index 534dbd947e..b246222d6d 100644 --- a/ant-node/src/replication.rs +++ b/ant-node/src/replication.rs @@ -46,39 +46,33 @@ impl Node { requester, key: NetworkAddress::from_record_key(&key), }); - let record_opt = if let Ok(resp) = node.network().send_request(req, holder).await { - match resp { - Response::Query(QueryResponse::GetReplicatedRecord(result)) => match result - { - Ok((_holder, record_content)) => Some(record_content), - Err(err) => { - debug!("Failed fetch record {pretty_key:?} from node {holder:?}, with error {err:?}"); - None - } - }, - other => { - debug!("Cannot fetch record {pretty_key:?} from node {holder:?}, with response {other:?}"); - None + + let record = match node.network().send_request(req, holder).await { + Ok(Response::Query(QueryResponse::GetReplicatedRecord(result))) => match result + { + Ok((_holder, record_content)) => { + debug!("Fecthed record {pretty_key:?} from holder {holder:?}"); + Record::new(key, record_content.to_vec()) } + Err(err) => { + info!("Failed fetch record {pretty_key:?} from holder {holder:?}, with error {err:?}"); + return; + } + }, + Ok(other) => { + info!("Cannot fetch record {pretty_key:?} from holder {holder:?}, with response {other:?}"); + return; + } + Err(err) => { + info!("Failed to send request to fetch record {pretty_key:?} from holder {holder:?}, with error {err:?}"); + return; } - } else { - None - }; - - let record = if let Some(record_content) = record_opt { - Record::new(key, record_content.to_vec()) - } else { - debug!("Can not fetch record {pretty_key:?} from node {holder:?}"); - return; }; - debug!( - "Got Replication Record {pretty_key:?} from network, validating and storing it" - ); if let Err(err) = 
node.store_replicated_in_record(record).await { - error!("During store replication fetched {pretty_key:?}, got error {err:?}"); + error!("During store replication fetched {pretty_key:?} from holder {holder:?}, got error {err:?}"); } else { - debug!("Completed storing Replication Record {pretty_key:?} from network."); + debug!("Completed storing Replication Record {pretty_key:?} from holder {holder:?}."); } }); } From 4e5c3dfab44a592e0f20b5a7d005d51ec84ee2f5 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 19 Feb 2025 05:23:42 +0800 Subject: [PATCH 07/69] chore(node): lower parallel replication fetches --- ant-networking/src/replication_fetcher.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ant-networking/src/replication_fetcher.rs b/ant-networking/src/replication_fetcher.rs index 0a65b35684..ae0aab42ab 100644 --- a/ant-networking/src/replication_fetcher.rs +++ b/ant-networking/src/replication_fetcher.rs @@ -14,14 +14,14 @@ use ant_protocol::{ NetworkAddress, PrettyPrintRecordKey, }; use libp2p::{ - kad::{KBucketDistance as Distance, RecordKey, K_VALUE}, + kad::{KBucketDistance as Distance, RecordKey}, PeerId, }; use std::collections::{hash_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}; use tokio::{sync::mpsc, time::Duration}; // Max parallel fetches that can be undertaken at the same time. -const MAX_PARALLEL_FETCH: usize = K_VALUE.get(); +const MAX_PARALLEL_FETCH: usize = 5; // The duration after which a peer will be considered failed to fetch data from, // if no response got from that peer. From fb98b051891cadce34ae7a552c13d6ab11b8ef9e Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 19 Feb 2025 05:44:25 +0800 Subject: [PATCH 08/69] fix(node): issues comes in too quick shall not trigger extra action --- ant-networking/src/cmd.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ant-networking/src/cmd.rs b/ant-networking/src/cmd.rs index bc42fa4fbf..937172ed7b 100644 --- a/ant-networking/src/cmd.rs +++ b/ant-networking/src/cmd.rs @@ -1024,6 +1024,8 @@ impl SwarmDriver { if is_new_issue { issue_vec.push((issue, Instant::now())); + } else { + return; } // Only consider candidate as a bad node when: From f80ff0b905456a43f462d177e420027ac7349af3 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 19 Feb 2025 05:45:49 +0800 Subject: [PATCH 09/69] chore(node): disable balck_list --- ant-networking/src/cmd.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ant-networking/src/cmd.rs b/ant-networking/src/cmd.rs index 937172ed7b..7dd613dc00 100644 --- a/ant-networking/src/cmd.rs +++ b/ant-networking/src/cmd.rs @@ -1039,9 +1039,12 @@ impl SwarmDriver { // If it is a connection issue, we don't need to consider it as a bad node if matches!(issue, NodeIssue::ConnectionIssue) { is_connection_issue = true; - } else { - *is_bad = true; } + // TODO: disable black_list currently. + // re-enable once got more statistics from large scaled network + // else { + // *is_bad = true; + // } new_bad_behaviour = Some(issue.clone()); info!("Peer {peer_id:?} accumulated {issue_counts} times of issue {issue:?}. 
Consider it as a bad node now."); // Once a bad behaviour detected, no point to continue From 9df3640c69d96c627fe83d966ab635cf8cac7ce1 Mon Sep 17 00:00:00 2001 From: qima Date: Tue, 18 Feb 2025 19:31:56 +0800 Subject: [PATCH 10/69] chore(node): improve failed replicate fetch log format --- ant-node/src/node.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ant-node/src/node.rs b/ant-node/src/node.rs index 86ad2f47c4..e0c2fbb660 100644 --- a/ant-node/src/node.rs +++ b/ant-node/src/node.rs @@ -531,10 +531,17 @@ impl Node { NetworkEvent::FailedToFetchHolders(bad_nodes) => { event_header = "FailedToFetchHolders"; let network = self.network().clone(); + let pretty_log: Vec<_> = bad_nodes + .iter() + .map(|(peer_id, record_key)| { + let pretty_key = PrettyPrintRecordKey::from(record_key); + (peer_id, pretty_key) + }) + .collect(); // Note: this log will be checked in CI, and expecting `not appear`. // any change to the keyword `failed to fetch` shall incur // correspondent CI script change as well. - debug!("Received notification from replication_fetcher, notifying {bad_nodes:?} failed to fetch replication copies from."); + debug!("Received notification from replication_fetcher, notifying {pretty_log:?} failed to fetch replication copies from."); let _handle = spawn(async move { for (peer_id, record_key) in bad_nodes { // Obsoleted fetch request (due to flooded in fresh replicates) could result From fa3190b98555cf36c7ba2a265cc7f63e5615d705 Mon Sep 17 00:00:00 2001 From: qima Date: Tue, 18 Feb 2025 21:05:06 +0800 Subject: [PATCH 11/69] fix(test): avoid occasional failure --- ant-networking/src/record_store.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ant-networking/src/record_store.rs b/ant-networking/src/record_store.rs index f448c44dce..3a0ea1c2ee 100644 --- a/ant-networking/src/record_store.rs +++ b/ant-networking/src/record_store.rs @@ -1733,8 +1733,11 @@ mod tests { // Add records up to cache size cache.push_back(record1.key.clone(), record1.clone()); + sleep(Duration::from_millis(1)).await; cache.push_back(record2.key.clone(), record2.clone()); + sleep(Duration::from_millis(1)).await; cache.push_back(record3.key.clone(), record3.clone()); + sleep(Duration::from_millis(1)).await; // Verify all records are present assert!(cache.get(&record1.key).is_some()); From 8e5a930eb3bbe5e6ad788ada4a4d87214ff3e2d2 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 18 Feb 2025 20:16:26 +0530 Subject: [PATCH 12/69] fix(relay): remove relay clients if we have no more connections --- ant-networking/src/event/swarm.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ant-networking/src/event/swarm.rs b/ant-networking/src/event/swarm.rs index 599a97ab91..d029997056 100644 --- a/ant-networking/src/event/swarm.rs +++ b/ant-networking/src/event/swarm.rs @@ -107,9 +107,11 @@ impl SwarmDriver { renewed: _, } => { self.connected_relay_clients.insert(src_peer_id); + info!("Relay reservation accepted from {src_peer_id:?}. Relay client count: {}", self.connected_relay_clients.len()); } libp2p::relay::Event::ReservationTimedOut { src_peer_id } => { self.connected_relay_clients.remove(&src_peer_id); + info!("Relay reservation timed out from {src_peer_id:?}. 
Relay client count: {}", self.connected_relay_clients.len()); } _ => {} } @@ -264,6 +266,14 @@ impl SwarmDriver { event_string = "ConnectionClosed"; debug!(%peer_id, ?connection_id, ?cause, num_established, "ConnectionClosed: {}", endpoint_str(&endpoint)); let _ = self.live_connected_peers.remove(&connection_id); + + if num_established == 0 && self.connected_relay_clients.remove(&peer_id) { + info!( + "Relay client has been disconnected: {peer_id:?}. Relay client count: {}", + self.connected_relay_clients.len() + ); + } + self.record_connection_metrics(); } SwarmEvent::OutgoingConnectionError { From e00749d649d21eaa1575fe8080014705d6a96a13 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Thu, 20 Feb 2025 02:12:16 +0530 Subject: [PATCH 13/69] fix(metrics): caclulate peers in rt directly from kbuckets --- ant-networking/src/cmd.rs | 13 +--- ant-networking/src/driver.rs | 20 +++--- ant-networking/src/event/mod.rs | 124 +++++++++++++++++--------------- 3 files changed, 76 insertions(+), 81 deletions(-) diff --git a/ant-networking/src/cmd.rs b/ant-networking/src/cmd.rs index bc42fa4fbf..9d50657ea8 100644 --- a/ant-networking/src/cmd.rs +++ b/ant-networking/src/cmd.rs @@ -626,15 +626,8 @@ impl SwarmDriver { sender, } => { cmd_string = "GetLocalQuotingMetrics"; - let ( - _index, - _total_peers, - peers_in_non_full_buckets, - num_of_full_buckets, - _kbucket_table_stats, - ) = self.kbuckets_status(); - let estimated_network_size = - Self::estimate_network_size(peers_in_non_full_buckets, num_of_full_buckets); + let kbucket_status = self.get_kbuckets_status(); + self.update_on_kbucket_status(&kbucket_status); let (quoting_metrics, is_already_stored) = match self .swarm .behaviour_mut() @@ -644,7 +637,7 @@ impl SwarmDriver { &key, data_type, data_size, - Some(estimated_network_size as u64), + Some(kbucket_status.estimated_network_size as u64), ) { Ok(res) => res, Err(err) => { diff --git a/ant-networking/src/driver.rs b/ant-networking/src/driver.rs index db91c7e98a..45874e0fae 100644 --- a/ant-networking/src/driver.rs +++ b/ant-networking/src/driver.rs @@ -841,24 +841,20 @@ impl SwarmDriver { } _ = set_farthest_record_interval.tick() => { if !self.is_client { - let ( - _index, - _total_peers, - peers_in_non_full_buckets, - num_of_full_buckets, - _kbucket_table_stats, - ) = self.kbuckets_status(); - let estimated_network_size = - Self::estimate_network_size(peers_in_non_full_buckets, num_of_full_buckets); - if estimated_network_size <= CLOSE_GROUP_SIZE { - info!("Not enough estimated network size {estimated_network_size}, with {peers_in_non_full_buckets} peers_in_non_full_buckets and {num_of_full_buckets}num_of_full_buckets."); + let kbucket_status = self.get_kbuckets_status(); + self.update_on_kbucket_status(&kbucket_status); + if kbucket_status.estimated_network_size <= CLOSE_GROUP_SIZE { + info!("Not enough estimated network size {}, with {} peers_in_non_full_buckets and {} num_of_full_buckets.", + kbucket_status.estimated_network_size, + kbucket_status.peers_in_non_full_buckets, + kbucket_status.num_of_full_buckets); continue; } // The entire Distance space is U256 // (U256::MAX is 115792089237316195423570985008687907853269984665640564039457584007913129639935) // The network density (average distance among nodes) can be estimated as: // network_density = entire_U256_space / estimated_network_size - let density = U256::MAX / U256::from(estimated_network_size); + let density = U256::MAX / U256::from(kbucket_status.estimated_network_size); let density_distance = density * 
U256::from(CLOSE_GROUP_SIZE); // Use distance to close peer to avoid the situation that diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index 2ad32c079f..54f614b179 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -37,8 +37,29 @@ use std::{ }; use tokio::sync::oneshot; -// (total_buckets, total_peers, peers_in_non_full_buckets, num_of_full_buckets, kbucket_table_stats) -type KBucketStatus = (usize, usize, usize, usize, Vec<(usize, usize, u32)>); +#[derive(Debug, Clone)] +pub(crate) struct KBucketStatus { + pub(crate) total_buckets: usize, + pub(crate) total_peers: usize, + pub(crate) peers_in_non_full_buckets: usize, + pub(crate) num_of_full_buckets: usize, + pub(crate) kbucket_table_stats: Vec<(usize, usize, u32)>, + pub(crate) estimated_network_size: usize, +} + +impl KBucketStatus { + pub(crate) fn log(&self) { + info!( + "kBucketTable has {:?} kbuckets {:?} peers, {:?}, estimated network size: {:?}", + self.total_buckets, + self.total_peers, + self.kbucket_table_stats, + self.estimated_network_size + ); + #[cfg(feature = "loud")] + println!("Estimated network size: {:?}", self.estimated_network_size); + } +} /// NodeEvent enum #[derive(CustomDebug)] @@ -239,12 +260,21 @@ impl SwarmDriver { /// Update state on addition of a peer to the routing table. pub(crate) fn update_on_peer_addition(&mut self, added_peer: PeerId, addresses: Addresses) { - self.peers_in_rt = self.peers_in_rt.saturating_add(1); - let n_peers = self.peers_in_rt; - info!("New peer added to routing table: {added_peer:?}, now we have #{n_peers} connected peers"); + let kbucket_status = self.get_kbuckets_status(); + self.update_on_kbucket_status(&kbucket_status); + + let distance = NetworkAddress::from_peer(self.self_peer_id) + .distance(&NetworkAddress::from_peer(added_peer)); + info!("New peer added to routing table: {added_peer:?}. We now have #{} connected peers. It has a {:?} distance to us.", + self.peers_in_rt, distance.ilog2()); #[cfg(feature = "loud")] - println!("New peer added to routing table: {added_peer:?}, now we have #{n_peers} connected peers"); + println!( + "New peer added to routing table: {added_peer:?}, now we have #{} connected peers", + self.peers_in_rt + ); + + kbucket_status.log(); if let Some(bootstrap_cache) = &mut self.bootstrap_cache { for addr in addresses.iter() { @@ -252,52 +282,42 @@ impl SwarmDriver { } } - self.log_kbuckets(&added_peer); self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); #[cfg(feature = "open-metrics")] if self.metrics_recorder.is_some() { self.check_for_change_in_our_close_group(); } - - #[cfg(feature = "open-metrics")] - if let Some(metrics_recorder) = &self.metrics_recorder { - metrics_recorder - .peers_in_routing_table - .set(self.peers_in_rt as i64); - } } /// Update state on removal of a peer from the routing table. pub(crate) fn update_on_peer_removal(&mut self, removed_peer: PeerId) { - self.peers_in_rt = self.peers_in_rt.saturating_sub(1); + let kbucket_status = self.get_kbuckets_status(); + self.update_on_kbucket_status(&kbucket_status); // ensure we disconnect bad peer // err result just means no connections were open let _result = self.swarm.disconnect_peer_id(removed_peer); + let distance = NetworkAddress::from_peer(self.self_peer_id) + .distance(&NetworkAddress::from_peer(removed_peer)); info!( - "Peer removed from routing table: {removed_peer:?}, now we have #{} connected peers", - self.peers_in_rt + "Peer removed from routing table: {removed_peer:?}. 
We now have #{} connected peers. It has a {:?} distance to us.", + self.peers_in_rt, distance.ilog2() ); - self.log_kbuckets(&removed_peer); + self.send_event(NetworkEvent::PeerRemoved(removed_peer, self.peers_in_rt)); + kbucket_status.log(); + #[cfg(feature = "open-metrics")] if self.metrics_recorder.is_some() { self.check_for_change_in_our_close_group(); } - - #[cfg(feature = "open-metrics")] - if let Some(metrics_recorder) = &self.metrics_recorder { - metrics_recorder - .peers_in_routing_table - .set(self.peers_in_rt as i64); - } } - /// Collect kbuckets status - pub(crate) fn kbuckets_status(&mut self) -> KBucketStatus { + /// Get the status of the kbucket table. + pub(crate) fn get_kbuckets_status(&mut self) -> KBucketStatus { let mut kbucket_table_stats = vec![]; let mut index = 0; let mut total_peers = 0; @@ -324,51 +344,37 @@ impl SwarmDriver { } index += 1; } - ( - index, - total_peers, - peers_in_non_full_buckets, - num_of_full_buckets, - kbucket_table_stats, - ) - } - /// Logs the kbuckets also records the bucket info. - pub(crate) fn log_kbuckets(&mut self, peer: &PeerId) { - let distance = NetworkAddress::from_peer(self.self_peer_id) - .distance(&NetworkAddress::from_peer(*peer)); - info!("Peer {peer:?} has a {:?} distance to us", distance.ilog2()); + let estimated_network_size = + Self::estimate_network_size(peers_in_non_full_buckets, num_of_full_buckets); - let ( - index, + KBucketStatus { + total_buckets: index, total_peers, peers_in_non_full_buckets, num_of_full_buckets, kbucket_table_stats, - ) = self.kbuckets_status(); + estimated_network_size, + } + } - let estimated_network_size = - Self::estimate_network_size(peers_in_non_full_buckets, num_of_full_buckets); + /// Update SwarmDriver field & also record metrics based on the newly calculated kbucket status. + pub(crate) fn update_on_kbucket_status(&mut self, status: &KBucketStatus) { + self.peers_in_rt = status.total_peers; #[cfg(feature = "open-metrics")] if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder + .peers_in_routing_table + .set(status.total_peers as i64); + + let estimated_network_size = Self::estimate_network_size( + status.peers_in_non_full_buckets, + status.num_of_full_buckets, + ); let _ = metrics_recorder .estimated_network_size .set(estimated_network_size as i64); } - - // Just to warn if our tracking goes out of sync with libp2p. Can happen if someone forgets to call - // update_on_peer_addition or update_on_peer_removal when adding or removing a peer. - // Only log every 10th peer to avoid spamming the logs. 
- if total_peers % 10 == 0 && total_peers != self.peers_in_rt { - warn!( - "Total peers in routing table: {}, but kbucket table has {total_peers} peers", - self.peers_in_rt - ); - } - - info!("kBucketTable has {index:?} kbuckets {total_peers:?} peers, {kbucket_table_stats:?}, estimated network size: {estimated_network_size:?}"); - #[cfg(feature = "loud")] - println!("Estimated network size: {estimated_network_size:?}"); } /// Estimate the number of nodes in the network From 342a50cfbee86a43d020f688822058ce2b6682b3 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 19 Feb 2025 19:06:25 +0530 Subject: [PATCH 14/69] feat(metrics): add more metrics related to relays --- ant-networking/src/event/identify.rs | 2 +- ant-networking/src/event/mod.rs | 34 ++++++++++++++++++++++++-- ant-networking/src/metrics/mod.rs | 36 ++++++++++++++++++++++++++-- ant-networking/src/relay_manager.rs | 6 ++--- 4 files changed, 69 insertions(+), 9 deletions(-) diff --git a/ant-networking/src/event/identify.rs b/ant-networking/src/event/identify.rs index c06eac14e5..6b9ee45cdd 100644 --- a/ant-networking/src/event/identify.rs +++ b/ant-networking/src/event/identify.rs @@ -88,7 +88,7 @@ impl SwarmDriver { .collect(), }; - let is_relayed_peer = is_a_relayed_peer(&addrs); + let is_relayed_peer = is_a_relayed_peer(addrs.iter()); let is_bootstrap_peer = self .bootstrap_peers diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index 54f614b179..a8f08796a7 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -11,7 +11,7 @@ mod kad; mod request_response; mod swarm; -use crate::{driver::SwarmDriver, error::Result}; +use crate::{driver::SwarmDriver, error::Result, relay_manager::is_a_relayed_peer}; use core::fmt; use custom_debug::Debug as CustomDebug; use libp2p::{ @@ -41,7 +41,9 @@ use tokio::sync::oneshot; pub(crate) struct KBucketStatus { pub(crate) total_buckets: usize, pub(crate) total_peers: usize, + pub(crate) total_relay_peers: usize, pub(crate) peers_in_non_full_buckets: usize, + pub(crate) relay_peers_in_non_full_buckets: usize, pub(crate) num_of_full_buckets: usize, pub(crate) kbucket_table_stats: Vec<(usize, usize, u32)>, pub(crate) estimated_network_size: usize, @@ -284,7 +286,6 @@ impl SwarmDriver { self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); - #[cfg(feature = "open-metrics")] if self.metrics_recorder.is_some() { self.check_for_change_in_our_close_group(); } @@ -321,14 +322,25 @@ impl SwarmDriver { let mut kbucket_table_stats = vec![]; let mut index = 0; let mut total_peers = 0; + let mut total_relay_peers = 0; let mut peers_in_non_full_buckets = 0; + let mut relay_peers_in_non_full_buckets = 0; let mut num_of_full_buckets = 0; for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { let range = kbucket.range(); let num_entires = kbucket.num_entries(); + kbucket.iter().for_each(|entry| { + if is_a_relayed_peer(entry.node.value.iter()) { + total_relay_peers += 1; + if num_entires < K_VALUE.get() { + relay_peers_in_non_full_buckets += 1; + } + } + }); + if num_entires >= K_VALUE.get() { num_of_full_buckets += 1; } else { @@ -351,7 +363,9 @@ impl SwarmDriver { KBucketStatus { total_buckets: index, total_peers, + total_relay_peers, peers_in_non_full_buckets, + relay_peers_in_non_full_buckets, num_of_full_buckets, kbucket_table_stats, estimated_network_size, @@ -367,6 +381,18 @@ impl SwarmDriver { .peers_in_routing_table .set(status.total_peers as i64); + let _ = metrics_recorder + .relay_peers_in_routing_table + 
.set(status.total_relay_peers as i64); + + let _ = metrics_recorder + .peers_in_non_full_buckets + .set(status.peers_in_non_full_buckets as i64); + + let _ = metrics_recorder + .relay_peers_in_non_full_buckets + .set(status.relay_peers_in_non_full_buckets as i64); + let estimated_network_size = Self::estimate_network_size( status.peers_in_non_full_buckets, status.num_of_full_buckets, @@ -374,6 +400,10 @@ impl SwarmDriver { let _ = metrics_recorder .estimated_network_size .set(estimated_network_size as i64); + + let _ = metrics_recorder + .percentage_of_relay_peers + .set((status.total_relay_peers as f64 / status.total_peers as f64) * 100.0); } } diff --git a/ant-networking/src/metrics/mod.rs b/ant-networking/src/metrics/mod.rs index c78508ecb1..92e032c599 100644 --- a/ant-networking/src/metrics/mod.rs +++ b/ant-networking/src/metrics/mod.rs @@ -12,8 +12,6 @@ mod relay_client; pub mod service; mod upnp; -use std::sync::atomic::AtomicU64; - use crate::MetricsRegistries; use crate::{log_markers::Marker, time::sleep}; use bad_node::{BadNodeMetrics, BadNodeMetricsMsg, TimeFrame}; @@ -25,6 +23,7 @@ use prometheus_client::{ metrics::family::Family, metrics::{counter::Counter, gauge::Gauge}, }; +use std::sync::atomic::AtomicU64; use sysinfo::{Pid, ProcessRefreshKind, System}; use tokio::time::Duration; @@ -43,8 +42,12 @@ pub(crate) struct NetworkMetricsRecorder { // metrics from ant-networking pub(crate) connected_peers: Gauge, pub(crate) estimated_network_size: Gauge, + pub(crate) percentage_of_relay_peers: Gauge, pub(crate) open_connections: Gauge, pub(crate) peers_in_routing_table: Gauge, + pub(crate) relay_peers_in_routing_table: Gauge, + pub(crate) peers_in_non_full_buckets: Gauge, + pub(crate) relay_peers_in_non_full_buckets: Gauge, pub(crate) records_stored: Gauge, pub(crate) relay_reservation_health: Gauge, @@ -107,6 +110,12 @@ impl NetworkMetricsRecorder { "The estimated number of nodes in the network calculated by the peers in our RT", estimated_network_size.clone(), ); + let percentage_of_relay_peers = Gauge::::default(); + sub_registry.register( + "percentage_of_relay_peers", + "The percentage of relay peers in our routing table", + percentage_of_relay_peers.clone(), + ); let open_connections = Gauge::default(); sub_registry.register( "open_connections", @@ -119,6 +128,25 @@ impl NetworkMetricsRecorder { "The total number of peers in our routing table", peers_in_routing_table.clone(), ); + let relay_peers_in_routing_table = Gauge::default(); + sub_registry.register( + "relay_peers_in_routing_table", + "The total number of relay peers in our routing table", + relay_peers_in_routing_table.clone(), + ); + + let peers_in_non_full_buckets = Gauge::default(); + sub_registry.register( + "peers_in_non_full_buckets", + "The number of peers in our routing table that are not in full buckets", + peers_in_non_full_buckets.clone(), + ); + let relay_peers_in_non_full_buckets = Gauge::default(); + sub_registry.register( + "relay_peers_in_non_full_buckets", + "The number of relay peers in our routing table that are not in full buckets", + relay_peers_in_non_full_buckets.clone(), + ); let shunned_count = Counter::default(); sub_registry.register( @@ -226,10 +254,14 @@ impl NetworkMetricsRecorder { records_stored, estimated_network_size, + percentage_of_relay_peers, connected_peers, open_connections, relay_reservation_health, peers_in_routing_table, + relay_peers_in_routing_table, + peers_in_non_full_buckets, + relay_peers_in_non_full_buckets, relevant_records, max_records, received_payment_count, diff 
--git a/ant-networking/src/relay_manager.rs b/ant-networking/src/relay_manager.rs index 896467a4f3..649c80e313 100644 --- a/ant-networking/src/relay_manager.rs +++ b/ant-networking/src/relay_manager.rs @@ -41,10 +41,8 @@ const RESERVATION_SCORE_ROLLING_WINDOW: usize = 100; #[cfg(feature = "open-metrics")] type ConnectionsFromPeer = Vec<(PeerId, ConnectionId, SystemTime, Option)>; -pub(crate) fn is_a_relayed_peer(addrs: &HashSet) -> bool { - addrs - .iter() - .any(|multiaddr| multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit))) +pub(crate) fn is_a_relayed_peer<'a>(mut addrs: impl Iterator) -> bool { + addrs.any(|multiaddr| multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit))) } /// Manage the relay servers that a private node is connected to. From c28ceaa3e0f1a087fc3ab2d211605a9e60619ad9 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Thu, 20 Feb 2025 21:16:24 +0530 Subject: [PATCH 15/69] feat(metrics): add metrics to denote the number of relay clients connected to us --- ant-networking/src/event/mod.rs | 4 +++- ant-networking/src/event/swarm.rs | 24 +++++++++++++++++++++--- ant-networking/src/metrics/mod.rs | 8 ++++++++ 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index a8f08796a7..5153648ee7 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -52,9 +52,10 @@ pub(crate) struct KBucketStatus { impl KBucketStatus { pub(crate) fn log(&self) { info!( - "kBucketTable has {:?} kbuckets {:?} peers, {:?}, estimated network size: {:?}", + "kBucketTable has {:?} kbuckets {:?} peers ({} relay peers), {:?}, estimated network size: {:?}", self.total_buckets, self.total_peers, + self.total_relay_peers, self.kbucket_table_stats, self.estimated_network_size ); @@ -286,6 +287,7 @@ impl SwarmDriver { self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); + #[cfg(feature = "open-metrics")] if self.metrics_recorder.is_some() { self.check_for_change_in_our_close_group(); } diff --git a/ant-networking/src/event/swarm.rs b/ant-networking/src/event/swarm.rs index d029997056..1dccc17fcf 100644 --- a/ant-networking/src/event/swarm.rs +++ b/ant-networking/src/event/swarm.rs @@ -108,10 +108,24 @@ impl SwarmDriver { } => { self.connected_relay_clients.insert(src_peer_id); info!("Relay reservation accepted from {src_peer_id:?}. Relay client count: {}", self.connected_relay_clients.len()); + + #[cfg(feature = "open-metrics")] + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder + .connected_relay_clients + .set(self.connected_relay_clients.len() as i64); + } } libp2p::relay::Event::ReservationTimedOut { src_peer_id } => { self.connected_relay_clients.remove(&src_peer_id); info!("Relay reservation timed out from {src_peer_id:?}. Relay client count: {}", self.connected_relay_clients.len()); + + #[cfg(feature = "open-metrics")] + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder + .connected_relay_clients + .set(self.connected_relay_clients.len() as i64); + } } _ => {} } @@ -604,13 +618,17 @@ impl SwarmDriver { /// Record the metrics on update of connection state. 
fn record_connection_metrics(&self) { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.metrics_recorder { - metrics + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder .open_connections .set(self.live_connected_peers.len() as i64); - metrics + metrics_recorder .connected_peers .set(self.swarm.connected_peers().count() as i64); + + metrics_recorder + .connected_relay_clients + .set(self.connected_relay_clients.len() as i64); } } diff --git a/ant-networking/src/metrics/mod.rs b/ant-networking/src/metrics/mod.rs index 92e032c599..31335ad2c0 100644 --- a/ant-networking/src/metrics/mod.rs +++ b/ant-networking/src/metrics/mod.rs @@ -41,6 +41,7 @@ pub(crate) struct NetworkMetricsRecorder { // metrics from ant-networking pub(crate) connected_peers: Gauge, + pub(crate) connected_relay_clients: Gauge, pub(crate) estimated_network_size: Gauge, pub(crate) percentage_of_relay_peers: Gauge, pub(crate) open_connections: Gauge, @@ -103,6 +104,12 @@ impl NetworkMetricsRecorder { "The number of peers that we are currently connected to", connected_peers.clone(), ); + let connected_relay_clients = Gauge::default(); + sub_registry.register( + "connected_relay_clients", + "The number of relay clients that are currently connected to us", + connected_relay_clients.clone(), + ); let estimated_network_size = Gauge::default(); sub_registry.register( @@ -256,6 +263,7 @@ impl NetworkMetricsRecorder { estimated_network_size, percentage_of_relay_peers, connected_peers, + connected_relay_clients, open_connections, relay_reservation_health, peers_in_routing_table, From 06e59ec02ae989995265561d65fdfa92ae2e267f Mon Sep 17 00:00:00 2001 From: qima Date: Fri, 21 Feb 2025 20:40:29 +0800 Subject: [PATCH 16/69] fix(node): correct relay_nodes percentage calculation and remove un-needed metrics --- ant-networking/src/event/mod.rs | 16 +++++----------- ant-networking/src/metrics/mod.rs | 17 ----------------- 2 files changed, 5 insertions(+), 28 deletions(-) diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index 5153648ee7..4c98614691 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -387,14 +387,6 @@ impl SwarmDriver { .relay_peers_in_routing_table .set(status.total_relay_peers as i64); - let _ = metrics_recorder - .peers_in_non_full_buckets - .set(status.peers_in_non_full_buckets as i64); - - let _ = metrics_recorder - .relay_peers_in_non_full_buckets - .set(status.relay_peers_in_non_full_buckets as i64); - let estimated_network_size = Self::estimate_network_size( status.peers_in_non_full_buckets, status.num_of_full_buckets, @@ -403,9 +395,11 @@ impl SwarmDriver { .estimated_network_size .set(estimated_network_size as i64); - let _ = metrics_recorder - .percentage_of_relay_peers - .set((status.total_relay_peers as f64 / status.total_peers as f64) * 100.0); + let _ = metrics_recorder.percentage_of_relay_peers.set( + (status.relay_peers_in_non_full_buckets as f64 + / status.peers_in_non_full_buckets as f64) + * 100.0, + ); } } diff --git a/ant-networking/src/metrics/mod.rs b/ant-networking/src/metrics/mod.rs index 31335ad2c0..bc78c099de 100644 --- a/ant-networking/src/metrics/mod.rs +++ b/ant-networking/src/metrics/mod.rs @@ -47,8 +47,6 @@ pub(crate) struct NetworkMetricsRecorder { pub(crate) open_connections: Gauge, pub(crate) peers_in_routing_table: Gauge, pub(crate) relay_peers_in_routing_table: Gauge, - pub(crate) peers_in_non_full_buckets: Gauge, - pub(crate) relay_peers_in_non_full_buckets: Gauge, pub(crate) 
records_stored: Gauge, pub(crate) relay_reservation_health: Gauge, @@ -142,19 +140,6 @@ impl NetworkMetricsRecorder { relay_peers_in_routing_table.clone(), ); - let peers_in_non_full_buckets = Gauge::default(); - sub_registry.register( - "peers_in_non_full_buckets", - "The number of peers in our routing table that are not in full buckets", - peers_in_non_full_buckets.clone(), - ); - let relay_peers_in_non_full_buckets = Gauge::default(); - sub_registry.register( - "relay_peers_in_non_full_buckets", - "The number of relay peers in our routing table that are not in full buckets", - relay_peers_in_non_full_buckets.clone(), - ); - let shunned_count = Counter::default(); sub_registry.register( "shunned_count", @@ -268,8 +253,6 @@ impl NetworkMetricsRecorder { relay_reservation_health, peers_in_routing_table, relay_peers_in_routing_table, - peers_in_non_full_buckets, - relay_peers_in_non_full_buckets, relevant_records, max_records, received_payment_count, From 97aa0810ada0bee51e998bc7462bdc95d1720065 Mon Sep 17 00:00:00 2001 From: qima Date: Fri, 21 Feb 2025 21:57:07 +0800 Subject: [PATCH 17/69] chore(node): rename percentage_of_relay_peers to relay_peers_percentage --- ant-networking/src/event/mod.rs | 2 +- ant-networking/src/metrics/mod.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index 4c98614691..f880c62ee6 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -395,7 +395,7 @@ impl SwarmDriver { .estimated_network_size .set(estimated_network_size as i64); - let _ = metrics_recorder.percentage_of_relay_peers.set( + let _ = metrics_recorder.relay_peers_percentage.set( (status.relay_peers_in_non_full_buckets as f64 / status.peers_in_non_full_buckets as f64) * 100.0, diff --git a/ant-networking/src/metrics/mod.rs b/ant-networking/src/metrics/mod.rs index bc78c099de..a1ba7f5b1d 100644 --- a/ant-networking/src/metrics/mod.rs +++ b/ant-networking/src/metrics/mod.rs @@ -43,7 +43,7 @@ pub(crate) struct NetworkMetricsRecorder { pub(crate) connected_peers: Gauge, pub(crate) connected_relay_clients: Gauge, pub(crate) estimated_network_size: Gauge, - pub(crate) percentage_of_relay_peers: Gauge, + pub(crate) relay_peers_percentage: Gauge, pub(crate) open_connections: Gauge, pub(crate) peers_in_routing_table: Gauge, pub(crate) relay_peers_in_routing_table: Gauge, @@ -115,11 +115,11 @@ impl NetworkMetricsRecorder { "The estimated number of nodes in the network calculated by the peers in our RT", estimated_network_size.clone(), ); - let percentage_of_relay_peers = Gauge::::default(); + let relay_peers_percentage = Gauge::::default(); sub_registry.register( - "percentage_of_relay_peers", + "relay_peers_percentage", "The percentage of relay peers in our routing table", - percentage_of_relay_peers.clone(), + relay_peers_percentage.clone(), ); let open_connections = Gauge::default(); sub_registry.register( @@ -246,7 +246,7 @@ impl NetworkMetricsRecorder { records_stored, estimated_network_size, - percentage_of_relay_peers, + relay_peers_percentage, connected_peers, connected_relay_clients, open_connections, From 885574b845d394110c27c9c8fd9b0142550a4755 Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 20 Feb 2025 20:41:29 +0800 Subject: [PATCH 18/69] feat(node): use closer peers as relayer candidate --- ant-networking/src/relay_manager.rs | 35 ++++++++++++++++------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/ant-networking/src/relay_manager.rs 
b/ant-networking/src/relay_manager.rs index 649c80e313..13d0030eea 100644 --- a/ant-networking/src/relay_manager.rs +++ b/ant-networking/src/relay_manager.rs @@ -6,16 +6,21 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::driver::{BadNodes, NodeBehaviour}; +use crate::{ + driver::{BadNodes, NodeBehaviour}, + NetworkAddress, +}; use itertools::Itertools; use libp2p::swarm::ConnectionId; use libp2p::{ - core::transport::ListenerId, multiaddr::Protocol, Multiaddr, PeerId, StreamProtocol, Swarm, + core::transport::ListenerId, kad::KBucketDistance as Distance, multiaddr::Protocol, Multiaddr, + PeerId, StreamProtocol, Swarm, }; #[cfg(feature = "open-metrics")] use prometheus_client::metrics::gauge::Gauge; -use rand::Rng; -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +#[cfg(feature = "open-metrics")] +use std::collections::VecDeque; +use std::collections::{BTreeMap, HashMap, HashSet}; #[cfg(feature = "open-metrics")] use std::sync::atomic::AtomicU64; use std::time::Instant; @@ -51,7 +56,7 @@ pub(crate) fn is_a_relayed_peer<'a>(mut addrs: impl Iterator, + relay_server_candidates: BTreeMap, /// The relay servers that we are waiting for a reservation from. waiting_for_reservation: BTreeMap, /// The relay servers that we are connected to. @@ -150,8 +155,11 @@ impl RelayManager { // Hence here can add the addr directly. if let Some(relay_addr) = Self::craft_relay_address(addr, Some(*peer_id)) { debug!("Adding {peer_id:?} with {relay_addr:?} as a potential relay candidate"); - self.relay_server_candidates - .push_back((*peer_id, relay_addr)); + let distance = NetworkAddress::from_peer(self.self_peer_id) + .distance(&NetworkAddress::from_peer(*peer_id)); + let _ = self + .relay_server_candidates + .insert(distance, (*peer_id, relay_addr)); } } } else { @@ -181,15 +189,10 @@ impl RelayManager { // todo: should we remove all our other `listen_addr`? And should we block from adding `add_external_address` if // we're behind nat? - // Pick a random candidate from the vector. Check if empty, or `gen_range` panics for empty range. - let index = if self.relay_server_candidates.is_empty() { - debug!("No more relay candidates."); - break; - } else { - rand::thread_rng().gen_range(0..self.relay_server_candidates.len()) - }; - - if let Some((peer_id, relay_addr)) = self.relay_server_candidates.remove(index) { + // Pick a closest candidate as a potential relay_server. 
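// Illustration, not part of this diff: the candidates are now kept in a
// `BTreeMap` keyed by the XOR distance from our own peer id, so `pop_first()`
// always yields the closest remaining candidate. A self-contained sketch of
// that ordering, with `u64` standing in for `KBucketDistance`:
//
//     use std::collections::BTreeMap;
//
//     let mut candidates: BTreeMap<u64, &str> = BTreeMap::new();
//     candidates.insert(42, "distant peer");
//     candidates.insert(7, "close peer");
//     // The smallest key (the closest peer) comes out first.
//     assert_eq!(candidates.pop_first(), Some((7, "close peer")));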
+ if let Some((_distance, (peer_id, relay_addr))) = + self.relay_server_candidates.pop_first() + { // skip if detected as a bad node if let Some((_, is_bad)) = bad_nodes.get(&peer_id) { if *is_bad { From 298a63fa23a7434e7f43a3fe7555ebdb058b3617 Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 20 Feb 2025 22:20:59 +0800 Subject: [PATCH 19/69] chore(node): reduce MAX_CONCURRENT_RELAY_CONNECTIONS to 2 --- ant-networking/src/relay_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ant-networking/src/relay_manager.rs b/ant-networking/src/relay_manager.rs index 13d0030eea..afbdd916fa 100644 --- a/ant-networking/src/relay_manager.rs +++ b/ant-networking/src/relay_manager.rs @@ -27,7 +27,7 @@ use std::time::Instant; #[cfg(feature = "open-metrics")] use std::{collections::btree_map::Entry, time::SystemTime}; -const MAX_CONCURRENT_RELAY_CONNECTIONS: usize = 4; +const MAX_CONCURRENT_RELAY_CONNECTIONS: usize = 2; const MAX_POTENTIAL_CANDIDATES: usize = 1000; /// We could get multiple incoming connections from the same peer through multiple relay servers, and only one of them From ae9d87c6c2886a1b66d37ebaac379c13811ad7cd Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 13 Feb 2025 16:46:05 +0100 Subject: [PATCH 20/69] refactor(antctl)!: rename `--home-network` flag to `--relay` --- ant-node-manager/src/add_services/config.rs | 2 +- ant-node-manager/src/add_services/mod.rs | 14 +++++++------- ant-node-manager/src/bin/cli/main.rs | 14 ++++++-------- ant-node-manager/src/cmd/node.rs | 4 ++-- 4 files changed, 16 insertions(+), 18 deletions(-) diff --git a/ant-node-manager/src/add_services/config.rs b/ant-node-manager/src/add_services/config.rs index c0e74a7b44..cc75fb3835 100644 --- a/ant-node-manager/src/add_services/config.rs +++ b/ant-node-manager/src/add_services/config.rs @@ -180,7 +180,7 @@ pub struct AddNodeServiceOptions { pub enable_metrics_server: bool, pub env_variables: Option>, pub evm_network: EvmNetwork, - pub home_network: bool, + pub relay: bool, pub log_format: Option, pub max_archived_log_files: Option, pub max_log_files: Option, diff --git a/ant-node-manager/src/add_services/mod.rs b/ant-node-manager/src/add_services/mod.rs index ee4d923ef8..8f5d995d51 100644 --- a/ant-node-manager/src/add_services/mod.rs +++ b/ant-node-manager/src/add_services/mod.rs @@ -171,20 +171,20 @@ pub async fn add_node( match nat_status { NatDetectionStatus::Public => { options.no_upnp = true; // UPnP not needed - options.home_network = false; + options.relay = false; } NatDetectionStatus::UPnP => { options.no_upnp = false; - options.home_network = false; + options.relay = false; } NatDetectionStatus::Private => { options.no_upnp = true; - options.home_network = true; + options.relay = true; } } debug!( - "Auto-setting NAT flags: upnp={}, home_network={}", - !options.no_upnp, options.home_network + "Auto-setting NAT flags: upnp={}, relay={}", + !options.no_upnp, options.relay ); } @@ -193,7 +193,7 @@ pub async fn add_node( data_dir_path: service_data_dir_path.clone(), env_variables: options.env_variables.clone(), evm_network: options.evm_network.clone(), - home_network: options.home_network, + home_network: options.relay, log_dir_path: service_log_dir_path.clone(), log_format: options.log_format, max_archived_log_files: options.max_archived_log_files, @@ -229,7 +229,7 @@ pub async fn add_node( connected_peers: None, data_dir_path: service_data_dir_path.clone(), evm_network: options.evm_network.clone(), - home_network: options.home_network, + home_network: options.relay, listen_addr: 
None, log_dir_path: service_log_dir_path.clone(), log_format: options.log_format, diff --git a/ant-node-manager/src/bin/cli/main.rs b/ant-node-manager/src/bin/cli/main.rs index aaff4b0762..2a9d8fb0f2 100644 --- a/ant-node-manager/src/bin/cli/main.rs +++ b/ant-node-manager/src/bin/cli/main.rs @@ -83,13 +83,13 @@ pub enum SubCmd { /// and they will need to be explicitly started again. #[clap(long, default_value_t = false)] auto_restart: bool, - /// Auto set NAT flags (--upnp or --home-network) if our NAT status has been obtained by + /// Auto set NAT flags (--upnp or --relay) if our NAT status has been obtained by /// running the NAT detection command. /// /// Using the argument will cause an error if the NAT detection command has not already /// ran. /// - /// This will override any --upnp or --home-network options. + /// This will override any --upnp or --relay options. #[clap(long, default_value_t = false)] auto_set_nat_flags: bool, /// The number of service instances. @@ -126,11 +126,9 @@ pub enum SubCmd { /// Specify what EVM network to use for payments. #[command(subcommand)] evm_network: EvmNetworkCommand, - /// Set this flag to use the antnode '--home-network' feature. - /// - /// This enables the use of antnode services from a home network with a router. + /// Set this flag if UPnP doesn't work, and you are not able to manually port forward. #[clap(long)] - home_network: bool, + relay: bool, /// Provide the path for the log directory for the installed node. /// /// This path is a prefix. Each installed node will have its own directory underneath it. @@ -932,7 +930,7 @@ async fn main() -> Result<()> { enable_metrics_server, env_variables, evm_network, - home_network, + relay, log_dir_path, log_format, max_archived_log_files, @@ -959,7 +957,7 @@ async fn main() -> Result<()> { enable_metrics_server, env_variables, Some(evm_network.try_into()?), - home_network, + relay, log_dir_path, log_format, max_archived_log_files, diff --git a/ant-node-manager/src/cmd/node.rs b/ant-node-manager/src/cmd/node.rs index 531894cce3..5bb84aef6b 100644 --- a/ant-node-manager/src/cmd/node.rs +++ b/ant-node-manager/src/cmd/node.rs @@ -43,7 +43,7 @@ pub async fn add( enable_metrics_server: bool, env_variables: Option>, evm_network: Option, - home_network: bool, + relay: bool, log_dir_path: Option, log_format: Option, max_archived_log_files: Option, @@ -120,7 +120,7 @@ pub async fn add( enable_metrics_server, evm_network: evm_network.unwrap_or(EvmNetwork::ArbitrumOne), env_variables, - home_network, + relay, log_format, max_archived_log_files, max_log_files, From 2ab32bea14c750c48489e2a0863ae38662c4f3ca Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 13 Feb 2025 16:49:36 +0100 Subject: [PATCH 21/69] fix(antctl): conflicts_with `connection-timeout` error --- ant-node-manager/src/bin/cli/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ant-node-manager/src/bin/cli/main.rs b/ant-node-manager/src/bin/cli/main.rs index 2a9d8fb0f2..0007a9094b 100644 --- a/ant-node-manager/src/bin/cli/main.rs +++ b/ant-node-manager/src/bin/cli/main.rs @@ -329,7 +329,7 @@ pub enum SubCmd { /// 'connection-timeout' argument. /// /// Units are milliseconds. - #[clap(long, conflicts_with = "connection-timeout")] + #[clap(long, conflicts_with = "connection_timeout")] interval: Option, /// The peer ID of the service to start. /// @@ -366,7 +366,7 @@ pub enum SubCmd { /// An interval applied between stopping each service. /// /// Units are milliseconds. 
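// Illustration, not part of this diff: clap's `conflicts_with` refers to the
// other argument by its id, which in derive mode defaults to the field name
// (`connection_timeout`), not to the kebab-case flag `--connection-timeout`.
// That mismatch is what caused the error in this patch's title, which is why
// the attributes below either rename the reference to `connection_timeout` or
// drop it. A minimal sketch, assuming clap v4 with the derive feature:
//
//     #[derive(clap::Parser)]
//     struct Opts {
//         #[clap(long)]
//         connection_timeout: Option<u64>,
//         /// Conflicts are declared against the argument id, not the flag name.
//         #[clap(long, conflicts_with = "connection_timeout")]
//         interval: Option<u64>,
//     }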
- #[clap(long, conflicts_with = "connection-timeout")] + #[clap(long)] interval: Option, /// The peer ID of the service to stop. /// @@ -427,7 +427,7 @@ pub enum SubCmd { /// 'connection-timeout' argument. /// /// Units are milliseconds. - #[clap(long, conflicts_with = "connection-timeout")] + #[clap(long, conflicts_with = "connection_timeout")] interval: Option, /// Provide a path for the antnode binary to be used by the service. /// From 28a53baaeea09b0303c38b884a96e1ec1a05f14c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 13 Feb 2025 18:36:04 +0100 Subject: [PATCH 22/69] feat(launchpad): remove `Home Network` from options --- node-launchpad/src/components/popup/connection_mode.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/node-launchpad/src/components/popup/connection_mode.rs b/node-launchpad/src/components/popup/connection_mode.rs index 71906a12a4..b8fe5d2a6d 100644 --- a/node-launchpad/src/components/popup/connection_mode.rs +++ b/node-launchpad/src/components/popup/connection_mode.rs @@ -54,6 +54,7 @@ impl ChangeConnectionModePopUp { pub fn new(connection_mode: ConnectionMode) -> Result { let mut selected_connection_mode: ConnectionModeItem = ConnectionModeItem::default(); let connection_modes_items: Vec = ConnectionMode::iter() + .filter(|cm| cm != &ConnectionMode::HomeNetwork) .map(|connection_mode_item| ConnectionModeItem { connection_mode: connection_mode_item, status: if connection_mode == connection_mode_item { From 7590e58ccae8a32d2569f7cd9bbc28d105b1ba79 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 14 Feb 2025 09:42:23 +0100 Subject: [PATCH 23/69] chore(launchpad): on app data load swap `HomeNetwork` with `Automatic` --- node-launchpad/src/config.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index 6a16aab547..57ccacb944 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -142,11 +142,16 @@ impl AppData { color_eyre::eyre::eyre!("Failed to read app data file: {}", e) })?; - let app_data: AppData = serde_json::from_str(&data).map_err(|e| { + let mut app_data: AppData = serde_json::from_str(&data).map_err(|e| { error!("Failed to parse app data: {}", e); color_eyre::eyre::eyre!("Failed to parse app data: {}", e) })?; + // Don't allow the manual setting to HomeNetwork anymore + if let Some(ConnectionMode::HomeNetwork) = app_data.connection_mode { + app_data.connection_mode = Some(ConnectionMode::Automatic); + } + Ok(app_data) } From c990c057ea8a5d18a5ca80298b7daacc53fa3ae6 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 14 Feb 2025 13:50:19 +0100 Subject: [PATCH 24/69] feat(launchpad): add upnp check --- Cargo.lock | 54 ++++++++++++++++++++++++- node-launchpad/Cargo.toml | 2 + node-launchpad/src/app.rs | 5 +++ node-launchpad/src/components/status.rs | 41 ++++++++++++++++--- node-launchpad/src/lib.rs | 1 + node-launchpad/src/upnp.rs | 45 +++++++++++++++++++++ 6 files changed, 141 insertions(+), 7 deletions(-) create mode 100644 node-launchpad/src/upnp.rs diff --git a/Cargo.lock b/Cargo.lock index a3caeb7b84..8627e3659a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4919,6 +4919,19 @@ dependencies = [ "xmltree", ] +[[package]] +name = "igd-next" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2830127baaaa55dae9aa5ee03158d5aa3687a9c2c11ce66870452580cc695df4" +dependencies = [ + "attohttpc", + "log", + "rand 0.8.5", + "url", + "xmltree", +] + [[package]] name = "ignore" version = "0.4.23" @@ 
-5632,7 +5645,7 @@ checksum = "d457b9ecceb66e7199f049926fad447f1f17f040e8d29d690c086b4cab8ed14a" dependencies = [ "futures", "futures-timer", - "igd-next", + "igd-next 0.15.1", "libp2p-core", "libp2p-swarm", "tokio", @@ -5698,6 +5711,18 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +[[package]] +name = "local-ip-address" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +dependencies = [ + "libc", + "neli", + "thiserror 1.0.69", + "windows-sys 0.59.0", +] + [[package]] name = "lock_api" version = "0.4.12" @@ -6016,6 +6041,31 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "neli" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +dependencies = [ + "byteorder", + "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + [[package]] name = "netlink-packet-core" version = "0.7.0" @@ -6140,9 +6190,11 @@ dependencies = [ "fs_extra", "futures", "human-panic", + "igd-next 0.16.0", "itertools 0.12.1", "json5", "libc", + "local-ip-address", "log", "pretty_assertions", "prometheus-parse", diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 233ef6e2ea..04671549c5 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -47,9 +47,11 @@ faccess = "0.2.4" futures = "0.3.28" fs_extra = "1.3.0" human-panic = "1.2.0" +igd-next = "0.16.0" itertools = "~0.12.1" json5 = "0.4.1" libc = "0.2.148" +local-ip-address = "0.6.3" log = "0.4.20" pretty_assertions = "1.4.0" prometheus-parse = "0.2.5" diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index 457ba41f6d..043f7dacc7 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -8,6 +8,7 @@ use std::path::PathBuf; +use crate::upnp::is_upnp_supported; use crate::{ action::Action, components::{ @@ -78,6 +79,9 @@ impl App { let connection_mode = app_data .connection_mode .unwrap_or(ConnectionMode::Automatic); + + let upnp_supported = is_upnp_supported(); + let port_from = app_data.port_from.unwrap_or(PORT_MIN); let port_to = app_data.port_to.unwrap_or(PORT_MAX); let storage_mountpoint = app_data @@ -98,6 +102,7 @@ impl App { antnode_path, data_dir_path, connection_mode, + upnp_supported, port_from: Some(port_from), port_to: Some(port_to), }; diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 5ce84cf6fc..2e6497a994 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -20,7 +20,7 @@ use crate::connection_mode::ConnectionMode; use crate::error::ErrorPopup; use crate::node_mgmt::{MaintainNodesArgs, NodeManagement, NodeManagementTask, UpgradeNodesArgs}; use crate::node_mgmt::{PORT_MAX, PORT_MIN}; -use crate::style::{COOL_GREY, INDIGO}; +use crate::style::{COOL_GREY, INDIGO, SIZZLING_RED}; use crate::tui::Event; use crate::{ action::{Action, StatusActions}, @@ -102,6 +102,8 @@ pub struct Status<'a> { data_dir_path: PathBuf, // Connection mode connection_mode: 
ConnectionMode, + // UPnP support + upnp_supported: bool, // Port from port_from: Option, // Port to @@ -121,6 +123,7 @@ pub struct StatusConfig { pub allocated_disk_space: usize, pub antnode_path: Option, pub connection_mode: ConnectionMode, + pub upnp_supported: bool, pub data_dir_path: PathBuf, pub network_id: Option, pub peers_args: PeersArgs, @@ -150,6 +153,7 @@ impl Status<'_> { antnode_path: config.antnode_path, data_dir_path: config.data_dir_path, connection_mode: config.connection_mode, + upnp_supported: config.upnp_supported, port_from: config.port_from, port_to: config.port_to, error_popup: None, @@ -798,19 +802,44 @@ impl Component for Status<'_> { ]); let connection_mode_string = match self.connection_mode { - ConnectionMode::HomeNetwork => "Home Network", - ConnectionMode::UPnP => "UPnP", - ConnectionMode::CustomPorts => &format!( + ConnectionMode::HomeNetwork => "Home Network".to_string(), + ConnectionMode::UPnP => "UPnP".to_string(), + ConnectionMode::CustomPorts => format!( "Custom Ports {}-{}", self.port_from.unwrap_or(PORT_MIN), self.port_to.unwrap_or(PORT_MIN + PORT_ALLOCATION) ), - ConnectionMode::Automatic => "Automatic", + ConnectionMode::Automatic => "Automatic".to_string(), }; + let mut connection_mode_line = vec![Span::styled( + connection_mode_string, + Style::default().fg(GHOST_WHITE), + )]; + + if matches!( + self.connection_mode, + ConnectionMode::Automatic | ConnectionMode::UPnP + ) { + connection_mode_line.push(Span::styled(" (", Style::default().fg(GHOST_WHITE))); + + if self.connection_mode == ConnectionMode::Automatic { + connection_mode_line.push(Span::styled("UPnP: ", Style::default().fg(GHOST_WHITE))); + } + + let span = match self.upnp_supported { + true => Span::styled("supported", Style::default().fg(EUCALYPTUS)), + false => Span::styled("unsupported", Style::default().fg(SIZZLING_RED)), + }; + + connection_mode_line.push(span); + + connection_mode_line.push(Span::styled(")", Style::default().fg(GHOST_WHITE))); + } + let connection_mode_row = Row::new(vec![ Cell::new("Connection".to_string()).fg(GHOST_WHITE), - Cell::new(connection_mode_string).fg(LIGHT_PERIWINKLE), + Cell::new(Line::from(connection_mode_line)), ]); let stats_rows = vec![storage_allocated_row, memory_use_row, connection_mode_row]; diff --git a/node-launchpad/src/lib.rs b/node-launchpad/src/lib.rs index 28e8535f42..8d34ed5bb1 100644 --- a/node-launchpad/src/lib.rs +++ b/node-launchpad/src/lib.rs @@ -18,6 +18,7 @@ pub mod node_stats; pub mod style; pub mod system; pub mod tui; +mod upnp; pub mod utils; pub mod widgets; diff --git a/node-launchpad/src/upnp.rs b/node-launchpad/src/upnp.rs new file mode 100644 index 0000000000..546c14a7bd --- /dev/null +++ b/node-launchpad/src/upnp.rs @@ -0,0 +1,45 @@ +extern crate igd_next as igd; + +use local_ip_address::local_ip; +use std::net::SocketAddr; + +pub(crate) fn is_upnp_supported() -> bool { + match igd::search_gateway(Default::default()) { + Err(_) => { + // No UPnP gateway found. + info!("No UPnP gateway found"); + false + } + Ok(gateway) => { + if let Ok(local_ip) = local_ip() { + const PROTOCOL: igd::PortMappingProtocol = igd::PortMappingProtocol::TCP; + const PORT: u16 = 12356; + + let local_addr = SocketAddr::new(local_ip, PORT); + + match gateway.add_port(PROTOCOL, PORT, local_addr, 60, "Autonomi Launchpad test") { + Err(_) => { + // UPnP gateway found, but could not open port. + info!("UPnP gateway found, but could not open port"); + false + } + Ok(()) => { + // UPnP successful. 
+ info!("UPnP successful"); + + // Try to remove port again, but don't care about the result. + // Lease time is only 60s anyway. + let _ = gateway.remove_port(PROTOCOL, PORT); + + true + } + } + } else { + // UPnP gateway found, but could not get local IP + // This shouldn't happen + info!("UPnP gateway found, but could not get local IP"); + false + } + } + } +} From 065a915ae21d92cb20b44d21e4cecdd9e7b78964 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 14 Feb 2025 14:12:04 +0100 Subject: [PATCH 25/69] feat(launchpad): show node connection mode --- node-launchpad/src/components/status.rs | 9 ++++++++- node-launchpad/src/connection_mode.rs | 23 ++++++++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 2e6497a994..28c76af27e 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -16,7 +16,7 @@ use crate::action::OptionsActions; use crate::components::popup::port_range::PORT_ALLOCATION; use crate::components::utils::open_logs; use crate::config::get_launchpad_nodes_data_dir_path; -use crate::connection_mode::ConnectionMode; +use crate::connection_mode::{ConnectionMode, NodeConnectionMode}; use crate::error::ErrorPopup; use crate::node_mgmt::{MaintainNodesArgs, NodeManagement, NodeManagementTask, UpgradeNodesArgs}; use crate::node_mgmt::{PORT_MAX, PORT_MIN}; @@ -65,6 +65,7 @@ const MBPS_WIDTH: usize = 13; const RECORDS_WIDTH: usize = 4; const PEERS_WIDTH: usize = 5; const CONNS_WIDTH: usize = 5; +const MODE_WIDTH: usize = 7; const STATUS_WIDTH: usize = 8; const SPINNER_WIDTH: usize = 1; @@ -247,6 +248,7 @@ impl Status<'_> { records: 0, peers: 0, connections: 0, + mode: NodeConnectionMode::from(node_item), status: NodeStatus::Added, // Set initial status as Added spinner: Throbber::default(), spinner_state: ThrobberState::default(), @@ -281,6 +283,7 @@ impl Status<'_> { records: 0, peers: 0, connections: 0, + mode: NodeConnectionMode::from(node_item), status, spinner: Throbber::default(), spinner_state: ThrobberState::default(), @@ -968,6 +971,7 @@ impl Component for Status<'_> { Constraint::Min(RECORDS_WIDTH as u16), Constraint::Min(PEERS_WIDTH as u16), Constraint::Min(CONNS_WIDTH as u16), + Constraint::Min(MODE_WIDTH as u16), Constraint::Min(STATUS_WIDTH as u16), Constraint::Max(SPINNER_WIDTH as u16), ]; @@ -985,6 +989,7 @@ impl Component for Status<'_> { Cell::new("Recs").fg(COOL_GREY), Cell::new("Peers").fg(COOL_GREY), Cell::new("Conns").fg(COOL_GREY), + Cell::new("Mode").fg(COOL_GREY), Cell::new("Status").fg(COOL_GREY), Cell::new(" ").fg(COOL_GREY), // Spinner ]) @@ -1218,6 +1223,7 @@ pub struct NodeItem<'a> { records: usize, peers: usize, connections: usize, + mode: NodeConnectionMode, status: NodeStatus, spinner: Throbber<'a>, spinner_state: ThrobberState, @@ -1319,6 +1325,7 @@ impl NodeItem<'_> { " ".repeat(CONNS_WIDTH.saturating_sub(self.connections.to_string().len())), self.connections.to_string() ), + self.mode.to_string(), self.status.to_string(), ]; let throbber_area = Rect::new(area.width - 3, area.y + 2 + index as u16, 1, 1); diff --git a/node-launchpad/src/connection_mode.rs b/node-launchpad/src/connection_mode.rs index c3f5290327..ea1097e48e 100644 --- a/node-launchpad/src/connection_mode.rs +++ b/node-launchpad/src/connection_mode.rs @@ -1,7 +1,8 @@ use std::fmt::{Display, Formatter, Result}; +use ant_service_management::NodeServiceData; use serde::{Deserialize, Serialize}; -use strum::EnumIter; +use 
strum::{Display, EnumIter}; #[derive(Clone, Copy, Debug, Default, EnumIter, Eq, Hash, PartialEq)] pub enum ConnectionMode { @@ -55,3 +56,23 @@ impl Serialize for ConnectionMode { serializer.serialize_str(s) } } + +#[derive(Default, Debug, Clone, Serialize, Display)] +pub enum NodeConnectionMode { + UPnP, + Relay, + Manual, + #[default] + Unknown, +} + +impl From<&NodeServiceData> for NodeConnectionMode { + fn from(nsd: &NodeServiceData) -> Self { + match (nsd.upnp, nsd.home_network) { + (true, false) => Self::UPnP, + (false, true) => Self::Relay, + (false, false) => Self::Manual, + _ => Self::Unknown, + } + } +} From fff415d9adf3efacb2e2031c82d43d74bb179a06 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 14 Feb 2025 14:49:09 +0100 Subject: [PATCH 26/69] fix(launchpad): get upnp support async --- node-launchpad/src/action.rs | 3 +++ node-launchpad/src/app.rs | 16 ++++++++++++--- node-launchpad/src/components/status.rs | 27 +++++++++++++++++++------ node-launchpad/src/upnp.rs | 20 +++++++++++++----- 4 files changed, 52 insertions(+), 14 deletions(-) diff --git a/node-launchpad/src/action.rs b/node-launchpad/src/action.rs index 5f4669a4d7..48cd997671 100644 --- a/node-launchpad/src/action.rs +++ b/node-launchpad/src/action.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use crate::upnp::UpnpSupport; use crate::{ connection_mode::ConnectionMode, mode::{InputMode, Scene}, @@ -29,6 +30,8 @@ pub enum Action { StoreRewardsAddress(String), StoreNodesToStart(usize), + SetUpnpSupport(UpnpSupport), + Tick, Render, Resize(u16, u16), diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index 043f7dacc7..a062253cff 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -8,7 +8,7 @@ use std::path::PathBuf; -use crate::upnp::is_upnp_supported; +use crate::upnp::{get_upnp_support, UpnpSupport}; use crate::{ action::Action, components::{ @@ -80,7 +80,7 @@ impl App { .connection_mode .unwrap_or(ConnectionMode::Automatic); - let upnp_supported = is_upnp_supported(); + let upnp_support = UpnpSupport::Loading; let port_from = app_data.port_from.unwrap_or(PORT_MIN); let port_to = app_data.port_to.unwrap_or(PORT_MAX); @@ -102,7 +102,7 @@ impl App { antnode_path, data_dir_path, connection_mode, - upnp_supported, + upnp_support, port_from: Some(port_from), port_to: Some(port_to), }; @@ -167,6 +167,16 @@ impl App { pub async fn run(&mut self) -> Result<()> { let (action_tx, mut action_rx) = mpsc::unbounded_channel(); + let action_tx_clone = action_tx.clone(); + + tokio::spawn(async move { + let upnp_support = tokio::task::spawn_blocking(get_upnp_support) + .await + .unwrap_or_else(|_| UpnpSupport::Unknown); + + let _ = action_tx_clone.send(Action::SetUpnpSupport(upnp_support)); + }); + let mut tui = tui::Tui::new()? 
.tick_rate(self.tick_rate) .frame_rate(self.frame_rate); diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 28c76af27e..11d6a85150 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -22,6 +22,7 @@ use crate::node_mgmt::{MaintainNodesArgs, NodeManagement, NodeManagementTask, Up use crate::node_mgmt::{PORT_MAX, PORT_MIN}; use crate::style::{COOL_GREY, INDIGO, SIZZLING_RED}; use crate::tui::Event; +use crate::upnp::UpnpSupport; use crate::{ action::{Action, StatusActions}, config::Config, @@ -104,7 +105,7 @@ pub struct Status<'a> { // Connection mode connection_mode: ConnectionMode, // UPnP support - upnp_supported: bool, + upnp_support: UpnpSupport, // Port from port_from: Option, // Port to @@ -124,7 +125,7 @@ pub struct StatusConfig { pub allocated_disk_space: usize, pub antnode_path: Option, pub connection_mode: ConnectionMode, - pub upnp_supported: bool, + pub upnp_support: UpnpSupport, pub data_dir_path: PathBuf, pub network_id: Option, pub peers_args: PeersArgs, @@ -154,7 +155,7 @@ impl Status<'_> { antnode_path: config.antnode_path, data_dir_path: config.data_dir_path, connection_mode: config.connection_mode, - upnp_supported: config.upnp_supported, + upnp_support: config.upnp_support, port_from: config.port_from, port_to: config.port_to, error_popup: None, @@ -476,6 +477,10 @@ impl Component for Status<'_> { action_sender, })?; } + Action::SetUpnpSupport(ref upnp_support) => { + debug!("Setting UPnP support: {upnp_support:?}"); + self.upnp_support = upnp_support.clone(); + } Action::StatusActions(status_action) => match status_action { StatusActions::NodesStatsObtained(stats) => { self.node_stats = stats; @@ -830,9 +835,19 @@ impl Component for Status<'_> { connection_mode_line.push(Span::styled("UPnP: ", Style::default().fg(GHOST_WHITE))); } - let span = match self.upnp_supported { - true => Span::styled("supported", Style::default().fg(EUCALYPTUS)), - false => Span::styled("unsupported", Style::default().fg(SIZZLING_RED)), + let span = match self.upnp_support { + UpnpSupport::Supported => { + Span::styled("supported", Style::default().fg(EUCALYPTUS)) + } + UpnpSupport::Unsupported => { + Span::styled("unsupported", Style::default().fg(SIZZLING_RED)) + } + UpnpSupport::Loading => { + Span::styled("loading..", Style::default().fg(LIGHT_PERIWINKLE)) + } + UpnpSupport::Unknown => { + Span::styled("unknown", Style::default().fg(LIGHT_PERIWINKLE)) + } }; connection_mode_line.push(span); diff --git a/node-launchpad/src/upnp.rs b/node-launchpad/src/upnp.rs index 546c14a7bd..668f151ef0 100644 --- a/node-launchpad/src/upnp.rs +++ b/node-launchpad/src/upnp.rs @@ -1,14 +1,24 @@ extern crate igd_next as igd; use local_ip_address::local_ip; +use serde::{Deserialize, Serialize}; use std::net::SocketAddr; +use strum::Display; -pub(crate) fn is_upnp_supported() -> bool { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] +pub enum UpnpSupport { + Supported, + Unsupported, + Loading, + Unknown, +} + +pub(crate) fn get_upnp_support() -> UpnpSupport { match igd::search_gateway(Default::default()) { Err(_) => { // No UPnP gateway found. info!("No UPnP gateway found"); - false + UpnpSupport::Unsupported } Ok(gateway) => { if let Ok(local_ip) = local_ip() { @@ -21,7 +31,7 @@ pub(crate) fn is_upnp_supported() -> bool { Err(_) => { // UPnP gateway found, but could not open port. 
info!("UPnP gateway found, but could not open port"); - false + UpnpSupport::Unsupported } Ok(()) => { // UPnP successful. @@ -31,14 +41,14 @@ pub(crate) fn is_upnp_supported() -> bool { // Lease time is only 60s anyway. let _ = gateway.remove_port(PROTOCOL, PORT); - true + UpnpSupport::Supported } } } else { // UPnP gateway found, but could not get local IP // This shouldn't happen info!("UPnP gateway found, but could not get local IP"); - false + UpnpSupport::Unsupported } } } From 9c68464202950fb3beea77545e8785cf6bd204d3 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 14 Feb 2025 15:53:10 +0100 Subject: [PATCH 27/69] chore(launchpad): change UPnP `unsupported` to `disabled / unsupported` --- node-launchpad/src/components/status.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 11d6a85150..0322be5a84 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -840,7 +840,7 @@ impl Component for Status<'_> { Span::styled("supported", Style::default().fg(EUCALYPTUS)) } UpnpSupport::Unsupported => { - Span::styled("unsupported", Style::default().fg(SIZZLING_RED)) + Span::styled("disabled / unsupported", Style::default().fg(SIZZLING_RED)) } UpnpSupport::Loading => { Span::styled("loading..", Style::default().fg(LIGHT_PERIWINKLE)) From f7bfc8f67a0c57fd8ab694956662babb1a7afa95 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 16 Feb 2025 20:04:14 +0100 Subject: [PATCH 28/69] fix(launchpad): clippy --- node-launchpad/src/app.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index a062253cff..c5686b254b 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -172,7 +172,7 @@ impl App { tokio::spawn(async move { let upnp_support = tokio::task::spawn_blocking(get_upnp_support) .await - .unwrap_or_else(|_| UpnpSupport::Unknown); + .unwrap_or(UpnpSupport::Unknown); let _ = action_tx_clone.send(Action::SetUpnpSupport(upnp_support)); }); From d9dc14e06a48226dd23d220628c3a7cf35907451 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 19 Feb 2025 09:32:20 +0100 Subject: [PATCH 29/69] feat(antctl): stop restarting crashed nodes --- Cargo.lock | 25 +++++++++++++++++++-- Cargo.toml | 3 +++ ant-node-manager/src/add_services/config.rs | 3 +++ ant-node-manager/src/add_services/mod.rs | 1 + ant-node-manager/src/lib.rs | 21 +++++++++++++++++ ant-service-management/src/auditor.rs | 1 + ant-service-management/src/daemon.rs | 1 + ant-service-management/src/faucet.rs | 1 + ant-service-management/src/node.rs | 1 + 9 files changed, 55 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8627e3659a..4f5c7bc685 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2252,6 +2252,15 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbd0f76e066e64fdc5631e3bb46381254deab9ef1158292f27c8c57e3bf3fe59" +[[package]] +name = "codepage" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48f68d061bc2828ae826206326e61251aca94c1e4a5305cf52d9138639c918b4" +dependencies = [ + "encoding_rs", +] + [[package]] name = "color-eyre" version = "0.6.3" @@ -3083,6 +3092,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" +[[package]] +name = "encoding-utils" +version = "0.1.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87b881ab2524b96a5ce932056c7482ba6152e2226fed3936b3e592adeb95ca6d" +dependencies = [ + "codepage", + "encoding_rs", + "windows-sys 0.52.0", +] + [[package]] name = "encoding_rs" version = "0.8.35" @@ -8332,11 +8352,12 @@ dependencies = [ [[package]] name = "service-manager" version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59d7d62c9733631445d1b3fc7854c780088408d4b79a20dd928aaec41854ca3a" +source = "git+https://github.com/mickvandijke/service-manager-rs?rev=bcdde8cbdfd27a8e6b1773a22eb8df76cb0fbd22#bcdde8cbdfd27a8e6b1773a22eb8df76cb0fbd22" dependencies = [ "cfg-if", "dirs", + "encoding-utils", + "encoding_rs", "plist", "which 4.4.2", "xml-rs", diff --git a/Cargo.toml b/Cargo.toml index 9b620b320b..e92d072e3a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,3 +52,6 @@ tag = false [workspace.dependencies] backtrace = "=0.3.71" + +[patch.crates-io] +service-manager = { git = "https://github.com/mickvandijke/service-manager-rs", rev = "bcdde8cbdfd27a8e6b1773a22eb8df76cb0fbd22" } diff --git a/ant-node-manager/src/add_services/config.rs b/ant-node-manager/src/add_services/config.rs index cc75fb3835..2d624204b5 100644 --- a/ant-node-manager/src/add_services/config.rs +++ b/ant-node-manager/src/add_services/config.rs @@ -166,6 +166,7 @@ impl InstallNodeServiceCtxBuilder { program: self.antnode_path.to_path_buf(), username: self.service_user.clone(), working_directory: None, + disable_restart_on_failure: true, }) } } @@ -231,6 +232,7 @@ impl InstallAuditorServiceCtxBuilder { program: self.auditor_path.to_path_buf(), username: Some(self.service_user.to_string()), working_directory: None, + disable_restart_on_failure: false, }) } } @@ -263,6 +265,7 @@ impl InstallFaucetServiceCtxBuilder { program: self.faucet_path.to_path_buf(), username: Some(self.service_user.to_string()), working_directory: None, + disable_restart_on_failure: false, }) } } diff --git a/ant-node-manager/src/add_services/mod.rs b/ant-node-manager/src/add_services/mod.rs index 8f5d995d51..b8da45dbf1 100644 --- a/ant-node-manager/src/add_services/mod.rs +++ b/ant-node-manager/src/add_services/mod.rs @@ -429,6 +429,7 @@ pub fn add_daemon( program: options.daemon_install_bin_path.clone(), username: Some(options.user), working_directory: None, + disable_restart_on_failure: false, }; match service_control.install(install_ctx, false) { diff --git a/ant-node-manager/src/lib.rs b/ant-node-manager/src/lib.rs index e0d6d908d3..e7803420ae 100644 --- a/ant-node-manager/src/lib.rs +++ b/ant-node-manager/src/lib.rs @@ -2665,6 +2665,7 @@ mod tests { program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -2836,6 +2837,7 @@ mod tests { program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3013,6 +3015,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3173,6 +3176,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3342,6 +3346,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: 
true, }), eq(false), ) @@ -3521,6 +3526,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3695,6 +3701,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3864,6 +3871,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4043,6 +4051,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4204,6 +4213,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4368,6 +4378,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4529,6 +4540,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4693,6 +4705,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4854,6 +4867,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -5018,6 +5032,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -5179,6 +5194,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -5343,6 +5359,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -5505,6 +5522,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -5670,6 +5688,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -5844,6 +5863,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -6013,6 +6033,7 @@ network_id: None, program: current_node_bin.to_path_buf(), username: Some("ant".to_string()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) diff --git a/ant-service-management/src/auditor.rs b/ant-service-management/src/auditor.rs index cea9273395..e9090eb15f 100644 --- a/ant-service-management/src/auditor.rs +++ b/ant-service-management/src/auditor.rs @@ -69,6 +69,7 @@ impl ServiceStateActions for AuditorService<'_> { program: self.service_data.auditor_path.to_path_buf(), username: Some(self.service_data.user.to_string()), working_directory: None, + 
disable_restart_on_failure: false, }) } diff --git a/ant-service-management/src/daemon.rs b/ant-service-management/src/daemon.rs index 0b3282ad60..41b26a51bf 100644 --- a/ant-service-management/src/daemon.rs +++ b/ant-service-management/src/daemon.rs @@ -72,6 +72,7 @@ impl ServiceStateActions for DaemonService<'_> { program: self.service_data.daemon_path.clone(), username: None, working_directory: None, + disable_restart_on_failure: false, }; Ok(install_ctx) } diff --git a/ant-service-management/src/faucet.rs b/ant-service-management/src/faucet.rs index 7aa0d15b30..e5b636ac9c 100644 --- a/ant-service-management/src/faucet.rs +++ b/ant-service-management/src/faucet.rs @@ -66,6 +66,7 @@ impl ServiceStateActions for FaucetService<'_> { program: self.service_data.faucet_path.to_path_buf(), username: Some(self.service_data.user.to_string()), working_directory: None, + disable_restart_on_failure: false, }) } diff --git a/ant-service-management/src/node.rs b/ant-service-management/src/node.rs index cd92f6bac0..5858593715 100644 --- a/ant-service-management/src/node.rs +++ b/ant-service-management/src/node.rs @@ -138,6 +138,7 @@ impl ServiceStateActions for NodeService<'_> { program: self.service_data.antnode_path.to_path_buf(), username: self.service_data.user.clone(), working_directory: None, + disable_restart_on_failure: true, }) } From 231f5ac33994c6c7c435efa9209f610a87790c54 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 20 Feb 2025 11:51:15 +0100 Subject: [PATCH 30/69] chore: temp patch service-manager with fork --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f5c7bc685..9ad399a0ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8352,7 +8352,7 @@ dependencies = [ [[package]] name = "service-manager" version = "0.7.1" -source = "git+https://github.com/mickvandijke/service-manager-rs?rev=bcdde8cbdfd27a8e6b1773a22eb8df76cb0fbd22#bcdde8cbdfd27a8e6b1773a22eb8df76cb0fbd22" +source = "git+https://github.com/mickvandijke/service-manager-rs?rev=2a5ca4b0c876ee91dfe58315df41f889ff276e47#2a5ca4b0c876ee91dfe58315df41f889ff276e47" dependencies = [ "cfg-if", "dirs", diff --git a/Cargo.toml b/Cargo.toml index e92d072e3a..bcad22a833 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,4 +54,4 @@ tag = false backtrace = "=0.3.71" [patch.crates-io] -service-manager = { git = "https://github.com/mickvandijke/service-manager-rs", rev = "bcdde8cbdfd27a8e6b1773a22eb8df76cb0fbd22" } +service-manager = { git = "https://github.com/mickvandijke/service-manager-rs", rev = "2a5ca4b0c876ee91dfe58315df41f889ff276e47" } From ec262ab44dac7a30bade23e8e8f0921edc1d9fb9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 21 Feb 2025 12:20:39 +0100 Subject: [PATCH 31/69] feat(antnode): log critical failures to `critical_failure.log` --- ant-networking/src/event/mod.rs | 14 ++++++++++++++ ant-node/src/bin/antnode/log.rs | 11 +++++++++++ ant-node/src/bin/antnode/main.rs | 3 +++ ant-node/src/node.rs | 3 +-- 4 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 ant-node/src/bin/antnode/log.rs diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index 2ad32c079f..cb4215eeba 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -31,6 +31,7 @@ use ant_protocol::{ }; #[cfg(feature = "open-metrics")] use std::collections::HashSet; +use std::fmt::Display; use std::{ collections::BTreeMap, fmt::{Debug, Formatter}, @@ -214,6 +215,19 @@ impl Debug for NetworkEvent { } } +impl Display for 
TerminateNodeReason { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + TerminateNodeReason::HardDiskWriteError => { + write!(f, "HardDiskWriteError") + } + TerminateNodeReason::UpnpGatewayNotFound => { + write!(f, "UPnP gateway not found. Enable UPnP on your router to allow incoming connections or manually port forward.") + } + } + } +} + impl SwarmDriver { /// Check for changes in our close group #[cfg(feature = "open-metrics")] diff --git a/ant-node/src/bin/antnode/log.rs b/ant-node/src/bin/antnode/log.rs new file mode 100644 index 0000000000..3e9dbcc307 --- /dev/null +++ b/ant-node/src/bin/antnode/log.rs @@ -0,0 +1,11 @@ +use std::path::PathBuf; + +const CRITICAL_FAILURE_LOG_FILE: &str = "critical_failure.log"; + +pub fn set_critical_failure(log_output_dest: &str, reason: &str) { + let log_path = PathBuf::from(log_output_dest).join(CRITICAL_FAILURE_LOG_FILE); + let datetime_prefix = chrono::Utc::now(); + let message = format!("[{datetime_prefix}] {reason}"); + std::fs::write(log_path, message) + .unwrap_or_else(|err| error!("Failed to write to {CRITICAL_FAILURE_LOG_FILE}: {}", err)); +} diff --git a/ant-node/src/bin/antnode/main.rs b/ant-node/src/bin/antnode/main.rs index 5e8fbb95b8..e94bd26ad4 100644 --- a/ant-node/src/bin/antnode/main.rs +++ b/ant-node/src/bin/antnode/main.rs @@ -9,9 +9,11 @@ #[macro_use] extern crate tracing; +mod log; mod rpc_service; mod subcommands; +use crate::log::set_critical_failure; use crate::subcommands::EvmNetworkCommand; use ant_bootstrap::{BootstrapCacheStore, PeersArgs}; use ant_evm::{get_evm_network, EvmNetwork, RewardsAddress}; @@ -453,6 +455,7 @@ You can check your reward balance by running: } StopResult::Error(cause) => { error!("Node stopped with error: {}", cause); + set_critical_failure(log_output_dest, &cause.to_string()); return Err(cause); } } diff --git a/ant-node/src/node.rs b/ant-node/src/node.rs index 86ad2f47c4..3a4f885dcf 100644 --- a/ant-node/src/node.rs +++ b/ant-node/src/node.rs @@ -521,12 +521,11 @@ impl Node { } }); } - NetworkEvent::TerminateNode { reason } => { event_header = "TerminateNode"; error!("Received termination from swarm_driver due to {reason:?}"); self.events_channel() - .broadcast(NodeEvent::TerminateNode(format!("{reason:?}"))); + .broadcast(NodeEvent::TerminateNode(format!("{reason}"))); } NetworkEvent::FailedToFetchHolders(bad_nodes) => { event_header = "FailedToFetchHolders"; From 4629c6f841c3f32b1f1e9d7f9689c7203ae8f1b7 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 21 Feb 2025 15:15:57 +0100 Subject: [PATCH 32/69] feat(antctl): show node failure reason in `status` command --- Cargo.lock | 1 + ant-node-manager/src/lib.rs | 29 +++++++++++++++++++++++------ ant-service-management/Cargo.toml | 1 + ant-service-management/src/node.rs | 21 +++++++++++++++++++++ 4 files changed, 46 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ad399a0ab..ace89b2b22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1118,6 +1118,7 @@ dependencies = [ "ant-logging", "ant-protocol", "async-trait", + "chrono", "dirs-next", "libp2p", "libp2p-identity", diff --git a/ant-node-manager/src/lib.rs b/ant-node-manager/src/lib.rs index e7803420ae..57437c7f29 100644 --- a/ant-node-manager/src/lib.rs +++ b/ant-node-manager/src/lib.rs @@ -408,6 +408,14 @@ pub async fn status_report( "PID: {}", node.pid.map_or("-".to_string(), |p| p.to_string()) ); + if node.status == ServiceStatus::Stopped { + if let Some(failure_reason) = node.get_critical_failure() { + println!( + "Failure reason: [{}] {}", + 
+                        failure_reason.0, failure_reason.1
+                    );
+                }
+            }
             println!("Data path: {}", node.data_dir_path.to_string_lossy());
             println!("Log path: {}", node.log_dir_path.to_string_lossy());
             println!("Bin path: {}", node.antnode_path.to_string_lossy());
@@ -448,8 +456,8 @@ pub async fn status_report(
         }
     } else {
         println!(
-            "{:<18} {:<52} {:<7} {:>15}",
-            "Service Name", "Peer ID", "Status", "Connected Peers"
+            "{:<18} {:<52} {:<7} {:>15} {:<}",
+            "Service Name", "Peer ID", "Status", "Connected Peers", "Failure"
         );
         let nodes = node_registry
             .nodes
@@ -462,29 +470,38 @@ pub async fn status_report(
                 .connected_peers
                 .clone()
                 .map_or("-".to_string(), |p| p.len().to_string());
+            let failure_reason = if node.status == ServiceStatus::Stopped {
+                node.get_critical_failure()
+                    .map_or("-".to_string(), |(_time, reason)| reason)
+            } else {
+                "-".to_string()
+            };
             println!(
-                "{:<18} {:<52} {:<7} {:>15}",
+                "{:<18} {:<52} {:<7} {:>15} {:<}",
                 node.service_name,
                 peer_id,
                 format_status(&node.status),
-                connected_peers
+                connected_peers,
+                failure_reason
             );
         }
         if let Some(daemon) = &node_registry.daemon {
             println!(
-                "{:<18} {:<52} {:<7} {:>15}",
+                "{:<18} {:<52} {:<7} {:>15} {:>15}",
                 daemon.service_name,
                 "-",
                 format_status(&daemon.status),
+                "-",
                 "-"
             );
         }
         if let Some(faucet) = &node_registry.faucet {
             println!(
-                "{:<18} {:<52} {:<7} {:>15}",
+                "{:<18} {:<52} {:<7} {:>15} {:>15}",
                 faucet.service_name,
                 "-",
                 format_status(&faucet.status),
+                "-",
                 "-"
             );
         }
diff --git a/ant-service-management/Cargo.toml b/ant-service-management/Cargo.toml
index 16db7269c0..4c486d4967 100644
--- a/ant-service-management/Cargo.toml
+++ b/ant-service-management/Cargo.toml
@@ -29,6 +29,7 @@ tokio = { version = "1.32.0", features = ["time"] }
 tonic = { version = "0.6.2" }
 tracing = { version = "~0.1.26" }
 tracing-core = "0.1.30"
+chrono = "0.4.39"
 
 [build-dependencies]
 # watch out updating this, protoc compiler needs to be installed on all build systems
diff --git a/ant-service-management/src/node.rs b/ant-service-management/src/node.rs
index 5858593715..eafe7bb2e5 100644
--- a/ant-service-management/src/node.rs
+++ b/ant-service-management/src/node.rs
@@ -386,6 +386,27 @@ impl NodeServiceData {
         }
         None
     }
+
+    /// Returns an optional critical failure of the node.
+    pub fn get_critical_failure(&self) -> Option<(chrono::DateTime, String)> {
+        const CRITICAL_FAILURE_LOG_FILE: &str = "critical_failure.log";
+
+        let log_path = self.log_dir_path.join(CRITICAL_FAILURE_LOG_FILE);
+
+        if let Ok(content) = std::fs::read_to_string(log_path) {
+            if let Some((timestamp, message)) = content.split_once(']') {
+                let timestamp_trimmed = timestamp.trim_start_matches('[').trim();
+                if let Ok(datetime) = timestamp_trimmed.parse::>() {
+                    let message_trimmed = message
+                        .trim()
+                        .trim_start_matches("Node terminated due to: ");
+                    return Some((datetime, message_trimmed.to_string()));
+                }
+            }
+        }
+
+        None
+    }
 }
 
 /// Pushes arguments from the `PeersArgs` struct to the provided `args` vector.
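Note on the contract shared by the patches above: antnode's `set_critical_failure` writes a single line of the form `[<UTC timestamp>] <reason>` to `critical_failure.log`, and `NodeServiceData::get_critical_failure` recovers the pair by splitting on the first `]`. The sketch below illustrates that round trip; it is not part of the patch series, and the `DateTime<Utc>` type parameter and the chrono parsing behaviour are assumptions inferred from the diffs rather than verified against the crates.

    // Minimal sketch (assumes the `chrono` crate; types inferred from the diffs above).
    use chrono::{DateTime, Utc};

    // Mirrors `set_critical_failure`: one line, "[<timestamp>] <reason>".
    fn critical_failure_line(reason: &str) -> String {
        format!("[{}] {}", Utc::now(), reason)
    }

    // Mirrors `get_critical_failure`: split on the first ']' and trim the leading '['.
    fn parse_critical_failure(content: &str) -> Option<(DateTime<Utc>, String)> {
        let (timestamp, message) = content.split_once(']')?;
        let datetime = timestamp
            .trim_start_matches('[')
            .trim()
            .parse::<DateTime<Utc>>()
            .ok()?;
        let message = message.trim().trim_start_matches("Node terminated due to: ");
        Some((datetime, message.to_string()))
    }

    fn main() {
        let line = critical_failure_line("Node terminated due to: HardDiskWriteError");
        match parse_critical_failure(&line) {
            Some((when, why)) => println!("[{when}] {why}"),
            // If the default `Display` output of `Utc::now()` is not accepted by
            // `DateTime<Utc>::from_str` in the chrono version in use, this branch is hit.
            None => println!("timestamp format not parseable by this chrono version"),
        }
    }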
From e7cbaf80474906b4d208cc116311d5fc06d2509c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 21 Feb 2025 16:36:30 +0100 Subject: [PATCH 33/69] feat(launchpad): show node failure reason in status window --- node-launchpad/src/components/status.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 0322be5a84..8f1b0bac35 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -68,6 +68,7 @@ const PEERS_WIDTH: usize = 5; const CONNS_WIDTH: usize = 5; const MODE_WIDTH: usize = 7; const STATUS_WIDTH: usize = 8; +const FAILURE_WIDTH: usize = 64; const SPINNER_WIDTH: usize = 1; #[derive(Clone)] @@ -251,6 +252,7 @@ impl Status<'_> { connections: 0, mode: NodeConnectionMode::from(node_item), status: NodeStatus::Added, // Set initial status as Added + failure: node_item.get_critical_failure(), spinner: Throbber::default(), spinner_state: ThrobberState::default(), }; @@ -286,6 +288,7 @@ impl Status<'_> { connections: 0, mode: NodeConnectionMode::from(node_item), status, + failure: node_item.get_critical_failure(), spinner: Throbber::default(), spinner_state: ThrobberState::default(), }) @@ -988,6 +991,7 @@ impl Component for Status<'_> { Constraint::Min(CONNS_WIDTH as u16), Constraint::Min(MODE_WIDTH as u16), Constraint::Min(STATUS_WIDTH as u16), + Constraint::Fill(FAILURE_WIDTH as u16), Constraint::Max(SPINNER_WIDTH as u16), ]; @@ -1006,6 +1010,7 @@ impl Component for Status<'_> { Cell::new("Conns").fg(COOL_GREY), Cell::new("Mode").fg(COOL_GREY), Cell::new("Status").fg(COOL_GREY), + Cell::new("Failure").fg(COOL_GREY), Cell::new(" ").fg(COOL_GREY), // Spinner ]) .style(Style::default().add_modifier(Modifier::BOLD)); @@ -1240,6 +1245,7 @@ pub struct NodeItem<'a> { connections: usize, mode: NodeConnectionMode, status: NodeStatus, + failure: Option<(chrono::DateTime, String)>, spinner: Throbber<'a>, spinner_state: ThrobberState, } @@ -1307,6 +1313,17 @@ impl NodeItem<'_> { _ => {} }; + let failure = self.failure.as_ref().map_or_else( + || "-".to_string(), + |(_dt, msg)| { + if self.status == NodeStatus::Stopped { + msg.clone() + } else { + "-".to_string() + } + }, + ); + let row = vec![ self.name.clone().to_string(), self.version.to_string(), @@ -1342,6 +1359,7 @@ impl NodeItem<'_> { ), self.mode.to_string(), self.status.to_string(), + failure, ]; let throbber_area = Rect::new(area.width - 3, area.y + 2 + index as u16, 1, 1); From 318069f9a84852ff40d588e71633b2e3046a57cb Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 21 Feb 2025 16:47:59 +0100 Subject: [PATCH 34/69] fix(antnode): clear critical_failure log file on boot --- ant-node/src/bin/antnode/log.rs | 7 +++++++ ant-node/src/bin/antnode/main.rs | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/ant-node/src/bin/antnode/log.rs b/ant-node/src/bin/antnode/log.rs index 3e9dbcc307..81c855cd3f 100644 --- a/ant-node/src/bin/antnode/log.rs +++ b/ant-node/src/bin/antnode/log.rs @@ -9,3 +9,10 @@ pub fn set_critical_failure(log_output_dest: &str, reason: &str) { std::fs::write(log_path, message) .unwrap_or_else(|err| error!("Failed to write to {CRITICAL_FAILURE_LOG_FILE}: {}", err)); } + +pub fn reset_critical_failure(log_output_dest: &str) { + let log_path = PathBuf::from(log_output_dest).join(CRITICAL_FAILURE_LOG_FILE); + if log_path.exists() { + let _ = std::fs::remove_file(log_path); + } +} diff --git a/ant-node/src/bin/antnode/main.rs b/ant-node/src/bin/antnode/main.rs index 
e94bd26ad4..0ce4980844 100644 --- a/ant-node/src/bin/antnode/main.rs +++ b/ant-node/src/bin/antnode/main.rs @@ -13,7 +13,7 @@ mod log; mod rpc_service; mod subcommands; -use crate::log::set_critical_failure; +use crate::log::{reset_critical_failure, set_critical_failure}; use crate::subcommands::EvmNetworkCommand; use ant_bootstrap::{BootstrapCacheStore, PeersArgs}; use ant_evm::{get_evm_network, EvmNetwork, RewardsAddress}; @@ -368,6 +368,8 @@ async fn run_node( ) -> Result> { let started_instant = std::time::Instant::now(); + reset_critical_failure(log_output_dest); + info!("Starting node ..."); let running_node = node_builder.build_and_run()?; From d8ee2333e789fc0a0cc0b9cd1f96931a5b677d52 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 21 Feb 2025 16:52:12 +0100 Subject: [PATCH 35/69] chore: update service-manager crate version --- Cargo.lock | 5 +++-- Cargo.toml | 3 --- ant-node-manager/Cargo.toml | 2 +- ant-service-management/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ace89b2b22..9db5274cae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8352,8 +8352,9 @@ dependencies = [ [[package]] name = "service-manager" -version = "0.7.1" -source = "git+https://github.com/mickvandijke/service-manager-rs?rev=2a5ca4b0c876ee91dfe58315df41f889ff276e47#2a5ca4b0c876ee91dfe58315df41f889ff276e47" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cae942acfe9cecd4450998408f52e1c1ee083145226b7b803bd0d82e1c86912" dependencies = [ "cfg-if", "dirs", diff --git a/Cargo.toml b/Cargo.toml index bcad22a833..9b620b320b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,6 +52,3 @@ tag = false [workspace.dependencies] backtrace = "=0.3.71" - -[patch.crates-io] -service-manager = { git = "https://github.com/mickvandijke/service-manager-rs", rev = "2a5ca4b0c876ee91dfe58315df41f889ff276e47" } diff --git a/ant-node-manager/Cargo.toml b/ant-node-manager/Cargo.toml index 07e986164b..d0293919cf 100644 --- a/ant-node-manager/Cargo.toml +++ b/ant-node-manager/Cargo.toml @@ -49,7 +49,7 @@ rand = "0.8.5" semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -service-manager = "0.7.0" +service-manager = "0.8.0" sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/ant-service-management/Cargo.toml b/ant-service-management/Cargo.toml index 4c486d4967..e2e2954293 100644 --- a/ant-service-management/Cargo.toml +++ b/ant-service-management/Cargo.toml @@ -22,7 +22,7 @@ prost = { version = "0.9" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" -service-manager = "0.7.0" +service-manager = "0.8.0" sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } From a5f9f0dd16fd2aae17d7a5c383297eaa3ed3893c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 21 Feb 2025 17:13:58 +0100 Subject: [PATCH 36/69] fix: clippy --- ant-node-manager/src/add_services/tests.rs | 182 ++++++++++++--------- 1 file changed, 104 insertions(+), 78 deletions(-) diff --git a/ant-node-manager/src/add_services/tests.rs b/ant-node-manager/src/add_services/tests.rs index e1b2bea58a..9f6a18bb37 100644 --- a/ant-node-manager/src/add_services/tests.rs +++ b/ant-node-manager/src/add_services/tests.rs @@ -168,7 +168,7 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + 
relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -183,7 +183,7 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -336,7 +336,7 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -351,7 +351,7 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -425,7 +425,7 @@ async fn add_genesis_node_should_return_an_error_if_count_is_greater_than_1() -> delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -440,7 +440,7 @@ async fn add_genesis_node_should_return_an_error_if_count_is_greater_than_1() -> antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -652,7 +652,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -667,7 +667,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -832,7 +832,7 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() delete_antnode_src: true, enable_metrics_server: false, env_variables: env_variables.clone(), - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -847,7 +847,7 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -1018,7 +1018,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -1033,7 +1033,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { antnode_dir_path: temp_dir.to_path_buf(), service_data_dir_path: 
node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -1167,6 +1167,7 @@ async fn add_node_should_create_service_file_with_first_arg() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -1181,7 +1182,7 @@ async fn add_node_should_create_service_file_with_first_arg() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -1196,7 +1197,7 @@ async fn add_node_should_create_service_file_with_first_arg() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -1323,6 +1324,7 @@ async fn add_node_should_create_service_file_with_peers_args() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -1337,7 +1339,7 @@ async fn add_node_should_create_service_file_with_peers_args() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -1352,7 +1354,7 @@ async fn add_node_should_create_service_file_with_peers_args() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -1474,6 +1476,7 @@ async fn add_node_should_create_service_file_with_local_arg() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -1488,7 +1491,7 @@ async fn add_node_should_create_service_file_with_local_arg() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -1503,7 +1506,7 @@ async fn add_node_should_create_service_file_with_local_arg() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -1629,6 +1632,7 @@ async fn add_node_should_create_service_file_with_network_contacts_url_arg() -> .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -1643,7 +1647,7 @@ async fn add_node_should_create_service_file_with_network_contacts_url_arg() -> delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -1658,7 +1662,7 @@ async fn add_node_should_create_service_file_with_network_contacts_url_arg() -> antnode_src_path: 
antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -1783,6 +1787,7 @@ async fn add_node_should_create_service_file_with_testnet_arg() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -1797,7 +1802,7 @@ async fn add_node_should_create_service_file_with_testnet_arg() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -1812,7 +1817,7 @@ async fn add_node_should_create_service_file_with_testnet_arg() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -1934,6 +1939,7 @@ async fn add_node_should_create_service_file_with_ignore_cache_arg() -> Result<( .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -1948,7 +1954,7 @@ async fn add_node_should_create_service_file_with_ignore_cache_arg() -> Result<( delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -1963,7 +1969,7 @@ async fn add_node_should_create_service_file_with_ignore_cache_arg() -> Result<( antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -2086,6 +2092,7 @@ async fn add_node_should_create_service_file_with_custom_bootstrap_cache_path() .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -2100,7 +2107,7 @@ async fn add_node_should_create_service_file_with_custom_bootstrap_cache_path() delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -2115,7 +2122,7 @@ async fn add_node_should_create_service_file_with_custom_bootstrap_cache_path() antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -2231,6 +2238,7 @@ async fn add_node_should_create_service_file_with_network_id() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -2245,7 +2253,7 @@ async fn add_node_should_create_service_file_with_network_id() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -2260,7 +2268,7 @@ async fn 
add_node_should_create_service_file_with_network_id() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -2374,6 +2382,7 @@ async fn add_node_should_use_custom_ip() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -2388,7 +2397,7 @@ async fn add_node_should_use_custom_ip() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -2403,7 +2412,7 @@ async fn add_node_should_use_custom_ip() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -2520,7 +2529,7 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -2535,7 +2544,7 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -2647,6 +2656,7 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -2705,6 +2715,7 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -2763,6 +2774,7 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -2777,7 +2789,7 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -2792,7 +2804,7 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -2896,7 +2908,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -2911,7 +2923,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R antnode_src_path: 
antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3013,7 +3025,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -3028,7 +3040,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3091,7 +3103,7 @@ async fn add_node_should_return_an_error_if_port_and_node_count_do_not_match() - delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -3106,7 +3118,7 @@ async fn add_node_should_return_an_error_if_port_and_node_count_do_not_match() - antnode_dir_path: temp_dir.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3174,7 +3186,7 @@ async fn add_node_should_return_an_error_if_multiple_services_are_specified_with delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -3189,7 +3201,7 @@ async fn add_node_should_return_an_error_if_multiple_services_are_specified_with antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3305,6 +3317,7 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3319,7 +3332,7 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> delete_antnode_src: true, enable_metrics_server: true, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -3334,7 +3347,7 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3442,6 +3455,7 @@ async fn add_node_should_set_max_archived_log_files() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3456,7 +3470,7 @@ async fn add_node_should_set_max_archived_log_files() -> Result<()> { delete_antnode_src: false, enable_metrics_server: false, env_variables: None, - home_network: false, 
+ relay: false, log_format: None, max_archived_log_files: Some(20), max_log_files: None, @@ -3471,7 +3485,7 @@ async fn add_node_should_set_max_archived_log_files() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3580,6 +3594,7 @@ async fn add_node_should_set_max_log_files() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3594,7 +3609,7 @@ async fn add_node_should_set_max_log_files() -> Result<()> { delete_antnode_src: false, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: Some(20), @@ -3609,7 +3624,7 @@ async fn add_node_should_set_max_log_files() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3716,6 +3731,7 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3774,6 +3790,7 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3832,6 +3849,7 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -3846,7 +3864,7 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -3861,7 +3879,7 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -3962,7 +3980,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -3977,7 +3995,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -4080,7 +4098,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + 
relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -4095,7 +4113,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -4199,6 +4217,7 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4250,6 +4269,7 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4301,6 +4321,7 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -4315,7 +4336,7 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -4330,7 +4351,7 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -4442,7 +4463,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -4457,7 +4478,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -4560,7 +4581,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -4575,7 +4596,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -4689,7 +4710,7 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: true, + relay: true, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -4704,7 +4725,7 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() 
antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: true, + no_upnp: false, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -4815,7 +4836,7 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: true, + relay: true, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -4830,7 +4851,7 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -4941,7 +4962,7 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -4956,7 +4977,7 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: true, + no_upnp: false, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -5027,7 +5048,7 @@ async fn add_node_should_return_an_error_if_nat_status_is_none_but_auto_set_nat_ delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: true, + relay: true, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -5042,7 +5063,7 @@ async fn add_node_should_return_an_error_if_nat_status_is_none_but_auto_set_nat_ antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -5121,6 +5142,7 @@ async fn add_auditor_should_add_an_auditor_service() -> Result<()> { program: auditor_install_path.to_path_buf(), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: false, }), eq(false), ) @@ -5269,6 +5291,7 @@ async fn add_auditor_should_include_beta_encryption_key_if_specified() -> Result program: auditor_install_path.to_path_buf(), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: false, }), eq(false), ) @@ -5356,6 +5379,7 @@ async fn add_faucet_should_add_a_faucet_service() -> Result<()> { program: faucet_install_path.to_path_buf(), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: false, }), eq(false), ) @@ -5504,6 +5528,7 @@ async fn add_daemon_should_add_a_daemon_service() -> Result<()> { program: daemon_install_path.to_path_buf(), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: false, }), eq(false), ) @@ -5683,7 +5708,7 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R delete_antnode_src: false, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -5698,7 +5723,7 
@@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -5811,7 +5836,7 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( delete_antnode_src: false, enable_metrics_server: false, env_variables: None, - home_network: true, + relay: true, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -5826,7 +5851,7 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), @@ -5939,7 +5964,7 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { delete_antnode_src: false, enable_metrics_server: false, env_variables: None, - home_network: true, + relay: true, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -5954,7 +5979,7 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: true, version: latest_version.to_string(), @@ -6064,7 +6089,7 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { delete_antnode_src: false, enable_metrics_server: false, env_variables: None, - home_network: true, + relay: true, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -6079,7 +6104,7 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: true, + no_upnp: false, user: Some(get_username()), user_mode: true, version: latest_version.to_string(), @@ -6183,6 +6208,7 @@ async fn add_node_should_auto_restart() -> Result<()> { .join(ANTNODE_FILE_NAME), username: Some(get_username()), working_directory: None, + disable_restart_on_failure: true, }), eq(false), ) @@ -6198,7 +6224,7 @@ async fn add_node_should_auto_restart() -> Result<()> { delete_antnode_src: true, enable_metrics_server: false, env_variables: None, - home_network: false, + relay: false, log_format: None, max_archived_log_files: None, max_log_files: None, @@ -6213,7 +6239,7 @@ async fn add_node_should_auto_restart() -> Result<()> { antnode_src_path: antnode_download_path.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), - upnp: false, + no_upnp: true, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), From 7978e60618d793491416d77bf8b727eddbb880d1 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 19 Feb 2025 11:39:17 +0900 Subject: [PATCH 37/69] feat: merge for archives --- autonomi/src/client/high_level/files/archive_private.rs | 7 +++++++ autonomi/src/client/high_level/files/archive_public.rs | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/autonomi/src/client/high_level/files/archive_private.rs 
b/autonomi/src/client/high_level/files/archive_private.rs index 45a2872c69..e3c6c4b580 100644 --- a/autonomi/src/client/high_level/files/archive_private.rs +++ b/autonomi/src/client/high_level/files/archive_private.rs @@ -124,6 +124,13 @@ impl PrivateArchive { Ok(root_serialized) } + + /// Merge with another archive + /// + /// Note that if there are duplicate entries for the same filename, the files from the other archive will be the ones that are kept. + pub fn merge(&mut self, other: &PrivateArchive) { + self.map.extend(other.map.clone()); + } } impl Client { diff --git a/autonomi/src/client/high_level/files/archive_public.rs b/autonomi/src/client/high_level/files/archive_public.rs index a879f7e7f2..38d9a61530 100644 --- a/autonomi/src/client/high_level/files/archive_public.rs +++ b/autonomi/src/client/high_level/files/archive_public.rs @@ -123,6 +123,13 @@ impl PublicArchive { Ok(root_serialized) } + + /// Merge with another archive + /// + /// Note that if there are duplicate entries for the same filename, the files from the other archive will be the ones that are kept. + pub fn merge(&mut self, other: &PublicArchive) { + self.map.extend(other.map.clone()); + } } impl Client { From b1418883a6b75fe1f05149db32dfd6cf20c876b7 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 19 Feb 2025 12:50:20 +0900 Subject: [PATCH 38/69] feat: support direct download of files or datamaps in the CLI --- ant-cli/src/actions/download.rs | 99 +++++++++++++++++---- autonomi/src/client/high_level/files/mod.rs | 10 +++ 2 files changed, 91 insertions(+), 18 deletions(-) diff --git a/ant-cli/src/actions/download.rs b/ant-cli/src/actions/download.rs index 758c2ba7a4..b6ec5abfcf 100644 --- a/ant-cli/src/actions/download.rs +++ b/ant-cli/src/actions/download.rs @@ -8,10 +8,13 @@ use super::get_progress_bar; use autonomi::{ + chunk::DataMapChunk, client::{ - address::str_to_addr, files::archive_private::PrivateArchiveAccess, - files::archive_public::ArchiveAddr, + address::str_to_addr, + files::{archive_private::PrivateArchiveAccess, archive_public::ArchiveAddr}, + GetError, }, + data::DataAddr, Client, }; use color_eyre::{ @@ -21,19 +24,25 @@ use color_eyre::{ use std::path::PathBuf; pub async fn download(addr: &str, dest_path: &str, client: &Client) -> Result<()> { - let public_address = str_to_addr(addr).ok(); - let private_address = crate::user_data::get_local_private_archive_access(addr) - .inspect_err(|e| error!("Failed to get private archive access: {e}")) - .ok(); - - match (public_address, private_address) { - (Some(public_address), _) => download_public(addr, public_address, dest_path, client).await, - (_, Some(private_address)) => download_private(addr, private_address, dest_path, client).await, - _ => Err(eyre!("Failed to parse data address {addr}")) + let try_public_address = str_to_addr(addr).ok(); + if let Some(public_address) = try_public_address { + return download_public(addr, public_address, dest_path, client).await; + } + + let try_private_address = crate::user_data::get_local_private_archive_access(addr).ok(); + if let Some(private_address) = try_private_address { + return download_private(addr, private_address, dest_path, client).await; + } + + let try_datamap = DataMapChunk::from_hex(addr).ok(); + if let Some(datamap) = try_datamap { + return download_from_datamap(addr, datamap, dest_path, client).await; + } + + Err(eyre!("Failed to parse data address {addr}")) .with_suggestion(|| "Public addresses look like this: 0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7") 
.with_suggestion(|| "Private addresses look like this: 1358645341480028172") - .with_suggestion(|| "Try the `file list` command to get addresses you have access to"), - } + .with_suggestion(|| "Try the `file list` command to get addresses you have access to") } async fn download_private( @@ -45,7 +54,7 @@ async fn download_private( let archive = client .archive_get(&private_address) .await - .wrap_err("Failed to fetch data from address")?; + .wrap_err("Failed to fetch Private Archive from address")?; let progress_bar = get_progress_bar(archive.iter().count() as u64)?; let mut all_errs = vec![]; @@ -88,10 +97,16 @@ async fn download_public( dest_path: &str, client: &Client, ) -> Result<()> { - let archive = client - .archive_get_public(&address) - .await - .wrap_err("Failed to fetch data from address")?; + let archive = match client.archive_get_public(&address).await { + Ok(archive) => archive, + Err(GetError::Deserialization(_)) => { + info!("Failed to deserialize Public Archive from address, trying to fetch data assuming it is a single file instead"); + return download_public_single_file(addr, address, dest_path, client) + .await + .wrap_err("Failed to fetch public file from address"); + } + Err(e) => return Err(e).wrap_err("Failed to fetch Public Archive from address")?, + }; let progress_bar = get_progress_bar(archive.iter().count() as u64)?; let mut all_errs = vec![]; @@ -127,3 +142,51 @@ async fn download_public( Err(eyre!("Errors while downloading data")) } } + +async fn download_public_single_file( + addr: &str, + address: DataAddr, + dest_path: &str, + client: &Client, +) -> Result<()> { + let bytes = match client.data_get_public(&address).await { + Ok(bytes) => bytes, + Err(e) => { + let err = format!("Failed to fetch file at {addr:?}: {e}"); + return Err(eyre!(err)).wrap_err("Failed to fetch file content from address"); + } + }; + + let path = PathBuf::from(dest_path); + let here = PathBuf::from("."); + let parent = path.parent().unwrap_or_else(|| &here); + std::fs::create_dir_all(parent)?; + std::fs::write(path, bytes)?; + info!("Successfully downloaded file at: {addr}"); + println!("Successfully downloaded file at: {addr}"); + Ok(()) +} + +async fn download_from_datamap( + addr: &str, + datamap: DataMapChunk, + dest_path: &str, + client: &Client, +) -> Result<()> { + let bytes = match client.data_get(&datamap).await { + Ok(bytes) => bytes, + Err(e) => { + let err = format!("Failed to fetch file {addr:?}: {e}"); + return Err(eyre!(err)).wrap_err("Failed to fetch file content from address"); + } + }; + + let path = PathBuf::from(dest_path); + let here = PathBuf::from("."); + let parent = path.parent().unwrap_or_else(|| &here); + std::fs::create_dir_all(parent)?; + std::fs::write(path, bytes)?; + info!("Successfully downloaded file from datamap at: {addr}"); + println!("Successfully downloaded file from datamap at: {addr}"); + Ok(()) +} diff --git a/autonomi/src/client/high_level/files/mod.rs b/autonomi/src/client/high_level/files/mod.rs index 36d6fd84e9..f19a5f1c86 100644 --- a/autonomi/src/client/high_level/files/mod.rs +++ b/autonomi/src/client/high_level/files/mod.rs @@ -35,6 +35,10 @@ pub static FILE_UPLOAD_BATCH_SIZE: LazyLock = LazyLock::new(|| { }); /// Metadata for a file in an archive. Time values are UNIX timestamps. +/// +/// The recommended way to create a new [`Metadata`] is to use [`Metadata::new_with_size`]. +/// +/// The [`Metadata::default`] method creates a new [`Metadata`] with 0 as size and the current time for created and modified. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Metadata { /// File creation time on local file system. See [`std::fs::Metadata::created`] for details per OS. @@ -48,6 +52,12 @@ pub struct Metadata { pub extra: Option, } +impl Default for Metadata { + fn default() -> Self { + Self::new_with_size(0) + } +} + impl Metadata { /// Create a new metadata struct with the current time as uploaded, created and modified. pub fn new_with_size(size: u64) -> Self { From 679336afdcb3064f9a0f9617b7959e33aae1b2f8 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 19 Feb 2025 16:07:41 +0900 Subject: [PATCH 39/69] feat: metadata empty constructor for privacy --- autonomi/src/client/high_level/files/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/autonomi/src/client/high_level/files/mod.rs b/autonomi/src/client/high_level/files/mod.rs index f19a5f1c86..2b746908e7 100644 --- a/autonomi/src/client/high_level/files/mod.rs +++ b/autonomi/src/client/high_level/files/mod.rs @@ -39,6 +39,8 @@ pub static FILE_UPLOAD_BATCH_SIZE: LazyLock = LazyLock::new(|| { /// The recommended way to create a new [`Metadata`] is to use [`Metadata::new_with_size`]. /// /// The [`Metadata::default`] method creates a new [`Metadata`] with 0 as size and the current time for created and modified. +/// +/// The [`Metadata::empty`] method creates a new [`Metadata`] filled with 0s. Use this if you don't want to reveal any metadata. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Metadata { /// File creation time on local file system. See [`std::fs::Metadata::created`] for details per OS. @@ -73,6 +75,16 @@ impl Metadata { extra: None, } } + + /// Create a new empty metadata struct + pub fn empty() -> Self { + Self { + created: 0, + modified: 0, + size: 0, + extra: None, + } + } } #[derive(Error, Debug, PartialEq, Eq)] From 9463a4b3b08a4eb4bfa7b3342f0decae0bb0fcd5 Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 20 Feb 2025 11:37:46 +0900 Subject: [PATCH 40/69] feat: improvements and unit test for merge --- ant-cli/src/actions/download.rs | 3 +- .../high_level/files/archive_private.rs | 39 +++++++++++++++++++ .../client/high_level/files/archive_public.rs | 33 ++++++++++++++++ 3 files changed, 74 insertions(+), 1 deletion(-) diff --git a/ant-cli/src/actions/download.rs b/ant-cli/src/actions/download.rs index b6ec5abfcf..6218479dfb 100644 --- a/ant-cli/src/actions/download.rs +++ b/ant-cli/src/actions/download.rs @@ -42,6 +42,7 @@ pub async fn download(addr: &str, dest_path: &str, client: &Client) -> Result<() Err(eyre!("Failed to parse data address {addr}")) .with_suggestion(|| "Public addresses look like this: 0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7") .with_suggestion(|| "Private addresses look like this: 1358645341480028172") + .with_suggestion(|| "You can also use a hex encoded DataMap directly here") .with_suggestion(|| "Try the `file list` command to get addresses you have access to") } @@ -100,7 +101,7 @@ async fn download_public( let archive = match client.archive_get_public(&address).await { Ok(archive) => archive, Err(GetError::Deserialization(_)) => { - info!("Failed to deserialize Public Archive from address, trying to fetch data assuming it is a single file instead"); + info!("Failed to deserialize Public Archive from address {addr}, trying to fetch data assuming it is a single file instead"); return download_public_single_file(addr, address, dest_path, client) .await .wrap_err("Failed to fetch public file from address"); diff 
--git a/autonomi/src/client/high_level/files/archive_private.rs b/autonomi/src/client/high_level/files/archive_private.rs index e3c6c4b580..6f132af08c 100644 --- a/autonomi/src/client/high_level/files/archive_private.rs +++ b/autonomi/src/client/high_level/files/archive_private.rs @@ -164,3 +164,42 @@ impl Client { result } } + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + #[test] + fn test_private_archive_merge() { + let mut arch = PrivateArchive::new(); + let file1 = PathBuf::from_str("file1").unwrap(); + let file2 = PathBuf::from_str("file2").unwrap(); + arch.add_file( + file1.clone(), + DataMapChunk::from_hex("1111").unwrap(), + Metadata::new_with_size(1), + ); + let mut other_arch = PrivateArchive::new(); + other_arch.add_file( + file2.clone(), + DataMapChunk::from_hex("AAAA").unwrap(), + Metadata::new_with_size(2), + ); + arch.merge(&other_arch); + assert_eq!(arch.map().len(), 2); + assert_eq!(arch.map().get(&file1).unwrap().1.size, 1); + assert_eq!(arch.map().get(&file2).unwrap().1.size, 2); + + let mut arch_with_duplicate = PrivateArchive::new(); + arch_with_duplicate.add_file( + file1.clone(), + DataMapChunk::from_hex("BBBB").unwrap(), + Metadata::new_with_size(5), + ); + arch.merge(&arch_with_duplicate); + assert_eq!(arch.map().len(), 2); + assert_eq!(arch.map().get(&file1).unwrap().1.size, 5); + assert_eq!(arch.map().get(&file2).unwrap().1.size, 2); + } +} diff --git a/autonomi/src/client/high_level/files/archive_public.rs b/autonomi/src/client/high_level/files/archive_public.rs index 38d9a61530..ed499bad4e 100644 --- a/autonomi/src/client/high_level/files/archive_public.rs +++ b/autonomi/src/client/high_level/files/archive_public.rs @@ -289,4 +289,37 @@ mod test { // Our old data structure should be forward compatible with the new one. 
assert!(PublicArchive::from_bytes(Bytes::from(arch_p1_ser)).is_ok()); } + + #[test] + fn test_archive_merge() { + let mut arch = PublicArchive::new(); + let file1 = PathBuf::from_str("file1").unwrap(); + let file2 = PathBuf::from_str("file2").unwrap(); + arch.add_file( + file1.clone(), + DataAddr::random(&mut rand::thread_rng()), + Metadata::new_with_size(1), + ); + let mut other_arch = PublicArchive::new(); + other_arch.add_file( + file2.clone(), + DataAddr::random(&mut rand::thread_rng()), + Metadata::new_with_size(2), + ); + arch.merge(&other_arch); + assert_eq!(arch.map().len(), 2); + assert_eq!(arch.map().get(&file1).unwrap().1.size, 1); + assert_eq!(arch.map().get(&file2).unwrap().1.size, 2); + + let mut arch_with_duplicate = PublicArchive::new(); + arch_with_duplicate.add_file( + file1.clone(), + DataAddr::random(&mut rand::thread_rng()), + Metadata::new_with_size(5), + ); + arch.merge(&arch_with_duplicate); + assert_eq!(arch.map().len(), 2); + assert_eq!(arch.map().get(&file1).unwrap().1.size, 5); + assert_eq!(arch.map().get(&file2).unwrap().1.size, 2); + } } From c75e32ee0a0e7df90bed442611ff4d650e0c9d2f Mon Sep 17 00:00:00 2001 From: Ermine Jose Date: Wed, 12 Feb 2025 21:22:01 +0530 Subject: [PATCH 41/69] fix: out of bounds array error in antctl --- ant-node-manager/src/cmd/node.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ant-node-manager/src/cmd/node.rs b/ant-node-manager/src/cmd/node.rs index 3812834811..36e81eb0db 100644 --- a/ant-node-manager/src/cmd/node.rs +++ b/ant-node-manager/src/cmd/node.rs @@ -476,10 +476,12 @@ pub async fn upgrade( ) .await?; - debug!( - "listen addresses for nodes[0]: {:?}", - node_registry.nodes[0].listen_addr - ); + if let Some(node) = node_registry.nodes.first() { + debug!("listen addresses for nodes[0]: {:?}", node.listen_addr); + } else { + debug!("There are no nodes currently added or active"); + } + if !use_force { let node_versions = node_registry .nodes From 6638e914194da34146238c7cbaf53f185529344c Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Sat, 22 Feb 2025 15:17:22 +0000 Subject: [PATCH 42/69] chore(release): release candidate 2025.1.2.4 ================== Crate Versions ================== ant-bootstrap: 0.1.6-rc.1 ant-build-info: 0.1.25-rc.1 ant-cli: 0.3.8-rc.1 ant-evm: 0.1.10-rc.1 ant-logging: 0.2.47-rc.1 ant-metrics: 0.1.26-rc.1 ant-networking: 0.3.6-rc.1 ant-node: 0.3.7-rc.1 ant-node-manager: 0.12.0-rc.1 ant-node-rpc-client: 0.6.43-rc.1 ant-protocol: 1.0.1-rc.1 ant-service-management: 0.4.9-rc.1 ant-token-supplies: 0.1.64-rc.1 autonomi: 0.4.0-rc.1 evmlib: 0.1.10-rc.1 evm-testnet: 0.1.10-rc.1 nat-detection: 0.2.17-rc.1 node-launchpad: 0.5.5-rc.1 test-utils: 0.4.17-rc.1 =================== Binary Versions =================== ant: 0.3.8-rc.1 antctl: 0.12.0-rc.1 antctld: 0.12.0-rc.1 antnode: 0.3.7-rc.1 antnode_rpc_client: 0.6.43-rc.1 nat-detection: 0.2.17-rc.1 node-launchpad: 0.5.5-rc.1 --- Cargo.lock | 38 +++++++++++++++--------------- ant-bootstrap/Cargo.toml | 6 ++--- ant-build-info/Cargo.toml | 2 +- ant-build-info/src/release_info.rs | 2 +- ant-cli/Cargo.toml | 14 +++++------ ant-evm/Cargo.toml | 4 ++-- ant-logging/Cargo.toml | 2 +- ant-metrics/Cargo.toml | 2 +- ant-networking/Cargo.toml | 10 ++++---- ant-node-manager/Cargo.toml | 14 +++++------ ant-node-rpc-client/Cargo.toml | 12 +++++----- ant-node/Cargo.toml | 22 ++++++++--------- ant-protocol/Cargo.toml | 6 ++--- ant-service-management/Cargo.toml | 10 ++++---- ant-token-supplies/Cargo.toml | 2 +- autonomi/Cargo.toml | 12 +++++----- 
evm-testnet/Cargo.toml | 6 ++--- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +++---- node-launchpad/Cargo.toml | 14 +++++------ release-cycle-info | 2 +- test-utils/Cargo.toml | 4 ++-- 22 files changed, 97 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9db5274cae..f59af610d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -772,7 +772,7 @@ dependencies = [ [[package]] name = "ant-bootstrap" -version = "0.1.5" +version = "0.1.6-rc.1" dependencies = [ "ant-logging", "ant-protocol", @@ -796,7 +796,7 @@ dependencies = [ [[package]] name = "ant-build-info" -version = "0.1.24" +version = "0.1.25-rc.1" dependencies = [ "chrono", "tracing", @@ -805,7 +805,7 @@ dependencies = [ [[package]] name = "ant-cli" -version = "0.3.7" +version = "0.3.8-rc.1" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -836,7 +836,7 @@ dependencies = [ [[package]] name = "ant-evm" -version = "0.1.9" +version = "0.1.10-rc.1" dependencies = [ "custom_debug", "evmlib", @@ -858,7 +858,7 @@ dependencies = [ [[package]] name = "ant-logging" -version = "0.2.46" +version = "0.2.47-rc.1" dependencies = [ "chrono", "color-eyre", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "ant-metrics" -version = "0.1.25" +version = "0.1.26-rc.1" dependencies = [ "clap", "color-eyre", @@ -897,7 +897,7 @@ dependencies = [ [[package]] name = "ant-networking" -version = "0.3.5" +version = "0.3.6-rc.1" dependencies = [ "aes-gcm-siv", "ant-bootstrap", @@ -938,7 +938,7 @@ dependencies = [ [[package]] name = "ant-node" -version = "0.3.6" +version = "0.3.7-rc.1" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -994,7 +994,7 @@ dependencies = [ [[package]] name = "ant-node-manager" -version = "0.11.8" +version = "0.12.0-rc.1" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -1037,7 +1037,7 @@ dependencies = [ [[package]] name = "ant-node-rpc-client" -version = "0.6.42" +version = "0.6.43-rc.1" dependencies = [ "ant-build-info", "ant-logging", @@ -1061,7 +1061,7 @@ dependencies = [ [[package]] name = "ant-protocol" -version = "1.0.0" +version = "1.0.1-rc.1" dependencies = [ "ant-build-info", "ant-evm", @@ -1111,7 +1111,7 @@ dependencies = [ [[package]] name = "ant-service-management" -version = "0.4.8" +version = "0.4.9-rc.1" dependencies = [ "ant-bootstrap", "ant-evm", @@ -1139,7 +1139,7 @@ dependencies = [ [[package]] name = "ant-token-supplies" -version = "0.1.63" +version = "0.1.64-rc.1" dependencies = [ "dirs-next", "reqwest 0.11.27", @@ -1577,7 +1577,7 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "autonomi" -version = "0.3.6" +version = "0.4.0-rc.1" dependencies = [ "alloy", "ant-bootstrap", @@ -3186,7 +3186,7 @@ dependencies = [ [[package]] name = "evm-testnet" -version = "0.1.9" +version = "0.1.10-rc.1" dependencies = [ "ant-evm", "clap", @@ -3197,7 +3197,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.9" +version = "0.1.10-rc.1" dependencies = [ "alloy", "dirs-next", @@ -6046,7 +6046,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.16" +version = "0.2.17-rc.1" dependencies = [ "ant-build-info", "ant-networking", @@ -6187,7 +6187,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.5.4" +version = "0.5.5-rc.1" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -8801,7 +8801,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "test-utils" -version = "0.4.16" +version = "0.4.17-rc.1" dependencies = [ "bytes", "color-eyre", 
diff --git a/ant-bootstrap/Cargo.toml b/ant-bootstrap/Cargo.toml index e07357939b..c08b40c487 100644 --- a/ant-bootstrap/Cargo.toml +++ b/ant-bootstrap/Cargo.toml @@ -7,14 +7,14 @@ license = "GPL-3.0" name = "ant-bootstrap" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.5" +version = "0.1.6-rc.1" [features] local = [] [dependencies] -ant-logging = { path = "../ant-logging", version = "0.2.46" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } atomic-write-file = "0.2.2" chrono = { version = "0.4", features = ["serde"] } clap = { version = "4.2.1", features = ["derive", "env"] } diff --git a/ant-build-info/Cargo.toml b/ant-build-info/Cargo.toml index 81b07d37f0..20c4fd23b1 100644 --- a/ant-build-info/Cargo.toml +++ b/ant-build-info/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-build-info" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.24" +version = "0.1.25-rc.1" build = "build.rs" include = ["Cargo.toml", "src/**/*", "build.rs"] diff --git a/ant-build-info/src/release_info.rs b/ant-build-info/src/release_info.rs index 7595767a34..91829eb153 100644 --- a/ant-build-info/src/release_info.rs +++ b/ant-build-info/src/release_info.rs @@ -1,4 +1,4 @@ pub const RELEASE_YEAR: &str = "2025"; pub const RELEASE_MONTH: &str = "1"; pub const RELEASE_CYCLE: &str = "2"; -pub const RELEASE_CYCLE_COUNTER: &str = "3"; +pub const RELEASE_CYCLE_COUNTER: &str = "4"; diff --git a/ant-cli/Cargo.toml b/ant-cli/Cargo.toml index 7b676d561e..273993be90 100644 --- a/ant-cli/Cargo.toml +++ b/ant-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] name = "ant-cli" description = "CLI client for the Autonomi network" license = "GPL-3.0" -version = "0.3.7" +version = "0.3.8-rc.1" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -23,11 +23,11 @@ name = "files" harness = false [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.5" } -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-logging = { path = "../ant-logging", version = "0.2.46" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } -autonomi = { path = "../autonomi", version = "0.3.6", features = [ "loud" ] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.1", features = [ "loud" ] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "0.6.3" const-hex = "1.13.1" @@ -54,7 +54,7 @@ tracing = { version = "~0.1.26" } walkdir = "2.5.0" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.3.6" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.1" } criterion = "0.5.1" eyre = "0.6.8" rand = { version = "~0.8.5", features = ["small_rng"] } diff --git a/ant-evm/Cargo.toml b/ant-evm/Cargo.toml index 1e15b8d6f8..4c571b1a44 100644 --- a/ant-evm/Cargo.toml +++ b/ant-evm/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-evm" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.9" +version = "0.1.10-rc.1" [features] external-signer = ["evmlib/external-signer"] @@ -15,7 +15,7 
@@ test-utils = [] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.9" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } hex = "~0.4.3" lazy_static = "1.4.0" libp2p = { version = "0.55.0", features = ["identify", "kad"] } diff --git a/ant-logging/Cargo.toml b/ant-logging/Cargo.toml index a27bec98ee..f1f56bfbb7 100644 --- a/ant-logging/Cargo.toml +++ b/ant-logging/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-logging" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.2.46" +version = "0.2.47-rc.1" [dependencies] chrono = "~0.4.19" diff --git a/ant-metrics/Cargo.toml b/ant-metrics/Cargo.toml index fb9f77b744..450e932e8a 100644 --- a/ant-metrics/Cargo.toml +++ b/ant-metrics/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-metrics" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.25" +version = "0.1.26-rc.1" [[bin]] path = "src/main.rs" diff --git a/ant-networking/Cargo.toml b/ant-networking/Cargo.toml index e955fce386..971b3f51da 100644 --- a/ant-networking/Cargo.toml +++ b/ant-networking/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-networking" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.3.5" +version = "0.3.6-rc.1" [features] default = [] @@ -16,10 +16,10 @@ open-metrics = ["libp2p/metrics", "prometheus-client", "hyper", "sysinfo"] [dependencies] aes-gcm-siv = "0.11.1" -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.5" } -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-evm = { path = "../ant-evm", version = "0.1.9" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.2" } bytes = { version = "1.0.1", features = ["serde"] } diff --git a/ant-node-manager/Cargo.toml b/ant-node-manager/Cargo.toml index d0293919cf..f1acb1f0cb 100644 --- a/ant-node-manager/Cargo.toml +++ b/ant-node-manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.11.8" +version = "0.12.0-rc.1" [[bin]] name = "antctl" @@ -29,13 +29,13 @@ tcp = [] websockets = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.5" } -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-evm = { path = "../ant-evm", version = "0.1.9" } -ant-logging = { path = "../ant-logging", version = "0.2.46" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } ant-releases = { version = "0.4.0" } -ant-service-management = { path = "../ant-service-management", version = "0.4.8" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.1" } chrono = "~0.4.19" clap = { version = "4.4.6", features = ["derive", "env"] } colored = "2.0.4" diff --git 
a/ant-node-rpc-client/Cargo.toml b/ant-node-rpc-client/Cargo.toml index ed8061b64f..32ab5dee18 100644 --- a/ant-node-rpc-client/Cargo.toml +++ b/ant-node-rpc-client/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-rpc-client" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.6.42" +version = "0.6.43-rc.1" [[bin]] name = "antnode_rpc_client" @@ -17,11 +17,11 @@ path = "src/main.rs" nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-logging = { path = "../ant-logging", version = "0.2.46" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0", features=["rpc"] } -ant-node = { path = "../ant-node", version = "0.3.6" } -ant-service-management = { path = "../ant-service-management", version = "0.4.8" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1", features=["rpc"] } +ant-node = { path = "../ant-node", version = "0.3.7-rc.1" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.1" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } clap = { version = "4.2.1", features = ["derive"] } diff --git a/ant-node/Cargo.toml b/ant-node/Cargo.toml index 358c639054..b29e34a676 100644 --- a/ant-node/Cargo.toml +++ b/ant-node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "The Autonomi node binary" name = "ant-node" -version = "0.3.6" +version = "0.3.7-rc.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -22,13 +22,13 @@ open-metrics = ["ant-networking/open-metrics", "prometheus-client"] otlp = ["ant-logging/otlp"] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.5" } -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-evm = { path = "../ant-evm", version = "0.1.9" } -ant-logging = { path = "../ant-logging", version = "0.2.46", features = ["process-metrics"] } -ant-networking = { path = "../ant-networking", version = "0.3.5" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } -ant-service-management = { path = "../ant-service-management", version = "0.4.8" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1", features = ["process-metrics"] } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.1" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } @@ -77,10 +77,10 @@ walkdir = "~2.5.0" xor_name = "5.0.0" [dev-dependencies] -ant-protocol = { path = "../ant-protocol", version = "1.0.0", features = ["rpc"] } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1", features = ["rpc"] } assert_fs = "1.0.0" -evmlib = { path = "../evmlib", version = "0.1.9" } -autonomi = { path = "../autonomi", version = "0.3.6" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.1" } reqwest = { version = "0.12.2", default-features = false, features = [ 
"rustls-tls-manual-roots", ] } diff --git a/ant-protocol/Cargo.toml b/ant-protocol/Cargo.toml index dd057c72d4..57e6855de5 100644 --- a/ant-protocol/Cargo.toml +++ b/ant-protocol/Cargo.toml @@ -7,15 +7,15 @@ license = "GPL-3.0" name = "ant-protocol" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "1.0.0" +version = "1.0.1-rc.1" [features] default = [] rpc = ["tonic", "prost"] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-evm = { path = "../ant-evm", version = "0.1.9" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" diff --git a/ant-service-management/Cargo.toml b/ant-service-management/Cargo.toml index e2e2954293..80715fd08d 100644 --- a/ant-service-management/Cargo.toml +++ b/ant-service-management/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "ant-service-management" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.8" +version = "0.4.9-rc.1" [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.5" } -ant-evm = { path = "../ant-evm", version = "0.1.9" } -ant-logging = { path = "../ant-logging", version = "0.2.46" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0", features = ["rpc"] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1", features = ["rpc"] } async-trait = "0.1" dirs-next = "2.0.0" libp2p = { version = "0.55.0", features = ["kad"] } diff --git a/ant-token-supplies/Cargo.toml b/ant-token-supplies/Cargo.toml index c788f13d04..267365b247 100644 --- a/ant-token-supplies/Cargo.toml +++ b/ant-token-supplies/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-token-supplies" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.63" +version = "0.1.64-rc.1" [dependencies] diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index b00e6d3459..71ef24c5e2 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.3.6" +version = "0.4.0-rc.1" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -26,10 +26,10 @@ extension-module = ["pyo3/extension-module", "pyo3-async-runtimes"] loud = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.5" } -ant-evm = { path = "../ant-evm", version = "0.1.9" } -ant-networking = { path = "../ant-networking", version = "0.3.5" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } bip39 = "2.0.0" blst = "0.3.13" blstrs = "0.7.1" @@ -56,7 +56,7 @@ xor_name = "5.0.0" [dev-dependencies] alloy = { version = "0.7.3", default-features = false, features = ["contract", "json-rpc", "network", "node-bindings", "provider-http", "reqwest-rustls-tls", "rpc-client", "rpc-types", 
"signer-local", "std"] } -ant-logging = { path = "../ant-logging", version = "0.2.46" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } eyre = "0.6.5" serial_test = "3.2.0" sha2 = "0.10.6" diff --git a/evm-testnet/Cargo.toml b/evm-testnet/Cargo.toml index 8dac620dbf..f765947a4b 100644 --- a/evm-testnet/Cargo.toml +++ b/evm-testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm-testnet" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.9" +version = "0.1.10-rc.1" [dependencies] -ant-evm = { path = "../ant-evm", version = "0.1.9" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.9" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 61d71e3493..88f14c8078 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.9" +version = "0.1.10-rc.1" [features] external-signer = [] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 6a43a51352..494a78158f 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.2.16" +version = "0.2.17-rc.1" [[bin]] name = "nat-detection" @@ -17,9 +17,9 @@ path = "src/main.rs" nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-networking = { path = "../ant-networking", version = "0.3.5" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.1" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } clap = { version = "4.5.4", features = ["derive"] } clap-verbosity-flag = "2.2.0" color-eyre = { version = "0.6", default-features = false } diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 04671549c5..f41b674227 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "TUI for running nodes on the Autonomi network" name = "node-launchpad" -version = "0.5.4" +version = "0.5.5-rc.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -18,13 +18,13 @@ path = "src/bin/tui/main.rs" nightly = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.5" } -ant-build-info = { path = "../ant-build-info", version = "0.1.24" } -ant-evm = { path = "../ant-evm", version = "0.1.9" } -ant-node-manager = { version = "0.11.8", path = "../ant-node-manager" } -ant-protocol = { path = "../ant-protocol", version = "1.0.0" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-node-manager = { version = "0.12.0-rc.1", path = "../ant-node-manager" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } ant-releases = { version = "0.4.0" } -ant-service-management = { version = "0.4.8", path = 
"../ant-service-management" } +ant-service-management = { version = "0.4.9-rc.1", path = "../ant-service-management" } arboard = "3.4.1" atty = "0.2.14" better-panic = "0.3.0" diff --git a/release-cycle-info b/release-cycle-info index 9619250bcb..b02324806c 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -15,4 +15,4 @@ release-year: 2025 release-month: 1 release-cycle: 2 -release-cycle-counter: 3 +release-cycle-counter: 4 diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 98a75121b0..8ab2c16702 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "test-utils" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.16" +version = "0.4.17-rc.1" [dependencies] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.9" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } libp2p = { version = "0.55.0", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } From f7b6a985fcabd86a62f33018141b13b823fbe83e Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 24 Feb 2025 11:43:26 +0000 Subject: [PATCH 43/69] Revert "feat(node): use closer peers as relayer candidate" This reverts commit 885574b845d394110c27c9c8fd9b0142550a4755. --- ant-networking/src/relay_manager.rs | 35 +++++++++++++---------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/ant-networking/src/relay_manager.rs b/ant-networking/src/relay_manager.rs index afbdd916fa..4407b3b3fc 100644 --- a/ant-networking/src/relay_manager.rs +++ b/ant-networking/src/relay_manager.rs @@ -6,21 +6,16 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{ - driver::{BadNodes, NodeBehaviour}, - NetworkAddress, -}; +use crate::driver::{BadNodes, NodeBehaviour}; use itertools::Itertools; use libp2p::swarm::ConnectionId; use libp2p::{ - core::transport::ListenerId, kad::KBucketDistance as Distance, multiaddr::Protocol, Multiaddr, - PeerId, StreamProtocol, Swarm, + core::transport::ListenerId, multiaddr::Protocol, Multiaddr, PeerId, StreamProtocol, Swarm, }; #[cfg(feature = "open-metrics")] use prometheus_client::metrics::gauge::Gauge; -#[cfg(feature = "open-metrics")] -use std::collections::VecDeque; -use std::collections::{BTreeMap, HashMap, HashSet}; +use rand::Rng; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; #[cfg(feature = "open-metrics")] use std::sync::atomic::AtomicU64; use std::time::Instant; @@ -56,7 +51,7 @@ pub(crate) fn is_a_relayed_peer<'a>(mut addrs: impl Iterator, + relay_server_candidates: VecDeque<(PeerId, Multiaddr)>, /// The relay servers that we are waiting for a reservation from. waiting_for_reservation: BTreeMap, /// The relay servers that we are connected to. @@ -155,11 +150,8 @@ impl RelayManager { // Hence here can add the addr directly. 
if let Some(relay_addr) = Self::craft_relay_address(addr, Some(*peer_id)) { debug!("Adding {peer_id:?} with {relay_addr:?} as a potential relay candidate"); - let distance = NetworkAddress::from_peer(self.self_peer_id) - .distance(&NetworkAddress::from_peer(*peer_id)); - let _ = self - .relay_server_candidates - .insert(distance, (*peer_id, relay_addr)); + self.relay_server_candidates + .push_back((*peer_id, relay_addr)); } } } else { @@ -189,10 +181,15 @@ impl RelayManager { // todo: should we remove all our other `listen_addr`? And should we block from adding `add_external_address` if // we're behind nat? - // Pick a closest candidate as a potential relay_server. - if let Some((_distance, (peer_id, relay_addr))) = - self.relay_server_candidates.pop_first() - { + // Pick a random candidate from the vector. Check if empty, or `gen_range` panics for empty range. + let index = if self.relay_server_candidates.is_empty() { + debug!("No more relay candidates."); + break; + } else { + rand::thread_rng().gen_range(0..self.relay_server_candidates.len()) + }; + + if let Some((peer_id, relay_addr)) = self.relay_server_candidates.remove(index) { // skip if detected as a bad node if let Some((_, is_bad)) = bad_nodes.get(&peer_id) { if *is_bad { From 942c365e7d068e2a8c645efe212964887bf46705 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 24 Feb 2025 13:45:40 +0000 Subject: [PATCH 44/69] chore(release): release candidate 2025.1.2.5 ================== Crate Versions ================== ant-bootstrap: 0.1.6-rc.2 ant-build-info: 0.1.25-rc.2 ant-cli: 0.3.8-rc.2 ant-evm: 0.1.10-rc.2 ant-logging: 0.2.47-rc.2 ant-metrics: 0.1.26-rc.2 ant-networking: 0.3.6-rc.2 ant-node: 0.3.7-rc.2 ant-node-manager: 0.12.0-rc.2 ant-node-rpc-client: 0.6.43-rc.2 ant-protocol: 1.0.1-rc.2 ant-service-management: 0.4.9-rc.2 ant-token-supplies: 0.1.64-rc.2 autonomi: 0.4.0-rc.2 evmlib: 0.1.10-rc.2 evm-testnet: 0.1.10-rc.2 nat-detection: 0.2.17-rc.2 node-launchpad: 0.5.5-rc.2 test-utils: 0.4.17-rc.2 =================== Binary Versions =================== ant: 0.3.8-rc.2 antctl: 0.12.0-rc.2 antctld: 0.12.0-rc.2 antnode: 0.3.7-rc.2 antnode_rpc_client: 0.6.43-rc.2 nat-detection: 0.2.17-rc.2 node-launchpad: 0.5.5-rc.2 --- Cargo.lock | 38 +++++++++++++++--------------- ant-bootstrap/Cargo.toml | 6 ++--- ant-build-info/Cargo.toml | 2 +- ant-build-info/src/release_info.rs | 2 +- ant-cli/Cargo.toml | 14 +++++------ ant-evm/Cargo.toml | 4 ++-- ant-logging/Cargo.toml | 2 +- ant-metrics/Cargo.toml | 2 +- ant-networking/Cargo.toml | 10 ++++---- ant-node-manager/Cargo.toml | 14 +++++------ ant-node-rpc-client/Cargo.toml | 12 +++++----- ant-node/Cargo.toml | 22 ++++++++--------- ant-protocol/Cargo.toml | 6 ++--- ant-service-management/Cargo.toml | 10 ++++---- ant-token-supplies/Cargo.toml | 2 +- autonomi/Cargo.toml | 12 +++++----- evm-testnet/Cargo.toml | 6 ++--- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +++---- node-launchpad/Cargo.toml | 14 +++++------ release-cycle-info | 2 +- test-utils/Cargo.toml | 4 ++-- 22 files changed, 97 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f59af610d7..7e61ecbadc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -772,7 +772,7 @@ dependencies = [ [[package]] name = "ant-bootstrap" -version = "0.1.6-rc.1" +version = "0.1.6-rc.2" dependencies = [ "ant-logging", "ant-protocol", @@ -796,7 +796,7 @@ dependencies = [ [[package]] name = "ant-build-info" -version = "0.1.25-rc.1" +version = "0.1.25-rc.2" dependencies = [ "chrono", "tracing", @@ -805,7 +805,7 @@ dependencies = [ 
[[package]] name = "ant-cli" -version = "0.3.8-rc.1" +version = "0.3.8-rc.2" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -836,7 +836,7 @@ dependencies = [ [[package]] name = "ant-evm" -version = "0.1.10-rc.1" +version = "0.1.10-rc.2" dependencies = [ "custom_debug", "evmlib", @@ -858,7 +858,7 @@ dependencies = [ [[package]] name = "ant-logging" -version = "0.2.47-rc.1" +version = "0.2.47-rc.2" dependencies = [ "chrono", "color-eyre", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "ant-metrics" -version = "0.1.26-rc.1" +version = "0.1.26-rc.2" dependencies = [ "clap", "color-eyre", @@ -897,7 +897,7 @@ dependencies = [ [[package]] name = "ant-networking" -version = "0.3.6-rc.1" +version = "0.3.6-rc.2" dependencies = [ "aes-gcm-siv", "ant-bootstrap", @@ -938,7 +938,7 @@ dependencies = [ [[package]] name = "ant-node" -version = "0.3.7-rc.1" +version = "0.3.7-rc.2" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -994,7 +994,7 @@ dependencies = [ [[package]] name = "ant-node-manager" -version = "0.12.0-rc.1" +version = "0.12.0-rc.2" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -1037,7 +1037,7 @@ dependencies = [ [[package]] name = "ant-node-rpc-client" -version = "0.6.43-rc.1" +version = "0.6.43-rc.2" dependencies = [ "ant-build-info", "ant-logging", @@ -1061,7 +1061,7 @@ dependencies = [ [[package]] name = "ant-protocol" -version = "1.0.1-rc.1" +version = "1.0.1-rc.2" dependencies = [ "ant-build-info", "ant-evm", @@ -1111,7 +1111,7 @@ dependencies = [ [[package]] name = "ant-service-management" -version = "0.4.9-rc.1" +version = "0.4.9-rc.2" dependencies = [ "ant-bootstrap", "ant-evm", @@ -1139,7 +1139,7 @@ dependencies = [ [[package]] name = "ant-token-supplies" -version = "0.1.64-rc.1" +version = "0.1.64-rc.2" dependencies = [ "dirs-next", "reqwest 0.11.27", @@ -1577,7 +1577,7 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "autonomi" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" dependencies = [ "alloy", "ant-bootstrap", @@ -3186,7 +3186,7 @@ dependencies = [ [[package]] name = "evm-testnet" -version = "0.1.10-rc.1" +version = "0.1.10-rc.2" dependencies = [ "ant-evm", "clap", @@ -3197,7 +3197,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.10-rc.1" +version = "0.1.10-rc.2" dependencies = [ "alloy", "dirs-next", @@ -6046,7 +6046,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.17-rc.1" +version = "0.2.17-rc.2" dependencies = [ "ant-build-info", "ant-networking", @@ -6187,7 +6187,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.5.5-rc.1" +version = "0.5.5-rc.2" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -8801,7 +8801,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "test-utils" -version = "0.4.17-rc.1" +version = "0.4.17-rc.2" dependencies = [ "bytes", "color-eyre", diff --git a/ant-bootstrap/Cargo.toml b/ant-bootstrap/Cargo.toml index c08b40c487..da3584dd6d 100644 --- a/ant-bootstrap/Cargo.toml +++ b/ant-bootstrap/Cargo.toml @@ -7,14 +7,14 @@ license = "GPL-3.0" name = "ant-bootstrap" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.6-rc.1" +version = "0.1.6-rc.2" [features] local = [] [dependencies] -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } +ant-protocol = { path = 
"../ant-protocol", version = "1.0.1-rc.2" } atomic-write-file = "0.2.2" chrono = { version = "0.4", features = ["serde"] } clap = { version = "4.2.1", features = ["derive", "env"] } diff --git a/ant-build-info/Cargo.toml b/ant-build-info/Cargo.toml index 20c4fd23b1..af6ae83865 100644 --- a/ant-build-info/Cargo.toml +++ b/ant-build-info/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-build-info" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.25-rc.1" +version = "0.1.25-rc.2" build = "build.rs" include = ["Cargo.toml", "src/**/*", "build.rs"] diff --git a/ant-build-info/src/release_info.rs b/ant-build-info/src/release_info.rs index 91829eb153..913fff66c2 100644 --- a/ant-build-info/src/release_info.rs +++ b/ant-build-info/src/release_info.rs @@ -1,4 +1,4 @@ pub const RELEASE_YEAR: &str = "2025"; pub const RELEASE_MONTH: &str = "1"; pub const RELEASE_CYCLE: &str = "2"; -pub const RELEASE_CYCLE_COUNTER: &str = "4"; +pub const RELEASE_CYCLE_COUNTER: &str = "5"; diff --git a/ant-cli/Cargo.toml b/ant-cli/Cargo.toml index 273993be90..e61bcb1a5a 100644 --- a/ant-cli/Cargo.toml +++ b/ant-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] name = "ant-cli" description = "CLI client for the Autonomi network" license = "GPL-3.0" -version = "0.3.8-rc.1" +version = "0.3.8-rc.2" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -23,11 +23,11 @@ name = "files" harness = false [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } -autonomi = { path = "../autonomi", version = "0.4.0-rc.1", features = [ "loud" ] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.2", features = [ "loud" ] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "0.6.3" const-hex = "1.13.1" @@ -54,7 +54,7 @@ tracing = { version = "~0.1.26" } walkdir = "2.5.0" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.4.0-rc.1" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.2" } criterion = "0.5.1" eyre = "0.6.8" rand = { version = "~0.8.5", features = ["small_rng"] } diff --git a/ant-evm/Cargo.toml b/ant-evm/Cargo.toml index 4c571b1a44..a37de6c228 100644 --- a/ant-evm/Cargo.toml +++ b/ant-evm/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-evm" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.1" +version = "0.1.10-rc.2" [features] external-signer = ["evmlib/external-signer"] @@ -15,7 +15,7 @@ test-utils = [] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } hex = "~0.4.3" lazy_static = "1.4.0" libp2p = { version = "0.55.0", features = ["identify", "kad"] } diff --git a/ant-logging/Cargo.toml b/ant-logging/Cargo.toml index f1f56bfbb7..f5b35fe531 100644 --- a/ant-logging/Cargo.toml +++ b/ant-logging/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-logging" readme = "README.md" repository = 
"https://github.com/maidsafe/autonomi" -version = "0.2.47-rc.1" +version = "0.2.47-rc.2" [dependencies] chrono = "~0.4.19" diff --git a/ant-metrics/Cargo.toml b/ant-metrics/Cargo.toml index 450e932e8a..429f465416 100644 --- a/ant-metrics/Cargo.toml +++ b/ant-metrics/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-metrics" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.26-rc.1" +version = "0.1.26-rc.2" [[bin]] path = "src/main.rs" diff --git a/ant-networking/Cargo.toml b/ant-networking/Cargo.toml index 971b3f51da..e85d35edc9 100644 --- a/ant-networking/Cargo.toml +++ b/ant-networking/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-networking" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.3.6-rc.1" +version = "0.3.6-rc.2" [features] default = [] @@ -16,10 +16,10 @@ open-metrics = ["libp2p/metrics", "prometheus-client", "hyper", "sysinfo"] [dependencies] aes-gcm-siv = "0.11.1" -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.2" } bytes = { version = "1.0.1", features = ["serde"] } diff --git a/ant-node-manager/Cargo.toml b/ant-node-manager/Cargo.toml index f1acb1f0cb..79f8011c5b 100644 --- a/ant-node-manager/Cargo.toml +++ b/ant-node-manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.12.0-rc.1" +version = "0.12.0-rc.2" [[bin]] name = "antctl" @@ -29,13 +29,13 @@ tcp = [] websockets = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } ant-releases = { version = "0.4.0" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.1" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.2" } chrono = "~0.4.19" clap = { version = "4.4.6", features = ["derive", "env"] } colored = "2.0.4" diff --git a/ant-node-rpc-client/Cargo.toml b/ant-node-rpc-client/Cargo.toml index 32ab5dee18..c46b7bc8df 100644 --- a/ant-node-rpc-client/Cargo.toml +++ b/ant-node-rpc-client/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-rpc-client" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.6.43-rc.1" +version = "0.6.43-rc.2" [[bin]] name = "antnode_rpc_client" @@ -17,11 +17,11 @@ path = "src/main.rs" 
nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1", features=["rpc"] } -ant-node = { path = "../ant-node", version = "0.3.7-rc.1" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2", features=["rpc"] } +ant-node = { path = "../ant-node", version = "0.3.7-rc.2" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.2" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } clap = { version = "4.2.1", features = ["derive"] } diff --git a/ant-node/Cargo.toml b/ant-node/Cargo.toml index b29e34a676..81ea2ee346 100644 --- a/ant-node/Cargo.toml +++ b/ant-node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "The Autonomi node binary" name = "ant-node" -version = "0.3.7-rc.1" +version = "0.3.7-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -22,13 +22,13 @@ open-metrics = ["ant-networking/open-metrics", "prometheus-client"] otlp = ["ant-logging/otlp"] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1", features = ["process-metrics"] } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.1" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2", features = ["process-metrics"] } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.2" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } @@ -77,10 +77,10 @@ walkdir = "~2.5.0" xor_name = "5.0.0" [dev-dependencies] -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1", features = ["rpc"] } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2", features = ["rpc"] } assert_fs = "1.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } -autonomi = { path = "../autonomi", version = "0.4.0-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.2" } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } diff --git a/ant-protocol/Cargo.toml b/ant-protocol/Cargo.toml index 57e6855de5..c437105f02 100644 --- a/ant-protocol/Cargo.toml +++ b/ant-protocol/Cargo.toml @@ -7,15 +7,15 @@ license = "GPL-3.0" name = "ant-protocol" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "1.0.1-rc.1" +version = 
"1.0.1-rc.2" [features] default = [] rpc = ["tonic", "prost"] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" diff --git a/ant-service-management/Cargo.toml b/ant-service-management/Cargo.toml index 80715fd08d..415d0068a9 100644 --- a/ant-service-management/Cargo.toml +++ b/ant-service-management/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "ant-service-management" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.9-rc.1" +version = "0.4.9-rc.2" [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1", features = ["rpc"] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2", features = ["rpc"] } async-trait = "0.1" dirs-next = "2.0.0" libp2p = { version = "0.55.0", features = ["kad"] } diff --git a/ant-token-supplies/Cargo.toml b/ant-token-supplies/Cargo.toml index 267365b247..59a615a7d9 100644 --- a/ant-token-supplies/Cargo.toml +++ b/ant-token-supplies/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-token-supplies" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.64-rc.1" +version = "0.1.64-rc.2" [dependencies] diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 71ef24c5e2..2de18e7bf0 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -26,10 +26,10 @@ extension-module = ["pyo3/extension-module", "pyo3-async-runtimes"] loud = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } bip39 = "2.0.0" blst = "0.3.13" blstrs = "0.7.1" @@ -56,7 +56,7 @@ xor_name = "5.0.0" [dev-dependencies] alloy = { version = "0.7.3", default-features = false, features = ["contract", "json-rpc", "network", "node-bindings", "provider-http", "reqwest-rustls-tls", "rpc-client", "rpc-types", "signer-local", "std"] } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.1" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } eyre = "0.6.5" serial_test = "3.2.0" sha2 = "0.10.6" diff --git a/evm-testnet/Cargo.toml b/evm-testnet/Cargo.toml index 
f765947a4b..e6379dbf50 100644 --- a/evm-testnet/Cargo.toml +++ b/evm-testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm-testnet" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.1" +version = "0.1.10-rc.2" [dependencies] -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 88f14c8078..4376d7d9f3 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.1" +version = "0.1.10-rc.2" [features] external-signer = [] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 494a78158f..13010d3672 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.2.17-rc.1" +version = "0.2.17-rc.2" [[bin]] name = "nat-detection" @@ -17,9 +17,9 @@ path = "src/main.rs" nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.1" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.2" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } clap = { version = "4.5.4", features = ["derive"] } clap-verbosity-flag = "2.2.0" color-eyre = { version = "0.6", default-features = false } diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index f41b674227..8bb0c0513d 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "TUI for running nodes on the Autonomi network" name = "node-launchpad" -version = "0.5.5-rc.1" +version = "0.5.5-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -18,13 +18,13 @@ path = "src/bin/tui/main.rs" nightly = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.1" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.1" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.1" } -ant-node-manager = { version = "0.12.0-rc.1", path = "../ant-node-manager" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.1" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-node-manager = { version = "0.12.0-rc.2", path = "../ant-node-manager" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } ant-releases = { version = "0.4.0" } -ant-service-management = { version = "0.4.9-rc.1", path = "../ant-service-management" } +ant-service-management = { version = "0.4.9-rc.2", path = "../ant-service-management" } arboard = "3.4.1" atty = "0.2.14" better-panic = "0.3.0" diff --git 
a/release-cycle-info b/release-cycle-info index b02324806c..8509b8e616 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -15,4 +15,4 @@ release-year: 2025 release-month: 1 release-cycle: 2 -release-cycle-counter: 4 +release-cycle-counter: 5 diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 8ab2c16702..abe5c9cdf8 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "test-utils" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.17-rc.1" +version = "0.4.17-rc.2" [dependencies] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } libp2p = { version = "0.55.0", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } From cf21e42e01cd320525dc85fdb4d900246c92cf2e Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 19 Feb 2025 21:49:08 +0530 Subject: [PATCH 45/69] refactor(network): make continuous network discovery into a single module --- ant-networking/src/bootstrap.rs | 188 --------------------- ant-networking/src/driver.rs | 10 +- ant-networking/src/event/kad.rs | 2 +- ant-networking/src/lib.rs | 1 - ant-networking/src/network_discovery.rs | 211 ++++++++++++++++++++++-- 5 files changed, 204 insertions(+), 208 deletions(-) delete mode 100644 ant-networking/src/bootstrap.rs diff --git a/ant-networking/src/bootstrap.rs b/ant-networking/src/bootstrap.rs deleted file mode 100644 index 30511009c9..0000000000 --- a/ant-networking/src/bootstrap.rs +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{driver::PendingGetClosestType, SwarmDriver}; -use libp2p::kad::K_VALUE; -use rand::{rngs::OsRng, Rng}; -use tokio::time::Duration; - -use crate::time::{interval, Instant, Interval}; - -/// The default interval at which NetworkDiscovery is triggered. -/// The interval is increased as more peers are added to the routing table. -pub(crate) const NETWORK_DISCOVER_INTERVAL: Duration = Duration::from_secs(10); - -/// Every NETWORK_DISCOVER_CONNECTED_PEERS_STEP connected peer, -/// we step up the NETWORK_DISCOVER_INTERVAL to slow down process. -const NETWORK_DISCOVER_CONNECTED_PEERS_STEP: u32 = 5; - -/// Slow down the process if the previously added peer has been before LAST_PEER_ADDED_TIME_LIMIT. -/// This is to make sure we don't flood the network with `FindNode` msgs. -const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); - -/// A minimum interval to prevent network discovery got triggered too often -const LAST_NETWORK_DISCOVER_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(90); - -/// The network discovery interval to use if we haven't added any new peers in a while. 
-const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; - -impl SwarmDriver { - /// This functions triggers network discovery based on when the last peer was added to the RT - /// and the number of peers in RT. The function also returns a new interval that is proportional - /// to the number of peers in RT, so more peers in RT, the longer the interval. - pub(crate) async fn run_network_discover_continuously( - &mut self, - current_interval: Duration, - ) -> Option { - let (should_discover, new_interval) = self - .bootstrap - .should_we_discover(self.peers_in_rt as u32, current_interval) - .await; - if should_discover { - self.trigger_network_discovery(); - } - new_interval - } - - pub(crate) fn trigger_network_discovery(&mut self) { - let now = Instant::now(); - - // Find the farthest bucket that is not full. This is used to skip refreshing the RT of farthest full buckets. - let mut first_filled_bucket = 0; - // unfilled kbuckets will not be returned, hence the value shall be: - // * first_filled_kbucket.ilog2() - 1 - for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { - let Some(ilog2) = kbucket.range().0.ilog2() else { - continue; - }; - if kbucket.num_entries() >= K_VALUE.get() { - first_filled_bucket = ilog2; - break; - } - } - let farthest_unfilled_bucket = if first_filled_bucket == 0 { - None - } else { - Some(first_filled_bucket - 1) - }; - - let addrs = self.network_discovery.candidates(farthest_unfilled_bucket); - info!( - "Triggering network discovery with {} candidates. Farthest non full bucket: {farthest_unfilled_bucket:?}", - addrs.len() - ); - // Fetches the candidates and also generates new candidates - for addr in addrs { - // The query_id is tracked here. This is to update the candidate list of network_discovery with the newly - // found closest peers. It may fill up the candidate list of closer buckets which are harder to generate. - let query_id = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_peers(addr.as_bytes()); - let _ = self.pending_get_closest_peers.insert( - query_id, - (PendingGetClosestType::NetworkDiscovery, Default::default()), - ); - } - - self.bootstrap.initiated(); - debug!("Trigger network discovery took {:?}", now.elapsed()); - } -} - -/// Tracks and helps with the continuous kad::bootstrapping process -pub(crate) struct ContinuousNetworkDiscover { - initial_bootstrap_done: bool, - last_peer_added_instant: Instant, - last_network_discover_triggered: Option, -} - -impl ContinuousNetworkDiscover { - pub(crate) fn new() -> Self { - Self { - initial_bootstrap_done: false, - last_peer_added_instant: Instant::now(), - last_network_discover_triggered: None, - } - } - - /// The Kademlia Bootstrap request has been sent successfully. - pub(crate) fn initiated(&mut self) { - self.last_network_discover_triggered = Some(Instant::now()); - } - - /// Notify about a newly added peer to the RT. This will help with slowing down the process. - /// Returns `true` if we have to perform the initial bootstrapping. - pub(crate) fn notify_new_peer(&mut self) -> bool { - self.last_peer_added_instant = Instant::now(); - // true to kick off the initial bootstrapping. - // `run_network_discover_continuously` might kick of so soon that we might - // not have a single peer in the RT and we'd not perform any network discovery for a while. - if !self.initial_bootstrap_done { - self.initial_bootstrap_done = true; - true - } else { - false - } - } - - /// Returns `true` if we should carry out the Kademlia Bootstrap process immediately. 
- /// Also optionally returns the new interval for network discovery. - pub(crate) async fn should_we_discover( - &self, - peers_in_rt: u32, - current_interval: Duration, - ) -> (bool, Option) { - let is_ongoing = if let Some(last_network_discover_triggered) = - self.last_network_discover_triggered - { - last_network_discover_triggered.elapsed() < LAST_NETWORK_DISCOVER_TRIGGERED_TIME_LIMIT - } else { - false - }; - let should_network_discover = !is_ongoing && peers_in_rt >= 1; - - // if it has been a while (LAST_PEER_ADDED_TIME_LIMIT) since we have added a new peer, - // slowdown the network discovery process. - // Don't slow down if we haven't even added one peer to our RT. - if self.last_peer_added_instant.elapsed() > LAST_PEER_ADDED_TIME_LIMIT && peers_in_rt != 0 { - // To avoid a heart beat like cpu usage due to the 1K candidates generation, - // randomize the interval within certain range - let no_peer_added_slowdown_interval: u64 = OsRng.gen_range( - NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S / 2..NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S, - ); - let no_peer_added_slowdown_interval_duration = - Duration::from_secs(no_peer_added_slowdown_interval); - info!( - "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous network discovery process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" - ); - - let mut new_interval = interval(no_peer_added_slowdown_interval_duration); - new_interval.tick().await; - - return (should_network_discover, Some(new_interval)); - } - - // increment network_discover_interval in steps of NETWORK_DISCOVER_INTERVAL every NETWORK_DISCOVER_CONNECTED_PEERS_STEP - let step = peers_in_rt / NETWORK_DISCOVER_CONNECTED_PEERS_STEP; - let step = std::cmp::max(1, step); - let new_interval = NETWORK_DISCOVER_INTERVAL * step; - let new_interval = if new_interval > current_interval { - info!("More peers have been added to our RT!. Slowing down the continuous network discovery process. Old interval: {current_interval:?}, New interval: {new_interval:?}"); - - let mut interval = interval(new_interval); - interval.tick().await; - - Some(interval) - } else { - None - }; - (should_network_discover, new_interval) - } -} diff --git a/ant-networking/src/driver.rs b/ant-networking/src/driver.rs index 45874e0fae..40d35d5bf2 100644 --- a/ant-networking/src/driver.rs +++ b/ant-networking/src/driver.rs @@ -7,7 +7,6 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use crate::{ - bootstrap::{ContinuousNetworkDiscover, NETWORK_DISCOVER_INTERVAL}, circular_vec::CircularVec, cmd::{LocalSwarmCmd, NetworkSwarmCmd}, config::GetRecordCfg, @@ -18,7 +17,7 @@ use crate::{ fifo_register::FifoRegister, log_markers::Marker, multiaddr_pop_p2p, - network_discovery::NetworkDiscovery, + network_discovery::{NetworkDiscovery, NETWORK_DISCOVER_INTERVAL}, record_store::{ClientRecordStore, NodeRecordStore, NodeRecordStoreConfig}, record_store_api::UnifiedRecordStore, relay_manager::RelayManager, @@ -554,7 +553,6 @@ impl NetworkBuilder { let swarm = Swarm::new(transport, behaviour, peer_id, swarm_config); - let bootstrap = ContinuousNetworkDiscover::new(); let replication_fetcher = ReplicationFetcher::new(peer_id, network_event_sender.clone()); // Enable relay manager for nodes behind home network @@ -591,7 +589,6 @@ impl NetworkBuilder { #[cfg(feature = "open-metrics")] close_group: Vec::with_capacity(CLOSE_GROUP_SIZE), peers_in_rt: 0, - bootstrap, bootstrap_cache: self.bootstrap_cache, relay_manager, connected_relay_clients: Default::default(), @@ -687,7 +684,7 @@ pub struct SwarmDriver { #[cfg(feature = "open-metrics")] pub(crate) close_group: Vec, pub(crate) peers_in_rt: usize, - pub(crate) bootstrap: ContinuousNetworkDiscover, + pub(crate) network_discovery: NetworkDiscovery, pub(crate) bootstrap_cache: Option, pub(crate) external_address_manager: Option, pub(crate) relay_manager: Option, @@ -711,9 +708,6 @@ pub struct SwarmDriver { pub(crate) pending_get_record: PendingGetRecord, /// A list of the most recent peers we have dialed ourselves. Old dialed peers are evicted once the vec fills up. pub(crate) dialed_peers: CircularVec, - // A list of random `PeerId` candidates that falls into kbuckets, - // This is to ensure a more accurate network discovery. - pub(crate) network_discovery: NetworkDiscovery, pub(crate) bootstrap_peers: BTreeMap, HashSet>, // Peers that having live connection to. Any peer got contacted during kad network query // will have live connection established. And they may not appear in the RT. diff --git a/ant-networking/src/event/kad.rs b/ant-networking/src/event/kad.rs index 6d95016942..aa658baeb9 100644 --- a/ant-networking/src/event/kad.rs +++ b/ant-networking/src/event/kad.rs @@ -249,7 +249,7 @@ impl SwarmDriver { self.update_on_peer_addition(peer, addresses); // This should only happen once - if self.bootstrap.notify_new_peer() { + if self.network_discovery.notify_new_peer() { info!("Performing the first bootstrap"); self.trigger_network_discovery(); } diff --git a/ant-networking/src/lib.rs b/ant-networking/src/lib.rs index 8abaaf9ead..0b76aeb7cd 100644 --- a/ant-networking/src/lib.rs +++ b/ant-networking/src/lib.rs @@ -9,7 +9,6 @@ #[macro_use] extern crate tracing; -mod bootstrap; mod circular_vec; mod cmd; mod config; diff --git a/ant-networking/src/network_discovery.rs b/ant-networking/src/network_discovery.rs index 39fa45e51c..7e33728d96 100644 --- a/ant-networking/src/network_discovery.rs +++ b/ant-networking/src/network_discovery.rs @@ -6,31 +6,222 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use crate::time::Instant; +use crate::time::{interval, Instant, Interval}; +use crate::{driver::PendingGetClosestType, SwarmDriver}; use ant_protocol::NetworkAddress; +use libp2p::kad::K_VALUE; use libp2p::{kad::KBucketKey, PeerId}; +use rand::rngs::OsRng; use rand::{thread_rng, Rng}; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use std::collections::{btree_map::Entry, BTreeMap}; +use tokio::time::Duration; -// The number of PeerId to generate when starting an instance of NetworkDiscovery +// The number of PeerId to generate when starting an instance of NetworkDiscoveryCandidate. const INITIAL_GENERATION_ATTEMPTS: usize = 10_000; -// The number of PeerId to generate during each invocation to refresh our candidates +// The number of PeerId to generate during each invocation to refresh the candidate list. const GENERATION_ATTEMPTS: usize = 1_000; -// The max number of PeerId to keep per bucket +// The max number of PeerId to keep per bucket. const MAX_PEERS_PER_BUCKET: usize = 5; +/// The default interval at which NetworkDiscovery is triggered. +/// The interval is increased as more peers are added to the routing table. +pub(crate) const NETWORK_DISCOVER_INTERVAL: Duration = Duration::from_secs(10); + +/// For every NETWORK_DISCOVER_CONNECTED_PEERS_STEP connected peer, we step up the +/// NETWORK_DISCOVER_INTERVAL to slow down process. +const NETWORK_DISCOVER_CONNECTED_PEERS_STEP: u32 = 5; + +/// Slow down the process if the previously added peer has been before LAST_PEER_ADDED_TIME_LIMIT. +/// This is to make sure we don't flood the network with `FindNode` msgs. +const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); + +/// A minimum interval to prevent network discovery got triggered too often +const LAST_NETWORK_DISCOVER_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(90); + +/// The network discovery interval to use if we haven't added any new peers in a while. +const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; + +impl SwarmDriver { + /// This functions triggers network discovery based on when the last peer was added to the RT + /// and the number of peers in RT. The function also returns a new interval that is proportional + /// to the number of peers in RT, so more peers in RT, the longer the interval. + pub(crate) async fn run_network_discover_continuously( + &mut self, + current_interval: Duration, + ) -> Option { + let (should_discover, new_interval) = self + .network_discovery + .should_we_discover(self.peers_in_rt as u32, current_interval) + .await; + if should_discover { + self.trigger_network_discovery(); + } + new_interval + } + + pub(crate) fn trigger_network_discovery(&mut self) { + let now = Instant::now(); + + // Find the farthest bucket that is not full. This is used to skip refreshing the RT of farthest full buckets. + let mut first_filled_bucket = 0; + // unfilled kbuckets will not be returned, hence the value shall be: + // * first_filled_kbucket.ilog2() - 1 + for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { + let Some(ilog2) = kbucket.range().0.ilog2() else { + continue; + }; + if kbucket.num_entries() >= K_VALUE.get() { + first_filled_bucket = ilog2; + break; + } + } + let farthest_unfilled_bucket = if first_filled_bucket == 0 { + None + } else { + Some(first_filled_bucket - 1) + }; + + let addrs = self + .network_discovery + .candidates + .get_candidates(farthest_unfilled_bucket); + info!( + "Triggering network discovery with {} candidates. 
Farthest non full bucket: {farthest_unfilled_bucket:?}", + addrs.len() + ); + // Fetches the candidates and also generates new candidates + for addr in addrs { + // The query_id is tracked here. This is to update the candidate list of network_discovery with the newly + // found closest peers. It may fill up the candidate list of closer buckets which are harder to generate. + let query_id = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_peers(addr.as_bytes()); + let _ = self.pending_get_closest_peers.insert( + query_id, + (PendingGetClosestType::NetworkDiscovery, Default::default()), + ); + } + + self.network_discovery.initiated(); + debug!("Trigger network discovery took {:?}", now.elapsed()); + } +} + +/// The process for discovering new peers in the network. +/// This is done by generating random NetworkAddresses that are closest to our key and querying the network for the +/// closest peers to those Addresses. +/// +/// The process slows down based on the number of peers in the routing table and the time since the last peer was added. +pub(crate) struct NetworkDiscovery { + initial_bootstrap_done: bool, + last_peer_added_instant: Instant, + last_network_discover_triggered: Option, + candidates: NetworkDiscoveryCandidates, +} + +impl NetworkDiscovery { + pub(crate) fn new(self_peer_id: &PeerId) -> Self { + Self { + initial_bootstrap_done: false, + last_peer_added_instant: Instant::now(), + last_network_discover_triggered: None, + candidates: NetworkDiscoveryCandidates::new(self_peer_id), + } + } + + /// The Kademlia Bootstrap request has been sent successfully. + pub(crate) fn initiated(&mut self) { + self.last_network_discover_triggered = Some(Instant::now()); + } + + /// Notify about a newly added peer to the RT. This will help with slowing down the process. + /// Returns `true` if we have to perform the initial bootstrapping. + pub(crate) fn notify_new_peer(&mut self) -> bool { + self.last_peer_added_instant = Instant::now(); + // true to kick off the initial bootstrapping. + // `run_network_discover_continuously` might kick of so soon that we might + // not have a single peer in the RT and we'd not perform any network discovery for a while. + if !self.initial_bootstrap_done { + self.initial_bootstrap_done = true; + true + } else { + false + } + } + + /// Returns `true` if we should carry out the Kademlia Bootstrap process immediately. + /// Also optionally returns the new interval for network discovery. + pub(crate) async fn should_we_discover( + &self, + peers_in_rt: u32, + current_interval: Duration, + ) -> (bool, Option) { + let is_ongoing = if let Some(last_network_discover_triggered) = + self.last_network_discover_triggered + { + last_network_discover_triggered.elapsed() < LAST_NETWORK_DISCOVER_TRIGGERED_TIME_LIMIT + } else { + false + }; + let should_network_discover = !is_ongoing && peers_in_rt >= 1; + + // if it has been a while (LAST_PEER_ADDED_TIME_LIMIT) since we have added a new peer, + // slowdown the network discovery process. + // Don't slow down if we haven't even added one peer to our RT. 
+ if self.last_peer_added_instant.elapsed() > LAST_PEER_ADDED_TIME_LIMIT && peers_in_rt != 0 { + // To avoid a heart beat like cpu usage due to the 1K candidates generation, + // randomize the interval within certain range + let no_peer_added_slowdown_interval: u64 = OsRng.gen_range( + NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S / 2..NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S, + ); + let no_peer_added_slowdown_interval_duration = + Duration::from_secs(no_peer_added_slowdown_interval); + info!( + "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous network discovery process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" + ); + + let mut new_interval = interval(no_peer_added_slowdown_interval_duration); + new_interval.tick().await; + + return (should_network_discover, Some(new_interval)); + } + + // increment network_discover_interval in steps of NETWORK_DISCOVER_INTERVAL every NETWORK_DISCOVER_CONNECTED_PEERS_STEP + let step = peers_in_rt / NETWORK_DISCOVER_CONNECTED_PEERS_STEP; + let step = std::cmp::max(1, step); + let new_interval = NETWORK_DISCOVER_INTERVAL * step; + let new_interval = if new_interval > current_interval { + info!("More peers have been added to our RT!. Slowing down the continuous network discovery process. Old interval: {current_interval:?}, New interval: {new_interval:?}"); + + let mut interval = interval(new_interval); + interval.tick().await; + + Some(interval) + } else { + None + }; + (should_network_discover, new_interval) + } + + pub(crate) fn handle_get_closest_query(&mut self, closest_peers: Vec) { + self.candidates.handle_get_closest_query(closest_peers); + } +} + /// Keep track of NetworkAddresses belonging to every bucket (if we can generate them with reasonable effort) /// which we can then query using Kad::GetClosestPeers to effectively fill our RT. #[derive(Debug, Clone)] -pub(crate) struct NetworkDiscovery { +struct NetworkDiscoveryCandidates { self_key: KBucketKey, candidates: BTreeMap>, } -impl NetworkDiscovery { - /// Create a new instance of NetworkDiscovery and tries to populate each bucket with random peers. - pub(crate) fn new(self_peer_id: &PeerId) -> Self { +impl NetworkDiscoveryCandidates { + /// Create a new instance of NetworkDiscoveryCandidates and tries to populate each bucket with random peers. + fn new(self_peer_id: &PeerId) -> Self { let start = Instant::now(); let self_key = KBucketKey::from(*self_peer_id); let candidates = Self::generate_candidates(&self_key, INITIAL_GENERATION_ATTEMPTS); @@ -52,7 +243,7 @@ impl NetworkDiscovery { } /// The result from the kad::GetClosestPeers are again used to update our kbucket. - pub(crate) fn handle_get_closest_query(&mut self, closest_peers: Vec) { + fn handle_get_closest_query(&mut self, closest_peers: Vec) { let now = Instant::now(); let candidates_map: BTreeMap> = closest_peers @@ -83,7 +274,7 @@ impl NetworkDiscovery { /// Returns one random candidate per bucket. Also tries to refresh the candidate list. /// Set the farthest_bucket to get candidates that are closer than or equal to the farthest_bucket. 
- pub(crate) fn candidates(&mut self, farthest_bucket: Option) -> Vec<&NetworkAddress> { + fn get_candidates(&mut self, farthest_bucket: Option) -> Vec<&NetworkAddress> { self.try_refresh_candidates(); let mut rng = thread_rng(); From 4f58c023a6453af01831c9421e9c40fb81cb9852 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 21 Feb 2025 00:24:34 +0530 Subject: [PATCH 46/69] feat(network): organically bootstrap network and limit concurrency --- ant-networking/src/bootstrap.rs | 237 +++++++++++++++++++++++++++++ ant-networking/src/driver.rs | 7 +- ant-networking/src/event/swarm.rs | 29 ++++ ant-networking/src/lib.rs | 12 +- ant-node/src/bin/antnode/main.rs | 6 +- ant-node/src/node.rs | 44 ++---- ant-node/src/python.rs | 6 +- ant-node/src/spawn/node_spawner.rs | 9 +- autonomi/src/client/mod.rs | 20 +-- 9 files changed, 314 insertions(+), 56 deletions(-) create mode 100644 ant-networking/src/bootstrap.rs diff --git a/ant-networking/src/bootstrap.rs b/ant-networking/src/bootstrap.rs new file mode 100644 index 0000000000..6f61d9e73c --- /dev/null +++ b/ant-networking/src/bootstrap.rs @@ -0,0 +1,237 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::{driver::NodeBehaviour, multiaddr_get_p2p, multiaddr_pop_p2p}; +use libp2p::{ + core::ConnectedPoint, + swarm::{ + dial_opts::{DialOpts, PeerCondition}, + DialError, + }, + Multiaddr, PeerId, Swarm, +}; +use rand::seq::SliceRandom; +use std::collections::{HashSet, VecDeque}; + +/// The max number of concurrent dials to be made during the initial bootstrap process. +const CONCURRENT_DIALS: usize = 3; + +/// The max number of peers to be added to the routing table before stopping the initial bootstrap process. +const MAX_PEERS_BEFORE_TERMINATION: usize = 5; + +pub(crate) struct InitialBootstrap { + initial_addrs: VecDeque, + ongoing_dials: HashSet, + bootstrap_completed: bool, +} + +impl InitialBootstrap { + pub(crate) fn new(mut initial_addrs: Vec) -> Self { + let bootstrap_completed = if initial_addrs.is_empty() { + info!("No initial addresses provided for bootstrap. Initial bootstrap process will not be triggered."); + true + } else { + let mut rng = rand::thread_rng(); + initial_addrs.shuffle(&mut rng); + false + }; + + Self { + initial_addrs: initial_addrs.into(), + ongoing_dials: Default::default(), + bootstrap_completed, + } + } + + /// Trigger the initial bootstrap process. + /// + /// This will start dialing CONCURRENT_DIALS peers at a time from the initial addresses. If we have a successful + /// dial and if a few peer are added to the routing table, we stop the initial bootstrap process. 
+ pub(crate) fn trigger_initial_bootstrap( + &mut self, + swarm: &mut Swarm, + peers_in_rt: usize, + ) { + if !self.trigger_condition(peers_in_rt, true) { + return; + } + + while self.ongoing_dials.len() <= CONCURRENT_DIALS && !self.initial_addrs.is_empty() { + let Some(mut addr) = self.initial_addrs.pop_front() else { + continue; + }; + + let addr_clone = addr.clone(); + let peer_id = multiaddr_pop_p2p(&mut addr); + + let opts = match peer_id { + Some(peer_id) => DialOpts::peer_id(peer_id) + // If we have a peer ID, we can prevent simultaneous dials. + .condition(PeerCondition::NotDialing) + .addresses(vec![addr]) + .build(), + None => DialOpts::unknown_peer_id().address(addr).build(), + }; + + info!("Trying to dial peer with address: {addr_clone}",); + + match swarm.dial(opts) { + Ok(()) => { + info!("Dial attempt initiated for peer with address: {addr_clone}. Ongoing dial attempts: {}", self.ongoing_dials.len()+1); + self.ongoing_dials.insert(addr_clone); + } + Err(err) => match err { + DialError::LocalPeerId { .. } => { + warn!("Failed to dial peer with address: {addr_clone}. This is our own peer ID. Dialing the next peer"); + } + DialError::NoAddresses => { + error!("Failed to dial peer with address: {addr_clone}. No addresses found. Dialing the next peer"); + } + DialError::DialPeerConditionFalse(_) => { + warn!("We are already dialing the peer with address: {addr_clone}. Dialing the next peer. This error is harmless."); + } + DialError::Aborted => { + error!(" Pending connection attempt has been aborted for {addr_clone}. Dialing the next peer."); + } + DialError::WrongPeerId { obtained, .. } => { + error!("The peer identity obtained on the connection did not match the one that was expected. Expected: {peer_id:?}, obtained: {obtained}. Dialing the next peer."); + } + DialError::Denied { cause } => { + error!("The dialing attempt was denied by the remote peer. Cause: {cause}. Dialing the next peer."); + } + DialError::Transport(items) => { + error!("Failed to dial peer with address: {addr_clone}. Transport error: {items:?}. Dialing the next peer."); + } + }, + } + } + } + + /// Check if the initial bootstrap process should be triggered. + /// Also update bootstrap_completed flag if the process is completed. + fn trigger_condition(&mut self, peers_in_rt: usize, verbose: bool) -> bool { + if self.bootstrap_completed { + if verbose { + info!("Initial bootstrap process has already completed successfully."); + } else { + trace!("Initial bootstrap process has already completed successfully."); + } + return false; + } + + if peers_in_rt >= MAX_PEERS_BEFORE_TERMINATION { + // This will terminate the loop + self.bootstrap_completed = true; + self.initial_addrs.clear(); + self.ongoing_dials.clear(); + + if verbose { + info!("Initial bootstrap process completed successfully. We have {peers_in_rt} peers in the routing table."); + } else { + trace!("Initial bootstrap process completed successfully. We have {peers_in_rt} peers in the routing table."); + } + return false; + } + + if self.ongoing_dials.len() >= CONCURRENT_DIALS { + if verbose { + info!( + "Initial bootstrap has {} ongoing dials. Not dialing anymore.", + self.ongoing_dials.len() + ); + } else { + debug!( + "Initial bootstrap has {} ongoing dials. Not dialing anymore.", + self.ongoing_dials.len() + ); + } + return false; + } + + if peers_in_rt < MAX_PEERS_BEFORE_TERMINATION && self.initial_addrs.is_empty() { + if verbose { + info!("We have {peers_in_rt} peers in RT, but no more addresses to dial. 
Stopping initial bootstrap."); + } else { + debug!("We have {peers_in_rt} peers in RT, but no more addresses to dial. Stopping initial bootstrap."); + } + return false; + } + + if self.initial_addrs.is_empty() { + if verbose { + warn!("Initial bootstrap has no more addresses to dial."); + } else { + debug!("Initial bootstrap has no more addresses to dial."); + } + return false; + } + + true + } + + pub(crate) fn on_connection_established( + &mut self, + endpoint: &ConnectedPoint, + swarm: &mut Swarm, + peers_in_rt: usize, + ) { + if self.bootstrap_completed { + return; + } + + if let ConnectedPoint::Dialer { address, .. } = endpoint { + if !self.ongoing_dials.remove(address) { + // try to remove via peer Id, to not block the bootstrap process. + // The only concern with the following removal is that we might increase the number of + // dials/concurrent dials, which is fine. + if let Some(peer_id) = multiaddr_get_p2p(address) { + self.ongoing_dials.retain(|addr| { + if let Some(id) = multiaddr_get_p2p(addr) { + id != peer_id + } else { + true + } + }); + } + } + } + + self.trigger_initial_bootstrap(swarm, peers_in_rt); + } + + pub(crate) fn on_outgoing_connection_error( + &mut self, + peer_id: Option, + swarm: &mut Swarm, + peers_in_rt: usize, + ) { + if self.bootstrap_completed { + return; + } + + match peer_id { + Some(peer_id) => { + self.ongoing_dials.retain(|addr| { + if let Some(id) = multiaddr_get_p2p(addr) { + id != peer_id + } else { + true + } + }); + } + // we are left with no option but to remove all the addresses from the ongoing dials that + // do not have a peer ID. + None => { + self.ongoing_dials + .retain(|addr| multiaddr_get_p2p(addr).is_some()); + } + } + + self.trigger_initial_bootstrap(swarm, peers_in_rt); + } +} diff --git a/ant-networking/src/driver.rs b/ant-networking/src/driver.rs index 40d35d5bf2..77e43b2a1e 100644 --- a/ant-networking/src/driver.rs +++ b/ant-networking/src/driver.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use crate::{ + bootstrap::InitialBootstrap, circular_vec::CircularVec, cmd::{LocalSwarmCmd, NetworkSwarmCmd}, config::GetRecordCfg, @@ -161,6 +162,7 @@ pub(super) struct NodeBehaviour { pub struct NetworkBuilder { bootstrap_cache: Option, concurrency_limit: Option, + initial_contacts: Vec, is_behind_home_network: bool, keypair: Keypair, listen_addr: Option, @@ -174,10 +176,11 @@ pub struct NetworkBuilder { } impl NetworkBuilder { - pub fn new(keypair: Keypair, local: bool) -> Self { + pub fn new(keypair: Keypair, local: bool, initial_contacts: Vec) -> Self { Self { bootstrap_cache: None, concurrency_limit: None, + initial_contacts, is_behind_home_network: false, keypair, listen_addr: None, @@ -589,6 +592,7 @@ impl NetworkBuilder { #[cfg(feature = "open-metrics")] close_group: Vec::with_capacity(CLOSE_GROUP_SIZE), peers_in_rt: 0, + initial_bootstrap: InitialBootstrap::new(self.initial_contacts), bootstrap_cache: self.bootstrap_cache, relay_manager, connected_relay_clients: Default::default(), @@ -684,6 +688,7 @@ pub struct SwarmDriver { #[cfg(feature = "open-metrics")] pub(crate) close_group: Vec, pub(crate) peers_in_rt: usize, + pub(crate) initial_bootstrap: InitialBootstrap, pub(crate) network_discovery: NetworkDiscovery, pub(crate) bootstrap_cache: Option, pub(crate) external_address_manager: Option, diff --git a/ant-networking/src/event/swarm.rs b/ant-networking/src/event/swarm.rs index 1dccc17fcf..fae58cf937 100644 --- a/ant-networking/src/event/swarm.rs +++ b/ant-networking/src/event/swarm.rs @@ -236,12 +236,20 @@ impl SwarmDriver { } => { event_string = "ConnectionEstablished"; debug!(%peer_id, num_established, ?concurrent_dial_errors, "ConnectionEstablished ({connection_id:?}) in {established_in:?}: {}", endpoint_str(&endpoint)); + + self.initial_bootstrap.on_connection_established( + &endpoint, + &mut self.swarm, + self.peers_in_rt, + ); + if let Some(external_addr_manager) = self.external_address_manager.as_mut() { if let ConnectedPoint::Listener { local_addr, .. 
} = &endpoint { external_addr_manager .on_established_incoming_connection(local_addr.clone()); } } + #[cfg(feature = "open-metrics")] if let Some(relay_manager) = self.relay_manager.as_mut() { relay_manager.on_connection_established(&peer_id, &connection_id); @@ -290,6 +298,21 @@ impl SwarmDriver { self.record_connection_metrics(); } + SwarmEvent::OutgoingConnectionError { + connection_id, + peer_id: None, + error, + } => { + event_string = "OutgoingConnErr"; + warn!("OutgoingConnectionError on {connection_id:?} - {error:?}"); + self.record_connection_metrics(); + + self.initial_bootstrap.on_outgoing_connection_error( + None, + &mut self.swarm, + self.peers_in_rt, + ); + } SwarmEvent::OutgoingConnectionError { peer_id: Some(failed_peer_id), error, @@ -300,6 +323,12 @@ impl SwarmDriver { let connection_details = self.live_connected_peers.remove(&connection_id); self.record_connection_metrics(); + self.initial_bootstrap.on_outgoing_connection_error( + Some(failed_peer_id), + &mut self.swarm, + self.peers_in_rt, + ); + // we need to decide if this was a critical error and if we should report it to the Issue tracker let is_critical_error = match error { DialError::Transport(errors) => { diff --git a/ant-networking/src/lib.rs b/ant-networking/src/lib.rs index 0b76aeb7cd..b029ad78cc 100644 --- a/ant-networking/src/lib.rs +++ b/ant-networking/src/lib.rs @@ -9,6 +9,7 @@ #[macro_use] extern crate tracing; +mod bootstrap; mod circular_vec; mod cmd; mod config; @@ -1159,6 +1160,15 @@ pub(crate) fn multiaddr_pop_p2p(multiaddr: &mut Multiaddr) -> Option { } } +/// Return the last `PeerId` from the `Multiaddr` if it exists. +pub(crate) fn multiaddr_get_p2p(multiaddr: &Multiaddr) -> Option { + if let Some(Protocol::P2p(peer_id)) = multiaddr.iter().last() { + Some(peer_id) + } else { + None + } +} + /// Build a `Multiaddr` with the p2p protocol filtered out. /// If it is a relayed address, then the relay's P2P address is preserved. pub(crate) fn multiaddr_strip_p2p(multiaddr: &Multiaddr) -> Multiaddr { @@ -1249,7 +1259,7 @@ mod tests { #[tokio::test] async fn test_network_sign_verify() -> eyre::Result<()> { let (network, _, _) = - NetworkBuilder::new(Keypair::generate_ed25519(), false).build_client(); + NetworkBuilder::new(Keypair::generate_ed25519(), false, vec![]).build_client(); let msg = b"test message"; let sig = network.sign(msg)?; assert!(network.verify(msg, &sig)); diff --git a/ant-node/src/bin/antnode/main.rs b/ant-node/src/bin/antnode/main.rs index 0ce4980844..8c07a5c02a 100644 --- a/ant-node/src/bin/antnode/main.rs +++ b/ant-node/src/bin/antnode/main.rs @@ -314,14 +314,14 @@ fn main() -> Result<()> { let restart_options = rt.block_on(async move { let mut node_builder = NodeBuilder::new( keypair, + initial_peers, rewards_address, evm_network, node_socket_addr, - opt.peers.local, root_dir, - opt.upnp, ); - node_builder.initial_peers(initial_peers); + node_builder.local(opt.peers.local); + node_builder.upnp(opt.upnp); node_builder.bootstrap_cache(bootstrap_cache); node_builder.is_behind_home_network(opt.home_network); #[cfg(feature = "open-metrics")] diff --git a/ant-node/src/node.rs b/ant-node/src/node.rs index 00c5d72d68..a2aac34ae4 100644 --- a/ant-node/src/node.rs +++ b/ant-node/src/node.rs @@ -101,29 +101,33 @@ impl NodeBuilder { /// or fetched from the bootstrap cache set using `bootstrap_cache` method. 
pub fn new( identity_keypair: Keypair, + initial_peers: Vec, evm_address: RewardsAddress, evm_network: EvmNetwork, addr: SocketAddr, - local: bool, root_dir: PathBuf, - upnp: bool, ) -> Self { Self { bootstrap_cache: None, - initial_peers: vec![], + initial_peers, identity_keypair, evm_address, evm_network, addr, - local, + local: false, root_dir, #[cfg(feature = "open-metrics")] metrics_server_port: None, is_behind_home_network: false, - upnp, + upnp: false, } } + /// Set the flag to indicate if the node is running in local mode + pub fn local(&mut self, local: bool) { + self.local = local; + } + #[cfg(feature = "open-metrics")] /// Set the port for the OpenMetrics server. Defaults to a random port if not set pub fn metrics_server_port(&mut self, port: Option) { @@ -135,16 +139,16 @@ impl NodeBuilder { self.bootstrap_cache = Some(cache); } - /// Set the initial peers to dial at startup. - pub fn initial_peers(&mut self, peers: Vec) { - self.initial_peers = peers; - } - /// Set the flag to indicate if the node is behind a home network pub fn is_behind_home_network(&mut self, is_behind_home_network: bool) { self.is_behind_home_network = is_behind_home_network; } + /// Set the flag to enable UPnP for the node + pub fn upnp(&mut self, upnp: bool) { + self.upnp = upnp; + } + /// Asynchronously runs a new node instance, setting up the swarm driver, /// creating a data storage, and handling network events. Returns the /// created `RunningNode` which contains a `NodeEventsChannel` for listening @@ -158,7 +162,8 @@ impl NodeBuilder { /// /// Returns an error if there is a problem initializing the `SwarmDriver`. pub fn build_and_run(self) -> Result { - let mut network_builder = NetworkBuilder::new(self.identity_keypair, self.local); + let mut network_builder = + NetworkBuilder::new(self.identity_keypair, self.local, self.initial_peers); #[cfg(feature = "open-metrics")] let metrics_recorder = if self.metrics_server_port.is_some() { @@ -191,7 +196,6 @@ impl NodeBuilder { let node = NodeInner { network: network.clone(), events_channel: node_events_channel.clone(), - initial_peers: self.initial_peers, reward_address: self.evm_address, #[cfg(feature = "open-metrics")] metrics_recorder, @@ -232,8 +236,6 @@ pub(crate) struct Node { /// the Arc from the interface. struct NodeInner { events_channel: NodeEventsChannel, - // Peers that are dialed at startup of node. 
- initial_peers: Vec, network: Network, #[cfg(feature = "open-metrics")] metrics_recorder: Option, @@ -247,11 +249,6 @@ impl Node { &self.inner.events_channel } - /// Returns the initial peers that the node will dial at startup - pub(crate) fn initial_peers(&self) -> &Vec { - &self.inner.initial_peers - } - /// Returns the instance of Network pub(crate) fn network(&self) -> &Network { &self.inner.network @@ -471,15 +468,6 @@ impl Node { } NetworkEvent::NewListenAddr(_) => { event_header = "NewListenAddr"; - let network = self.network().clone(); - let peers = self.initial_peers().clone(); - let _handle = spawn(async move { - for addr in peers { - if let Err(err) = network.dial(addr.clone()).await { - tracing::error!("Failed to dial {addr}: {err:?}"); - }; - } - }); } NetworkEvent::ResponseReceived { res } => { event_header = "ResponseReceived"; diff --git a/ant-node/src/python.rs b/ant-node/src/python.rs index d8f3dd9479..230e0990a9 100644 --- a/ant-node/src/python.rs +++ b/ant-node/src/python.rs @@ -95,14 +95,14 @@ impl AntNode { let node = rt.block_on(async { let mut node_builder = NodeBuilder::new( keypair, + initial_peers, rewards_address, evm_network, node_socket_addr, - local, root_dir.unwrap_or_else(|| PathBuf::from(".")), - false, ); - node_builder.initial_peers(initial_peers); + node_builder.local(local); + node_builder.upnp(false); node_builder.is_behind_home_network(home_network); node_builder diff --git a/ant-node/src/spawn/node_spawner.rs b/ant-node/src/spawn/node_spawner.rs index 280c4f9b2a..b01bc2b010 100644 --- a/ant-node/src/spawn/node_spawner.rs +++ b/ant-node/src/spawn/node_spawner.rs @@ -152,17 +152,14 @@ async fn spawn_node( let mut node_builder = NodeBuilder::new( keypair, + initial_peers, rewards_address, evm_network, socket_addr, - local, root_dir, - upnp, ); - - if !initial_peers.is_empty() { - node_builder.initial_peers(initial_peers); - } + node_builder.local(local); + node_builder.upnp(upnp); let running_node = node_builder.build_and_run()?; diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 602c60b030..75cc6448ea 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -199,29 +199,19 @@ impl Client { /// # } /// ``` pub async fn init_with_config(config: ClientConfig) -> Result { - let (shutdown_tx, network, event_receiver) = build_client_and_run_swarm(config.local); - let peers_args = PeersArgs { disable_mainnet_contacts: config.local, addrs: config.peers.unwrap_or_default(), local: config.local, ..Default::default() }; - - let peers = match peers_args.get_addrs(None, None).await { + let initial_peers = match peers_args.get_addrs(None, None).await { Ok(peers) => peers, Err(e) => return Err(e.into()), }; - let network_clone = network.clone(); - let peers = peers.to_vec(); - let _handle = ant_networking::time::spawn(async move { - for addr in peers { - if let Err(err) = network_clone.dial(addr.clone()).await { - error!("Failed to dial addr={addr} with err: {err:?}"); - }; - } - }); + let (shutdown_tx, network, event_receiver) = + build_client_and_run_swarm(config.local, initial_peers); // Wait until we have added a few peers to our routing table. 
let (sender, receiver) = futures::channel::oneshot::channel(); @@ -259,8 +249,10 @@ impl Client { fn build_client_and_run_swarm( local: bool, + initial_peers: Vec, ) -> (watch::Sender, Network, mpsc::Receiver) { - let mut network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local); + let mut network_builder = + NetworkBuilder::new(Keypair::generate_ed25519(), local, initial_peers); if let Ok(mut config) = BootstrapCacheConfig::default_config(local) { if local { From 5baad9b1fa567ddd82bfaf23defca23db1567948 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 21 Feb 2025 00:43:20 +0530 Subject: [PATCH 47/69] feat(network): remove dial fn and move bootstrap peer tracker --- ant-networking/src/bootstrap.rs | 15 ++++++++ ant-networking/src/cmd.rs | 27 +-------------- ant-networking/src/driver.rs | 26 +------------- ant-networking/src/event/identify.rs | 9 ++--- ant-networking/src/event/swarm.rs | 52 ++-------------------------- ant-networking/src/lib.rs | 8 ----- 6 files changed, 22 insertions(+), 115 deletions(-) diff --git a/ant-networking/src/bootstrap.rs b/ant-networking/src/bootstrap.rs index 6f61d9e73c..7624529e60 100644 --- a/ant-networking/src/bootstrap.rs +++ b/ant-networking/src/bootstrap.rs @@ -28,6 +28,8 @@ pub(crate) struct InitialBootstrap { initial_addrs: VecDeque, ongoing_dials: HashSet, bootstrap_completed: bool, + /// This tracker is used by other components to avoid overloading the initial peers. + initial_bootstrap_peer_ids: HashSet, } impl InitialBootstrap { @@ -41,13 +43,26 @@ impl InitialBootstrap { false }; + let initial_bootstrap_peer_ids = + initial_addrs.iter().filter_map(multiaddr_get_p2p).collect(); + Self { initial_addrs: initial_addrs.into(), ongoing_dials: Default::default(), bootstrap_completed, + initial_bootstrap_peer_ids, } } + /// Returns true if the peer is one of the initial bootstrap peers. + pub(crate) fn is_bootstrap_peer(&self, peer_id: &PeerId) -> bool { + self.initial_bootstrap_peer_ids.contains(peer_id) + } + + pub(crate) fn has_terminated(&self) -> bool { + self.bootstrap_completed + } + /// Trigger the initial bootstrap process. /// /// This will start dialing CONCURRENT_DIALS peers at a time from the initial addresses. If we have a successful diff --git a/ant-networking/src/cmd.rs b/ant-networking/src/cmd.rs index 1170705889..519a86ed6f 100644 --- a/ant-networking/src/cmd.rs +++ b/ant-networking/src/cmd.rs @@ -12,8 +12,7 @@ use crate::{ error::{NetworkError, Result}, event::TerminateNodeReason, log_markers::Marker, - multiaddr_pop_p2p, GetRecordError, MsgResponder, NetworkEvent, ResponseQuorum, - CLOSE_GROUP_SIZE, + GetRecordError, MsgResponder, NetworkEvent, ResponseQuorum, CLOSE_GROUP_SIZE, }; use ant_evm::{PaymentQuote, QuotingMetrics}; use ant_protocol::{ @@ -183,10 +182,6 @@ pub enum LocalSwarmCmd { /// Commands to send to the Swarm pub enum NetworkSwarmCmd { - Dial { - addr: Multiaddr, - sender: oneshot::Sender>, - }, // Get closest peers from the network GetClosestPeersToAddressFromNetwork { key: NetworkAddress, @@ -363,9 +358,6 @@ impl Debug for LocalSwarmCmd { impl Debug for NetworkSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - NetworkSwarmCmd::Dial { addr, .. } => { - write!(f, "NetworkSwarmCmd::Dial {{ addr: {addr:?} }}") - } NetworkSwarmCmd::GetNetworkRecord { key, cfg, .. 
} => { write!( f, @@ -515,23 +507,6 @@ impl SwarmDriver { error!("Could not send response to PutRecordTo cmd: {:?}", err); } } - - NetworkSwarmCmd::Dial { addr, sender } => { - cmd_string = "Dial"; - - if let Some(peer_id) = multiaddr_pop_p2p(&mut addr.clone()) { - // Only consider the dial peer is bootstrap node when proper PeerId is provided. - if let Some(kbucket) = self.swarm.behaviour_mut().kademlia.kbucket(peer_id) { - let ilog2 = kbucket.range().0.ilog2(); - let peers = self.bootstrap_peers.entry(ilog2).or_default(); - peers.insert(peer_id); - } - } - let _ = match self.dial(addr) { - Ok(_) => sender.send(Ok(())), - Err(e) => sender.send(Err(e.into())), - }; - } NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { cmd_string = "GetClosestPeersToAddressFromNetwork"; let query_id = self diff --git a/ant-networking/src/driver.rs b/ant-networking/src/driver.rs index 77e43b2a1e..15162d6aac 100644 --- a/ant-networking/src/driver.rs +++ b/ant-networking/src/driver.rs @@ -17,7 +17,6 @@ use crate::{ external_address::ExternalAddressManager, fifo_register::FifoRegister, log_markers::Marker, - multiaddr_pop_p2p, network_discovery::{NetworkDiscovery, NETWORK_DISCOVER_INTERVAL}, record_store::{ClientRecordStore, NodeRecordStore, NodeRecordStoreConfig}, record_store_api::UnifiedRecordStore, @@ -48,10 +47,7 @@ use libp2p::{ kad::{self, KBucketDistance as Distance, QueryId, Record, RecordKey, K_VALUE}, multiaddr::Protocol, request_response::{self, Config as RequestResponseConfig, OutboundRequestId, ProtocolSupport}, - swarm::{ - dial_opts::{DialOpts, PeerCondition}, - ConnectionId, DialError, NetworkBehaviour, StreamProtocol, Swarm, - }, + swarm::{ConnectionId, NetworkBehaviour, StreamProtocol, Swarm}, Multiaddr, PeerId, }; use libp2p::{swarm::SwarmEvent, Transport as _}; @@ -614,7 +610,6 @@ impl NetworkBuilder { // This is based on the libp2p kad::kBuckets peers distribution. dialed_peers: CircularVec::new(255), network_discovery: NetworkDiscovery::new(&peer_id), - bootstrap_peers: Default::default(), live_connected_peers: Default::default(), latest_established_connection_ids: Default::default(), handling_statistics: Default::default(), @@ -713,7 +708,6 @@ pub struct SwarmDriver { pub(crate) pending_get_record: PendingGetRecord, /// A list of the most recent peers we have dialed ourselves. Old dialed peers are evicted once the vec fills up. pub(crate) dialed_peers: CircularVec, - pub(crate) bootstrap_peers: BTreeMap, HashSet>, // Peers that having live connection to. Any peer got contacted during kad network query // will have live connection established. And they may not appear in the RT. pub(crate) live_connected_peers: BTreeMap, @@ -1015,24 +1009,6 @@ impl SwarmDriver { std::iter::once(self.self_peer_id).chain(peers).collect() } - /// Dials the given multiaddress. If address contains a peer ID, simultaneous - /// dials to that peer are prevented. - pub(crate) fn dial(&mut self, mut addr: Multiaddr) -> Result<(), DialError> { - debug!(%addr, "Dialing manually"); - - let peer_id = multiaddr_pop_p2p(&mut addr); - let opts = match peer_id { - Some(peer_id) => DialOpts::peer_id(peer_id) - // If we have a peer ID, we can prevent simultaneous dials. - .condition(PeerCondition::NotDialing) - .addresses(vec![addr]) - .build(), - None => DialOpts::unknown_peer_id().address(addr).build(), - }; - - self.swarm.dial(opts) - } - /// Record one handling time. /// Log for every 100 received. 
pub(crate) fn log_handling(&mut self, handle_string: String, handle_time: Duration) { diff --git a/ant-networking/src/event/identify.rs b/ant-networking/src/event/identify.rs index 6b9ee45cdd..9b1d7f2114 100644 --- a/ant-networking/src/event/identify.rs +++ b/ant-networking/src/event/identify.rs @@ -90,13 +90,8 @@ impl SwarmDriver { let is_relayed_peer = is_a_relayed_peer(addrs.iter()); - let is_bootstrap_peer = self - .bootstrap_peers - .iter() - .any(|(_ilog2, peers)| peers.contains(&peer_id)); - - // Do not use an `already relayed` peer as `potential relay candidate`. - if !is_relayed_peer && !is_bootstrap_peer { + // Do not use an `already relayed` or a `bootstrap` peer as `potential relay candidate`. + if !is_relayed_peer && !self.initial_bootstrap.is_bootstrap_peer(&peer_id) { if let Some(relay_manager) = self.relay_manager.as_mut() { debug!("Adding candidate relay server {peer_id:?}, it's not a bootstrap node"); relay_manager.add_potential_candidates(&peer_id, &addrs, &info.protocols); diff --git a/ant-networking/src/event/swarm.rs b/ant-networking/src/event/swarm.rs index fae58cf937..82aae26fd8 100644 --- a/ant-networking/src/event/swarm.rs +++ b/ant-networking/src/event/swarm.rs @@ -15,10 +15,9 @@ use itertools::Itertools; use libp2p::metrics::Recorder; use libp2p::{ core::ConnectedPoint, - kad::K_VALUE, multiaddr::Protocol, swarm::{ConnectionId, DialError, SwarmEvent}, - Multiaddr, PeerId, TransportError, + Multiaddr, TransportError, }; use tokio::time::Duration; @@ -366,13 +365,8 @@ impl SwarmDriver { "HandshakeTimedOut", ]; - let is_bootstrap_peer = self - .bootstrap_peers - .iter() - .any(|(_ilog2, peers)| peers.contains(&failed_peer_id)); - - if is_bootstrap_peer - && self.peers_in_rt < self.bootstrap_peers.len() + if self.initial_bootstrap.is_bootstrap_peer(&failed_peer_id) + && !self.initial_bootstrap.has_terminated() { warn!("OutgoingConnectionError: On bootstrap peer {failed_peer_id:?}, while still in bootstrap mode, ignoring"); there_is_a_serious_issue = false; @@ -539,46 +533,6 @@ impl SwarmDriver { Ok(()) } - // if target bucket is full, remove a bootstrap node if presents. - #[allow(dead_code)] - fn remove_bootstrap_from_full(&mut self, peer_id: PeerId) { - let mut shall_removed = None; - - let mut bucket_index = Some(0); - - if let Some(kbucket) = self.swarm.behaviour_mut().kademlia.kbucket(peer_id) { - if kbucket.num_entries() >= K_VALUE.into() { - bucket_index = kbucket.range().0.ilog2(); - if let Some(peers) = self.bootstrap_peers.get(&bucket_index) { - for peer_entry in kbucket.iter() { - if peers.contains(peer_entry.node.key.preimage()) { - shall_removed = Some(*peer_entry.node.key.preimage()); - break; - } - } - } - } - } - if let Some(to_be_removed_bootstrap) = shall_removed { - info!("Bootstrap node {to_be_removed_bootstrap:?} to be replaced by peer {peer_id:?}"); - let entry = self - .swarm - .behaviour_mut() - .kademlia - .remove_peer(&to_be_removed_bootstrap); - if let Some(removed_peer) = entry { - self.update_on_peer_removal(*removed_peer.node.key.preimage()); - } - - // With the switch to using bootstrap cache, workload is distributed already. - // to avoid peers keeps being replaced by each other, - // there shall be just one time of removal to be undertaken. - if let Some(peers) = self.bootstrap_peers.get_mut(&bucket_index) { - let _ = peers.remove(&to_be_removed_bootstrap); - } - } - } - // Remove outdated connection to a peer if it is not in the RT. // Optionally force remove all the connections for a provided peer. 
fn remove_outdated_connections(&mut self) { diff --git a/ant-networking/src/lib.rs b/ant-networking/src/lib.rs index b029ad78cc..9501cab594 100644 --- a/ant-networking/src/lib.rs +++ b/ant-networking/src/lib.rs @@ -212,14 +212,6 @@ impl Network { self.keypair().public().encode_protobuf() } - /// Dial the given peer at the given address. - /// This function will only be called for the bootstrap nodes. - pub async fn dial(&self, addr: Multiaddr) -> Result<()> { - let (sender, receiver) = oneshot::channel(); - self.send_network_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); - receiver.await? - } - /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name. /// Excludes the client's `PeerId` while calculating the closest peers. pub async fn client_get_all_close_peers_in_range_or_close_group( From dd3e3bcb5e59eb9b9f8508623b48d80e52dd90f7 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 21 Feb 2025 01:10:23 +0530 Subject: [PATCH 48/69] fix(network): trigger the initial bootstrap process for clients and nodes --- ant-networking/src/bootstrap.rs | 2 +- ant-networking/src/driver.rs | 5 +++++ ant-networking/src/event/swarm.rs | 5 +++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ant-networking/src/bootstrap.rs b/ant-networking/src/bootstrap.rs index 7624529e60..eb94114966 100644 --- a/ant-networking/src/bootstrap.rs +++ b/ant-networking/src/bootstrap.rs @@ -76,7 +76,7 @@ impl InitialBootstrap { return; } - while self.ongoing_dials.len() <= CONCURRENT_DIALS && !self.initial_addrs.is_empty() { + while self.ongoing_dials.len() < CONCURRENT_DIALS && !self.initial_addrs.is_empty() { let Some(mut addr) = self.initial_addrs.pop_front() else { continue; }; diff --git a/ant-networking/src/driver.rs b/ant-networking/src/driver.rs index 15162d6aac..5356738f54 100644 --- a/ant-networking/src/driver.rs +++ b/ant-networking/src/driver.rs @@ -761,6 +761,11 @@ impl SwarmDriver { ); } + if self.is_client { + self.initial_bootstrap + .trigger_initial_bootstrap(&mut self.swarm, self.peers_in_rt); + } + // temporarily skip processing IncomingConnectionError swarm event to avoid log spamming let mut previous_incoming_connection_error_event = None; loop { diff --git a/ant-networking/src/event/swarm.rs b/ant-networking/src/event/swarm.rs index 82aae26fd8..1b193565bf 100644 --- a/ant-networking/src/event/swarm.rs +++ b/ant-networking/src/event/swarm.rs @@ -196,6 +196,11 @@ impl SwarmDriver { debug!("All our external addresses: {all_external_addresses:?}"); } + if !self.is_client { + self.initial_bootstrap + .trigger_initial_bootstrap(&mut self.swarm, self.peers_in_rt); + } + self.send_event(NetworkEvent::NewListenAddr(address.clone())); } SwarmEvent::ListenerClosed { From 29adcddae885e868f4251c07258ab823cd22be20 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 21 Feb 2025 01:25:15 +0530 Subject: [PATCH 49/69] chore(test): increase node dial wait time --- ant-networking/src/event/swarm.rs | 2 +- ant-node/src/spawn/network_spawner.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ant-networking/src/event/swarm.rs b/ant-networking/src/event/swarm.rs index 1b193565bf..6daa68a5c7 100644 --- a/ant-networking/src/event/swarm.rs +++ b/ant-networking/src/event/swarm.rs @@ -307,7 +307,7 @@ impl SwarmDriver { peer_id: None, error, } => { - event_string = "OutgoingConnErr"; + event_string = "OutgoingConnErrWithoutPeerId"; warn!("OutgoingConnectionError on {connection_id:?} - {error:?}"); self.record_connection_metrics(); diff --git 
a/ant-node/src/spawn/network_spawner.rs b/ant-node/src/spawn/network_spawner.rs index 78e36f8127..dec9a95f33 100644 --- a/ant-node/src/spawn/network_spawner.rs +++ b/ant-node/src/spawn/network_spawner.rs @@ -241,7 +241,7 @@ mod tests { assert_eq!(running_network.running_nodes().len(), network_size); // Wait for nodes to dial each other - sleep(Duration::from_secs(10)).await; + sleep(Duration::from_secs(15)).await; // Validate that all nodes know each other for node in running_network.running_nodes() { From 7d0fc0b632347f1bb5253931009e99b700668bee Mon Sep 17 00:00:00 2001 From: qima Date: Mon, 24 Feb 2025 22:59:59 +0800 Subject: [PATCH 50/69] test(protocol): ensure enum extension backward compatible --- Cargo.lock | 1 + ant-protocol/Cargo.toml | 1 + ant-protocol/src/lib.rs | 125 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 125 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e61ecbadc..273bc4ba46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1065,6 +1065,7 @@ version = "1.0.1-rc.2" dependencies = [ "ant-build-info", "ant-evm", + "bincode", "blsttc", "bytes", "color-eyre", diff --git a/ant-protocol/Cargo.toml b/ant-protocol/Cargo.toml index c437105f02..48f234ea28 100644 --- a/ant-protocol/Cargo.toml +++ b/ant-protocol/Cargo.toml @@ -50,3 +50,4 @@ workspace = true [dev-dependencies] rand = "0.8" +bincode = "1.3" diff --git a/ant-protocol/src/lib.rs b/ant-protocol/src/lib.rs index 0846846fa8..0a0e146896 100644 --- a/ant-protocol/src/lib.rs +++ b/ant-protocol/src/lib.rs @@ -376,9 +376,13 @@ impl std::fmt::Debug for PrettyPrintRecordKey<'_> { #[cfg(test)] mod tests { - use crate::storage::GraphEntryAddress; - use crate::NetworkAddress; + use crate::{ + messages::{Nonce, Query}, + storage::GraphEntryAddress, + NetworkAddress, PeerId, + }; use bls::rand::thread_rng; + use serde::{Deserialize, Serialize}; #[test] fn verify_graph_entry_addr_is_actionable() { @@ -391,4 +395,121 @@ mod tests { assert!(net_addr_fmt.contains(graph_entry_addr_hex)); } + + #[derive(Eq, PartialEq, PartialOrd, Clone, Serialize, Deserialize, Debug)] + enum QueryExtended { + GetStoreQuote { + key: NetworkAddress, + data_type: u32, + data_size: usize, + nonce: Option, + difficulty: usize, + }, + GetReplicatedRecord { + requester: NetworkAddress, + key: NetworkAddress, + }, + GetChunkExistenceProof { + key: NetworkAddress, + nonce: Nonce, + difficulty: usize, + }, + CheckNodeInProblem(NetworkAddress), + GetClosestPeers { + key: NetworkAddress, + num_of_peers: Option, + range: Option<[u8; 32]>, + sign_result: bool, + }, + GetVersion(NetworkAddress), + } + + #[test] + fn test_query_serialization_deserialization() { + let peer_id = PeerId::random(); + // Create a sample Query message + let original_query = Query::GetStoreQuote { + key: NetworkAddress::from_peer(peer_id), + data_type: 1, + data_size: 100, + nonce: Some(0), + difficulty: 3, + }; + + // Serialize to bytes + let serialized = bincode::serialize(&original_query).expect("Serialization failed"); + + // Deserialize into QueryExtended + let deserialized: QueryExtended = + bincode::deserialize(&serialized).expect("Deserialization into QueryExtended failed"); + + // Verify the deserialized data matches the original + match deserialized { + QueryExtended::GetStoreQuote { + key, + data_type, + data_size, + nonce, + difficulty, + } => { + assert_eq!(key, NetworkAddress::from_peer(peer_id)); + assert_eq!(data_type, 1); + assert_eq!(data_size, 100); + assert_eq!(nonce, Some(0)); + assert_eq!(difficulty, 3); + } + _ => panic!("Deserialized into 
wrong variant"), + } + } + + #[test] + fn test_query_extended_serialization() { + // Create a sample QueryExtended message with extended new variant + let extended_query = QueryExtended::GetVersion(NetworkAddress::from_peer(PeerId::random())); + + // Serialize to bytes + let serialized = bincode::serialize(&extended_query).expect("Serialization failed"); + + // Attempt to deserialize into original Query (should fail) + let result: Result = bincode::deserialize(&serialized); + assert!( + result.is_err(), + "Should fail to deserialize extended enum into original" + ); + + let peer_id = PeerId::random(); + // Create a sample QueryExtended message with old variant + let extended_query = QueryExtended::GetStoreQuote { + key: NetworkAddress::from_peer(peer_id), + data_type: 1, + data_size: 100, + nonce: Some(0), + difficulty: 3, + }; + + // Serialize to bytes + let serialized = bincode::serialize(&extended_query).expect("Serialization failed"); + + // Deserialize into Query + let deserialized: Query = + bincode::deserialize(&serialized).expect("Deserialization into Query failed"); + + // Verify the deserialized data matches the original + match deserialized { + Query::GetStoreQuote { + key, + data_type, + data_size, + nonce, + difficulty, + } => { + assert_eq!(key, NetworkAddress::from_peer(peer_id)); + assert_eq!(data_type, 1); + assert_eq!(data_size, 100); + assert_eq!(nonce, Some(0)); + assert_eq!(difficulty, 3); + } + _ => panic!("Deserialized into wrong variant"), + } + } } From 46dcc93c0f34fcbfeb50828808261291ec059ecc Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 25 Feb 2025 20:26:00 +0530 Subject: [PATCH 51/69] feat(network): increase network discovery rate --- ant-networking/src/network_discovery.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ant-networking/src/network_discovery.rs b/ant-networking/src/network_discovery.rs index 7e33728d96..c4bb71a008 100644 --- a/ant-networking/src/network_discovery.rs +++ b/ant-networking/src/network_discovery.rs @@ -30,7 +30,7 @@ pub(crate) const NETWORK_DISCOVER_INTERVAL: Duration = Duration::from_secs(10); /// For every NETWORK_DISCOVER_CONNECTED_PEERS_STEP connected peer, we step up the /// NETWORK_DISCOVER_INTERVAL to slow down process. -const NETWORK_DISCOVER_CONNECTED_PEERS_STEP: u32 = 5; +const NETWORK_DISCOVER_CONNECTED_PEERS_STEP: u32 = 20; /// Slow down the process if the previously added peer has been before LAST_PEER_ADDED_TIME_LIMIT. /// This is to make sure we don't flood the network with `FindNode` msgs. 
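The constant bump in this patch is easier to judge alongside the formula it feeds. Below is a minimal, illustrative Rust sketch of the linear interval scaling as it stands at this point in the series; the `scaled_interval` helper and the `main` driver are assumptions for demonstration only, while the two constants and the `max(1, peers / STEP) * INTERVAL` shape come from `should_we_discover` above (a later patch in this series swaps the linear formula for an exponential `scaled_duration`).

use std::time::Duration;

const NETWORK_DISCOVER_INTERVAL: Duration = Duration::from_secs(10);
const NETWORK_DISCOVER_CONNECTED_PEERS_STEP: u32 = 20; // was 5 before this patch

// Linear scaling: one additional NETWORK_DISCOVER_INTERVAL for every STEP peers
// in the routing table, with a floor of a single step.
fn scaled_interval(peers_in_rt: u32) -> Duration {
    let step = std::cmp::max(1, peers_in_rt / NETWORK_DISCOVER_CONNECTED_PEERS_STEP);
    NETWORK_DISCOVER_INTERVAL * step
}

fn main() {
    // With STEP = 5, a 100-peer routing table slowed discovery to a 200s interval;
    // with STEP = 20 the same table yields 50s, so discovery runs about four times
    // as often for the same RT size.
    for peers in [10u32, 40, 100, 200] {
        println!("{peers} peers in RT -> discovery interval {:?}", scaled_interval(peers));
    }
}

Note that this only covers the peer-count path: when no peer has been added for LAST_PEER_ADDED_TIME_LIMIT, the randomized NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S slowdown in `should_we_discover` still takes precedence over this value.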
From 92e78927f614e70258b4d2548a70d241e337a73a Mon Sep 17 00:00:00 2001 From: qima Date: Tue, 25 Feb 2025 03:33:58 +0800 Subject: [PATCH 52/69] feat(node): get peer's version and generate versions percentage metric --- ant-networking/src/cmd.rs | 35 ++++++++++++++++++++++++ ant-networking/src/driver.rs | 3 +++ ant-networking/src/event/mod.rs | 5 ++++ ant-networking/src/lib.rs | 5 ++++ ant-networking/src/metrics/mod.rs | 39 +++++++++++++++++++++++++-- ant-node/src/node.rs | 30 +++++++++++++++++++++ ant-protocol/src/lib.rs | 3 ++- ant-protocol/src/messages/query.rs | 8 +++++- ant-protocol/src/messages/response.rs | 13 +++++++++ 9 files changed, 137 insertions(+), 4 deletions(-) diff --git a/ant-networking/src/cmd.rs b/ant-networking/src/cmd.rs index 1170705889..d220f28fd1 100644 --- a/ant-networking/src/cmd.rs +++ b/ant-networking/src/cmd.rs @@ -179,6 +179,11 @@ pub enum LocalSwarmCmd { holder: NetworkAddress, keys: Vec<(NetworkAddress, ValidationType)>, }, + /// Notify a fetched peer's version + NotifyPeerVersion { + peer: PeerId, + version: String, + }, } /// Commands to send to the Swarm @@ -354,6 +359,9 @@ impl Debug for LocalSwarmCmd { "LocalSwarmCmd::AddFreshReplicateRecords({holder:?}, {keys:?})" ) } + LocalSwarmCmd::NotifyPeerVersion { peer, version } => { + write!(f, "LocalSwarmCmd::NotifyPeerVersion({peer:?}, {version:?})") + } } } } @@ -983,6 +991,10 @@ impl SwarmDriver { cmd_string = "AddFreshReplicateRecords"; let _ = self.add_keys_to_replication_fetcher(holder, keys, true); } + LocalSwarmCmd::NotifyPeerVersion { peer, version } => { + cmd_string = "NotifyPeerVersion"; + self.record_node_version(peer, version); + } } self.log_handling(cmd_string.to_string(), start.elapsed()); @@ -990,6 +1002,29 @@ impl SwarmDriver { Ok(()) } + fn record_node_version(&mut self, peer_id: PeerId, version: String) { + let _ = self.peers_version.insert(peer_id, version); + + // Collect all peers_in_non_full_buckets + let mut peers_in_non_full_buckets = vec![]; + for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { + let num_entires = kbucket.num_entries(); + if num_entires >= K_VALUE.get() { + continue; + } else { + let peers_in_kbucket = kbucket + .iter() + .map(|peer_entry| peer_entry.node.key.into_preimage()) + .collect::>(); + peers_in_non_full_buckets.extend(peers_in_kbucket); + } + } + + // Ensure all existing node_version records are for those peers_in_non_full_buckets + self.peers_version + .retain(|peer_id, _version| peers_in_non_full_buckets.contains(peer_id)); + } + pub(crate) fn record_node_issue(&mut self, peer_id: PeerId, issue: NodeIssue) { info!("Peer {peer_id:?} is reported as having issue {issue:?}"); let (issue_vec, is_bad) = self.bad_nodes.entry(peer_id).or_default(); diff --git a/ant-networking/src/driver.rs b/ant-networking/src/driver.rs index 45874e0fae..a560d4aa8b 100644 --- a/ant-networking/src/driver.rs +++ b/ant-networking/src/driver.rs @@ -625,6 +625,7 @@ impl NetworkBuilder { last_replication: None, last_connection_pruning_time: Instant::now(), network_density_samples: FifoRegister::new(100), + peers_version: Default::default(), }; let network = Network::new( @@ -735,6 +736,8 @@ pub struct SwarmDriver { pub(crate) last_connection_pruning_time: Instant, /// FIFO cache for the network density samples pub(crate) network_density_samples: FifoRegister, + /// record versions of those peers that in the non-full-kbuckets. 
+ pub(crate) peers_version: HashMap, } impl SwarmDriver { diff --git a/ant-networking/src/event/mod.rs b/ant-networking/src/event/mod.rs index b414a32961..b1cd4de23d 100644 --- a/ant-networking/src/event/mod.rs +++ b/ant-networking/src/event/mod.rs @@ -312,6 +312,11 @@ impl SwarmDriver { if self.metrics_recorder.is_some() { self.check_for_change_in_our_close_group(); } + + #[cfg(feature = "open-metrics")] + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.update_node_versions(&self.peers_version); + } } /// Update state on removal of a peer from the routing table. diff --git a/ant-networking/src/lib.rs b/ant-networking/src/lib.rs index 8abaaf9ead..86ec789c81 100644 --- a/ant-networking/src/lib.rs +++ b/ant-networking/src/lib.rs @@ -1028,10 +1028,15 @@ impl Network { self.send_local_swarm_cmd(LocalSwarmCmd::NotifyPeerScores { peer_scores }) } + pub fn notify_node_version(&self, peer: PeerId, version: String) { + self.send_local_swarm_cmd(LocalSwarmCmd::NotifyPeerVersion { peer, version }) + } + /// Helper to send NetworkSwarmCmd fn send_network_swarm_cmd(&self, cmd: NetworkSwarmCmd) { send_network_swarm_cmd(self.network_swarm_cmd_sender().clone(), cmd); } + /// Helper to send LocalSwarmCmd fn send_local_swarm_cmd(&self, cmd: LocalSwarmCmd) { send_local_swarm_cmd(self.local_swarm_cmd_sender().clone(), cmd); diff --git a/ant-networking/src/metrics/mod.rs b/ant-networking/src/metrics/mod.rs index a1ba7f5b1d..3abc5a7acf 100644 --- a/ant-networking/src/metrics/mod.rs +++ b/ant-networking/src/metrics/mod.rs @@ -20,9 +20,10 @@ use libp2p::{ PeerId, }; use prometheus_client::{ - metrics::family::Family, - metrics::{counter::Counter, gauge::Gauge}, + encoding::EncodeLabelSet, + metrics::{counter::Counter, family::Family, gauge::Gauge}, }; +use std::collections::HashMap; use std::sync::atomic::AtomicU64; use sysinfo::{Pid, ProcessRefreshKind, System}; use tokio::time::Duration; @@ -30,6 +31,12 @@ use tokio::time::Duration; const UPDATE_INTERVAL: Duration = Duration::from_secs(60); const TO_MB: u64 = 1_000_000; +// Add this new struct for version labels +#[derive(Clone, Hash, PartialEq, Eq, Debug, EncodeLabelSet)] +pub(crate) struct VersionLabels { + version: String, +} + /// The shared recorders that are used to record metrics. 
pub(crate) struct NetworkMetricsRecorder { // Records libp2p related metrics @@ -49,6 +56,7 @@ pub(crate) struct NetworkMetricsRecorder { pub(crate) relay_peers_in_routing_table: Gauge, pub(crate) records_stored: Gauge, pub(crate) relay_reservation_health: Gauge, + pub(crate) node_versions: Family, // quoting metrics relevant_records: Gauge, @@ -168,6 +176,14 @@ impl NetworkMetricsRecorder { relay_client_events.clone(), ); + // Add this new metric registration + let node_versions = Family::default(); + sub_registry.register( + "node_versions", + "Number of nodes running each version", + node_versions.clone(), + ); + let process_memory_used_mb = Gauge::::default(); sub_registry.register( "process_memory_used_mb", @@ -257,6 +273,7 @@ impl NetworkMetricsRecorder { max_records, received_payment_count, live_time, + node_versions, bad_peers_count, shunned_count_across_time_frames, @@ -355,6 +372,24 @@ impl NetworkMetricsRecorder { } }); } + + pub(crate) fn update_node_versions(&self, versions: &HashMap) { + // First, count occurrences of each version + let mut version_counts: HashMap = HashMap::new(); + for version in versions.values() { + *version_counts.entry(version.clone()).or_insert(0) += 1; + } + + // Clean up old records, to avoid outdated versions pollute the statistic. + self.node_versions.clear(); + + // Update metrics + for (version, count) in version_counts { + self.node_versions + .get_or_create(&VersionLabels { version }) + .set(count as i64); + } + } } /// Impl the Recorder traits again for our struct. diff --git a/ant-node/src/node.rs b/ant-node/src/node.rs index 00c5d72d68..c0b27612c4 100644 --- a/ant-node/src/node.rs +++ b/ant-node/src/node.rs @@ -448,6 +448,12 @@ impl Node { self.record_metrics(Marker::PeersInRoutingTable(connected_peers)); self.record_metrics(Marker::PeerAddedToRoutingTable(&peer_id)); + // try query peer version + let network = self.network().clone(); + let _handle = spawn(async move { + Self::try_query_peer_version(network, peer_id).await; + }); + // try replication here let network = self.network().clone(); self.record_metrics(Marker::IntervalReplicationTriggered); @@ -715,6 +721,10 @@ impl Node { Self::respond_get_closest_peers(network, key, num_of_peers, range, sign_result) .await } + Query::GetVersion(_) => QueryResponse::GetVersion { + peer: NetworkAddress::from_peer(network.peer_id()), + version: ant_build_info::package_version(), + }, }; Response::Query(resp) } @@ -970,6 +980,26 @@ impl Node { ); } + /// Query peer's version and update local knowledge. + async fn try_query_peer_version(network: Network, peer: PeerId) { + let request = Request::Query(Query::GetVersion(NetworkAddress::from_peer(peer))); + let version = match network.send_request(request, peer).await { + Ok(Response::Query(QueryResponse::GetVersion { version, .. 
})) => { + info!("Fetched peer {peer:?} version as {version:?}"); + version + } + Ok(other) => { + info!("Not a version fetched from peer {peer:?}, {other:?}"); + "none".to_string() + } + Err(err) => { + info!("Failed to fetch version from peer {peer:?} with error {err:?}"); + "old".to_string() + } + }; + network.notify_node_version(peer, version); + } + #[allow(dead_code)] async fn network_density_sampling(network: Network) { for _ in 0..10 { diff --git a/ant-protocol/src/lib.rs b/ant-protocol/src/lib.rs index 0a0e146896..0b48941ed8 100644 --- a/ant-protocol/src/lib.rs +++ b/ant-protocol/src/lib.rs @@ -422,6 +422,7 @@ mod tests { sign_result: bool, }, GetVersion(NetworkAddress), + Extended, } #[test] @@ -465,7 +466,7 @@ mod tests { #[test] fn test_query_extended_serialization() { // Create a sample QueryExtended message with extended new variant - let extended_query = QueryExtended::GetVersion(NetworkAddress::from_peer(PeerId::random())); + let extended_query = QueryExtended::Extended; // Serialize to bytes let serialized = bincode::serialize(&extended_query).expect("Serialization failed"); diff --git a/ant-protocol/src/messages/query.rs b/ant-protocol/src/messages/query.rs index 5c2d8a6ac9..8240a18aa3 100644 --- a/ant-protocol/src/messages/query.rs +++ b/ant-protocol/src/messages/query.rs @@ -71,13 +71,16 @@ pub enum Query { // For future econ usage, sign_result: bool, }, + /// *** From now on, the order of variants shall be retained to be backward compatible + /// Query peer's cargo package version. + GetVersion(NetworkAddress), } impl Query { /// Used to send a query to the close group of the address. pub fn dst(&self) -> NetworkAddress { match self { - Query::CheckNodeInProblem(address) => address.clone(), + Query::CheckNodeInProblem(address) | Query::GetVersion(address) => address.clone(), // Shall not be called for this, as this is a `one-to-one` message, // and the destination shall be decided by the requester already. Query::GetStoreQuote { key, .. } @@ -131,6 +134,9 @@ impl std::fmt::Display for Query { "Query::GetClosestPeers({key:?} {num_of_peers:?} {distance:?} {sign_result})" ) } + Query::GetVersion(address) => { + write!(f, "Query::GetVersion({address:?})") + } } } } diff --git a/ant-protocol/src/messages/response.rs b/ant-protocol/src/messages/response.rs index bd2d6364cd..a84dbb2c31 100644 --- a/ant-protocol/src/messages/response.rs +++ b/ant-protocol/src/messages/response.rs @@ -66,6 +66,16 @@ pub enum QueryResponse { // Signature of signing the above (if requested), for future economic model usage. 
signature: Option>, }, + /// *** From now on, the order of variants shall be retained to be backward compatible + // ===== GetVersion ===== + // + /// Response to [`GetVersion`] + /// + /// [`GetVersion`]: crate::messages::Query::GetVersion + GetVersion { + peer: NetworkAddress, + version: String, + }, } // Debug implementation for QueryResponse, to avoid printing Vec @@ -118,6 +128,9 @@ impl Debug for QueryResponse { "GetClosestPeers target {target:?} close peers {addresses:?}" ) } + QueryResponse::GetVersion { peer, version } => { + write!(f, "GetVersion peer {peer:?} has version of {version:?}") + } } } } From c12a01c926f6bba3aa5981485f0eb4e687539722 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 25 Feb 2025 21:31:05 +0530 Subject: [PATCH 53/69] fix(network): increase the network discovery slow down rate exponentially --- ant-networking/src/network_discovery.rs | 78 ++++++++++++++++++------- 1 file changed, 56 insertions(+), 22 deletions(-) diff --git a/ant-networking/src/network_discovery.rs b/ant-networking/src/network_discovery.rs index c4bb71a008..3cbfedd59c 100644 --- a/ant-networking/src/network_discovery.rs +++ b/ant-networking/src/network_discovery.rs @@ -28,17 +28,10 @@ const MAX_PEERS_PER_BUCKET: usize = 5; /// The interval is increased as more peers are added to the routing table. pub(crate) const NETWORK_DISCOVER_INTERVAL: Duration = Duration::from_secs(10); -/// For every NETWORK_DISCOVER_CONNECTED_PEERS_STEP connected peer, we step up the -/// NETWORK_DISCOVER_INTERVAL to slow down process. -const NETWORK_DISCOVER_CONNECTED_PEERS_STEP: u32 = 20; - /// Slow down the process if the previously added peer has been before LAST_PEER_ADDED_TIME_LIMIT. /// This is to make sure we don't flood the network with `FindNode` msgs. const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); -/// A minimum interval to prevent network discovery got triggered too often -const LAST_NETWORK_DISCOVER_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(90); - /// The network discovery interval to use if we haven't added any new peers in a while. const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; @@ -159,14 +152,7 @@ impl NetworkDiscovery { peers_in_rt: u32, current_interval: Duration, ) -> (bool, Option) { - let is_ongoing = if let Some(last_network_discover_triggered) = - self.last_network_discover_triggered - { - last_network_discover_triggered.elapsed() < LAST_NETWORK_DISCOVER_TRIGGERED_TIME_LIMIT - } else { - false - }; - let should_network_discover = !is_ongoing && peers_in_rt >= 1; + let should_network_discover = peers_in_rt >= 1; // if it has been a while (LAST_PEER_ADDED_TIME_LIMIT) since we have added a new peer, // slowdown the network discovery process. @@ -189,26 +175,36 @@ impl NetworkDiscovery { return (should_network_discover, Some(new_interval)); } - // increment network_discover_interval in steps of NETWORK_DISCOVER_INTERVAL every NETWORK_DISCOVER_CONNECTED_PEERS_STEP - let step = peers_in_rt / NETWORK_DISCOVER_CONNECTED_PEERS_STEP; - let step = std::cmp::max(1, step); - let new_interval = NETWORK_DISCOVER_INTERVAL * step; - let new_interval = if new_interval > current_interval { - info!("More peers have been added to our RT!. Slowing down the continuous network discovery process. Old interval: {current_interval:?}, New interval: {new_interval:?}"); + let duration_based_on_peers = Self::scaled_duration(peers_in_rt); + let new_interval = if duration_based_on_peers > current_interval { + info!("More peers have been added to our RT!. 
Slowing down the continuous network discovery process. Old interval: {current_interval:?}, New interval: {duration_based_on_peers:?}"); - let mut interval = interval(new_interval); + let mut interval = interval(duration_based_on_peers); interval.tick().await; Some(interval) } else { None }; + (should_network_discover, new_interval) } pub(crate) fn handle_get_closest_query(&mut self, closest_peers: Vec) { self.candidates.handle_get_closest_query(closest_peers); } + + /// Returns an exponentially increasing interval based on the number of peers in the routing table. + /// Formula: y=30 * 1.00673^x + /// Caps out at 600s for 400+ peers + fn scaled_duration(peers_in_rt: u32) -> Duration { + if peers_in_rt >= 450 { + return Duration::from_secs(600); + } + let base: f64 = 1.00673; + + Duration::from_secs_f64(30.0 * base.powi(peers_in_rt as i32)) + } } /// Keep track of NetworkAddresses belonging to every bucket (if we can generate them with reasonable effort) @@ -370,3 +366,41 @@ impl NetworkDiscoveryCandidates { ) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_scaled_interval() { + let test_cases = vec![ + (0, 30.0), + (50, 40.0), + (100, 60.0), + (150, 80.0), + (200, 115.0), + (220, 130.0), + (250, 160.0), + (300, 220.0), + (350, 313.0), + (400, 430.0), + (425, 520.0), + (449, 600.0), + (1000, 600.0), + ]; + + for (peers, expected_secs) in test_cases { + let interval = NetworkDiscovery::scaled_duration(peers); + let actual_secs = interval.as_secs_f64(); + + let tolerance = 0.15 * expected_secs; // 5% tolerance + + assert!( + (actual_secs - expected_secs).abs() < tolerance, + "For {peers} peers, expected duration {expected_secs:.2}s but got {actual_secs:.2}s", + ); + + println!("Peers: {peers}, Expected: {expected_secs:.2}s, Actual: {actual_secs:.2}s",); + } + } +} From d283d12610bd95c9e2bf88a97eb2f30b86208255 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 26 Feb 2025 19:18:37 +0530 Subject: [PATCH 54/69] feat(network): implement initial bootstrap trigger to check if bootstrap should be started --- ant-networking/src/bootstrap.rs | 60 ++++++++++++++++++++++++++++--- ant-networking/src/driver.rs | 23 ++++++++---- ant-networking/src/event/swarm.rs | 28 ++++++++++----- 3 files changed, 91 insertions(+), 20 deletions(-) diff --git a/ant-networking/src/bootstrap.rs b/ant-networking/src/bootstrap.rs index eb94114966..6fcebf4416 100644 --- a/ant-networking/src/bootstrap.rs +++ b/ant-networking/src/bootstrap.rs @@ -18,12 +18,57 @@ use libp2p::{ use rand::seq::SliceRandom; use std::collections::{HashSet, VecDeque}; +/// Periodically check if the initial bootstrap process should be triggered. +/// This happens only once after the conditions for triggering the initial bootstrap process are met. +pub(crate) const INITIAL_BOOTSTRAP_CHECK_INTERVAL: std::time::Duration = + std::time::Duration::from_secs(1); + /// The max number of concurrent dials to be made during the initial bootstrap process. const CONCURRENT_DIALS: usize = 3; /// The max number of peers to be added to the routing table before stopping the initial bootstrap process. const MAX_PEERS_BEFORE_TERMINATION: usize = 5; +/// This is used to track the conditions that are required to trigger the initial bootstrap process once. 
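+/// Clients can trigger it immediately; nodes with UPnP enabled wait until a gateway result is
+/// obtained, and nodes without UPnP wait until a listen address is available (see
+/// `should_trigger_initial_bootstrap` below).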
+pub(crate) struct InitialBootstrapTrigger { + pub(crate) upnp: bool, + pub(crate) client: bool, + pub(crate) upnp_gateway_result_obtained: bool, + pub(crate) listen_addr_obtained: bool, +} + +impl InitialBootstrapTrigger { + pub(crate) fn new(upnp: bool, client: bool) -> Self { + Self { + upnp, + client, + upnp_gateway_result_obtained: false, + listen_addr_obtained: false, + } + } + + /// Used to check if we can trigger the initial bootstrap process. + /// + /// - If we are a client, we should trigger the initial bootstrap process immediately. + /// - If we have set upnp flag and if we have obtained the upnp gateway result, we should trigger the initial bootstrap process. + /// - If we don't have upnp enabled, then we should trigger the initial bootstrap process only if we have a listen address available. + pub(crate) fn should_trigger_initial_bootstrap(&self) -> bool { + if self.client { + return true; + } + + if self.upnp { + return self.upnp_gateway_result_obtained; + } + + if self.listen_addr_obtained { + return true; + } + + false + } +} + pub(crate) struct InitialBootstrap { initial_addrs: VecDeque, ongoing_dials: HashSet, @@ -59,6 +104,7 @@ impl InitialBootstrap { self.initial_bootstrap_peer_ids.contains(peer_id) } + /// Has the bootstrap process finished. pub(crate) fn has_terminated(&self) -> bool { self.bootstrap_completed } @@ -67,12 +113,16 @@ impl InitialBootstrap { /// /// This will start dialing CONCURRENT_DIALS peers at a time from the initial addresses. If we have a successful /// dial and if a few peer are added to the routing table, we stop the initial bootstrap process. - pub(crate) fn trigger_initial_bootstrap( + /// + /// This should be called only ONCE and then the `on_connection_established` and `on_outgoing_connection_error` + /// should be used to continue the process. + /// Once the process is completed, the `bootstrap_completed` flag will be set to true, and this becomes a no-op. + pub(crate) fn trigger_bootstrapping_process( &mut self, swarm: &mut Swarm, peers_in_rt: usize, ) { - if !self.trigger_condition(peers_in_rt, true) { + if !self.should_we_continue_bootstrapping(peers_in_rt, true) { return; } @@ -129,7 +179,7 @@ impl InitialBootstrap { /// Check if the initial bootstrap process should be triggered. /// Also update bootstrap_completed flag if the process is completed. - fn trigger_condition(&mut self, peers_in_rt: usize, verbose: bool) -> bool { + fn should_we_continue_bootstrapping(&mut self, peers_in_rt: usize, verbose: bool) -> bool { if self.bootstrap_completed { if verbose { info!("Initial bootstrap process has already completed successfully."); @@ -216,7 +266,7 @@ impl InitialBootstrap { } } - self.trigger_initial_bootstrap(swarm, peers_in_rt); + self.trigger_bootstrapping_process(swarm, peers_in_rt); } pub(crate) fn on_outgoing_connection_error( @@ -247,6 +297,6 @@ impl InitialBootstrap { } } - self.trigger_initial_bootstrap(swarm, peers_in_rt); + self.trigger_bootstrapping_process(swarm, peers_in_rt); } } diff --git a/ant-networking/src/driver.rs b/ant-networking/src/driver.rs index 5356738f54..c82e305ba9 100644 --- a/ant-networking/src/driver.rs +++ b/ant-networking/src/driver.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use crate::{ - bootstrap::InitialBootstrap, + bootstrap::{InitialBootstrap, InitialBootstrapTrigger, INITIAL_BOOTSTRAP_CHECK_INTERVAL}, circular_vec::CircularVec, cmd::{LocalSwarmCmd, NetworkSwarmCmd}, config::GetRecordCfg, @@ -589,6 +589,7 @@ impl NetworkBuilder { close_group: Vec::with_capacity(CLOSE_GROUP_SIZE), peers_in_rt: 0, initial_bootstrap: InitialBootstrap::new(self.initial_contacts), + initial_bootstrap_trigger: InitialBootstrapTrigger::new(self.upnp, is_client), bootstrap_cache: self.bootstrap_cache, relay_manager, connected_relay_clients: Default::default(), @@ -684,6 +685,7 @@ pub struct SwarmDriver { pub(crate) close_group: Vec, pub(crate) peers_in_rt: usize, pub(crate) initial_bootstrap: InitialBootstrap, + pub(crate) initial_bootstrap_trigger: InitialBootstrapTrigger, pub(crate) network_discovery: NetworkDiscovery, pub(crate) bootstrap_cache: Option, pub(crate) external_address_manager: Option, @@ -742,6 +744,8 @@ impl SwarmDriver { let mut network_discover_interval = interval(NETWORK_DISCOVER_INTERVAL); let mut set_farthest_record_interval = interval(CLOSET_RECORD_CHECK_INTERVAL); let mut relay_manager_reservation_interval = interval(RELAY_MANAGER_RESERVATION_INTERVAL); + let mut initial_bootstrap_trigger_check_interval = + Some(interval(INITIAL_BOOTSTRAP_CHECK_INTERVAL)); let mut bootstrap_cache_save_interval = self.bootstrap_cache.as_ref().and_then(|cache| { if cache.config().disable_cache_writing { @@ -761,11 +765,6 @@ impl SwarmDriver { ); } - if self.is_client { - self.initial_bootstrap - .trigger_initial_bootstrap(&mut self.swarm, self.peers_in_rt); - } - // temporarily skip processing IncomingConnectionError swarm event to avoid log spamming let mut previous_incoming_connection_error_event = None; loop { @@ -831,6 +830,18 @@ impl SwarmDriver { }, // thereafter we can check our intervals + // check if we can trigger the initial bootstrap process + // once it is triggered, we don't re-trigger it + Some(()) = Self::conditional_interval(&mut initial_bootstrap_trigger_check_interval) => { + if self.initial_bootstrap_trigger.should_trigger_initial_bootstrap() { + info!("Triggering initial bootstrap process. This is a one-time operation."); + self.initial_bootstrap.trigger_bootstrapping_process(&mut self.swarm, self.peers_in_rt); + // we will not call this loop anymore, once the initial bootstrap is triggered. + // It should run on its own and complete. + initial_bootstrap_trigger_check_interval = None; + } + } + // runs every bootstrap_interval time _ = network_discover_interval.tick() => { if let Some(new_interval) = self.run_network_discover_continuously(network_discover_interval.period()).await { diff --git a/ant-networking/src/event/swarm.rs b/ant-networking/src/event/swarm.rs index 6daa68a5c7..8ad85058ac 100644 --- a/ant-networking/src/event/swarm.rs +++ b/ant-networking/src/event/swarm.rs @@ -82,11 +82,24 @@ impl SwarmDriver { } event_string = "upnp_event"; info!(?upnp_event, "UPnP event"); - if let libp2p::upnp::Event::GatewayNotFound = upnp_event { - warn!("UPnP is not enabled/supported on the gateway. Please rerun without the `--upnp` flag"); - self.send_event(NetworkEvent::TerminateNode { - reason: crate::event::TerminateNodeReason::UpnpGatewayNotFound, - }); + match upnp_event { + libp2p::upnp::Event::GatewayNotFound => { + warn!("UPnP is not enabled/supported on the gateway. 
Please rerun without the `--upnp` flag"); + self.send_event(NetworkEvent::TerminateNode { + reason: crate::event::TerminateNodeReason::UpnpGatewayNotFound, + }); + } + libp2p::upnp::Event::NewExternalAddr(addr) => { + info!("UPnP: New external address: {addr:?}"); + self.initial_bootstrap_trigger.upnp_gateway_result_obtained = true; + } + libp2p::upnp::Event::NonRoutableGateway => { + warn!("UPnP gateway is not routable"); + self.initial_bootstrap_trigger.upnp_gateway_result_obtained = true; + } + _ => { + debug!("UPnP event: {upnp_event:?}"); + } } } @@ -196,10 +209,7 @@ impl SwarmDriver { debug!("All our external addresses: {all_external_addresses:?}"); } - if !self.is_client { - self.initial_bootstrap - .trigger_initial_bootstrap(&mut self.swarm, self.peers_in_rt); - } + self.initial_bootstrap_trigger.listen_addr_obtained = true; self.send_event(NetworkEvent::NewListenAddr(address.clone())); } From f221e9ae7a35722e16d2d2c2a9ba06ca31ca44b1 Mon Sep 17 00:00:00 2001 From: Ermine Jose Date: Wed, 26 Feb 2025 20:22:38 +0530 Subject: [PATCH 55/69] fix: records_store metrics count --- ant-networking/src/record_store.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ant-networking/src/record_store.rs b/ant-networking/src/record_store.rs index 3a0ea1c2ee..1805479aee 100644 --- a/ant-networking/src/record_store.rs +++ b/ant-networking/src/record_store.rs @@ -635,6 +635,11 @@ impl NodeRecordStore { self.records .insert(key.clone(), (addr.clone(), validate_type, data_type)); + #[cfg(feature = "open-metrics")] + if let Some(metric) = &self.record_count_metric { + let _ = metric.set(self.records.len() as i64); + } + // Update bucket index let _ = self.records_by_distance.insert(distance, key.clone()); @@ -708,11 +713,6 @@ impl NodeRecordStore { let filename = Self::generate_filename(key); let file_path = self.config.storage_dir.join(&filename); - #[cfg(feature = "open-metrics")] - if let Some(metric) = &self.record_count_metric { - let _ = metric.set(self.records.len() as i64); - } - let encryption_details = self.encryption_details.clone(); let cloned_cmd_sender = self.local_swarm_cmd_sender.clone(); From 3896c1fcb17b3451fa529a7df9fccc3ae3f12280 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 7 Feb 2025 19:09:10 +0100 Subject: [PATCH 56/69] refactor: add more missing methods and unify Also added a few test cases for stuff with pytest (cherry picked from commit 38dabcc6a6f9d1041d8dc52574f66b3d2a308855) --- autonomi/pyproject.toml | 6 +- autonomi/python/examples/autonomi_pointers.py | 2 +- autonomi/src/python.rs | 256 +++++++++++++----- autonomi/tests/python/test_bindings.py | 119 ++++---- 4 files changed, 257 insertions(+), 126 deletions(-) diff --git a/autonomi/pyproject.toml b/autonomi/pyproject.toml index b3c9a2d080..f2113ea779 100644 --- a/autonomi/pyproject.toml +++ b/autonomi/pyproject.toml @@ -7,7 +7,11 @@ name = "autonomi-client" dynamic = ["version"] description = "Autonomi client API" authors = [{ name = "MaidSafe Developers", email = "dev@maidsafe.net" }] -dependencies = ["maturin>=1.7.4", "pip>=24.0"] +dependencies = [ + "maturin>=1.7.4", + "pip>=24.0", + "pytest>=8.3.4", +] readme = "README_PYTHON.md" requires-python = ">=3.8" license = { text = "GPL-3.0" } diff --git a/autonomi/python/examples/autonomi_pointers.py b/autonomi/python/examples/autonomi_pointers.py index fb63ec4451..d709a307fb 100644 --- a/autonomi/python/examples/autonomi_pointers.py +++ b/autonomi/python/examples/autonomi_pointers.py @@ -20,7 +20,7 @@ async def main(): # First, let's 
upload some data that we want to point to target_data = b"Hello, I'm the target data!" - target_addr = await client.data_put_public(target_data, PaymentOption.wallet(wallet)) + [cost, target_addr] = await client.data_put_public(target_data, PaymentOption.wallet(wallet)) print(f"Target data uploaded to: {target_addr}") # Create a pointer target from the address diff --git a/autonomi/src/python.rs b/autonomi/src/python.rs index fd9333dad0..3736b5ccc1 100644 --- a/autonomi/src/python.rs +++ b/autonomi/src/python.rs @@ -2,6 +2,7 @@ use std::{path::PathBuf, str::FromStr, sync::Arc}; use crate::{ client::{ + address::addr_to_str, chunk::DataMapChunk, payment::PaymentOption, vault::{UserData, VaultSecretKey}, @@ -132,7 +133,7 @@ impl PyClient { .chunk_put(&chunk, payment) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to put chunk: {e}")))?; - Ok((cost.to_string(), PyChunkAddress::from(addr))) + Ok((cost.to_string(), PyChunkAddress { inner: addr })) }) } @@ -140,14 +141,13 @@ impl PyClient { fn graph_entry_get<'a>( &self, py: Python<'a>, - #[pyo3(from_py_with = "str_to_addr")] addr: XorName, + addr: PyGraphEntryAddress, ) -> PyResult> { let client = self.inner.clone(); - let addr = GraphEntryAddress(addr); future_into_py(py, async move { let entry = client - .graph_entry_get(&addr) + .graph_entry_get(&addr.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to get graph entry: {e}")))?; Ok(PyGraphEntry { inner: entry }) @@ -158,14 +158,13 @@ impl PyClient { fn graph_entry_check_existance<'a>( &self, py: Python<'a>, - #[pyo3(from_py_with = "str_to_addr")] addr: XorName, + addr: PyGraphEntryAddress, ) -> PyResult> { let client = self.inner.clone(); - let addr = GraphEntryAddress(addr); future_into_py(py, async move { let exists = client - .graph_entry_check_existance(&addr) + .graph_entry_check_existance(&addr.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to get graph entry: {e}")))?; Ok(exists) @@ -995,6 +994,7 @@ impl PyClient { .pointer_put(pointer, payment) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to put pointer: {e}")))?; + Ok(PyPointerAddress { inner: addr }) }) } @@ -1063,8 +1063,8 @@ impl PyClient { /// A network address where a pointer is stored. /// The address is derived from the owner's public key. -#[pyclass(name = "PointerAddress")] -#[derive(Debug, Clone)] +#[pyclass(name = "PointerAddress", eq, ord)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] pub struct PyPointerAddress { inner: PointerAddress, } @@ -1072,24 +1072,33 @@ pub struct PyPointerAddress { #[pymethods] impl PyPointerAddress { /// Initialise pointer address from hex string. - #[staticmethod] - pub fn from_hex(hex: String) -> PyResult { - let bytes = hex::decode(hex) - .map_err(|e| PyValueError::new_err(format!("`hex` not a valid hex string: {e}")))?; - let bytes: [u8; 32] = bytes - .try_into() - .map_err(|_| PyValueError::new_err("`hex` invalid: must be 32 bytes"))?; + #[new] + fn new(#[pyo3(from_py_with = "str_to_addr")] addr: XorName) -> PyResult { + Ok(Self { + inner: PointerAddress::new(addr), + }) + } + /// Instantiate address which is derived from owner's unique public key. + #[staticmethod] + fn from_owner(public_key: PyPublicKey) -> PyResult { Ok(Self { - inner: PointerAddress::new(XorName(bytes)), + inner: PointerAddress::from_owner(public_key.inner), }) } /// Returns the hex string representation of the pointer address. 
#[getter] - pub fn hex(&self) -> String { - let bytes: [u8; 32] = self.inner.xorname().0; - hex::encode(bytes) + fn hex(&self) -> String { + self.inner.to_hex() + } + + fn __str__(&self) -> PyResult { + Ok(self.hex()) + } + + fn __repr__(&self) -> PyResult { + Ok(format!("PointerAddress('{}')", self.hex())) } } @@ -1123,8 +1132,7 @@ impl PyPointer { /// Returns the hex string representation of the pointer's target. #[getter] fn hex(&self) -> String { - let bytes: [u8; 32] = self.inner.xorname().0; - hex::encode(bytes) + addr_to_str(self.inner.xorname()) } /// Returns the target that this pointer points to. @@ -1149,25 +1157,36 @@ pub struct PyPointerTarget { #[pymethods] impl PyPointerTarget { - /// Initialize a pointer target from a chunk address hex string. + /// Initialize a pointer targeting a chunk. #[staticmethod] - fn from_hex(hex: &str) -> PyResult { - let bytes = hex::decode(hex) - .map_err(|e| PyValueError::new_err(format!("`hex` not a valid hex string: {e}")))?; - let bytes: [u8; 32] = bytes - .try_into() - .map_err(|_| PyValueError::new_err("`hex` invalid: must be 32 bytes"))?; + fn new_chunk(addr: PyChunkAddress) -> PyResult { + Ok(Self { + inner: PointerTarget::ChunkAddress(addr.inner), + }) + } + /// Initialize a pointer targeting a graph entry. + #[staticmethod] + fn new_graph_entry(addr: PyGraphEntryAddress) -> PyResult { Ok(Self { - inner: PointerTarget::ChunkAddress(ChunkAddress::new(XorName(bytes))), + inner: PointerTarget::GraphEntryAddress(addr.inner), }) } - /// Returns the hex string representation of this pointer address. - #[getter] - fn hex(&self) -> String { - let bytes: [u8; 32] = self.inner.xorname().0; - hex::encode(bytes) + /// Initialize a pointer targeting another pointer. + #[staticmethod] + fn new_pointer(addr: PyPointerAddress) -> PyResult { + Ok(Self { + inner: PointerTarget::PointerAddress(addr.inner), + }) + } + + /// Initialize a pointer targeting a scratchpad. + #[staticmethod] + fn new_scratchpad(addr: PyScratchpadAddress) -> PyResult { + Ok(Self { + inner: PointerTarget::ScratchpadAddress(addr.inner), + }) } #[getter] @@ -1177,12 +1196,10 @@ impl PyPointerTarget { } } - /// Creates a pointer target from a chunk address. - #[staticmethod] - fn from_chunk_address(addr: &PyChunkAddress) -> Self { - Self { - inner: PointerTarget::ChunkAddress(addr.inner), - } + /// Returns the hex string representation of this pointer address. + #[getter] + fn hex(&self) -> String { + addr_to_str(self.inner.xorname()) } fn __str__(&self) -> PyResult { @@ -1191,64 +1208,126 @@ impl PyPointerTarget { } /// An address of a chunk of data on the network. Used to locate and retrieve data chunks. -#[pyclass(name = "ChunkAddress")] -#[derive(Debug, Clone)] +#[pyclass(name = "ChunkAddress", eq, ord)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] pub struct PyChunkAddress { inner: ChunkAddress, } -impl From for PyChunkAddress { - fn from(addr: ChunkAddress) -> Self { - Self { inner: addr } +#[pymethods] +impl PyChunkAddress { + /// Creates a new chunk address from a hex string. + #[new] + fn new(#[pyo3(from_py_with = "str_to_addr")] addr: XorName) -> PyResult { + Ok(Self { + inner: ChunkAddress::new(addr), + }) + } + + /// Generate a chunk address for the given content (for content-addressable-storage). + #[staticmethod] + fn from_content(data: Vec) -> PyResult { + Ok(Self { + inner: ChunkAddress::new(XorName::from_content(&data[..])), + }) + } + + /// Generate a random chunk address. 
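+    /// The resulting address is random and not derived from any content.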
+    #[staticmethod]
+    fn random() -> PyResult {
+        Ok(Self {
+            inner: ChunkAddress::new(XorName::random(&mut rand::thread_rng())),
+        })
+    }
+
+    #[getter]
+    fn hex(&self) -> String {
+        addr_to_str(*self.inner.xorname())
+    }
+
+    fn __str__(&self) -> PyResult {
+        Ok(self.hex())
     }
-}
 
-impl From for ChunkAddress {
-    fn from(addr: PyChunkAddress) -> Self {
-        addr.inner
+    fn __repr__(&self) -> PyResult {
+        Ok(format!("ChunkAddress('{}')", self.hex()))
     }
 }
 
+/// Address of a GraphEntry, is derived from the owner's unique public key.
+#[pyclass(name = "GraphEntryAddress", eq, ord)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub struct PyGraphEntryAddress {
+    inner: GraphEntryAddress,
+}
+
 #[pymethods]
-impl PyChunkAddress {
-    /// Creates a new chunk address from a string representation.
+impl PyGraphEntryAddress {
+    /// Create graph entry address pointing to a specific XOR name.
     #[new]
     fn new(#[pyo3(from_py_with = "str_to_addr")] addr: XorName) -> PyResult {
         Ok(Self {
-            inner: ChunkAddress::new(addr),
+            inner: GraphEntryAddress::new(addr),
+        })
+    }
+
+    /// Instantiate address which is derived from owner's unique public key.
+    #[staticmethod]
+    fn from_owner(public_key: PyPublicKey) -> PyResult {
+        Ok(Self {
+            inner: GraphEntryAddress::from_owner(public_key.inner),
         })
     }
 
     #[getter]
     fn hex(&self) -> String {
-        let bytes: [u8; 32] = self.inner.xorname().0;
-        hex::encode(bytes)
+        self.inner.to_hex()
     }
 
-    /// Creates a chunk address from a hex string representation.
-    #[staticmethod]
-    fn from_chunk_address(addr: &str) -> PyResult {
-        let bytes =
-            hex::decode(addr).map_err(|e| PyValueError::new_err(format!("`addr` invalid: {e}")))?;
+    fn __str__(&self) -> PyResult {
+        Ok(self.hex())
+    }
 
-        if bytes.len() != 32 {
-            return Err(PyValueError::new_err("`addr` invalid: must be 32 bytes"));
-        }
+    fn __repr__(&self) -> PyResult {
+        Ok(format!("GraphEntryAddress('{}')", self.hex()))
+    }
+}
 
-        let mut xorname = [0u8; 32];
-        xorname.copy_from_slice(&bytes);
+/// Address of a Scratchpad, is derived from the owner's unique public key.
+#[pyclass(name = "ScratchpadAddress", eq, ord)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub struct PyScratchpadAddress {
+    inner: ScratchpadAddress,
+}
 
+#[pymethods]
+impl PyScratchpadAddress {
+    /// Construct a new ScratchpadAddress given an owner.
+    #[new]
+    fn new(public_key: PyPublicKey) -> PyResult {
         Ok(Self {
-            inner: ChunkAddress::new(XorName(xorname)),
+            inner: ScratchpadAddress::new(public_key.inner),
         })
     }
 
+    /// Return the owner public key.
+    pub fn owner(&self) -> PyPublicKey {
+        PyPublicKey {
+            inner: self.inner.owner().clone(),
+        }
+    }
+
+    #[getter]
+    fn hex(&self) -> String {
+        self.inner.to_hex()
+    }
+
     fn __str__(&self) -> PyResult {
         Ok(self.hex())
     }
 
     fn __repr__(&self) -> PyResult {
-        Ok(format!("ChunkAddress({})", self.hex()))
+        Ok(format!("ScratchpadAddress('{}')", self.hex()))
     }
 }
 
@@ -1275,6 +1354,14 @@ impl PyWallet {
         Ok(Self { inner: wallet })
     }
 
+    /// Convenience function that creates a new Wallet with a random EthereumWallet.
+    #[staticmethod]
+    fn new_with_random_wallet(network: PyNetwork) -> Self {
+        Self {
+            inner: Wallet::new_with_random_wallet(network.inner),
+        }
+    }
+
     /// Creates a new wallet from a private key string with a specified network.
     #[staticmethod]
     fn new_from_private_key(network: PyNetwork, private_key: &str) -> PyResult {
@@ -1286,7 +1373,14 @@ impl PyWallet {
 
     /// Returns a string representation of the wallet's address.
fn address(&self) -> String { - format!("{:?}", self.inner.address()) + self.inner.address().to_string() + } + + /// Returns the `Network` of this wallet. + fn network(&self) -> PyNetwork { + PyNetwork { + inner: self.inner.network().clone(), + } } /// Returns the raw balance of payment tokens in the wallet. @@ -1314,6 +1408,12 @@ impl PyWallet { } }) } + + /// Returns a random private key string. + #[staticmethod] + pub fn random_private_key() -> String { + Wallet::random_private_key() + } } /// Options for making payments on the network. @@ -1382,9 +1482,9 @@ pub struct PyPublicKey { #[pymethods] impl PyPublicKey { - /// Creates a new random public key by generating a random secret key. - #[new] - fn new() -> PyResult { + /// Creates a random public key by generating a random secret key. + #[staticmethod] + fn random() -> PyResult { let secret = SecretKey::random(); Ok(Self { inner: secret.public_key(), @@ -1518,8 +1618,8 @@ fn encrypt(data: Vec) -> PyResult<(Vec, Vec>)> { Ok((data_map_bytes, chunks_bytes)) } -#[pyclass(name = "Network")] -#[derive(Debug, Clone)] +#[pyclass(name = "Network", eq)] +#[derive(Debug, Clone, PartialEq)] pub struct PyNetwork { inner: Network, } @@ -1840,6 +1940,11 @@ impl PyClientConfig { // fn strategy() { } } +#[pyfunction] +fn random_xor() -> String { + addr_to_str(XorName::random(&mut rand::thread_rng())) +} + #[pymodule] #[pyo3(name = "autonomi_client")] fn autonomi_client_module(m: &Bound<'_, PyModule>) -> PyResult<()> { @@ -1850,9 +1955,11 @@ fn autonomi_client_module(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; m.add_class::()?; + m.add_class::()?; + m.add_class::()?; m.add_class::()?; + m.add_class::()?; m.add_class::()?; - m.add_class::()?; m.add_class::()?; m.add_class::()?; m.add_class::()?; @@ -1864,6 +1971,7 @@ fn autonomi_client_module(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; m.add_function(wrap_pyfunction!(encrypt, m)?)?; + m.add_function(wrap_pyfunction!(random_xor, m)?)?; Ok(()) } diff --git a/autonomi/tests/python/test_bindings.py b/autonomi/tests/python/test_bindings.py index ce1d37cd10..e79da9bcc2 100644 --- a/autonomi/tests/python/test_bindings.py +++ b/autonomi/tests/python/test_bindings.py @@ -1,92 +1,111 @@ import pytest -from autonomi_client import ( - ChunkAddress, - PointerTarget, - Pointer, - PointerAddress, - SecretKey, - PublicKey, - Wallet -) - -def test_chunk_address_creation(): - # Test creating a ChunkAddress from bytes - test_data = b"test data for chunk address" - chunk_addr = ChunkAddress(test_data) - +from autonomi_client import * + +def test_graph_entry_address(): + # Create a random address from XOR + xor_hex = random_xor() + addr = GraphEntryAddress(xor_hex) + # Test hex representation - hex_str = chunk_addr.hex - assert isinstance(hex_str, str) - assert len(hex_str) == 64 # 32 bytes = 64 hex chars - - # Test string representation - str_repr = str(chunk_addr) - assert str_repr == hex_str - - # Test repr - repr_str = repr(chunk_addr) - assert repr_str == f"ChunkAddress({hex_str})" + assert isinstance(addr.hex, str) + assert len(addr.hex) == 64 + assert addr.hex == xor_hex -def test_chunk_address_from_hex(): - # Create a chunk address - original = ChunkAddress(b"test data") - hex_str = original.hex - - # Create new chunk address from hex - recreated = ChunkAddress.from_chunk_address(hex_str) - assert recreated.hex == hex_str + # Test repr (round-trip) and equality + assert eval(repr(addr)) == addr + + # Create a graph entry address from a (random) 
public key + addr = GraphEntryAddress.from_owner(PublicKey.random()) + +def test_chunk_address(): + # Create a random address from XOR + xor_hex = random_xor() + addr = ChunkAddress(xor_hex) + + # Test hex representation + assert isinstance(addr.hex, str) + assert len(addr.hex) == 64 + assert addr.hex == xor_hex + + # Test repr (round-trip) and equality + assert eval(repr(addr)) == addr + + # Create a chunk address from some content + addr = ChunkAddress.from_content(b"test data") + +def test_pointer_address(): + # Create a random address from XOR + xor_hex = random_xor() + addr = PointerAddress(xor_hex) + + # Test hex representation + assert isinstance(addr.hex, str) + assert len(addr.hex) == 64 + assert addr.hex == xor_hex + + # Test repr (round-trip) and equality + assert eval(repr(addr)) == addr + + # Create a pointer address from a (random) public key + addr = PointerAddress.from_owner(PublicKey.random()) def test_pointer_target_with_chunk_address(): # Create a chunk address - chunk_addr = ChunkAddress(b"test data for pointer target") + chunk_addr = ChunkAddress.from_content(b"test data for pointer target") # Create pointer target from chunk address - target = PointerTarget.from_chunk_address(chunk_addr) + target = PointerTarget.new_chunk(chunk_addr) # Verify the hex matches assert isinstance(target.hex, str) assert len(target.hex) == 64 def test_pointer_creation(): + xor_hex = random_xor() + # Create necessary components - owner = PublicKey() - counter = 42 - chunk_addr = ChunkAddress(b"test data for pointer") - target = PointerTarget.from_chunk_address(chunk_addr) key = SecretKey() + counter = 42 + target = PointerTarget.new_chunk(ChunkAddress(xor_hex)) # Create pointer - pointer = Pointer(owner, counter, target, key) + pointer = Pointer(key, counter, target) # Verify pointer properties assert isinstance(pointer.hex, str) assert len(pointer.hex) == 64 # Test network address - addr = pointer.network_address() + addr = pointer.address() assert isinstance(addr, PointerAddress) assert isinstance(addr.hex, str) assert len(addr.hex) == 64 + # Pointer should point to original XOR + assert pointer.target.hex == xor_hex + def test_pointer_target_creation(): # Test direct creation test_data = b"test data for pointer target" - target = PointerTarget(test_data) + target = PointerTarget.new_chunk(ChunkAddress.from_content(test_data)) # Verify hex assert isinstance(target.hex, str) assert len(target.hex) == 64 - - # Test from_xorname - target2 = PointerTarget.from_xorname(test_data) - assert isinstance(target2.hex, str) - assert len(target2.hex) == 64 def test_invalid_hex(): # Test invalid hex string for chunk address with pytest.raises(ValueError): - ChunkAddress.from_chunk_address("invalid hex") + ChunkAddress("invalid hex") # Test invalid hex string for pointer address with pytest.raises(ValueError): - PointerAddress("invalid hex") \ No newline at end of file + PointerAddress("invalid hex") + +def test_wallet(): + network = Network(True) + private_key = "0xdb1049e76a813c94be0df47ec3e20533ca676b1b9fef2ddbce9daa117e4da4aa" + wallet = Wallet.new_from_private_key(network, private_key) + + assert wallet.address() == '0x69D5BF2Bc42bca8782b8D2b4FdfF2b1Fa7644Fe7' + assert wallet.network() == network From 934d1058c7df5431b29e6b007277666a02ec7d86 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 7 Feb 2025 19:20:48 +0100 Subject: [PATCH 57/69] fix: clippy fix suggestion (cherry picked from commit a91f8b25df22b76260760a4c2b057fd35bb9e9c0) --- autonomi/src/python.rs | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/autonomi/src/python.rs b/autonomi/src/python.rs index 3736b5ccc1..9421a96b7f 100644 --- a/autonomi/src/python.rs +++ b/autonomi/src/python.rs @@ -1313,7 +1313,7 @@ impl PyScratchpadAddress { /// Return the owner public key. pub fn owner(&self) -> PyPublicKey { PyPublicKey { - inner: self.inner.owner().clone(), + inner: *self.inner.owner(), } } From f10b16dd8d1d5d2526201a1b3df71427cf4322da Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 11 Feb 2025 17:14:33 +0100 Subject: [PATCH 58/69] refactor: improvements for python bindings (client) (cherry picked from commit bf3e638eb66b5f24250c52e8991f57e6304df0ec) --- autonomi/python/examples/autonomi_example.py | 8 +- autonomi/src/python.rs | 93 +++++++++++++++++--- 2 files changed, 83 insertions(+), 18 deletions(-) diff --git a/autonomi/python/examples/autonomi_example.py b/autonomi/python/examples/autonomi_example.py index c889dd5477..e0dfeab378 100644 --- a/autonomi/python/examples/autonomi_example.py +++ b/autonomi/python/examples/autonomi_example.py @@ -1,7 +1,10 @@ -from autonomi_client import Client, Wallet, PaymentOption +from autonomi_client import Client, Network, Wallet, PaymentOption import asyncio async def main(): + # Connect to the network + client = await Client.init_local() + # Initialize a wallet with a private key # This should be a valid Ethereum private key (64 hex chars without '0x' prefix) private_key = "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" @@ -9,9 +12,6 @@ async def main(): print(f"Wallet address: {wallet.address()}") print(f"Wallet balance: {wallet.balance()}") - # Connect to the network - client = await Client.init() - # Create payment option using the wallet payment = PaymentOption.wallet(wallet) diff --git a/autonomi/src/python.rs b/autonomi/src/python.rs index 9421a96b7f..33414c90e8 100644 --- a/autonomi/src/python.rs +++ b/autonomi/src/python.rs @@ -227,14 +227,16 @@ impl PyClient { } /// Get Scratchpad from the Network using the scratpad address in hex string format. - fn scratchpad_get<'a>(&self, py: Python<'a>, addr: String) -> PyResult> { + fn scratchpad_get<'a>( + &self, + py: Python<'a>, + addr: PyScratchpadAddress, + ) -> PyResult> { let client = self.inner.clone(); - let addr = ScratchpadAddress::from_hex(&addr) - .map_err(|e| PyValueError::new_err(format!("Failed to parse address: {e}")))?; future_into_py(py, async move { let scratchpad = client - .scratchpad_get(&addr) + .scratchpad_get(&addr.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to get scratchpad: {e}")))?; @@ -246,15 +248,13 @@ impl PyClient { fn scratchpad_check_existance<'a>( &self, py: Python<'a>, - addr: String, + addr: PyScratchpadAddress, ) -> PyResult> { let client = self.inner.clone(); - let addr = ScratchpadAddress::from_hex(&addr) - .map_err(|e| PyValueError::new_err(format!("Failed to parse address: {e}")))?; future_into_py(py, async move { let exists = client - .scratchpad_check_existance(&addr) + .scratchpad_check_existance(&addr.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to get scratchpad: {e}")))?; @@ -278,7 +278,7 @@ impl PyClient { .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to put scratchpad: {e}")))?; - Ok((cost.to_string(), addr.to_hex())) + Ok((cost.to_string(), PyScratchpadAddress { inner: addr })) }) } @@ -1806,19 +1806,84 @@ impl PyPrivateArchive { /// The protocol only ensures that the graph entry is immutable once uploaded and that the signature is valid and matches the owner. 
/// /// For convenience it is advised to make use of BLS key derivation to create multiple graph entries from a single key. -#[pyclass(name = "GraphEntry")] -#[derive(Debug, Clone)] +#[pyclass(name = "GraphEntry", eq, ord)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct PyGraphEntry { inner: GraphEntry, } +#[pymethods] +impl PyGraphEntry { + /// Create a new graph entry, signing it with the provided secret key. + #[new] + fn new( + owner: PySecretKey, + parents: Vec, + content: [u8; 32], + descendants: Vec<(PyPublicKey, [u8; 32])>, + ) -> PyResult { + Ok(Self { + inner: GraphEntry::new( + &owner.inner, + parents.into_iter().map(|p| p.inner).collect(), + content, + descendants.into_iter().map(|p| (p.0.inner, p.1)).collect(), + ), + }) + } + + /// Returns the network address where this entry is stored. + pub fn address(&self) -> PyGraphEntryAddress { + PyGraphEntryAddress { + inner: self.inner.address(), + } + } +} + /// Scratchpad, a mutable space for encrypted data on the Network -#[pyclass(name = "Scratchpad")] -#[derive(Debug, Clone)] +#[pyclass(name = "Scratchpad", eq, ord)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct PyScratchpad { inner: Scratchpad, } +#[pymethods] +impl PyScratchpad { + /// Creates a new instance of Scratchpad. Encrypts the data, and signs all the elements. + #[new] + fn new( + owner: PySecretKey, + data_encoding: u64, + unencrypted_data: Vec, + counter: u64, + ) -> PyResult { + Ok(Self { + inner: Scratchpad::new( + &owner.inner, + data_encoding, + &Bytes::from(unencrypted_data), + counter, + ), + }) + } + + /// Returns the address of the scratchpad. + pub fn address(&self) -> PyScratchpadAddress { + PyScratchpadAddress { + inner: *self.inner.address(), + } + } + + /// Returns the encrypted_data, decrypted via the passed SecretKey + pub fn decrypt_data(&self, sk: PySecretKey) -> PyResult> { + let data = self + .inner + .decrypt_data(&sk.inner) + .map_err(|e| PyRuntimeError::new_err(format!("{e}")))?; + Ok(data.to_vec()) + } +} + /// A handle to the register history #[pyclass(name = "RegisterHistory")] #[derive(Clone)] @@ -1874,7 +1939,7 @@ pub struct PyClientConfig { #[pymethods] impl PyClientConfig { - #[staticmethod] + #[new] fn new() -> Self { Self { inner: ClientConfig::default(), From 2afeb14307786cf8d0b64ae823b306e4d394c28c Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 25 Feb 2025 07:49:13 +0100 Subject: [PATCH 59/69] feat!: accurate useable uniform address management (cherry picked from commit a42b404b72c94e64aa99b2e8ad035b7046aec2c2) --- ant-cli/src/access/user_data.rs | 27 +- ant-cli/src/actions/download.rs | 13 +- ant-cli/src/commands/file.rs | 5 +- ant-node/src/quote.rs | 8 +- ant-node/tests/data_with_churn.rs | 10 +- ant-protocol/src/lib.rs | 166 ++-------- ant-protocol/src/storage/address/chunk.rs | 29 +- ant-protocol/src/storage/address/graph.rs | 36 ++- ant-protocol/src/storage/address/mod.rs | 10 + .../src/storage/address/pointer_address.rs | 46 +-- .../src/storage/address/scratchpad.rs | 66 ++-- ant-protocol/src/storage/graph.rs | 2 +- ant-protocol/src/storage/mod.rs | 1 + ant-protocol/src/storage/pointer.rs | 23 +- autonomi/src/client/address.rs | 48 --- autonomi/src/client/data_types/chunk.rs | 2 + autonomi/src/client/data_types/graph.rs | 8 +- autonomi/src/client/data_types/pointer.rs | 10 +- autonomi/src/client/high_level/data/mod.rs | 42 ++- autonomi/src/client/high_level/data/public.rs | 10 +- .../high_level/files/archive_private.rs | 6 +- 
.../client/high_level/files/archive_public.rs | 35 ++- .../src/client/high_level/files/fs_private.rs | 8 +- .../src/client/high_level/files/fs_public.rs | 24 +- .../src/client/high_level/register/history.rs | 6 +- .../src/client/high_level/register/mod.rs | 90 +++--- autonomi/src/client/high_level/vault/mod.rs | 6 +- .../src/client/high_level/vault/user_data.rs | 23 +- autonomi/src/client/mod.rs | 1 - autonomi/src/lib.rs | 3 + autonomi/src/python.rs | 291 +++++++++++++----- autonomi/tests/address.rs | 152 +++++++++ 32 files changed, 704 insertions(+), 503 deletions(-) delete mode 100644 autonomi/src/client/address.rs create mode 100644 autonomi/tests/address.rs diff --git a/ant-cli/src/access/user_data.rs b/ant-cli/src/access/user_data.rs index 2fa3822066..1f01b77e95 100644 --- a/ant-cli/src/access/user_data.rs +++ b/ant-cli/src/access/user_data.rs @@ -8,12 +8,13 @@ use std::collections::HashMap; -use autonomi::client::{ - address::{addr_to_str, str_to_addr}, - files::archive_private::PrivateArchiveAccess, - files::archive_public::ArchiveAddr, - register::RegisterAddress, - vault::UserData, +use autonomi::{ + client::{ + files::{archive_private::PrivateArchiveDataMap, archive_public::ArchiveAddress}, + register::RegisterAddress, + vault::UserData, + }, + data::DataAddress, }; use color_eyre::eyre::Result; @@ -40,7 +41,7 @@ pub fn get_local_user_data() -> Result { Ok(user_data) } -pub fn get_local_private_file_archives() -> Result> { +pub fn get_local_private_file_archives() -> Result> { let data_dir = get_client_data_dir_path()?; let user_data_path = data_dir.join("user_data"); let private_file_archives_path = user_data_path.join("private_file_archives"); @@ -55,13 +56,13 @@ pub fn get_local_private_file_archives() -> Result Result { +pub fn get_local_private_archive_access(local_addr: &str) -> Result { let data_dir = get_client_data_dir_path()?; let user_data_path = data_dir.join("user_data"); let private_file_archives_path = user_data_path.join("private_file_archives"); @@ -69,7 +70,7 @@ pub fn get_local_private_archive_access(local_addr: &str) -> Result Res Ok(file_content) } -pub fn get_local_public_file_archives() -> Result> { +pub fn get_local_public_file_archives() -> Result> { let data_dir = get_client_data_dir_path()?; let user_data_path = data_dir.join("user_data"); let file_archives_path = user_data_path.join("file_archives"); @@ -116,7 +117,7 @@ pub fn get_local_public_file_archives() -> Result> { let entry = entry?; let file_name = entry.file_name().to_string_lossy(); - let file_archive_address = str_to_addr(&file_name)?; + let file_archive_address = DataAddress::from_hex(&file_name)?; let file_archive_name = std::fs::read_to_string(entry.path())?; file_archives.insert(file_archive_address, file_archive_name); } @@ -125,7 +126,7 @@ pub fn get_local_public_file_archives() -> Result> pub fn write_local_user_data(user_data: &UserData) -> Result<()> { for (archive, name) in user_data.file_archives.iter() { - write_local_public_file_archive(addr_to_str(*archive), name)?; + write_local_public_file_archive(archive.to_hex(), name)?; } for (archive, name) in user_data.private_file_archives.iter() { diff --git a/ant-cli/src/actions/download.rs b/ant-cli/src/actions/download.rs index 6218479dfb..2f6c0b2e16 100644 --- a/ant-cli/src/actions/download.rs +++ b/ant-cli/src/actions/download.rs @@ -10,11 +10,10 @@ use super::get_progress_bar; use autonomi::{ chunk::DataMapChunk, client::{ - address::str_to_addr, - files::{archive_private::PrivateArchiveAccess, archive_public::ArchiveAddr}, + 
files::{archive_private::PrivateArchiveDataMap, archive_public::ArchiveAddress}, GetError, }, - data::DataAddr, + data::DataAddress, Client, }; use color_eyre::{ @@ -24,7 +23,7 @@ use color_eyre::{ use std::path::PathBuf; pub async fn download(addr: &str, dest_path: &str, client: &Client) -> Result<()> { - let try_public_address = str_to_addr(addr).ok(); + let try_public_address = DataAddress::from_hex(addr).ok(); if let Some(public_address) = try_public_address { return download_public(addr, public_address, dest_path, client).await; } @@ -48,7 +47,7 @@ pub async fn download(addr: &str, dest_path: &str, client: &Client) -> Result<() async fn download_private( addr: &str, - private_address: PrivateArchiveAccess, + private_address: PrivateArchiveDataMap, dest_path: &str, client: &Client, ) -> Result<()> { @@ -94,7 +93,7 @@ async fn download_private( async fn download_public( addr: &str, - address: ArchiveAddr, + address: ArchiveAddress, dest_path: &str, client: &Client, ) -> Result<()> { @@ -146,7 +145,7 @@ async fn download_public( async fn download_public_single_file( addr: &str, - address: DataAddr, + address: DataAddress, dest_path: &str, client: &Client, ) -> Result<()> { diff --git a/ant-cli/src/commands/file.rs b/ant-cli/src/commands/file.rs index ecd2640d6a..fc934ebefc 100644 --- a/ant-cli/src/commands/file.rs +++ b/ant-cli/src/commands/file.rs @@ -9,7 +9,6 @@ use crate::network::NetworkPeers; use crate::utils::collect_upload_summary; use crate::wallet::load_wallet; -use autonomi::client::address::addr_to_str; use autonomi::client::payment::PaymentOption; use autonomi::ClientOperatingStrategy; use autonomi::ResponseQuorum; @@ -70,7 +69,7 @@ pub async fn upload( .dir_upload_public(dir_path, payment.clone()) .await .wrap_err("Failed to upload file")?; - local_addr = addr_to_str(xor_name); + local_addr = xor_name.to_hex(); local_addr.clone() } else { let (_cost, private_data_access) = client @@ -144,7 +143,7 @@ pub fn list() -> Result<()> { file_archives.len() ); for (addr, name) in file_archives { - println!("{}: {}", name, addr_to_str(addr)); + println!("{}: {}", name, addr.to_hex()); } // get private file archives diff --git a/ant-node/src/quote.rs b/ant-node/src/quote.rs index 763016020c..01ffa4d898 100644 --- a/ant-node/src/quote.rs +++ b/ant-node/src/quote.rs @@ -23,9 +23,9 @@ impl Node { ) -> Result { let content = match address { NetworkAddress::ChunkAddress(addr) => *addr.xorname(), - NetworkAddress::GraphEntryAddress(addr) => *addr.xorname(), + NetworkAddress::GraphEntryAddress(addr) => addr.xorname(), NetworkAddress::ScratchpadAddress(addr) => addr.xorname(), - NetworkAddress::PointerAddress(addr) => *addr.xorname(), + NetworkAddress::PointerAddress(addr) => addr.xorname(), NetworkAddress::PeerId(_) | NetworkAddress::RecordKey(_) => XorName::default(), }; let timestamp = std::time::SystemTime::now(); @@ -59,9 +59,9 @@ pub(crate) fn verify_quote_for_storecost( // check address let content = match address { NetworkAddress::ChunkAddress(addr) => *addr.xorname(), - NetworkAddress::GraphEntryAddress(addr) => *addr.xorname(), + NetworkAddress::GraphEntryAddress(addr) => addr.xorname(), NetworkAddress::ScratchpadAddress(addr) => addr.xorname(), - NetworkAddress::PointerAddress(addr) => *addr.xorname(), + NetworkAddress::PointerAddress(addr) => addr.xorname(), NetworkAddress::PeerId(_) | NetworkAddress::RecordKey(_) => XorName::default(), }; if content != quote.content { diff --git a/ant-node/tests/data_with_churn.rs b/ant-node/tests/data_with_churn.rs index 9e6871df99..642af9352e 
100644 --- a/ant-node/tests/data_with_churn.rs +++ b/ant-node/tests/data_with_churn.rs @@ -17,7 +17,7 @@ use ant_protocol::{ storage::{ChunkAddress, GraphEntry, GraphEntryAddress, PointerTarget, ScratchpadAddress}, NetworkAddress, }; -use autonomi::{Client, Wallet}; +use autonomi::{data::DataAddress, Client, Wallet}; use bls::{PublicKey, SecretKey}; use bytes::Bytes; use common::client::transfer_to_new_wallet; @@ -690,7 +690,9 @@ fn store_chunks_task( content .write() .await - .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(data_map))); + .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new( + *data_map.xorname(), + ))); break; } Err(err) => { @@ -872,7 +874,9 @@ async fn final_retry_query_content( async fn query_content(client: &Client, net_addr: &NetworkAddress) -> Result<()> { match net_addr { NetworkAddress::ChunkAddress(addr) => { - client.data_get_public(addr.xorname()).await?; + client + .data_get_public(&DataAddress::new(*addr.xorname())) + .await?; Ok(()) } NetworkAddress::PointerAddress(addr) => { diff --git a/ant-protocol/src/lib.rs b/ant-protocol/src/lib.rs index 0b48941ed8..f87b651b7f 100644 --- a/ant-protocol/src/lib.rs +++ b/ant-protocol/src/lib.rs @@ -30,9 +30,8 @@ pub mod antnode_proto { } pub use error::Error; pub use error::Error as NetworkError; -use storage::ScratchpadAddress; -use self::storage::{ChunkAddress, GraphEntryAddress, PointerAddress}; +use self::storage::{ChunkAddress, GraphEntryAddress, PointerAddress, ScratchpadAddress}; /// Re-export of Bytes used throughout the protocol pub use bytes::Bytes; @@ -125,12 +124,12 @@ impl NetworkAddress { pub fn as_bytes(&self) -> Vec { match self { NetworkAddress::PeerId(bytes) | NetworkAddress::RecordKey(bytes) => bytes.to_vec(), - NetworkAddress::ChunkAddress(chunk_address) => chunk_address.xorname().0.to_vec(), + NetworkAddress::ChunkAddress(chunk_address) => chunk_address.xorname().to_vec(), NetworkAddress::GraphEntryAddress(graph_entry_address) => { - graph_entry_address.xorname().0.to_vec() + graph_entry_address.xorname().to_vec() } - NetworkAddress::ScratchpadAddress(addr) => addr.xorname().0.to_vec(), - NetworkAddress::PointerAddress(pointer_address) => pointer_address.0.to_vec(), + NetworkAddress::ScratchpadAddress(addr) => addr.xorname().to_vec(), + NetworkAddress::PointerAddress(pointer_address) => pointer_address.xorname().to_vec(), } } @@ -158,10 +157,10 @@ impl NetworkAddress { NetworkAddress::RecordKey(bytes) => RecordKey::new(bytes), NetworkAddress::ChunkAddress(chunk_address) => RecordKey::new(chunk_address.xorname()), NetworkAddress::GraphEntryAddress(graph_entry_address) => { - RecordKey::new(graph_entry_address.xorname()) + RecordKey::new(&graph_entry_address.xorname()) } NetworkAddress::PointerAddress(pointer_address) => { - RecordKey::new(pointer_address.xorname()) + RecordKey::new(&pointer_address.xorname()) } NetworkAddress::ScratchpadAddress(addr) => RecordKey::new(&addr.xorname()), NetworkAddress::PeerId(bytes) => RecordKey::new(bytes), @@ -197,25 +196,25 @@ impl Debug for NetworkAddress { NetworkAddress::ChunkAddress(chunk_address) => { format!( "NetworkAddress::ChunkAddress({} - ", - &chunk_address.to_hex()[0..6] + &chunk_address.to_hex() ) } NetworkAddress::GraphEntryAddress(graph_entry_address) => { format!( "NetworkAddress::GraphEntryAddress({} - ", - &graph_entry_address.to_hex()[0..6] + &graph_entry_address.to_hex() ) } NetworkAddress::ScratchpadAddress(scratchpad_address) => { format!( "NetworkAddress::ScratchpadAddress({} - ", - &scratchpad_address.to_hex()[0..6] + 
&scratchpad_address.to_hex() ) } NetworkAddress::PointerAddress(pointer_address) => { format!( "NetworkAddress::PointerAddress({} - ", - &pointer_address.to_hex()[0..6] + &pointer_address.to_hex() ) } NetworkAddress::RecordKey(bytes) => { @@ -238,19 +237,19 @@ impl Display for NetworkAddress { write!(f, "NetworkAddress::PeerId({})", hex::encode(id)) } NetworkAddress::ChunkAddress(addr) => { - write!(f, "NetworkAddress::ChunkAddress({addr:?})") + write!(f, "NetworkAddress::ChunkAddress({addr})") } NetworkAddress::GraphEntryAddress(addr) => { - write!(f, "NetworkAddress::GraphEntryAddress({addr:?})") + write!(f, "NetworkAddress::GraphEntryAddress({addr})") } NetworkAddress::ScratchpadAddress(addr) => { - write!(f, "NetworkAddress::ScratchpadAddress({addr:?})") + write!(f, "NetworkAddress::ScratchpadAddress({addr})") } NetworkAddress::RecordKey(key) => { write!(f, "NetworkAddress::RecordKey({})", hex::encode(key)) } NetworkAddress::PointerAddress(addr) => { - write!(f, "NetworkAddress::PointerAddress({addr:?})") + write!(f, "NetworkAddress::PointerAddress({addr})") } } } @@ -376,141 +375,20 @@ impl std::fmt::Debug for PrettyPrintRecordKey<'_> { #[cfg(test)] mod tests { - use crate::{ - messages::{Nonce, Query}, - storage::GraphEntryAddress, - NetworkAddress, PeerId, - }; - use bls::rand::thread_rng; - use serde::{Deserialize, Serialize}; + use crate::storage::GraphEntryAddress; + use crate::NetworkAddress; #[test] fn verify_graph_entry_addr_is_actionable() { - let xorname = xor_name::XorName::random(&mut thread_rng()); - let graph_entry_addr = GraphEntryAddress::new(xorname); + let pk = bls::SecretKey::random().public_key(); + let graph_entry_addr = GraphEntryAddress::new(pk); let net_addr = NetworkAddress::from_graph_entry_address(graph_entry_addr); - let graph_entry_addr_hex = &graph_entry_addr.to_hex()[0..6]; // we only log the first 6 chars + let graph_entry_addr_hex = &graph_entry_addr.to_hex(); let net_addr_fmt = format!("{net_addr}"); + let net_addr_dbg = format!("{net_addr:?}"); assert!(net_addr_fmt.contains(graph_entry_addr_hex)); - } - - #[derive(Eq, PartialEq, PartialOrd, Clone, Serialize, Deserialize, Debug)] - enum QueryExtended { - GetStoreQuote { - key: NetworkAddress, - data_type: u32, - data_size: usize, - nonce: Option, - difficulty: usize, - }, - GetReplicatedRecord { - requester: NetworkAddress, - key: NetworkAddress, - }, - GetChunkExistenceProof { - key: NetworkAddress, - nonce: Nonce, - difficulty: usize, - }, - CheckNodeInProblem(NetworkAddress), - GetClosestPeers { - key: NetworkAddress, - num_of_peers: Option, - range: Option<[u8; 32]>, - sign_result: bool, - }, - GetVersion(NetworkAddress), - Extended, - } - - #[test] - fn test_query_serialization_deserialization() { - let peer_id = PeerId::random(); - // Create a sample Query message - let original_query = Query::GetStoreQuote { - key: NetworkAddress::from_peer(peer_id), - data_type: 1, - data_size: 100, - nonce: Some(0), - difficulty: 3, - }; - - // Serialize to bytes - let serialized = bincode::serialize(&original_query).expect("Serialization failed"); - - // Deserialize into QueryExtended - let deserialized: QueryExtended = - bincode::deserialize(&serialized).expect("Deserialization into QueryExtended failed"); - - // Verify the deserialized data matches the original - match deserialized { - QueryExtended::GetStoreQuote { - key, - data_type, - data_size, - nonce, - difficulty, - } => { - assert_eq!(key, NetworkAddress::from_peer(peer_id)); - assert_eq!(data_type, 1); - assert_eq!(data_size, 100); - 
assert_eq!(nonce, Some(0)); - assert_eq!(difficulty, 3); - } - _ => panic!("Deserialized into wrong variant"), - } - } - - #[test] - fn test_query_extended_serialization() { - // Create a sample QueryExtended message with extended new variant - let extended_query = QueryExtended::Extended; - - // Serialize to bytes - let serialized = bincode::serialize(&extended_query).expect("Serialization failed"); - - // Attempt to deserialize into original Query (should fail) - let result: Result = bincode::deserialize(&serialized); - assert!( - result.is_err(), - "Should fail to deserialize extended enum into original" - ); - - let peer_id = PeerId::random(); - // Create a sample QueryExtended message with old variant - let extended_query = QueryExtended::GetStoreQuote { - key: NetworkAddress::from_peer(peer_id), - data_type: 1, - data_size: 100, - nonce: Some(0), - difficulty: 3, - }; - - // Serialize to bytes - let serialized = bincode::serialize(&extended_query).expect("Serialization failed"); - - // Deserialize into Query - let deserialized: Query = - bincode::deserialize(&serialized).expect("Deserialization into Query failed"); - - // Verify the deserialized data matches the original - match deserialized { - Query::GetStoreQuote { - key, - data_type, - data_size, - nonce, - difficulty, - } => { - assert_eq!(key, NetworkAddress::from_peer(peer_id)); - assert_eq!(data_type, 1); - assert_eq!(data_size, 100); - assert_eq!(nonce, Some(0)); - assert_eq!(difficulty, 3); - } - _ => panic!("Deserialized into wrong variant"), - } + assert!(net_addr_dbg.contains(graph_entry_addr_hex)); } } diff --git a/ant-protocol/src/storage/address/chunk.rs b/ant-protocol/src/storage/address/chunk.rs index b1671c2f78..3539abd466 100644 --- a/ant-protocol/src/storage/address/chunk.rs +++ b/ant-protocol/src/storage/address/chunk.rs @@ -7,11 +7,14 @@ // permissions and limitations relating to use of the SAFE Network Software. use serde::{Deserialize, Serialize}; -use std::{fmt, hash::Hash}; +use std::hash::Hash; use xor_name::XorName; -/// Address of a Chunk -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +use super::AddressParseError; + +/// Address of a [`crate::storage::chunks::Chunk`] +/// It is derived from the content of the chunk +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] pub struct ChunkAddress(XorName); impl ChunkAddress { @@ -20,18 +23,30 @@ impl ChunkAddress { Self(xor_name) } - /// Returns the name. + /// Returns the XorName pub fn xorname(&self) -> &XorName { &self.0 } + /// Returns the hex string representation of the address. pub fn to_hex(&self) -> String { hex::encode(self.0) } + + /// Creates a new ChunkAddress from a hex string. 
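// A minimal usage sketch of the hex round-trip enabled by the new
// ChunkAddress::to_hex / ChunkAddress::from_hex pair in this hunk, assuming
// the ant_protocol, xor_name and rand crates are available.
use ant_protocol::storage::ChunkAddress;
use xor_name::XorName;

fn chunk_address_round_trip() {
    let addr = ChunkAddress::new(XorName::random(&mut rand::thread_rng()));
    let hex = addr.to_hex();
    let parsed = ChunkAddress::from_hex(&hex).expect("hex produced by to_hex is valid");
    assert_eq!(parsed, addr);
    // Display now prints the full hex string instead of a 6-character prefix.
    assert_eq!(format!("{addr}"), hex);
}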
+ pub fn from_hex(hex: &str) -> Result { + let bytes = hex::decode(hex)?; + let xor = XorName( + bytes + .try_into() + .map_err(|_| AddressParseError::InvalidLength)?, + ); + Ok(Self(xor)) + } } -impl std::fmt::Debug for ChunkAddress { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "ChunkAddress({})", &self.to_hex()[0..6]) +impl std::fmt::Display for ChunkAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", &self.to_hex()) } } diff --git a/ant-protocol/src/storage/address/graph.rs b/ant-protocol/src/storage/address/graph.rs index 4a247f76f6..c8b5aba76c 100644 --- a/ant-protocol/src/storage/address/graph.rs +++ b/ant-protocol/src/storage/address/graph.rs @@ -10,30 +10,44 @@ use bls::PublicKey; use serde::{Deserialize, Serialize}; use xor_name::XorName; -/// Address of a GraphEntry, is derived from the owner's unique public key -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] -pub struct GraphEntryAddress(pub XorName); +use super::AddressParseError; + +/// Address of a [`crate::storage::graph::GraphEntry`] +/// It is derived from the owner's unique public key +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +pub struct GraphEntryAddress(PublicKey); impl GraphEntryAddress { - pub fn from_owner(owner: PublicKey) -> Self { - Self(XorName::from_content(&owner.to_bytes())) + /// Create a new [`GraphEntryAddress`] + pub fn new(owner: PublicKey) -> Self { + Self(owner) } - pub fn new(xor_name: XorName) -> Self { - Self(xor_name) + /// Return the network name of the scratchpad. + /// This is used to locate the scratchpad on the network. + pub fn xorname(&self) -> XorName { + XorName::from_content(&self.0.to_bytes()) } - pub fn xorname(&self) -> &XorName { + /// Return the owner. + pub fn owner(&self) -> &PublicKey { &self.0 } + /// Serialize this [`GraphEntryAddress`] into a hex-encoded string. pub fn to_hex(&self) -> String { - hex::encode(self.0) + hex::encode(self.0.to_bytes()) + } + + /// Parse a hex-encoded string into a [`GraphEntryAddress`]. 
+ pub fn from_hex(hex: &str) -> Result { + let owner = PublicKey::from_hex(hex)?; + Ok(Self(owner)) } } -impl std::fmt::Debug for GraphEntryAddress { +impl std::fmt::Display for GraphEntryAddress { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "GraphEntryAddress({})", &self.to_hex()[0..6]) + write!(f, "{}", &self.to_hex()) } } diff --git a/ant-protocol/src/storage/address/mod.rs b/ant-protocol/src/storage/address/mod.rs index f1bf8abd4a..9821f92f32 100644 --- a/ant-protocol/src/storage/address/mod.rs +++ b/ant-protocol/src/storage/address/mod.rs @@ -7,3 +7,13 @@ pub use chunk::ChunkAddress; pub use graph::GraphEntryAddress; pub use pointer_address::PointerAddress; pub use scratchpad::ScratchpadAddress; + +#[derive(Debug, thiserror::Error)] +pub enum AddressParseError { + #[error("Invalid hex string: {0}")] + Hex(#[from] hex::FromHexError), + #[error("Invalid public key: {0}")] + PublicKey(#[from] bls::Error), + #[error("Invalid string length")] + InvalidLength, +} diff --git a/ant-protocol/src/storage/address/pointer_address.rs b/ant-protocol/src/storage/address/pointer_address.rs index c6406f4889..7cd456ebd9 100644 --- a/ant-protocol/src/storage/address/pointer_address.rs +++ b/ant-protocol/src/storage/address/pointer_address.rs @@ -2,39 +2,45 @@ use bls::PublicKey; use serde::{Deserialize, Serialize}; use xor_name::XorName; -/// Address of a pointer, is derived from the owner's public key -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] -pub struct PointerAddress(pub XorName); +use super::AddressParseError; + +/// Address of a [`crate::storage::pointer::Pointer`] +/// It is derived from the owner's public key +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +pub struct PointerAddress(PublicKey); impl PointerAddress { - pub fn from_owner(owner: PublicKey) -> Self { - Self(XorName::from_content(&owner.to_bytes())) + /// Create a new [`PointerAddress`] + pub fn new(owner: PublicKey) -> Self { + Self(owner) } - pub fn new(xor_name: XorName) -> Self { - Self(xor_name) + /// Return the network name of the scratchpad. + /// This is used to locate the scratchpad on the network. + pub fn xorname(&self) -> XorName { + XorName::from_content(&self.0.to_bytes()) } - pub fn xorname(&self) -> &XorName { + /// Return the owner. + pub fn owner(&self) -> &PublicKey { &self.0 } + /// Serialize this [`PointerAddress`] into a hex-encoded string. pub fn to_hex(&self) -> String { - hex::encode(self.0) - } - - pub fn to_bytes(&self) -> Vec { - rmp_serde::to_vec(self).expect("Failed to serialize PointerAddress") + hex::encode(self.0.to_bytes()) } - pub fn from_bytes(bytes: &[u8]) -> Result { - rmp_serde::from_slice(bytes) + /// Parse a hex-encoded string into a [`PointerAddress`]. 
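// A minimal usage sketch of the owner-keyed addresses above, assuming the
// ant_protocol, bls and xor_name crates: GraphEntryAddress and PointerAddress
// now wrap the owner's BLS public key and derive the network XorName from it.
use ant_protocol::storage::{GraphEntryAddress, PointerAddress};

fn owner_keyed_addresses() {
    let owner = bls::SecretKey::random().public_key();
    let graph_addr = GraphEntryAddress::new(owner);
    let pointer_addr = PointerAddress::new(owner);

    // to_hex is the hex of the owner key, so parsing it back is lossless.
    assert_eq!(
        GraphEntryAddress::from_hex(&graph_addr.to_hex()).expect("valid hex"),
        graph_addr
    );
    assert_eq!(pointer_addr.owner(), &owner);

    // The routing name is derived from the key bytes on demand.
    assert_eq!(
        graph_addr.xorname(),
        xor_name::XorName::from_content(&owner.to_bytes())
    );
}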
+ pub fn from_hex(hex: &str) -> Result { + let owner = PublicKey::from_hex(hex)?; + Ok(Self(owner)) } } -impl std::fmt::Debug for PointerAddress { +impl std::fmt::Display for PointerAddress { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "PointerAddress({})", &self.to_hex()[0..6]) + write!(f, "{}", &self.to_hex()) } } @@ -45,9 +51,9 @@ mod tests { #[test] fn test_pointer_serialization() { let key = bls::SecretKey::random(); - let pointer_address = PointerAddress::from_owner(key.public_key()); - let serialized = pointer_address.to_bytes(); - let deserialized = PointerAddress::from_bytes(&serialized).unwrap(); + let pointer_address = PointerAddress::new(key.public_key()); + let serialized = pointer_address.to_hex(); + let deserialized = PointerAddress::from_hex(&serialized).unwrap(); assert_eq!(pointer_address, deserialized); } } diff --git a/ant-protocol/src/storage/address/scratchpad.rs b/ant-protocol/src/storage/address/scratchpad.rs index ecd9735183..7ca8ee6f04 100644 --- a/ant-protocol/src/storage/address/scratchpad.rs +++ b/ant-protocol/src/storage/address/scratchpad.rs @@ -6,66 +6,50 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::error::{Error, Result}; use bls::PublicKey; use serde::{Deserialize, Serialize}; -use std::{ - fmt::{Debug, Display}, - hash::Hash, -}; +use std::hash::Hash; use xor_name::XorName; -/// Address of a Scratchpad on the SAFE Network -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] -pub struct ScratchpadAddress { - /// Owner of the scratchpad - pub(crate) owner: PublicKey, -} - -impl Display for ScratchpadAddress { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "({:?})", &self.to_hex()[0..6]) - } -} +use super::AddressParseError; -impl Debug for ScratchpadAddress { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "ScratchpadAddress({}) {{ owner: {:?} }}", - &self.to_hex()[0..6], - self.owner - ) - } -} +/// Address of a [`crate::storage::scratchpad::Scratchpad`] +/// It is derived from the owner's public key +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +pub struct ScratchpadAddress(PublicKey); impl ScratchpadAddress { - /// Construct a new `ScratchpadAddress` given `owner`. + /// Create a new [`ScratchpadAddress`] pub fn new(owner: PublicKey) -> Self { - Self { owner } + Self(owner) } /// Return the network name of the scratchpad. /// This is used to locate the scratchpad on the network. pub fn xorname(&self) -> XorName { - XorName::from_content(&self.owner.to_bytes()) + XorName::from_content(&self.0.to_bytes()) } - /// Serialize this `ScratchpadAddress` instance to a hex-encoded `String`. + /// Return the owner. + pub fn owner(&self) -> &PublicKey { + &self.0 + } + + /// Serialize this [`ScratchpadAddress`] into a hex-encoded string. pub fn to_hex(&self) -> String { - hex::encode(self.owner.to_bytes()) + hex::encode(self.0.to_bytes()) } - /// Deserialize a hex-encoded representation of a `ScratchpadAddress` to a `ScratchpadAddress` instance. 
- pub fn from_hex(hex: &str) -> Result { - // let bytes = hex::decode(hex).map_err(|_| Error::ScratchpadHexDeserializeFailed)?; - let owner = PublicKey::from_hex(hex).map_err(|_| Error::ScratchpadHexDeserializeFailed)?; - Ok(Self { owner }) + /// Parse a hex-encoded string into a [`ScratchpadAddress`]. + pub fn from_hex(hex: &str) -> Result { + let owner = PublicKey::from_hex(hex)?; + Ok(Self(owner)) } +} - /// Return the owner. - pub fn owner(&self) -> &PublicKey { - &self.owner +impl std::fmt::Display for ScratchpadAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", &self.to_hex()) } } @@ -85,6 +69,6 @@ mod tests { let bad_hex = format!("{hex}0"); let err = ScratchpadAddress::from_hex(&bad_hex); - assert_eq!(err, Err(Error::ScratchpadHexDeserializeFailed)); + assert!(err.is_err()); } } diff --git a/ant-protocol/src/storage/graph.rs b/ant-protocol/src/storage/graph.rs index c647131bd1..da63b0a1c1 100644 --- a/ant-protocol/src/storage/graph.rs +++ b/ant-protocol/src/storage/graph.rs @@ -113,7 +113,7 @@ impl GraphEntry { } pub fn address(&self) -> GraphEntryAddress { - GraphEntryAddress::from_owner(self.owner) + GraphEntryAddress::new(self.owner) } /// Get the bytes that the signature is calculated from. diff --git a/ant-protocol/src/storage/mod.rs b/ant-protocol/src/storage/mod.rs index 6fc76b34e8..060cc44e3d 100644 --- a/ant-protocol/src/storage/mod.rs +++ b/ant-protocol/src/storage/mod.rs @@ -14,6 +14,7 @@ mod pointer; mod scratchpad; pub use self::{ + address::AddressParseError, address::{ChunkAddress, GraphEntryAddress, PointerAddress, ScratchpadAddress}, chunks::Chunk, graph::{GraphContent, GraphEntry}, diff --git a/ant-protocol/src/storage/pointer.rs b/ant-protocol/src/storage/pointer.rs index ddfdb5ade1..bc18452979 100644 --- a/ant-protocol/src/storage/pointer.rs +++ b/ant-protocol/src/storage/pointer.rs @@ -30,14 +30,25 @@ pub enum PointerTarget { } impl PointerTarget { + /// Returns the xorname of the target pub fn xorname(&self) -> XorName { match self { PointerTarget::ChunkAddress(addr) => *addr.xorname(), - PointerTarget::GraphEntryAddress(addr) => *addr.xorname(), - PointerTarget::PointerAddress(ptr) => *ptr.xorname(), + PointerTarget::GraphEntryAddress(addr) => addr.xorname(), + PointerTarget::PointerAddress(addr) => addr.xorname(), PointerTarget::ScratchpadAddress(addr) => addr.xorname(), } } + + /// Returns the hex string representation of the target + pub fn to_hex(&self) -> String { + match self { + PointerTarget::ChunkAddress(addr) => addr.to_hex(), + PointerTarget::GraphEntryAddress(addr) => addr.to_hex(), + PointerTarget::PointerAddress(addr) => addr.to_hex(), + PointerTarget::ScratchpadAddress(addr) => addr.to_hex(), + } + } } impl Pointer { @@ -88,7 +99,7 @@ impl Pointer { /// Get the address of the pointer pub fn address(&self) -> PointerAddress { - PointerAddress::from_owner(self.owner) + PointerAddress::new(self.owner) } /// Get the owner of the pointer @@ -131,15 +142,13 @@ impl Pointer { #[cfg(test)] mod tests { use super::*; - use rand::thread_rng; #[test] fn test_pointer_creation_and_validation() { let owner_sk = SecretKey::random(); let counter = 1; - let mut rng = thread_rng(); - let target = - PointerTarget::GraphEntryAddress(GraphEntryAddress::new(XorName::random(&mut rng))); + let pk = SecretKey::random().public_key(); + let target = PointerTarget::GraphEntryAddress(GraphEntryAddress::new(pk)); // Create and sign pointer let pointer = Pointer::new(&owner_sk, counter, target.clone()); diff --git 
a/autonomi/src/client/address.rs b/autonomi/src/client/address.rs deleted file mode 100644 index f314952f9c..0000000000 --- a/autonomi/src/client/address.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use xor_name::XorName; - -#[derive(Debug, thiserror::Error)] -pub enum DataError { - #[error("Invalid XorName")] - InvalidXorName, - #[error("Input address is not a hex string")] - InvalidHexString, -} - -pub fn str_to_addr(addr: &str) -> Result { - let bytes = hex::decode(addr).map_err(|err| { - error!("Failed to decode hex string: {err:?}"); - DataError::InvalidHexString - })?; - let xor = XorName(bytes.try_into().map_err(|err| { - error!("Failed to convert bytes to XorName: {err:?}"); - DataError::InvalidXorName - })?); - Ok(xor) -} - -pub fn addr_to_str(addr: XorName) -> String { - hex::encode(addr) -} - -#[cfg(test)] -mod test { - use super::*; - use xor_name::XorName; - - #[test] - fn test_xorname_to_str() { - let rng = &mut rand::thread_rng(); - let xorname = XorName::random(rng); - let str = addr_to_str(xorname); - let xorname2 = str_to_addr(&str).expect("Failed to convert back to xorname"); - assert_eq!(xorname, xorname2); - } -} diff --git a/autonomi/src/client/data_types/chunk.rs b/autonomi/src/client/data_types/chunk.rs index 06626599a2..1748282f2f 100644 --- a/autonomi/src/client/data_types/chunk.rs +++ b/autonomi/src/client/data_types/chunk.rs @@ -76,10 +76,12 @@ pub static CHUNK_DOWNLOAD_BATCH_SIZE: LazyLock = LazyLock::new(|| { pub struct DataMapChunk(pub(crate) Chunk); impl DataMapChunk { + /// Convert the chunk to a hex string. pub fn to_hex(&self) -> String { hex::encode(self.0.value()) } + /// Convert a hex string to a [`DataMapChunk`]. 
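// A minimal sketch of the DataMapChunk hex round-trip documented above,
// assuming the autonomi crate; `saved_hex` stands in for a hex string
// previously returned by DataMapChunk::to_hex after a private data upload.
use autonomi::chunk::DataMapChunk;

fn restore_private_data_map(saved_hex: &str) -> DataMapChunk {
    let data_map = DataMapChunk::from_hex(saved_hex).expect("hex produced by to_hex");
    assert_eq!(data_map.to_hex(), saved_hex);
    data_map
}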
pub fn from_hex(hex: &str) -> Result { let data = hex::decode(hex)?; Ok(Self(Chunk::new(Bytes::from(data)))) diff --git a/autonomi/src/client/data_types/graph.rs b/autonomi/src/client/data_types/graph.rs index f86c004022..7c8f411468 100644 --- a/autonomi/src/client/data_types/graph.rs +++ b/autonomi/src/client/data_types/graph.rs @@ -109,7 +109,7 @@ impl Client { let (payment_proofs, skipped_payments) = self .pay_for_content_addrs( DataTypes::GraphEntry, - std::iter::once((*xor_name, entry.size())), + std::iter::once((xor_name, entry.size())), payment_option, ) .await @@ -118,7 +118,7 @@ impl Client { })?; // make sure the graph entry was paid for - let (proof, price) = match payment_proofs.get(xor_name) { + let (proof, price) = match payment_proofs.get(&xor_name) { Some((proof, price)) => (proof, price), None => { // graph entry was skipped, meaning it was already paid for @@ -170,8 +170,8 @@ impl Client { /// Get the cost to create a GraphEntry pub async fn graph_entry_cost(&self, key: &PublicKey) -> Result { trace!("Getting cost for GraphEntry of {key:?}"); - let address = GraphEntryAddress::from_owner(*key); - let xor = *address.xorname(); + let address = GraphEntryAddress::new(*key); + let xor = address.xorname(); let store_quote = self .get_store_quotes( DataTypes::GraphEntry, diff --git a/autonomi/src/client/data_types/pointer.rs b/autonomi/src/client/data_types/pointer.rs index 5ac9ada854..75c42bb0e3 100644 --- a/autonomi/src/client/data_types/pointer.rs +++ b/autonomi/src/client/data_types/pointer.rs @@ -120,7 +120,7 @@ impl Client { let address = pointer.address(); // pay for the pointer storage - let xor_name = *address.xorname(); + let xor_name = address.xorname(); debug!("Paying for pointer at address: {address:?}"); let (payment_proofs, _skipped_payments) = self .pay_for_content_addrs( @@ -191,7 +191,7 @@ impl Client { target: PointerTarget, payment_option: PaymentOption, ) -> Result<(AttoTokens, PointerAddress), PointerError> { - let address = PointerAddress::from_owner(owner.public_key()); + let address = PointerAddress::new(owner.public_key()); let already_exists = self.pointer_check_existance(&address).await?; if already_exists { return Err(PointerError::PointerAlreadyExists(address)); @@ -211,7 +211,7 @@ impl Client { owner: &SecretKey, target: PointerTarget, ) -> Result<(), PointerError> { - let address = PointerAddress::from_owner(owner.public_key()); + let address = PointerAddress::new(owner.public_key()); let current = match self.pointer_get(&address).await { Ok(pointer) => Some(pointer), Err(PointerError::Network(NetworkError::GetRecordError( @@ -263,8 +263,8 @@ impl Client { pub async fn pointer_cost(&self, key: &PublicKey) -> Result { trace!("Getting cost for pointer of {key:?}"); - let address = PointerAddress::from_owner(*key); - let xor = *address.xorname(); + let address = PointerAddress::new(*key); + let xor = address.xorname(); let store_quote = self .get_store_quotes(DataTypes::Pointer, std::iter::once((xor, Pointer::size()))) .await?; diff --git a/autonomi/src/client/high_level/data/mod.rs b/autonomi/src/client/high_level/data/mod.rs index 1e7de350fc..7c103ec6a3 100644 --- a/autonomi/src/client/high_level/data/mod.rs +++ b/autonomi/src/client/high_level/data/mod.rs @@ -6,6 +6,9 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
+use ant_protocol::storage::AddressParseError; +use serde::{Deserialize, Serialize}; +use std::hash::Hash; use xor_name::XorName; /// Private data on the network, readable only if you have the DataMapChunk @@ -13,5 +16,40 @@ pub mod private; /// Public data on the network, readable by anyone with the DataAddr pub mod public; -/// Raw Data Address (points to a DataMap) -pub type DataAddr = XorName; +/// A [`DataAddress`] which points to a DataMap +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +pub struct DataAddress(XorName); + +impl DataAddress { + /// Creates a new DataAddress. + pub fn new(xor_name: XorName) -> Self { + Self(xor_name) + } + + /// Returns the XorName + pub fn xorname(&self) -> &XorName { + &self.0 + } + + /// Returns the hex string representation of the address. + pub fn to_hex(&self) -> String { + hex::encode(self.0) + } + + /// Creates a new DataAddress from a hex string. + pub fn from_hex(hex: &str) -> Result { + let bytes = hex::decode(hex)?; + let xor = XorName( + bytes + .try_into() + .map_err(|_| AddressParseError::InvalidLength)?, + ); + Ok(Self(xor)) + } +} + +impl std::fmt::Display for DataAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", &self.to_hex()) + } +} diff --git a/autonomi/src/client/high_level/data/public.rs b/autonomi/src/client/high_level/data/public.rs index 20fc8ed114..abcc28b8fa 100644 --- a/autonomi/src/client/high_level/data/public.rs +++ b/autonomi/src/client/high_level/data/public.rs @@ -15,13 +15,13 @@ use crate::client::{ClientEvent, GetError, PutError, UploadSummary}; use crate::{chunk::ChunkAddress, self_encryption::encrypt, Client}; use ant_evm::{Amount, AttoTokens}; -use super::DataAddr; +use super::DataAddress; impl Client { /// Fetch a blob of data from the network - pub async fn data_get_public(&self, addr: &DataAddr) -> Result { + pub async fn data_get_public(&self, addr: &DataAddress) -> Result { info!("Fetching data from Data Address: {addr:?}"); - let data_map_chunk = self.chunk_get(&ChunkAddress::new(*addr)).await?; + let data_map_chunk = self.chunk_get(&ChunkAddress::new(*addr.xorname())).await?; let data = self .fetch_from_data_map_chunk(data_map_chunk.value()) .await?; @@ -37,7 +37,7 @@ impl Client { &self, data: Bytes, payment_option: PaymentOption, - ) -> Result<(AttoTokens, DataAddr), PutError> { + ) -> Result<(AttoTokens, DataAddress), PutError> { let now = ant_networking::time::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; let data_map_addr = data_map_chunk.address(); @@ -101,7 +101,7 @@ impl Client { } } - Ok((total_cost, map_xor_name)) + Ok((total_cost, DataAddress::new(map_xor_name))) } /// Get the estimated cost of storing a piece of data. diff --git a/autonomi/src/client/high_level/files/archive_private.rs b/autonomi/src/client/high_level/files/archive_private.rs index 9d79d204f4..f77c1ac185 100644 --- a/autonomi/src/client/high_level/files/archive_private.rs +++ b/autonomi/src/client/high_level/files/archive_private.rs @@ -27,7 +27,7 @@ use serde::{Deserialize, Serialize}; use super::Metadata; /// Private archive data map, allowing access to the [`PrivateArchive`] data. -pub type PrivateArchiveAccess = DataMapChunk; +pub type PrivateArchiveDataMap = DataMapChunk; /// Directory structure mapping filepaths to their data maps and metadata. 
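// A minimal sketch of the new DataAddress type defined above, assuming the
// autonomi, xor_name and rand crates: it replaces the bare XorName alias and
// carries its own hex/Display round-trip.
use autonomi::data::DataAddress;
use xor_name::XorName;

fn data_address_round_trip() {
    let addr = DataAddress::new(XorName::random(&mut rand::thread_rng()));
    let hex = addr.to_hex();
    assert_eq!(format!("{addr}"), hex); // Display prints the full hex
    assert_eq!(DataAddress::from_hex(&hex).expect("valid hex"), addr);
}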
/// @@ -142,7 +142,7 @@ impl Client { /// Fetch a [`PrivateArchive`] from the network pub async fn archive_get( &self, - addr: &PrivateArchiveAccess, + addr: &PrivateArchiveDataMap, ) -> Result { let data = self.data_get(addr).await?; Ok(PrivateArchive::from_bytes(data)?) @@ -153,7 +153,7 @@ impl Client { &self, archive: &PrivateArchive, payment_option: PaymentOption, - ) -> Result<(AttoTokens, PrivateArchiveAccess), PutError> { + ) -> Result<(AttoTokens, PrivateArchiveDataMap), PutError> { let bytes = archive .to_bytes() .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?; diff --git a/autonomi/src/client/high_level/files/archive_public.rs b/autonomi/src/client/high_level/files/archive_public.rs index 1dcee27c41..03a008dd4f 100644 --- a/autonomi/src/client/high_level/files/archive_public.rs +++ b/autonomi/src/client/high_level/files/archive_public.rs @@ -16,11 +16,10 @@ use ant_networking::time::{Duration, SystemTime, UNIX_EPOCH}; use crate::{client::payment::PaymentOption, AttoTokens}; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use xor_name::XorName; use crate::{ client::{ - high_level::{data::DataAddr, files::RenameError}, + high_level::{data::DataAddress, files::RenameError}, quote::CostError, GetError, PutError, }, @@ -30,7 +29,7 @@ use crate::{ use super::Metadata; /// The address of a public archive on the network. Points to an [`PublicArchive`]. -pub type ArchiveAddr = XorName; +pub type ArchiveAddress = DataAddress; /// Public variant of [`crate::client::files::archive_private::PrivateArchive`]. Differs in that data maps of files are uploaded /// to the network, of which the addresses are stored in this archive. @@ -41,7 +40,7 @@ pub struct PublicArchive { /// | | Metadata of the file /// | | | /// V V V - map: BTreeMap, + map: BTreeMap, } /// This type essentially wraps archive in version marker. E.g. in JSON format: @@ -80,7 +79,7 @@ impl PublicArchive { /// Add a file to a local archive /// Note that this does not upload the archive to the network - pub fn add_file(&mut self, path: PathBuf, data_addr: DataAddr, meta: Metadata) { + pub fn add_file(&mut self, path: PathBuf, data_addr: DataAddress, meta: Metadata) { self.map.insert(path.clone(), (data_addr, meta)); debug!("Added a new file to the archive, path: {:?}", path); } @@ -94,20 +93,20 @@ impl PublicArchive { } /// List all data addresses of the files in the archive - pub fn addresses(&self) -> Vec { + pub fn addresses(&self) -> Vec { self.map.values().map(|(addr, _)| *addr).collect() } /// Iterate over the archive items /// Returns an iterator over (PathBuf, DataAddr, Metadata) - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.map .iter() .map(|(path, (addr, meta))| (path, addr, meta)) } /// Get the underlying map - pub fn map(&self) -> &BTreeMap { + pub fn map(&self) -> &BTreeMap { &self.map } @@ -151,7 +150,10 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub async fn archive_get_public(&self, addr: &ArchiveAddr) -> Result { + pub async fn archive_get_public( + &self, + addr: &ArchiveAddress, + ) -> Result { let data = self.data_get_public(addr).await?; Ok(PublicArchive::from_bytes(data)?) 
} @@ -181,7 +183,7 @@ impl Client { &self, archive: &PublicArchive, payment_option: PaymentOption, - ) -> Result<(AttoTokens, ArchiveAddr), PutError> { + ) -> Result<(AttoTokens, ArchiveAddress), PutError> { let bytes = archive .to_bytes() .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?; @@ -211,6 +213,7 @@ impl Client { #[cfg(test)] mod test { use std::str::FromStr; + use xor_name::XorName; use super::*; @@ -229,7 +232,7 @@ mod test { let mut arch = PublicArchive::new(); arch.add_file( PathBuf::from_str("hello_world").unwrap(), - DataAddr::random(&mut rand::thread_rng()), + DataAddress::new(XorName::random(&mut rand::thread_rng())), Metadata::new_with_size(1), ); let arch_serialized = arch.to_bytes().unwrap(); @@ -272,7 +275,7 @@ mod test { } #[derive(Debug, Default, Serialize, Deserialize)] pub struct PublicArchiveV1p1 { - map: BTreeMap, + map: BTreeMap, } #[derive(Debug, Serialize, Deserialize)] pub enum PublicArchiveVersionedV1p1 { @@ -283,7 +286,7 @@ mod test { arch_p1.map.insert( PathBuf::from_str("hello_world").unwrap(), ( - DataAddr::random(&mut rand::thread_rng()), + DataAddress::new(XorName::random(&mut rand::thread_rng())), MetadataV1p1 { accessed: Some(1), ..Default::default() @@ -304,13 +307,13 @@ mod test { let file2 = PathBuf::from_str("file2").unwrap(); arch.add_file( file1.clone(), - DataAddr::random(&mut rand::thread_rng()), + DataAddress::new(XorName::random(&mut rand::thread_rng())), Metadata::new_with_size(1), ); let mut other_arch = PublicArchive::new(); other_arch.add_file( file2.clone(), - DataAddr::random(&mut rand::thread_rng()), + DataAddress::new(XorName::random(&mut rand::thread_rng())), Metadata::new_with_size(2), ); arch.merge(&other_arch); @@ -321,7 +324,7 @@ mod test { let mut arch_with_duplicate = PublicArchive::new(); arch_with_duplicate.add_file( file1.clone(), - DataAddr::random(&mut rand::thread_rng()), + DataAddress::new(XorName::random(&mut rand::thread_rng())), Metadata::new_with_size(5), ); arch.merge(&arch_with_duplicate); diff --git a/autonomi/src/client/high_level/files/fs_private.rs b/autonomi/src/client/high_level/files/fs_private.rs index 2ac45d2d3d..7f51c55e32 100644 --- a/autonomi/src/client/high_level/files/fs_private.rs +++ b/autonomi/src/client/high_level/files/fs_private.rs @@ -14,7 +14,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use super::archive_private::{PrivateArchive, PrivateArchiveAccess}; +use super::archive_private::{PrivateArchive, PrivateArchiveDataMap}; use super::{get_relative_file_path_from_abs_file_and_folder_path, FILE_UPLOAD_BATCH_SIZE}; use super::{DownloadError, UploadError}; @@ -48,7 +48,7 @@ impl Client { /// Download a private directory from network to local file system pub async fn dir_download( &self, - archive_access: &PrivateArchiveAccess, + archive_access: &PrivateArchiveDataMap, to_dest: PathBuf, ) -> Result<(), DownloadError> { let archive = self.archive_get(archive_access).await?; @@ -229,12 +229,12 @@ impl Client { /// Same as [`Client::dir_content_upload`] but also uploads the archive (privately) to the network. /// - /// Returns the [`PrivateArchiveAccess`] allowing the private archive to be downloaded from the network. + /// Returns the [`PrivateArchiveDataMap`] allowing the private archive to be downloaded from the network. 
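// A minimal end-to-end sketch of the renamed private directory flow
// (dir_upload / dir_download), assuming the autonomi crate, a funded wallet
// and a running local network; the directory paths are placeholders.
use autonomi::client::payment::PaymentOption;
use autonomi::{Client, Wallet};
use std::path::PathBuf;

async fn private_dir_round_trip(wallet: &Wallet) -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::init_local().await?;
    // Keep the returned data map: its hex form is all that is needed to
    // download the directory again.
    let (_cost, data_map) = client
        .dir_upload(PathBuf::from("my_dir"), PaymentOption::from(wallet))
        .await?;
    println!("private archive data map: {}", data_map.to_hex());
    client
        .dir_download(&data_map, PathBuf::from("my_dir_copy"))
        .await?;
    Ok(())
}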
pub async fn dir_upload( &self, dir_path: PathBuf, payment_option: PaymentOption, - ) -> Result<(AttoTokens, PrivateArchiveAccess), UploadError> { + ) -> Result<(AttoTokens, PrivateArchiveDataMap), UploadError> { let (cost1, archive) = self .dir_content_upload(dir_path, payment_option.clone()) .await?; diff --git a/autonomi/src/client/high_level/files/fs_public.rs b/autonomi/src/client/high_level/files/fs_public.rs index d23da1f6bc..1d960c8814 100644 --- a/autonomi/src/client/high_level/files/fs_public.rs +++ b/autonomi/src/client/high_level/files/fs_public.rs @@ -6,13 +6,13 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use super::archive_public::{ArchiveAddr, PublicArchive}; +use super::archive_public::{ArchiveAddress, PublicArchive}; use super::{DownloadError, FileCostError, Metadata, UploadError}; use crate::client::high_level::files::{ get_relative_file_path_from_abs_file_and_folder_path, FILE_UPLOAD_BATCH_SIZE, }; use crate::client::payment::PaymentOption; -use crate::client::{high_level::data::DataAddr, utils::process_tasks_with_max_concurrency}; +use crate::client::{high_level::data::DataAddress, utils::process_tasks_with_max_concurrency}; use crate::client::{Client, PutError}; use crate::self_encryption::encrypt; use crate::{Amount, AttoTokens}; @@ -26,7 +26,7 @@ impl Client { /// Download file from network to local file system pub async fn file_download_public( &self, - data_addr: &DataAddr, + data_addr: &DataAddress, to_dest: PathBuf, ) -> Result<(), DownloadError> { let data = self.data_get_public(data_addr).await?; @@ -42,7 +42,7 @@ impl Client { /// Download directory from network to local file system pub async fn dir_download_public( &self, - archive_addr: &ArchiveAddr, + archive_addr: &ArchiveAddress, to_dest: PathBuf, ) -> Result<(), DownloadError> { let archive = self.archive_get_public(archive_addr).await?; @@ -125,13 +125,13 @@ impl Client { file_path.to_string_lossy().to_string(), xor_names, chunks, - (relative_path, data_address, metadata), + (relative_path, DataAddress::new(data_address), metadata), )) }); } let mut combined_xor_names: Vec<(XorName, usize)> = vec![]; - let mut combined_chunks: Vec<((String, XorName), Vec)> = vec![]; + let mut combined_chunks: Vec<((String, DataAddress), Vec)> = vec![]; let mut public_archive = PublicArchive::new(); let encryption_results = @@ -203,13 +203,13 @@ impl Client { info!( "Successfully uploaded {name} ({} chunks) to: {}", chunks.len(), - hex::encode(data_address.0) + hex::encode(data_address.xorname()) ); #[cfg(feature = "loud")] println!( "Successfully uploaded {name} ({} chunks) to: {}", chunks.len(), - hex::encode(data_address.0) + hex::encode(data_address.xorname()) ); (name, Ok(chunks_uploaded)) @@ -242,12 +242,12 @@ impl Client { /// Same as [`Client::dir_content_upload_public`] but also uploads the archive to the network. /// - /// Returns the [`ArchiveAddr`] of the uploaded archive. + /// Returns the [`ArchiveAddress`] of the uploaded archive. 
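// A minimal end-to-end sketch of the public counterpart (dir_upload_public /
// archive_get_public / dir_download_public), assuming the autonomi crate, a
// funded wallet and a running local network; paths are placeholders.
use autonomi::client::payment::PaymentOption;
use autonomi::{Client, Wallet};
use std::path::PathBuf;

async fn public_dir_round_trip(wallet: &Wallet) -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::init_local().await?;
    let (_cost, archive_addr) = client
        .dir_upload_public(PathBuf::from("my_dir"), PaymentOption::from(wallet))
        .await?;
    // The ArchiveAddress is a DataAddress; share it as hex.
    println!("public archive at: {}", archive_addr.to_hex());
    // Anyone holding the address can list and download the directory.
    let archive = client.archive_get_public(&archive_addr).await?;
    for (path, data_addr, _meta) in archive.iter() {
        println!("{path:?} -> {data_addr}");
    }
    client
        .dir_download_public(&archive_addr, PathBuf::from("my_dir_copy"))
        .await?;
    Ok(())
}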
pub async fn dir_upload_public( &self, dir_path: PathBuf, payment_option: PaymentOption, - ) -> Result<(AttoTokens, ArchiveAddr), UploadError> { + ) -> Result<(AttoTokens, ArchiveAddress), UploadError> { let (cost1, archive) = self .dir_content_upload_public(dir_path, payment_option.clone()) .await?; @@ -265,7 +265,7 @@ impl Client { &self, path: PathBuf, payment_option: PaymentOption, - ) -> Result<(AttoTokens, DataAddr), UploadError> { + ) -> Result<(AttoTokens, DataAddress), UploadError> { info!("Uploading file: {path:?}"); #[cfg(feature = "loud")] println!("Uploading file: {path:?}"); @@ -307,7 +307,7 @@ impl Client { let map_xor_name = *data_map_chunk.address().xorname(); let metadata = metadata_from_entry(&entry); - archive.add_file(path, map_xor_name, metadata); + archive.add_file(path, DataAddress::new(map_xor_name), metadata); } let root_serialized = rmp_serde::to_vec(&archive)?; diff --git a/autonomi/src/client/high_level/register/history.rs b/autonomi/src/client/high_level/register/history.rs index 3f21a01653..104c8c1df6 100644 --- a/autonomi/src/client/high_level/register/history.rs +++ b/autonomi/src/client/high_level/register/history.rs @@ -49,14 +49,14 @@ impl RegisterHistory { let next_entry_pk: PublicKey = MainPubkey::from(self.register_owner) .derive_key(&next_derivation) .into(); - self.current_iter = GraphEntryAddress::from_owner(next_entry_pk); + self.current_iter = GraphEntryAddress::new(next_entry_pk); Ok(Some(entry.content)) } /// Get all the register values from the history, starting from the first to the latest entry pub async fn collect(&mut self) -> Result, RegisterError> { let mut history_from_first = self.clone(); - history_from_first.current_iter = GraphEntryAddress::from_owner(self.register_owner); + history_from_first.current_iter = GraphEntryAddress::new(self.register_owner); let mut values = Vec::new(); while let Some(value) = history_from_first.next().await? { values.push(value); @@ -74,6 +74,6 @@ impl Client { /// [`RegisterHistory::collect`] can be used to get all the register values from the history from the first to the latest entry. pub fn register_history(&self, addr: &RegisterAddress) -> RegisterHistory { let graph_entry_addr = addr.to_underlying_graph_root(); - RegisterHistory::new(self.clone(), addr.owner, graph_entry_addr) + RegisterHistory::new(self.clone(), addr.owner(), graph_entry_addr) } } diff --git a/autonomi/src/client/high_level/register/mod.rs b/autonomi/src/client/high_level/register/mod.rs index 2d5db0858b..a08408070c 100644 --- a/autonomi/src/client/high_level/register/mod.rs +++ b/autonomi/src/client/high_level/register/mod.rs @@ -30,36 +30,39 @@ pub use history::RegisterHistory; /// it is up to the owner to encrypt the data uploaded to the register, if wanted. /// Only the owner can update the register with its [`SecretKey`]. /// The [`SecretKey`] is the only piece of information an owner should keep to access to the register. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct RegisterAddress { - pub owner: PublicKey, -} +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +pub struct RegisterAddress(PublicKey); impl RegisterAddress { /// Create a new register address pub fn new(owner: PublicKey) -> Self { - Self { owner } + Self(owner) } /// Get the owner of the register pub fn owner(&self) -> PublicKey { - self.owner + self.0 } /// To underlying graph representation pub fn to_underlying_graph_root(&self) -> GraphEntryAddress { - GraphEntryAddress::from_owner(self.owner) + GraphEntryAddress::new(self.0) + } + + /// To underlying head pointer + pub fn to_underlying_head_pointer(&self) -> PointerAddress { + register_head_pointer_address(self) } /// Convert a register address to a hex string pub fn to_hex(&self) -> String { - self.owner.to_hex() + self.0.to_hex() } /// Convert a hex string to a register address pub fn from_hex(hex: &str) -> Result { let owner = PublicKey::from_hex(hex)?; - Ok(Self { owner }) + Ok(Self(owner)) } } @@ -154,19 +157,14 @@ impl Client { // create a Pointer to the last entry let target = PointerTarget::GraphEntryAddress(addr); - let pointer_key = self.register_head_pointer_sk(&main_key.into()); + let pointer_key = register_head_pointer_sk(&main_key.into()); let (pointer_cost, _pointer_addr) = self .pointer_create(&pointer_key, target, payment_option.clone()) .await?; let total_cost = graph_cost .checked_add(pointer_cost) .ok_or(RegisterError::InvalidCost)?; - Ok(( - total_cost, - RegisterAddress { - owner: public_key.into(), - }, - )) + Ok((total_cost, RegisterAddress(public_key.into()))) } /// Update the value of a register. @@ -179,10 +177,8 @@ impl Client { payment_option: PaymentOption, ) -> Result { // get the pointer of the register head - let addr = RegisterAddress { - owner: owner.public_key(), - }; - let pointer_addr = self.register_head_pointer_address(&addr); + let addr = RegisterAddress(owner.public_key()); + let pointer_addr = register_head_pointer_address(&addr); debug!("Getting pointer of register head at {pointer_addr:?}"); let pointer = match self.pointer_get(&pointer_addr).await { Ok(pointer) => pointer, @@ -220,7 +216,7 @@ impl Client { Err(GraphError::AlreadyExists(address)) => { // pointer is apparently not at head, update it let target = PointerTarget::GraphEntryAddress(address); - let pointer_key = self.register_head_pointer_sk(&main_key.into()); + let pointer_key = register_head_pointer_sk(&main_key.into()); self.pointer_update(&pointer_key, target).await?; return Err(RegisterError::Corrupt(format!( "Pointer is apparently not at head, attempting to heal the register by updating it to point to the next entry at {address:?}, please retry the operation" @@ -231,7 +227,7 @@ impl Client { // update the pointer to point to the new entry let target = PointerTarget::GraphEntryAddress(new_graph_entry_addr); - let pointer_key = self.register_head_pointer_sk(&main_key.into()); + let pointer_key = register_head_pointer_sk(&main_key.into()); self.pointer_update(&pointer_key, target).await?; Ok(cost) @@ -243,7 +239,7 @@ impl Client { addr: &RegisterAddress, ) -> Result { // get the pointer of the register head - let pointer_addr = self.register_head_pointer_address(addr); + let pointer_addr = register_head_pointer_address(addr); debug!("Getting pointer of register head at {pointer_addr:?}"); let pointer = self.pointer_get(&pointer_addr).await?; let graph_entry_addr = match pointer.target() { @@ -270,7 
+266,7 @@ impl Client { /// Get the cost of a register operation. /// Returns the cost of creation if it doesn't exist, else returns the cost of an update pub async fn register_cost(&self, owner: &PublicKey) -> Result { - let pointer_pk = self.register_head_pointer_pk(&RegisterAddress { owner: *owner }); + let pointer_pk = register_head_pointer_pk(&RegisterAddress(*owner)); let graph_entry_cost = self.graph_entry_cost(owner); let pointer_cost = self.pointer_cost(&pointer_pk); let (graph_entry_cost, pointer_cost) = @@ -280,29 +276,6 @@ impl Client { .ok_or(CostError::InvalidCost) } - /// Get the address of the register's head pointer - fn register_head_pointer_address(&self, addr: &RegisterAddress) -> PointerAddress { - let pk: MainPubkey = addr.owner.into(); - let pointer_pk = - pk.derive_key(&DerivationIndex::from_bytes(REGISTER_HEAD_DERIVATION_INDEX)); - PointerAddress::from_owner(pointer_pk.into()) - } - - /// Get the secret key of the register's head pointer - fn register_head_pointer_sk(&self, register_owner: &SecretKey) -> SecretKey { - let pointer_sk = MainSecretKey::new(register_owner.clone()) - .derive_key(&DerivationIndex::from_bytes(REGISTER_HEAD_DERIVATION_INDEX)); - pointer_sk.into() - } - - /// Get the public key of the register's head pointer - fn register_head_pointer_pk(&self, addr: &RegisterAddress) -> PublicKey { - let pk: MainPubkey = addr.owner.into(); - let pointer_pk = - pk.derive_key(&DerivationIndex::from_bytes(REGISTER_HEAD_DERIVATION_INDEX)); - pointer_pk.into() - } - /// Get underlying register graph entry and next derivation index /// In normal circumstances, there is only one entry with one descendant, yielding ONE entry and ONE derivation index /// In the case of a fork or a corrupt register, the smallest derivation index among all the entries descendants is chosen @@ -335,8 +308,29 @@ impl Client { } } +/// Get the address of the register's head pointer +fn register_head_pointer_address(addr: &RegisterAddress) -> PointerAddress { + let pk: MainPubkey = addr.0.into(); + let pointer_pk = pk.derive_key(&DerivationIndex::from_bytes(REGISTER_HEAD_DERIVATION_INDEX)); + PointerAddress::new(pointer_pk.into()) +} + +/// Get the secret key of the register's head pointer +fn register_head_pointer_sk(register_owner: &SecretKey) -> SecretKey { + let pointer_sk = MainSecretKey::new(register_owner.clone()) + .derive_key(&DerivationIndex::from_bytes(REGISTER_HEAD_DERIVATION_INDEX)); + pointer_sk.into() +} + +/// Get the public key of the register's head pointer +fn register_head_pointer_pk(addr: &RegisterAddress) -> PublicKey { + let pk: MainPubkey = addr.0.into(); + let pointer_pk = pk.derive_key(&DerivationIndex::from_bytes(REGISTER_HEAD_DERIVATION_INDEX)); + pointer_pk.into() +} + fn get_derivation_from_graph_entry(entry: &GraphEntry) -> Result { - let graph_entry_addr = GraphEntryAddress::from_owner(entry.owner); + let graph_entry_addr = GraphEntryAddress::new(entry.owner); let d = match entry.descendants.as_slice() { [d] => d.1, _ => return Err(RegisterError::Corrupt(format!( diff --git a/autonomi/src/client/high_level/vault/mod.rs b/autonomi/src/client/high_level/vault/mod.rs index fa239b566a..b6b96f2c7a 100644 --- a/autonomi/src/client/high_level/vault/mod.rs +++ b/autonomi/src/client/high_level/vault/mod.rs @@ -85,7 +85,7 @@ impl Client { .derive_key(&DerivationIndex::from_bytes(VAULT_HEAD_DERIVATION_INDEX)) .public_key(); - let mut cur_graph_entry_addr = GraphEntryAddress::from_owner(public_key.into()); + let mut cur_graph_entry_addr = 
GraphEntryAddress::new(public_key.into()); let mut decrypted_full_text = vec![]; let mut content_type = 0; let mut has_end_reached = false; @@ -96,7 +96,7 @@ impl Client { // The first descendant is reserved for `expand GraphEntry`. match graph_entry.descendants.split_first() { Some((&(first, _), rest)) => { - cur_graph_entry_addr = GraphEntryAddress::from_owner(first); + cur_graph_entry_addr = GraphEntryAddress::new(first); let scratchpad_addresses = rest.to_vec(); let (decrypt_data, cur_content_type, is_end_reached) = self @@ -343,7 +343,7 @@ impl Client { let public_key = main_secret_key .derive_key(&cur_free_graphentry_derivation) .public_key(); - let cur_graph_entry_addr = GraphEntryAddress::from_owner(public_key.into()); + let cur_graph_entry_addr = GraphEntryAddress::new(public_key.into()); match self.graph_entry_get(&cur_graph_entry_addr).await { Ok(entry) => { diff --git a/autonomi/src/client/high_level/vault/user_data.rs b/autonomi/src/client/high_level/vault/user_data.rs index ff37495727..f0537382f3 100644 --- a/autonomi/src/client/high_level/vault/user_data.rs +++ b/autonomi/src/client/high_level/vault/user_data.rs @@ -8,8 +8,8 @@ use std::collections::HashMap; -use crate::client::high_level::files::archive_private::PrivateArchiveAccess; -use crate::client::high_level::files::archive_public::ArchiveAddr; +use crate::client::high_level::files::archive_private::PrivateArchiveDataMap; +use crate::client::high_level::files::archive_public::ArchiveAddress; use crate::client::payment::PaymentOption; use crate::client::Client; use crate::client::GetError; @@ -32,9 +32,9 @@ pub static USER_DATA_VAULT_CONTENT_IDENTIFIER: LazyLock = #[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)] pub struct UserData { /// Owned file archive addresses, along with their names (can be empty) - pub file_archives: HashMap, + pub file_archives: HashMap, /// Owned private file archives, along with their names (can be empty) - pub private_file_archives: HashMap, + pub private_file_archives: HashMap, /// Owned register addresses, along with their names (can be empty) pub register_addresses: HashMap, } @@ -64,40 +64,43 @@ impl UserData { } /// Add an archive. Returning `Option::Some` with the old name if the archive was already in the set. - pub fn add_file_archive(&mut self, archive: ArchiveAddr) -> Option { + pub fn add_file_archive(&mut self, archive: ArchiveAddress) -> Option { self.file_archives.insert(archive, "".into()) } /// Add an archive. Returning `Option::Some` with the old name if the archive was already in the set. pub fn add_file_archive_with_name( &mut self, - archive: ArchiveAddr, + archive: ArchiveAddress, name: String, ) -> Option { self.file_archives.insert(archive, name) } /// Add a private archive. Returning `Option::Some` with the old name if the archive was already in the set. - pub fn add_private_file_archive(&mut self, archive: PrivateArchiveAccess) -> Option { + pub fn add_private_file_archive(&mut self, archive: PrivateArchiveDataMap) -> Option { self.private_file_archives.insert(archive, "".into()) } /// Add a private archive with a name. Returning `Option::Some` with the old name if the archive was already in the set. pub fn add_private_file_archive_with_name( &mut self, - archive: PrivateArchiveAccess, + archive: PrivateArchiveDataMap, name: String, ) -> Option { self.private_file_archives.insert(archive, name) } /// Remove an archive. Returning `Option::Some` with the old name if the archive was already in the set. 
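// A minimal sketch of the reworked RegisterAddress from the register changes
// above, assuming the autonomi and bls crates: the address is now a thin
// wrapper over the owner key, with the underlying graph root and head pointer
// derivable from it.
use autonomi::register::RegisterAddress;

fn register_address_views() {
    let owner = bls::SecretKey::random().public_key();
    let addr = RegisterAddress::new(owner);
    assert_eq!(addr.owner(), owner);

    // Hex round-trip: the hex is simply the owner public key.
    let restored = RegisterAddress::from_hex(&addr.to_hex()).expect("valid hex");
    assert_eq!(restored, addr);

    // Underlying structures used by the register implementation.
    let _graph_root = addr.to_underlying_graph_root();
    let _head_pointer = addr.to_underlying_head_pointer();
}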
- pub fn remove_file_archive(&mut self, archive: ArchiveAddr) -> Option { + pub fn remove_file_archive(&mut self, archive: ArchiveAddress) -> Option { self.file_archives.remove(&archive) } /// Remove a private archive. Returning `Option::Some` with the old name if the archive was already in the set. - pub fn remove_private_file_archive(&mut self, archive: PrivateArchiveAccess) -> Option { + pub fn remove_private_file_archive( + &mut self, + archive: PrivateArchiveDataMap, + ) -> Option { self.private_file_archives.remove(&archive) } diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 75cc6448ea..764fd019f9 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -28,7 +28,6 @@ pub use high_level::files; pub use high_level::register; pub use high_level::vault; -pub mod address; pub mod config; pub mod key_derivation; pub mod payment; diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 6866ead0f3..5709030fe6 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -80,6 +80,9 @@ pub use ant_evm::QuoteHash; pub use ant_evm::RewardsAddress; pub use ant_evm::{Amount, AttoTokens}; +// Re-exports of the ant-protocol address parsing error +pub use ant_protocol::storage::AddressParseError; + // Re-exports of the bls types pub use bls::{PublicKey, SecretKey, Signature}; diff --git a/autonomi/src/python.rs b/autonomi/src/python.rs index 33414c90e8..262fc17692 100644 --- a/autonomi/src/python.rs +++ b/autonomi/src/python.rs @@ -1,8 +1,11 @@ use std::{path::PathBuf, str::FromStr, sync::Arc}; +use crate::client::data::DataAddress; +use crate::client::files::archive_private::PrivateArchiveDataMap; +use crate::client::files::archive_public::ArchiveAddress; +use crate::client::pointer::PointerTarget; use crate::{ client::{ - address::addr_to_str, chunk::DataMapChunk, payment::PaymentOption, vault::{UserData, VaultSecretKey}, @@ -12,10 +15,11 @@ use crate::{ Client, ClientConfig, }; use crate::{Bytes, Network, Wallet}; -use ant_protocol::storage::{ - Chunk, ChunkAddress, GraphEntry, GraphEntryAddress, Pointer, PointerAddress, PointerTarget, - Scratchpad, ScratchpadAddress, +use crate::{ + Chunk, ChunkAddress, GraphEntry, GraphEntryAddress, Pointer, PointerAddress, Scratchpad, + ScratchpadAddress, }; + use bls::{PublicKey, SecretKey}; use libp2p::Multiaddr; use pyo3::exceptions::{PyConnectionError, PyRuntimeError, PyValueError}; @@ -187,10 +191,7 @@ impl PyClient { .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to get graph entry: {e}")))?; - Ok(( - cost.to_string(), - crate::client::address::addr_to_str(addr.0), - )) + Ok((cost.to_string(), PyGraphEntryAddress { inner: addr })) }) } @@ -226,7 +227,7 @@ impl PyClient { }) } - /// Get Scratchpad from the Network using the scratpad address in hex string format. + /// Get Scratchpad from the Network using the scratpad address. 
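// A minimal sketch of handling the newly re-exported AddressParseError when
// parsing a user-supplied address string, assuming the autonomi crate; the
// match arms mirror the enum variants introduced in this patch.
use autonomi::{AddressParseError, ChunkAddress};

fn parse_chunk_address(input: &str) -> Option<ChunkAddress> {
    match ChunkAddress::from_hex(input) {
        Ok(addr) => Some(addr),
        Err(AddressParseError::Hex(e)) => {
            eprintln!("not a hex string: {e}");
            None
        }
        Err(AddressParseError::InvalidLength) => {
            eprintln!("hex decodes to the wrong number of bytes");
            None
        }
        Err(AddressParseError::PublicKey(e)) => {
            // Not produced by ChunkAddress::from_hex, but part of the enum.
            eprintln!("invalid public key: {e}");
            None
        }
    }
}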
fn scratchpad_get<'a>( &self, py: Python<'a>, @@ -313,7 +314,7 @@ impl PyClient { PyRuntimeError::new_err(format!("Failed to create scratchpad: {e}")) })?; - Ok((cost.to_string(), addr.to_hex())) + Ok((cost.to_string(), PyScratchpadAddress { inner: addr })) }) } @@ -434,7 +435,7 @@ impl PyClient { PyRuntimeError::new_err(format!("Failed to put public archive: {e}")) })?; - Ok((cost.to_string(), crate::client::address::addr_to_str(addr))) + Ok((cost.to_string(), PyArchiveAddress { inner: addr })) }) } @@ -475,7 +476,7 @@ impl PyClient { fn dir_download<'a>( &self, py: Python<'a>, - data_map: PyDataMapChunk, + data_map: PyPrivateArchiveDataMap, dir_path: PathBuf, ) -> PyResult> { let client = self.inner.clone(); @@ -514,11 +515,12 @@ impl PyClient { fn file_download_public<'a>( &self, py: Python<'a>, - #[pyo3(from_py_with = "str_to_addr")] addr: XorName, + addr: &PyDataAddress, path: PathBuf, ) -> PyResult> { let client = self.inner.clone(); + let addr = addr.inner; future_into_py(py, async move { client .file_download_public(&addr, path) @@ -547,7 +549,10 @@ impl PyClient { .dir_upload(dir_path, payment.inner) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to upload directory: {e}")))?; - Ok((cost.to_string(), PyDataMapChunk { inner: data_map })) + Ok(( + cost.to_string(), + PyPrivateArchiveDataMap { inner: data_map }, + )) }) } @@ -618,7 +623,7 @@ impl PyClient { .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to put data: {e}")))?; - Ok((cost.to_string(), crate::client::address::addr_to_str(addr))) + Ok((cost.to_string(), PyDataAddress { inner: addr })) }) } @@ -626,10 +631,11 @@ impl PyClient { fn data_get_public<'a>( &self, py: Python<'a>, - #[pyo3(from_py_with = "str_to_addr")] addr: XorName, + addr: &PyDataAddress, ) -> PyResult> { let client = self.inner.clone(); + let addr = addr.inner; future_into_py(py, async move { let data = client .data_get_public(&addr) @@ -655,7 +661,7 @@ impl PyClient { .dir_upload_public(dir_path, payment) .await .map_err(|e| PyRuntimeError::new_err(format!("Failed to upload directory: {e}")))?; - Ok((cost.to_string(), crate::client::address::addr_to_str(addr))) + Ok((cost.to_string(), PyArchiveAddress { inner: addr })) }) } @@ -663,11 +669,12 @@ impl PyClient { fn dir_download_public<'a>( &self, py: Python<'a>, - #[pyo3(from_py_with = "str_to_addr")] addr: XorName, + addr: &PyArchiveAddress, dir_path: PathBuf, ) -> PyResult> { let client = self.inner.clone(); + let addr = addr.inner; future_into_py(py, async move { client .dir_download_public(&addr, dir_path) @@ -705,10 +712,11 @@ impl PyClient { fn archive_get_public<'a>( &self, py: Python<'a>, - #[pyo3(from_py_with = "str_to_addr")] addr: XorName, + addr: &PyArchiveAddress, ) -> PyResult> { let client = self.inner.clone(); + let addr = addr.inner; future_into_py(py, async move { let archive = client .archive_get_public(&addr) @@ -1061,8 +1069,7 @@ impl PyClient { } } -/// A network address where a pointer is stored. -/// The address is derived from the owner's public key. +/// Address of a Pointer, is derived from the owner's unique public key. #[pyclass(name = "PointerAddress", eq, ord)] #[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] pub struct PyPointerAddress { @@ -1071,20 +1078,19 @@ pub struct PyPointerAddress { #[pymethods] impl PyPointerAddress { - /// Initialise pointer address from hex string. + /// Construct a new PointerAddress given an owner. 
#[new] - fn new(#[pyo3(from_py_with = "str_to_addr")] addr: XorName) -> PyResult { + fn new(public_key: PyPublicKey) -> PyResult { Ok(Self { - inner: PointerAddress::new(addr), + inner: PointerAddress::new(public_key.inner), }) } - /// Instantiate address which is derived from owner's unique public key. - #[staticmethod] - fn from_owner(public_key: PyPublicKey) -> PyResult { - Ok(Self { - inner: PointerAddress::from_owner(public_key.inner), - }) + /// Return the owner public key. + pub fn owner(&self) -> PyPublicKey { + PyPublicKey { + inner: *self.inner.owner(), + } } /// Returns the hex string representation of the pointer address. @@ -1093,6 +1099,15 @@ impl PyPointerAddress { self.inner.to_hex() } + /// Create a Pointer address from a hex string. + #[staticmethod] + fn from_hex(hex: &str) -> PyResult { + Ok(Self { + inner: PointerAddress::from_hex(hex) + .map_err(|e| PyValueError::new_err(e.to_string()))?, + }) + } + fn __str__(&self) -> PyResult { Ok(self.hex()) } @@ -1129,22 +1144,16 @@ impl PyPointer { } } - /// Returns the hex string representation of the pointer's target. - #[getter] - fn hex(&self) -> String { - addr_to_str(self.inner.xorname()) - } - /// Returns the target that this pointer points to. #[getter] fn target(&self) -> PyPointerTarget { PyPointerTarget { - inner: PointerTarget::ChunkAddress(ChunkAddress::new(self.inner.xorname())), + inner: self.inner.target().clone(), } } fn __str__(&self) -> PyResult { - Ok(self.hex()) + Ok(format!("Pointer('{}')", self.inner.address().to_hex())) } } @@ -1196,14 +1205,14 @@ impl PyPointerTarget { } } - /// Returns the hex string representation of this pointer address. + /// Returns the hex string representation of the target #[getter] fn hex(&self) -> String { - addr_to_str(self.inner.xorname()) + self.inner.to_hex() } fn __str__(&self) -> PyResult { - Ok(self.hex()) + Ok(format!("PointerTarget('{}')", self.hex())) } } @@ -1218,9 +1227,9 @@ pub struct PyChunkAddress { impl PyChunkAddress { /// Creates a new chunk address from a hex string. #[new] - fn new(#[pyo3(from_py_with = "str_to_addr")] addr: XorName) -> PyResult { + fn new(addr: PyXorName) -> PyResult { Ok(Self { - inner: ChunkAddress::new(addr), + inner: ChunkAddress::new(addr.inner), }) } @@ -1242,7 +1251,7 @@ impl PyChunkAddress { #[getter] fn hex(&self) -> String { - addr_to_str(*self.inner.xorname()) + self.inner.to_hex() } fn __str__(&self) -> PyResult { @@ -1263,19 +1272,11 @@ pub struct PyGraphEntryAddress { #[pymethods] impl PyGraphEntryAddress { - /// Create graph entry address pointing to a specific XOR name. - #[new] - fn new(#[pyo3(from_py_with = "str_to_addr")] addr: XorName) -> PyResult { - Ok(Self { - inner: GraphEntryAddress::new(addr), - }) - } - - /// Instantiate address which is derived from owner's unique public key. + /// Create graph entry address #[staticmethod] - fn from_owner(public_key: PyPublicKey) -> PyResult { + fn new(public_key: PyPublicKey) -> PyResult { Ok(Self { - inner: GraphEntryAddress::from_owner(public_key.inner), + inner: GraphEntryAddress::new(public_key.inner), }) } @@ -1284,6 +1285,15 @@ impl PyGraphEntryAddress { self.inner.to_hex() } + /// Create a graph entry address from a hex string. 
+ #[staticmethod] + fn from_hex(hex: &str) -> PyResult { + Ok(Self { + inner: GraphEntryAddress::from_hex(hex) + .map_err(|e| PyValueError::new_err(e.to_string()))?, + }) + } + fn __str__(&self) -> PyResult { Ok(self.hex()) } @@ -1293,7 +1303,7 @@ impl PyGraphEntryAddress { } } -/// Address of a GraphEntry, is derived from the owner's unique public key. +/// Address of a Scratchpad, is derived from the owner's unique public key. #[pyclass(name = "ScratchpadAddress", eq, ord)] #[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] pub struct PyScratchpadAddress { @@ -1317,11 +1327,21 @@ impl PyScratchpadAddress { } } + /// Returns the hex string representation of the scratchpad address. #[getter] fn hex(&self) -> String { self.inner.to_hex() } + /// Create a scratchpad address from a hex string. + #[staticmethod] + fn from_hex(hex: &str) -> PyResult { + Ok(Self { + inner: ScratchpadAddress::from_hex(hex) + .map_err(|e| PyValueError::new_err(e.to_string()))?, + }) + } + fn __str__(&self) -> PyResult { Ok(self.hex()) } @@ -1331,6 +1351,120 @@ impl PyScratchpadAddress { } } +/// Address of Data on the Network. +#[pyclass(name = "DataAddress", eq, ord)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] +pub struct PyDataAddress { + inner: DataAddress, +} + +#[pymethods] +impl PyDataAddress { + /// Construct a new DataAddress + #[new] + fn new(xorname: PyXorName) -> PyResult { + Ok(Self { + inner: DataAddress::new(xorname.inner), + }) + } + + /// Returns the hex string representation of the data address. + #[getter] + fn hex(&self) -> String { + self.inner.to_hex() + } + + /// Create a Data address from a hex string. + #[staticmethod] + fn from_hex(hex: &str) -> PyResult { + Ok(Self { + inner: DataAddress::from_hex(hex).map_err(|e| PyValueError::new_err(e.to_string()))?, + }) + } + + fn __str__(&self) -> PyResult { + Ok(self.hex()) + } + + fn __repr__(&self) -> PyResult { + Ok(format!("DataAddress('{}')", self.hex())) + } +} + +/// Address of Data on the Network. +#[pyclass(name = "ArchiveAddress", eq, ord)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] +pub struct PyArchiveAddress { + inner: ArchiveAddress, +} + +#[pymethods] +impl PyArchiveAddress { + /// Construct a new ArchiveAddress, the address of a public archive on the network. + #[new] + fn new(xorname: PyXorName) -> PyResult { + Ok(Self { + inner: ArchiveAddress::new(xorname.inner), + }) + } + + /// Returns the hex string representation of this archive address. + #[getter] + fn hex(&self) -> String { + self.inner.to_hex() + } + + /// Create an ArchiveAddress from a hex string. + #[staticmethod] + fn from_hex(hex: &str) -> PyResult { + Ok(Self { + inner: ArchiveAddress::from_hex(hex) + .map_err(|e| PyValueError::new_err(e.to_string()))?, + }) + } + + fn __str__(&self) -> PyResult { + Ok(self.hex()) + } + + fn __repr__(&self) -> PyResult { + Ok(format!("ArchiveAddress('{}')", self.hex())) + } +} + +/// Address of Data on the Network. +#[pyclass(name = "PrivateArchiveDataMap", eq, ord)] +#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)] +pub struct PyPrivateArchiveDataMap { + inner: PrivateArchiveDataMap, +} + +#[pymethods] +impl PyPrivateArchiveDataMap { + /// Returns the hex string representation of this private archive data map. + #[getter] + fn hex(&self) -> String { + self.inner.to_hex() + } + + /// Create a PrivateArchiveDataMap from a hex string. 
+ #[staticmethod] + fn from_hex(hex: &str) -> PyResult { + Ok(Self { + inner: PrivateArchiveDataMap::from_hex(hex) + .map_err(|e| PyValueError::new_err(e.to_string()))?, + }) + } + + fn __str__(&self) -> PyResult { + Ok(self.hex()) + } + + fn __repr__(&self) -> PyResult { + Ok(format!("PrivateArchiveDataMap('{}')", self.hex())) + } +} + /// A wallet for interacting with the network's payment system. /// Handles token transfers, balance checks, and payments for network operations. #[pyclass(name = "Wallet")] @@ -1468,7 +1602,7 @@ impl PySecretKey { } /// Returns the hex string representation of the key. - fn to_hex(&self) -> String { + fn hex(&self) -> String { self.inner.to_hex() } } @@ -1499,7 +1633,8 @@ impl PyPublicKey { .map_err(|e| PyValueError::new_err(format!("Invalid hex key: {e}"))) } - fn to_hex(&self) -> String { + /// Returns the hex string representation of the public key. + fn hex(&self) -> String { self.inner.to_hex() } } @@ -1528,7 +1663,8 @@ impl PyVaultSecretKey { .map_err(|e| PyValueError::new_err(format!("Invalid hex key: {e}"))) } - fn to_hex(&self) -> String { + /// Returns the hex string representation of the vault secret key. + fn hex(&self) -> String { self.inner.to_hex() } } @@ -1558,7 +1694,7 @@ impl PyUserData { self.inner .file_archives .iter() - .map(|(addr, name)| (hex::encode(addr), name.clone())) + .map(|(addr, name)| (addr.to_hex(), name.clone())) .collect() } @@ -1590,7 +1726,7 @@ impl PyDataMapChunk { } /// Returns the hex string representation of this DataMapChunk. - fn to_hex(&self) -> String { + fn hex(&self) -> String { self.inner.to_hex() } @@ -1717,13 +1853,9 @@ impl PyPublicArchive { } /// Add a file to the archive - fn add_file( - &mut self, - path: PathBuf, - #[pyo3(from_py_with = "str_to_addr")] addr: XorName, - metadata: &PyMetadata, - ) { - self.inner.add_file(path, addr, metadata.inner.clone()); + fn add_file(&mut self, path: PathBuf, addr: &PyDataAddress, metadata: &PyMetadata) { + self.inner + .add_file(path, addr.inner, metadata.inner.clone()); } /// List all files in the archive. @@ -1742,7 +1874,7 @@ impl PyPublicArchive { self.inner .addresses() .into_iter() - .map(crate::client::address::addr_to_str) + .map(|a| a.to_hex()) .collect() } } @@ -2005,9 +2137,19 @@ impl PyClientConfig { // fn strategy() { } } +/// A handle to a XorName. +#[pyclass(name = "XorName")] +#[derive(Debug, Clone)] +pub struct PyXorName { + inner: XorName, +} + +/// Generate a random XorName. #[pyfunction] -fn random_xor() -> String { - addr_to_str(XorName::random(&mut rand::thread_rng())) +fn random_xor() -> PyXorName { + PyXorName { + inner: XorName::random(&mut rand::thread_rng()), + } } #[pymodule] @@ -2039,10 +2181,3 @@ fn autonomi_client_module(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap_pyfunction!(random_xor, m)?)?; Ok(()) } - -// Helper function to convert argument hex string to XorName. -fn str_to_addr(addr: &Bound<'_, PyAny>) -> PyResult { - let addr: String = addr.extract()?; - crate::client::address::str_to_addr(&addr) - .map_err(|e| PyValueError::new_err(format!("`addr` has invalid format: {e:?}"))) -} diff --git a/autonomi/tests/address.rs b/autonomi/tests/address.rs new file mode 100644 index 0000000000..206a06e576 --- /dev/null +++ b/autonomi/tests/address.rs @@ -0,0 +1,152 @@ +// Copyright 2025 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use ant_logging::LogBuilder; +use autonomi::chunk::DataMapChunk; +use autonomi::client::payment::PaymentOption; +use autonomi::data::DataAddress; +use autonomi::pointer::PointerTarget; +use autonomi::register::RegisterAddress; +use autonomi::{client::chunk::Chunk, Bytes, Client}; +use autonomi::{ + ChunkAddress, GraphEntry, GraphEntryAddress, Pointer, PointerAddress, Scratchpad, + ScratchpadAddress, +}; +use eyre::Result; +use serial_test::serial; +use test_utils::evm::get_funded_wallet; + +#[tokio::test] +#[serial] +async fn test_data_addresses_use() -> Result<()> { + let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("data_addresses", false); + + let client = Client::init_local().await?; + let wallet = get_funded_wallet(); + + // put the chunk + let chunk = Chunk::new(Bytes::from("Chunk content example")); + let payment_option = PaymentOption::from(&wallet); + let (_cost, addr) = client.chunk_put(&chunk, payment_option).await?; + assert_eq!(addr, *chunk.address()); + let chunk_addr = addr.to_hex(); + println!("Chunk: {chunk_addr}"); + + let parsed_chunk_addr = ChunkAddress::from_hex(&chunk_addr)?; + assert_eq!(parsed_chunk_addr, *chunk.address()); + + // put data + let data = Bytes::from("Private data example"); + let payment_option = PaymentOption::from(&wallet); + let (_cost, addr) = client.data_put(data, payment_option).await?; + let data_addr = addr.to_hex(); + println!("Private Data (hex DataMapChunk): {data_addr}"); + + let parsed_data_addr = DataMapChunk::from_hex(&data_addr)?; + assert_eq!(parsed_data_addr, addr); + + // put public data + let data = Bytes::from("Public data example"); + let payment_option = PaymentOption::from(&wallet); + let (_cost, addr) = client.data_put_public(data, payment_option).await?; + let public_data_addr = addr.to_hex(); + println!("Public Data (XorName): {public_data_addr}"); + + let parsed_public_data_addr = DataAddress::from_hex(&public_data_addr)?; + assert_eq!(parsed_public_data_addr, addr); + + // put graph entry + let key = bls::SecretKey::random(); + let other_key = bls::SecretKey::random(); + let content = [0u8; 32]; + let graph_entry = GraphEntry::new( + &key, + vec![other_key.public_key()], + content, + vec![(other_key.public_key(), content)], + ); + let payment_option = PaymentOption::from(&wallet); + let (_cost, addr) = client.graph_entry_put(graph_entry, payment_option).await?; + let graph_entry_addr = addr.to_hex(); + println!("Graph Entry: {graph_entry_addr}"); + let graph_entry_bls_pubkey = key.public_key().to_hex(); + println!("Graph Entry (bls pubkey): {graph_entry_bls_pubkey}"); + + let parsed_graph_entry_addr = GraphEntryAddress::from_hex(&graph_entry_addr)?; + assert_eq!(parsed_graph_entry_addr, addr); + let parsed_graph_entry_bls_pubkey = GraphEntryAddress::from_hex(&graph_entry_bls_pubkey)?; + assert_eq!(parsed_graph_entry_bls_pubkey, addr); + + // put pointer + let key = bls::SecretKey::random(); + let pointer = Pointer::new(&key, 0, PointerTarget::GraphEntryAddress(addr)); + let payment_option = PaymentOption::from(&wallet); + let (_cost, addr) = client.pointer_put(pointer, payment_option).await?; + let pointer_addr = 
addr.to_hex(); + println!("Pointer: {pointer_addr}"); + let pointer_bls_pubkey = key.public_key().to_hex(); + println!("Pointer (bls pubkey): {pointer_bls_pubkey}"); + + let parsed_pointer_addr = PointerAddress::from_hex(&pointer_addr)?; + assert_eq!(parsed_pointer_addr, addr); + let parsed_pointer_bls_pubkey = PointerAddress::from_hex(&pointer_bls_pubkey)?; + assert_eq!(parsed_pointer_bls_pubkey, addr); + + // put scratchpad + let key = bls::SecretKey::random(); + let scratchpad = Scratchpad::new(&key, 0, &Bytes::from("Scratchpad content example"), 0); + let payment_option = PaymentOption::from(&wallet); + let (_cost, addr) = client.scratchpad_put(scratchpad, payment_option).await?; + let scratchpad_addr = addr.to_hex(); + println!("Scratchpad: {scratchpad_addr}"); + let scratchpad_bls_pubkey = key.public_key().to_hex(); + println!("Scratchpad (bls pubkey): {scratchpad_bls_pubkey}"); + + let parsed_scratchpad_addr = ScratchpadAddress::from_hex(&scratchpad_addr)?; + assert_eq!(parsed_scratchpad_addr, addr); + let parsed_scratchpad_bls_pubkey = ScratchpadAddress::from_hex(&scratchpad_bls_pubkey)?; + assert_eq!(parsed_scratchpad_bls_pubkey, addr); + + // put register + let key = bls::SecretKey::random(); + let payment_option = PaymentOption::from(&wallet); + let value = Client::register_value_from_bytes(b"Register content example")?; + let (_cost, addr) = client.register_create(&key, value, payment_option).await?; + let register_addr = addr.to_hex(); + println!("Register: {register_addr}"); + let register_bls_pubkey = key.public_key().to_hex(); + println!("Register (bls pubkey): {register_bls_pubkey}"); + + let parsed_register_addr = RegisterAddress::from_hex(®ister_addr)?; + assert_eq!(parsed_register_addr, addr); + let parsed_register_bls_pubkey = RegisterAddress::from_hex(®ister_bls_pubkey)?; + assert_eq!(parsed_register_bls_pubkey, addr); + + // put private dir + let payment_option = PaymentOption::from(&wallet); + let path = "tests/file/test_dir/".into(); + let (_cost, archive_datamap) = client.dir_upload(path, payment_option.clone()).await?; + let archive_datamap_addr = archive_datamap.to_hex(); + println!("Private Archive (DataMap): {archive_datamap_addr}"); + + let parsed_archive_datamap_addr = DataMapChunk::from_hex(&archive_datamap_addr)?; + assert_eq!(parsed_archive_datamap_addr, archive_datamap); + + // put public dir + let path = "tests/file/test_dir/".into(); + let (_cost, archive_addr) = client + .dir_upload_public(path, payment_option.clone()) + .await?; + let archive_addr_str = archive_addr.to_hex(); + println!("Public Archive (XorName): {archive_addr_str}"); + + let parsed_archive_addr = DataAddress::from_hex(&archive_addr_str)?; + assert_eq!(parsed_archive_addr, archive_addr); + + Ok(()) +} From 382a978ff0d10ce6ec6c1991e7c05b921249b172 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 25 Feb 2025 16:12:42 +0900 Subject: [PATCH 60/69] fix: doctest (cherry picked from commit eaf08046703f0b5d86afb6f003a45d700b2db9e8) --- .../src/client/high_level/files/archive_public.rs | 12 +++++++----- autonomi/src/lib.rs | 3 ++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/autonomi/src/client/high_level/files/archive_public.rs b/autonomi/src/client/high_level/files/archive_public.rs index 03a008dd4f..25881db3dd 100644 --- a/autonomi/src/client/high_level/files/archive_public.rs +++ b/autonomi/src/client/high_level/files/archive_public.rs @@ -98,7 +98,7 @@ impl PublicArchive { } /// Iterate over the archive items - /// Returns an iterator over (PathBuf, DataAddr, Metadata) 
+ /// Returns an iterator over ([`PathBuf`], [`DataAddress`], [`Metadata`]) pub fn iter(&self) -> impl Iterator { self.map .iter() @@ -142,11 +142,12 @@ impl Client { /// # Example /// /// ```no_run - /// # use autonomi::{Client, client::files::archive_public::ArchiveAddr}; + /// # use autonomi::{Client, XorName, client::files::archive_public::ArchiveAddress}; /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// let client = Client::init().await?; - /// let archive = client.archive_get_public(&ArchiveAddr::random(&mut rand::thread_rng())).await?; + /// let addr = ArchiveAddress::new(XorName::random(&mut rand::thread_rng())); + /// let archive = client.archive_get_public(&addr).await?; /// # Ok(()) /// # } /// ``` @@ -165,7 +166,7 @@ impl Client { /// Create simple archive containing `file.txt` pointing to random XOR name. /// /// ```no_run - /// # use autonomi::{Client, client::{data::DataAddr, files::{Metadata, archive_public::{PublicArchive, ArchiveAddr}}}}; + /// # use autonomi::{Client, XorName, client::{data::DataAddress, files::{Metadata, archive_public::{PublicArchive, ArchiveAddress}}}}; /// # use autonomi::client::payment::PaymentOption; /// # use std::path::PathBuf; /// # #[tokio::main] @@ -174,7 +175,8 @@ impl Client { /// # let wallet = todo!(); /// # let payment = PaymentOption::Wallet(wallet); /// let mut archive = PublicArchive::new(); - /// archive.add_file(PathBuf::from("file.txt"), DataAddr::random(&mut rand::thread_rng()), Metadata::new_with_size(0)); + /// let data_addr = DataAddress::new(XorName::random(&mut rand::thread_rng())); + /// archive.add_file(PathBuf::from("file.txt"), data_addr, Metadata::new_with_size(0)); /// let (cost, address) = client.archive_put_public(&archive, payment).await?; /// # Ok(()) /// # } diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 5709030fe6..b16b23ca4c 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -80,8 +80,9 @@ pub use ant_evm::QuoteHash; pub use ant_evm::RewardsAddress; pub use ant_evm::{Amount, AttoTokens}; -// Re-exports of the ant-protocol address parsing error +// Re-exports of address related types pub use ant_protocol::storage::AddressParseError; +pub use xor_name::XorName; // Re-exports of the bls types pub use bls::{PublicKey, SecretKey, Signature}; From 224954e014f4b87481a355e45c874ce9500ea5da Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 25 Feb 2025 16:39:47 +0900 Subject: [PATCH 61/69] chore: adjust doc comment (cherry picked from commit b7a0cb8b1eeee184df594d1c4a355bda3c3f9f40) --- autonomi/src/client/high_level/files/archive_public.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autonomi/src/client/high_level/files/archive_public.rs b/autonomi/src/client/high_level/files/archive_public.rs index 25881db3dd..8885c9ee06 100644 --- a/autonomi/src/client/high_level/files/archive_public.rs +++ b/autonomi/src/client/high_level/files/archive_public.rs @@ -37,9 +37,9 @@ pub type ArchiveAddress = DataAddress; pub struct PublicArchive { /// Path of the file in the directory /// | Data address of the content of the file (points to a DataMap) - /// | | Metadata of the file - /// | | | - /// V V V + /// | | Metadata of the file + /// | | | + /// V V V map: BTreeMap, } From 75713afca211d081d4fcc3ad7236970b746ffe41 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 25 Feb 2025 15:47:33 +0100 Subject: [PATCH 62/69] refactor: rename `from_hex` to `try_from_hex` in `ChunkAddress` (cherry picked from commit 6e955c15dc0007403e3b7bf8e1b98451778f6dd6) --- 
ant-protocol/src/storage/address/chunk.rs | 2 +- autonomi/tests/address.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ant-protocol/src/storage/address/chunk.rs b/ant-protocol/src/storage/address/chunk.rs index 3539abd466..963b8c28ba 100644 --- a/ant-protocol/src/storage/address/chunk.rs +++ b/ant-protocol/src/storage/address/chunk.rs @@ -34,7 +34,7 @@ impl ChunkAddress { } /// Creates a new ChunkAddress from a hex string. - pub fn from_hex(hex: &str) -> Result { + pub fn try_from_hex(hex: &str) -> Result { let bytes = hex::decode(hex)?; let xor = XorName( bytes diff --git a/autonomi/tests/address.rs b/autonomi/tests/address.rs index 206a06e576..165bcfa359 100644 --- a/autonomi/tests/address.rs +++ b/autonomi/tests/address.rs @@ -37,7 +37,7 @@ async fn test_data_addresses_use() -> Result<()> { let chunk_addr = addr.to_hex(); println!("Chunk: {chunk_addr}"); - let parsed_chunk_addr = ChunkAddress::from_hex(&chunk_addr)?; + let parsed_chunk_addr = ChunkAddress::try_from_hex(&chunk_addr)?; assert_eq!(parsed_chunk_addr, *chunk.address()); // put data From d6fd7b6f9e18a9f5c0f1775b862f656afdaaacb1 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 26 Feb 2025 16:21:16 +0900 Subject: [PATCH 63/69] fix: backwards compatible for archive serialization (cherry picked from commit ea87edbc3260f9b7286e5eef6675bbb474f97970) --- autonomi/src/client/high_level/data/mod.rs | 38 ++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/autonomi/src/client/high_level/data/mod.rs b/autonomi/src/client/high_level/data/mod.rs index 7c103ec6a3..982bdc6bf5 100644 --- a/autonomi/src/client/high_level/data/mod.rs +++ b/autonomi/src/client/high_level/data/mod.rs @@ -7,7 +7,6 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use ant_protocol::storage::AddressParseError; -use serde::{Deserialize, Serialize}; use std::hash::Hash; use xor_name::XorName; @@ -17,7 +16,7 @@ pub mod private; pub mod public; /// A [`DataAddress`] which points to a DataMap -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] pub struct DataAddress(XorName); impl DataAddress { @@ -53,3 +52,38 @@ impl std::fmt::Display for DataAddress { write!(f, "{}", &self.to_hex()) } } + +impl serde::Serialize for DataAddress { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0.serialize(serializer) + } +} + +impl<'de> serde::Deserialize<'de> for DataAddress { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let xor_name = XorName::deserialize(deserializer)?; + Ok(Self(xor_name)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_serialize_deserialize() { + let name = XorName::random(&mut rand::thread_rng()); + let data_address = DataAddress::new(name); + let name_serialized = rmp_serde::to_vec_named(&name).unwrap(); + let serialized = rmp_serde::to_vec_named(&data_address).unwrap(); + assert_eq!(name_serialized, serialized); + let deserialized: DataAddress = rmp_serde::from_slice(&serialized).unwrap(); + assert_eq!(data_address, deserialized); + } +} From 56f157ea7fddf6b3f80a986ef790ebd9d9a45641 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 26 Feb 2025 19:50:30 +0000 Subject: [PATCH 64/69] chore(release): release candidate 2025.1.2.6 ================== Crate Versions ================== ant-bootstrap: 0.1.6-rc.3 ant-build-info: 0.1.25-rc.3 ant-cli: 0.3.8-rc.3 ant-evm: 0.1.10-rc.3 ant-logging: 0.2.47-rc.3 ant-metrics: 0.1.26-rc.3 ant-networking: 0.3.6-rc.3 ant-node: 0.3.7-rc.3 ant-node-manager: 0.12.0-rc.3 ant-node-rpc-client: 0.6.43-rc.3 ant-protocol: 1.0.1-rc.3 ant-service-management: 0.4.9-rc.3 ant-token-supplies: 0.1.64-rc.3 autonomi: 0.4.0-rc.3 evmlib: 0.1.10-rc.3 evm-testnet: 0.1.10-rc.3 nat-detection: 0.2.17-rc.3 node-launchpad: 0.5.5-rc.3 test-utils: 0.4.17-rc.3 =================== Binary Versions =================== ant: 0.3.8-rc.3 antctl: 0.12.0-rc.3 antctld: 0.12.0-rc.3 antnode: 0.3.7-rc.3 antnode_rpc_client: 0.6.43-rc.3 nat-detection: 0.2.17-rc.3 node-launchpad: 0.5.5-rc.3 --- Cargo.lock | 38 +++++++++++++++--------------- ant-bootstrap/Cargo.toml | 6 ++--- ant-build-info/Cargo.toml | 2 +- ant-build-info/src/release_info.rs | 2 +- ant-cli/Cargo.toml | 14 +++++------ ant-evm/Cargo.toml | 4 ++-- ant-logging/Cargo.toml | 2 +- ant-metrics/Cargo.toml | 2 +- ant-networking/Cargo.toml | 10 ++++---- ant-node-manager/Cargo.toml | 14 +++++------ ant-node-rpc-client/Cargo.toml | 12 +++++----- ant-node/Cargo.toml | 22 ++++++++--------- ant-protocol/Cargo.toml | 6 ++--- ant-service-management/Cargo.toml | 10 ++++---- ant-token-supplies/Cargo.toml | 2 +- autonomi/Cargo.toml | 12 +++++----- evm-testnet/Cargo.toml | 6 ++--- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +++---- node-launchpad/Cargo.toml | 14 +++++------ release-cycle-info | 2 +- test-utils/Cargo.toml | 4 ++-- 22 files changed, 97 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 273bc4ba46..3fcf70678c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -772,7 +772,7 @@ dependencies = [ [[package]] name = "ant-bootstrap" -version = "0.1.6-rc.2" +version = "0.1.6-rc.3" dependencies = [ "ant-logging", "ant-protocol", @@ -796,7 +796,7 @@ dependencies = 
[ [[package]] name = "ant-build-info" -version = "0.1.25-rc.2" +version = "0.1.25-rc.3" dependencies = [ "chrono", "tracing", @@ -805,7 +805,7 @@ dependencies = [ [[package]] name = "ant-cli" -version = "0.3.8-rc.2" +version = "0.3.8-rc.3" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -836,7 +836,7 @@ dependencies = [ [[package]] name = "ant-evm" -version = "0.1.10-rc.2" +version = "0.1.10-rc.3" dependencies = [ "custom_debug", "evmlib", @@ -858,7 +858,7 @@ dependencies = [ [[package]] name = "ant-logging" -version = "0.2.47-rc.2" +version = "0.2.47-rc.3" dependencies = [ "chrono", "color-eyre", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "ant-metrics" -version = "0.1.26-rc.2" +version = "0.1.26-rc.3" dependencies = [ "clap", "color-eyre", @@ -897,7 +897,7 @@ dependencies = [ [[package]] name = "ant-networking" -version = "0.3.6-rc.2" +version = "0.3.6-rc.3" dependencies = [ "aes-gcm-siv", "ant-bootstrap", @@ -938,7 +938,7 @@ dependencies = [ [[package]] name = "ant-node" -version = "0.3.7-rc.2" +version = "0.3.7-rc.3" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -994,7 +994,7 @@ dependencies = [ [[package]] name = "ant-node-manager" -version = "0.12.0-rc.2" +version = "0.12.0-rc.3" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -1037,7 +1037,7 @@ dependencies = [ [[package]] name = "ant-node-rpc-client" -version = "0.6.43-rc.2" +version = "0.6.43-rc.3" dependencies = [ "ant-build-info", "ant-logging", @@ -1061,7 +1061,7 @@ dependencies = [ [[package]] name = "ant-protocol" -version = "1.0.1-rc.2" +version = "1.0.1-rc.3" dependencies = [ "ant-build-info", "ant-evm", @@ -1112,7 +1112,7 @@ dependencies = [ [[package]] name = "ant-service-management" -version = "0.4.9-rc.2" +version = "0.4.9-rc.3" dependencies = [ "ant-bootstrap", "ant-evm", @@ -1140,7 +1140,7 @@ dependencies = [ [[package]] name = "ant-token-supplies" -version = "0.1.64-rc.2" +version = "0.1.64-rc.3" dependencies = [ "dirs-next", "reqwest 0.11.27", @@ -1578,7 +1578,7 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "autonomi" -version = "0.4.0-rc.2" +version = "0.4.0-rc.3" dependencies = [ "alloy", "ant-bootstrap", @@ -3187,7 +3187,7 @@ dependencies = [ [[package]] name = "evm-testnet" -version = "0.1.10-rc.2" +version = "0.1.10-rc.3" dependencies = [ "ant-evm", "clap", @@ -3198,7 +3198,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.10-rc.2" +version = "0.1.10-rc.3" dependencies = [ "alloy", "dirs-next", @@ -6047,7 +6047,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.17-rc.2" +version = "0.2.17-rc.3" dependencies = [ "ant-build-info", "ant-networking", @@ -6188,7 +6188,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.5.5-rc.2" +version = "0.5.5-rc.3" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -8802,7 +8802,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "test-utils" -version = "0.4.17-rc.2" +version = "0.4.17-rc.3" dependencies = [ "bytes", "color-eyre", diff --git a/ant-bootstrap/Cargo.toml b/ant-bootstrap/Cargo.toml index da3584dd6d..652956c320 100644 --- a/ant-bootstrap/Cargo.toml +++ b/ant-bootstrap/Cargo.toml @@ -7,14 +7,14 @@ license = "GPL-3.0" name = "ant-bootstrap" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.6-rc.2" +version = "0.1.6-rc.3" [features] local = [] [dependencies] -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } 
-ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } atomic-write-file = "0.2.2" chrono = { version = "0.4", features = ["serde"] } clap = { version = "4.2.1", features = ["derive", "env"] } diff --git a/ant-build-info/Cargo.toml b/ant-build-info/Cargo.toml index af6ae83865..aca918b4bd 100644 --- a/ant-build-info/Cargo.toml +++ b/ant-build-info/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-build-info" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.25-rc.2" +version = "0.1.25-rc.3" build = "build.rs" include = ["Cargo.toml", "src/**/*", "build.rs"] diff --git a/ant-build-info/src/release_info.rs b/ant-build-info/src/release_info.rs index 913fff66c2..1ee586e62d 100644 --- a/ant-build-info/src/release_info.rs +++ b/ant-build-info/src/release_info.rs @@ -1,4 +1,4 @@ pub const RELEASE_YEAR: &str = "2025"; pub const RELEASE_MONTH: &str = "1"; pub const RELEASE_CYCLE: &str = "2"; -pub const RELEASE_CYCLE_COUNTER: &str = "5"; +pub const RELEASE_CYCLE_COUNTER: &str = "6"; diff --git a/ant-cli/Cargo.toml b/ant-cli/Cargo.toml index e61bcb1a5a..d60d6b5c9a 100644 --- a/ant-cli/Cargo.toml +++ b/ant-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] name = "ant-cli" description = "CLI client for the Autonomi network" license = "GPL-3.0" -version = "0.3.8-rc.2" +version = "0.3.8-rc.3" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -23,11 +23,11 @@ name = "files" harness = false [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } -autonomi = { path = "../autonomi", version = "0.4.0-rc.2", features = [ "loud" ] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.3", features = [ "loud" ] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "0.6.3" const-hex = "1.13.1" @@ -54,7 +54,7 @@ tracing = { version = "~0.1.26" } walkdir = "2.5.0" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.4.0-rc.2" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.3" } criterion = "0.5.1" eyre = "0.6.8" rand = { version = "~0.8.5", features = ["small_rng"] } diff --git a/ant-evm/Cargo.toml b/ant-evm/Cargo.toml index a37de6c228..fa650d52d8 100644 --- a/ant-evm/Cargo.toml +++ b/ant-evm/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-evm" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.2" +version = "0.1.10-rc.3" [features] external-signer = ["evmlib/external-signer"] @@ -15,7 +15,7 @@ test-utils = [] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.3" } hex = "~0.4.3" lazy_static = "1.4.0" libp2p = { version = "0.55.0", features = ["identify", "kad"] } diff --git a/ant-logging/Cargo.toml b/ant-logging/Cargo.toml index f5b35fe531..fa019ffbba 100644 --- 
a/ant-logging/Cargo.toml +++ b/ant-logging/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-logging" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.2.47-rc.2" +version = "0.2.47-rc.3" [dependencies] chrono = "~0.4.19" diff --git a/ant-metrics/Cargo.toml b/ant-metrics/Cargo.toml index 429f465416..8e7a511427 100644 --- a/ant-metrics/Cargo.toml +++ b/ant-metrics/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-metrics" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.26-rc.2" +version = "0.1.26-rc.3" [[bin]] path = "src/main.rs" diff --git a/ant-networking/Cargo.toml b/ant-networking/Cargo.toml index e85d35edc9..db292ad6be 100644 --- a/ant-networking/Cargo.toml +++ b/ant-networking/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-networking" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.3.6-rc.2" +version = "0.3.6-rc.3" [features] default = [] @@ -16,10 +16,10 @@ open-metrics = ["libp2p/metrics", "prometheus-client", "hyper", "sysinfo"] [dependencies] aes-gcm-siv = "0.11.1" -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.2" } bytes = { version = "1.0.1", features = ["serde"] } diff --git a/ant-node-manager/Cargo.toml b/ant-node-manager/Cargo.toml index 79f8011c5b..d34deb753c 100644 --- a/ant-node-manager/Cargo.toml +++ b/ant-node-manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.12.0-rc.2" +version = "0.12.0-rc.3" [[bin]] name = "antctl" @@ -29,13 +29,13 @@ tcp = [] websockets = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } ant-releases = { version = "0.4.0" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.2" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.3" } chrono = "~0.4.19" clap = { version = "4.4.6", features = ["derive", "env"] } colored = "2.0.4" diff --git a/ant-node-rpc-client/Cargo.toml b/ant-node-rpc-client/Cargo.toml index c46b7bc8df..3146b4e318 100644 --- a/ant-node-rpc-client/Cargo.toml +++ b/ant-node-rpc-client/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-rpc-client" readme = "README.md" repository = 
"https://github.com/maidsafe/autonomi" -version = "0.6.43-rc.2" +version = "0.6.43-rc.3" [[bin]] name = "antnode_rpc_client" @@ -17,11 +17,11 @@ path = "src/main.rs" nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2", features=["rpc"] } -ant-node = { path = "../ant-node", version = "0.3.7-rc.2" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3", features=["rpc"] } +ant-node = { path = "../ant-node", version = "0.3.7-rc.3" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.3" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } clap = { version = "4.2.1", features = ["derive"] } diff --git a/ant-node/Cargo.toml b/ant-node/Cargo.toml index 81ea2ee346..5c77f41a18 100644 --- a/ant-node/Cargo.toml +++ b/ant-node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "The Autonomi node binary" name = "ant-node" -version = "0.3.7-rc.2" +version = "0.3.7-rc.3" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -22,13 +22,13 @@ open-metrics = ["ant-networking/open-metrics", "prometheus-client"] otlp = ["ant-logging/otlp"] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2", features = ["process-metrics"] } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.2" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3", features = ["process-metrics"] } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.3" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } @@ -77,10 +77,10 @@ walkdir = "~2.5.0" xor_name = "5.0.0" [dev-dependencies] -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2", features = ["rpc"] } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3", features = ["rpc"] } assert_fs = "1.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } -autonomi = { path = "../autonomi", version = "0.4.0-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.3" } +autonomi = { path = "../autonomi", version = "0.4.0-rc.3" } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } diff --git a/ant-protocol/Cargo.toml b/ant-protocol/Cargo.toml index 48f234ea28..e53386a3e1 100644 --- a/ant-protocol/Cargo.toml +++ b/ant-protocol/Cargo.toml @@ -7,15 
+7,15 @@ license = "GPL-3.0" name = "ant-protocol" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "1.0.1-rc.2" +version = "1.0.1-rc.3" [features] default = [] rpc = ["tonic", "prost"] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" diff --git a/ant-service-management/Cargo.toml b/ant-service-management/Cargo.toml index 415d0068a9..03c9114cac 100644 --- a/ant-service-management/Cargo.toml +++ b/ant-service-management/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "ant-service-management" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.9-rc.2" +version = "0.4.9-rc.3" [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2", features = ["rpc"] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3", features = ["rpc"] } async-trait = "0.1" dirs-next = "2.0.0" libp2p = { version = "0.55.0", features = ["kad"] } diff --git a/ant-token-supplies/Cargo.toml b/ant-token-supplies/Cargo.toml index 59a615a7d9..b2551701aa 100644 --- a/ant-token-supplies/Cargo.toml +++ b/ant-token-supplies/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-token-supplies" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.64-rc.2" +version = "0.1.64-rc.3" [dependencies] diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 2de18e7bf0..29e78a50c4 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.4.0-rc.2" +version = "0.4.0-rc.3" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -26,10 +26,10 @@ extension-module = ["pyo3/extension-module", "pyo3-async-runtimes"] loud = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } bip39 = "2.0.0" blst = "0.3.13" blstrs = "0.7.1" @@ -56,7 +56,7 @@ xor_name = "5.0.0" [dev-dependencies] alloy = { version = "0.7.3", default-features = false, features = ["contract", "json-rpc", "network", "node-bindings", "provider-http", "reqwest-rustls-tls", "rpc-client", "rpc-types", "signer-local", "std"] } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.2" } +ant-logging = { path = "../ant-logging", 
version = "0.2.47-rc.3" } eyre = "0.6.5" serial_test = "3.2.0" sha2 = "0.10.6" diff --git a/evm-testnet/Cargo.toml b/evm-testnet/Cargo.toml index e6379dbf50..5859f35d78 100644 --- a/evm-testnet/Cargo.toml +++ b/evm-testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm-testnet" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.2" +version = "0.1.10-rc.3" [dependencies] -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.3" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 4376d7d9f3..4701fbefda 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.2" +version = "0.1.10-rc.3" [features] external-signer = [] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 13010d3672..304c62b38f 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.2.17-rc.2" +version = "0.2.17-rc.3" [[bin]] name = "nat-detection" @@ -17,9 +17,9 @@ path = "src/main.rs" nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.2" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-networking = { path = "../ant-networking", version = "0.3.6-rc.3" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } clap = { version = "4.5.4", features = ["derive"] } clap-verbosity-flag = "2.2.0" color-eyre = { version = "0.6", default-features = false } diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 8bb0c0513d..e609d4f9aa 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "TUI for running nodes on the Autonomi network" name = "node-launchpad" -version = "0.5.5-rc.2" +version = "0.5.5-rc.3" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -18,13 +18,13 @@ path = "src/bin/tui/main.rs" nightly = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.2" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.2" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.2" } -ant-node-manager = { version = "0.12.0-rc.2", path = "../ant-node-manager" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.2" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } +ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-node-manager = { version = "0.12.0-rc.3", path = "../ant-node-manager" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } ant-releases = { version = "0.4.0" } -ant-service-management = { version = "0.4.9-rc.2", path = "../ant-service-management" } +ant-service-management 
= { version = "0.4.9-rc.3", path = "../ant-service-management" } arboard = "3.4.1" atty = "0.2.14" better-panic = "0.3.0" diff --git a/release-cycle-info b/release-cycle-info index 8509b8e616..4ea27c1e5d 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -15,4 +15,4 @@ release-year: 2025 release-month: 1 release-cycle: 2 -release-cycle-counter: 5 +release-cycle-counter: 6 diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index abe5c9cdf8..ae5b854b4f 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "test-utils" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.17-rc.2" +version = "0.4.17-rc.3" [dependencies] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.10-rc.3" } libp2p = { version = "0.55.0", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } From 011a265653b841547a2eec848e68be638c2f2991 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 27 Feb 2025 15:34:44 +0100 Subject: [PATCH 65/69] feat!: add `TransactionConfig` --- ant-node/tests/common/client.rs | 5 +- evmlib/src/contract/network_token.rs | 33 ++++++++++++-- evmlib/src/contract/payment_vault/handler.rs | 12 ++++- evmlib/src/lib.rs | 1 + evmlib/src/retry.rs | 5 +- evmlib/src/transaction_config.rs | 20 ++++++++ evmlib/src/wallet.rs | 48 +++++++++++++++++--- evmlib/tests/gas_fee_limit.rs | 14 ++++++ evmlib/tests/network_token.rs | 5 +- evmlib/tests/payment_vault.rs | 25 ++++++++-- evmlib/tests/wallet.rs | 4 ++ 11 files changed, 151 insertions(+), 21 deletions(-) create mode 100644 evmlib/src/transaction_config.rs create mode 100644 evmlib/tests/gas_fee_limit.rs diff --git a/ant-node/tests/common/client.rs b/ant-node/tests/common/client.rs index 851edb53de..ee9aa37cc3 100644 --- a/ant-node/tests/common/client.rs +++ b/ant-node/tests/common/client.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use crate::common::get_antnode_rpc_client; use ant_evm::Amount; use ant_protocol::antnode_proto::{NodeInfoRequest, RestartRequest}; use ant_service_management::{get_local_node_registry_path, NodeRegistry}; @@ -21,8 +22,6 @@ use tokio::sync::Mutex; use tonic::Request; use tracing::{debug, info}; -use crate::common::get_antnode_rpc_client; - /// This is a limited hard coded value as Droplet version has to contact the faucet to get the funds. 
/// This is limited to 10 requests to the faucet, where each request yields 100 SNT pub const INITIAL_WALLET_BALANCE: u64 = 3 * 100 * 1_000_000_000; @@ -136,7 +135,7 @@ impl LocalNetwork { .expect("Client shall be successfully created.") } - fn get_funded_wallet() -> evmlib::wallet::Wallet { + fn get_funded_wallet() -> Wallet { get_funded_wallet() } diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs index 47f11946a1..6144221d9a 100644 --- a/evmlib/src/contract/network_token.rs +++ b/evmlib/src/contract/network_token.rs @@ -9,6 +9,7 @@ use crate::common::{Address, Calldata, TxHash, U256}; use crate::contract::network_token::NetworkTokenContract::NetworkTokenContractInstance; use crate::retry::{retry, send_transaction_with_retries}; +use crate::transaction_config::TransactionConfig; use alloy::providers::{Network, Provider}; use alloy::sol; use alloy::transports::{RpcError, Transport, TransportErrorKind}; @@ -92,10 +93,22 @@ where } /// Approve spender to spend a raw amount of tokens. - pub async fn approve(&self, spender: Address, value: U256) -> Result { + pub async fn approve( + &self, + spender: Address, + value: U256, + transaction_config: &TransactionConfig, + ) -> Result { debug!("Approving spender {spender:?} to spend {value}"); let (calldata, to) = self.approve_calldata(spender, value); - send_transaction_with_retries(self.contract.provider(), calldata, to, "approve").await + send_transaction_with_retries( + self.contract.provider(), + calldata, + to, + "approve", + transaction_config, + ) + .await } /// Approve spender to spend a raw amount of tokens. @@ -106,10 +119,22 @@ where } /// Transfer a raw amount of tokens. - pub async fn transfer(&self, receiver: Address, amount: U256) -> Result { + pub async fn transfer( + &self, + receiver: Address, + amount: U256, + transaction_config: &TransactionConfig, + ) -> Result { debug!("Transferring raw amount of tokens: {amount} to {receiver:?}"); let (calldata, to) = self.transfer_calldata(receiver, amount); - send_transaction_with_retries(self.contract.provider(), calldata, to, "transfer").await + send_transaction_with_retries( + self.contract.provider(), + calldata, + to, + "transfer", + transaction_config, + ) + .await } /// Transfer a raw amount of tokens. diff --git a/evmlib/src/contract/payment_vault/handler.rs b/evmlib/src/contract/payment_vault/handler.rs index 026ec37c72..e69b4902c8 100644 --- a/evmlib/src/contract/payment_vault/handler.rs +++ b/evmlib/src/contract/payment_vault/handler.rs @@ -3,6 +3,7 @@ use crate::contract::payment_vault::error::Error; use crate::contract::payment_vault::interface::IPaymentVault; use crate::contract::payment_vault::interface::IPaymentVault::IPaymentVaultInstance; use crate::retry::{retry, send_transaction_with_retries}; +use crate::transaction_config::TransactionConfig; use alloy::network::Network; use alloy::providers::Provider; use alloy::transports::Transport; @@ -61,11 +62,18 @@ where pub async fn pay_for_quotes>>( &self, data_payments: I, + transaction_config: &TransactionConfig, ) -> Result { debug!("Paying for quotes."); let (calldata, to) = self.pay_for_quotes_calldata(data_payments)?; - send_transaction_with_retries(self.contract.provider(), calldata, to, "pay for quotes") - .await + send_transaction_with_retries( + self.contract.provider(), + calldata, + to, + "pay for quotes", + transaction_config, + ) + .await } /// Returns the pay for quotes transaction calldata. 
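Each state-changing call above now takes an explicit `&TransactionConfig`. The type itself, added in the new `transaction_config` module further down, carries a single `max_fee_per_gas` cap (in wei per unit of gas). A minimal sketch of constructing one; the `60_000_000` figure is purely illustrative:

```rust
use evmlib::transaction_config::TransactionConfig;

fn choose_transaction_config(raise_cap: bool) -> TransactionConfig {
    if raise_cap {
        // Explicit cap; 60_000_000 is an arbitrary illustrative value.
        TransactionConfig::new(60_000_000)
    } else {
        // Falls back to DEFAULT_MAX_FEE_PER_GAS (40_000_000).
        TransactionConfig::default()
    }
}
```

The contract handlers pass the reference straight through to `send_transaction_with_retries`, which applies the cap via `with_max_fee_per_gas` when building the request.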
diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index 25c1e58c73..0f6dbb6fd2 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -26,6 +26,7 @@ pub mod external_signer; pub mod quoting_metrics; mod retry; pub mod testnet; +pub mod transaction_config; pub mod utils; pub mod wallet; diff --git a/evmlib/src/retry.rs b/evmlib/src/retry.rs index d802a9f087..b44c85ee3a 100644 --- a/evmlib/src/retry.rs +++ b/evmlib/src/retry.rs @@ -1,4 +1,5 @@ use crate::common::{Address, Calldata, TxHash}; +use crate::transaction_config::TransactionConfig; use crate::TX_TIMEOUT; use alloy::network::{Network, TransactionBuilder}; use alloy::providers::{PendingTransactionBuilder, Provider}; @@ -53,6 +54,7 @@ pub(crate) async fn send_transaction_with_retries( calldata: Calldata, to: Address, tx_identifier: &str, + transaction_config: &TransactionConfig, ) -> Result where T: Transport + Clone, @@ -69,7 +71,8 @@ where let mut transaction_request = provider .transaction_request() .with_to(to) - .with_input(calldata.clone()); + .with_input(calldata.clone()) + .with_max_fee_per_gas(transaction_config.max_fee_per_gas); // Retry with the same nonce to replace a stuck transaction if let Some(nonce) = nonce { diff --git a/evmlib/src/transaction_config.rs b/evmlib/src/transaction_config.rs new file mode 100644 index 0000000000..36ce5cb8b1 --- /dev/null +++ b/evmlib/src/transaction_config.rs @@ -0,0 +1,20 @@ +const DEFAULT_MAX_FEE_PER_GAS: u128 = 40_000_000; + +#[derive(Clone, Debug)] +pub struct TransactionConfig { + pub max_fee_per_gas: u128, +} + +impl TransactionConfig { + pub fn new(max_fee_per_gas: u128) -> Self { + Self { max_fee_per_gas } + } +} + +impl Default for TransactionConfig { + fn default() -> Self { + Self { + max_fee_per_gas: DEFAULT_MAX_FEE_PER_GAS, + } + } +} diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 40b302ccad..1629bba9e6 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -11,6 +11,7 @@ use crate::contract::network_token::NetworkToken; use crate::contract::payment_vault::handler::PaymentVaultHandler; use crate::contract::payment_vault::MAX_TRANSFERS_PER_TRANSACTION; use crate::contract::{network_token, payment_vault}; +use crate::transaction_config::TransactionConfig; use crate::utils::http_provider; use crate::{Network, TX_TIMEOUT}; use alloy::hex::ToHexExt; @@ -44,6 +45,7 @@ pub enum Error { pub struct Wallet { wallet: EthereumWallet, network: Network, + transaction_config: TransactionConfig, lock: Arc>, } @@ -53,6 +55,7 @@ impl Wallet { Self { wallet, network, + transaction_config: Default::default(), lock: Arc::new(tokio::sync::Mutex::new(())), } } @@ -94,7 +97,14 @@ impl Wallet { to: Address, amount: U256, ) -> Result { - transfer_tokens(self.wallet.clone(), &self.network, to, amount).await + transfer_tokens( + self.wallet.clone(), + &self.network, + to, + amount, + &self.transaction_config, + ) + .await } /// Transfer a raw amount of gas tokens to another address. @@ -117,7 +127,14 @@ impl Wallet { spender: Address, amount: U256, ) -> Result { - approve_to_spend_tokens(self.wallet.clone(), &self.network, spender, amount).await + approve_to_spend_tokens( + self.wallet.clone(), + &self.network, + spender, + amount, + &self.transaction_config, + ) + .await } /// Function for batch payments of quotes. 
It accepts an iterator of QuotePayment and returns @@ -126,7 +143,13 @@ impl Wallet { &self, quote_payments: I, ) -> Result, PayForQuotesError> { - pay_for_quotes(self.wallet.clone(), &self.network, quote_payments).await + pay_for_quotes( + self.wallet.clone(), + &self.network, + quote_payments, + &self.transaction_config, + ) + .await } /// Build a provider using this wallet. @@ -145,6 +168,11 @@ impl Wallet { let signer: PrivateKeySigner = LocalSigner::random(); signer.to_bytes().encode_hex_with_prefix() } + + /// Sets the transaction configuration for the wallet. + pub fn set_transaction_config(&mut self, config: TransactionConfig) { + self.transaction_config = config; + } } /// Generate an EthereumWallet with a random private key. @@ -229,11 +257,14 @@ pub async fn approve_to_spend_tokens( network: &Network, spender: Address, amount: U256, + transaction_config: &TransactionConfig, ) -> Result { debug!("Approving address/smart contract with {amount} tokens at address: {spender}",); let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); let network_token = NetworkToken::new(*network.payment_token_address(), provider); - network_token.approve(spender, amount).await + network_token + .approve(spender, amount, transaction_config) + .await } /// Transfer payment tokens from the supplied wallet to an address. @@ -242,11 +273,14 @@ pub async fn transfer_tokens( network: &Network, receiver: Address, amount: U256, + transaction_config: &TransactionConfig, ) -> Result { debug!("Transferring {amount} tokens to {receiver}"); let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); let network_token = NetworkToken::new(*network.payment_token_address(), provider); - network_token.transfer(receiver, amount).await + network_token + .transfer(receiver, amount, transaction_config) + .await } /// Transfer native/gas tokens from the supplied wallet to an address. 
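At the wallet level the override is opt-in: `Wallet` now carries a `TransactionConfig` (filled in via `Default` in its constructor) and forwards `&self.transaction_config` into each of the calls above, so existing callers compile unchanged. A minimal caller-side sketch, assuming an already constructed and funded wallet; the `60_000_000` cap and the helper itself are illustrative:

```rust
use evmlib::common::{Address, U256};
use evmlib::transaction_config::TransactionConfig;
use evmlib::wallet::Wallet;

// Raise the per-gas fee cap for this wallet, then transfer as usual; the config
// travels with the wallet into its approve/transfer/pay_for_quotes wrappers.
async fn transfer_with_raised_cap(wallet: &mut Wallet, to: Address, amount: U256) {
    wallet.set_transaction_config(TransactionConfig::new(60_000_000));

    // The call shape is unchanged for callers; only the underlying request
    // builder picks up the new max_fee_per_gas.
    let _tx_result = wallet.transfer_tokens(to, amount).await;
}
```

Setting the config affects only this wallet instance; other wallets keep the 40_000_000 default.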
@@ -290,6 +324,7 @@ pub async fn pay_for_quotes>( wallet: EthereumWallet, network: &Network, payments: T, + transaction_config: &TransactionConfig, ) -> Result, PayForQuotesError> { let payments: Vec<_> = payments.into_iter().collect(); info!("Paying for quotes of len: {}", payments.len()); @@ -326,6 +361,7 @@ pub async fn pay_for_quotes>( network, *network.data_payments_address(), U256::MAX, + transaction_config, ) .await .map_err(|err| PayForQuotesError(Error::from(err), Default::default()))?; @@ -354,7 +390,7 @@ pub async fn pay_for_quotes>( ); let tx_hash = data_payments - .pay_for_quotes(batch.clone()) + .pay_for_quotes(batch.clone(), transaction_config) .await .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; diff --git a/evmlib/tests/gas_fee_limit.rs b/evmlib/tests/gas_fee_limit.rs new file mode 100644 index 0000000000..b86e2550b1 --- /dev/null +++ b/evmlib/tests/gas_fee_limit.rs @@ -0,0 +1,14 @@ +use alloy::providers::Provider; +use evmlib::utils::http_provider; +use evmlib::Network; + +#[tokio::test] +async fn test_gas_fee_limit() { + let network = Network::ArbitrumOne; + let provider = http_provider(network.rpc_url().clone()); + let base_gas_price = provider.get_gas_price().await.unwrap(); + let max_priority_fee_per_gas = provider.get_max_priority_fee_per_gas().await.unwrap(); + + println!("Base gas price: {base_gas_price}"); + println!("Max priority fee per gas: {max_priority_fee_per_gas}"); +} diff --git a/evmlib/tests/network_token.rs b/evmlib/tests/network_token.rs index 878c4e950c..30b491a724 100644 --- a/evmlib/tests/network_token.rs +++ b/evmlib/tests/network_token.rs @@ -11,6 +11,7 @@ use alloy::signers::local::PrivateKeySigner; use alloy::transports::http::{Client, Http}; use evmlib::contract::network_token::NetworkToken; use evmlib::testnet::{deploy_network_token_contract, start_node}; +use evmlib::transaction_config::TransactionConfig; use evmlib::wallet::wallet_address; use std::str::FromStr; @@ -73,9 +74,11 @@ async fn test_approve() { let transaction_value = U256::from(1); let spender = PrivateKeySigner::random(); + let transaction_config = TransactionConfig::default(); + // Approve for the spender to spend a value from the funds of the owner (our default account). 
let approval_result = network_token - .approve(spender.address(), transaction_value) + .approve(spender.address(), transaction_value, &transaction_config) .await; assert!( diff --git a/evmlib/tests/payment_vault.rs b/evmlib/tests/payment_vault.rs index 8dc3b9e6a1..818f25470f 100644 --- a/evmlib/tests/payment_vault.rs +++ b/evmlib/tests/payment_vault.rs @@ -17,6 +17,7 @@ use evmlib::contract::payment_vault::handler::PaymentVaultHandler; use evmlib::contract::payment_vault::{interface, MAX_TRANSFERS_PER_TRANSACTION}; use evmlib::quoting_metrics::QuotingMetrics; use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, start_node}; +use evmlib::transaction_config::TransactionConfig; use evmlib::utils::http_provider; use evmlib::wallet::wallet_address; use evmlib::Network; @@ -145,6 +146,8 @@ async fn test_get_quote_on_arb_sepolia() { async fn test_pay_for_quotes_on_local() { let (_anvil, network_token, mut payment_vault) = setup().await; + let transaction_config = TransactionConfig::default(); + let mut quote_payments = vec![]; for _ in 0..MAX_TRANSFERS_PER_TRANSACTION { @@ -153,7 +156,11 @@ async fn test_pay_for_quotes_on_local() { } let _ = network_token - .approve(*payment_vault.contract.address(), U256::MAX) + .approve( + *payment_vault.contract.address(), + U256::MAX, + &transaction_config, + ) .await .unwrap(); @@ -161,7 +168,9 @@ async fn test_pay_for_quotes_on_local() { // so we set it to the same as the network token contract payment_vault.set_provider(network_token.contract.provider().clone()); - let result = payment_vault.pay_for_quotes(quote_payments).await; + let result = payment_vault + .pay_for_quotes(quote_payments, &transaction_config) + .await; assert!(result.is_ok(), "Failed with error: {:?}", result.err()); } @@ -170,6 +179,8 @@ async fn test_pay_for_quotes_on_local() { async fn test_verify_payment_on_local() { let (_anvil, network_token, mut payment_vault) = setup().await; + let transaction_config = TransactionConfig::default(); + let mut quote_payments = vec![]; for _ in 0..5 { @@ -178,7 +189,11 @@ async fn test_verify_payment_on_local() { } let _ = network_token - .approve(*payment_vault.contract.address(), U256::MAX) + .approve( + *payment_vault.contract.address(), + U256::MAX, + &transaction_config, + ) .await .unwrap(); @@ -186,7 +201,9 @@ async fn test_verify_payment_on_local() { // so we set it to the same as the network token contract payment_vault.set_provider(network_token.contract.provider().clone()); - let result = payment_vault.pay_for_quotes(quote_payments.clone()).await; + let result = payment_vault + .pay_for_quotes(quote_payments.clone(), &transaction_config) + .await; assert!(result.is_ok(), "Failed with error: {:?}", result.err()); diff --git a/evmlib/tests/wallet.rs b/evmlib/tests/wallet.rs index 6713879279..f0eca38fe1 100644 --- a/evmlib/tests/wallet.rs +++ b/evmlib/tests/wallet.rs @@ -11,6 +11,7 @@ use evmlib::common::{Amount, TxHash}; use evmlib::contract::payment_vault::{verify_data_payment, MAX_TRANSFERS_PER_TRANSACTION}; use evmlib::quoting_metrics::QuotingMetrics; use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, start_node}; +use evmlib::transaction_config::TransactionConfig; use evmlib::wallet::{transfer_tokens, wallet_address, Wallet}; use evmlib::{CustomNetwork, Network}; use std::collections::HashSet; @@ -51,12 +52,15 @@ async fn funded_wallet(network: &Network, genesis_wallet: EthereumWallet) -> Wal .await .unwrap(); + let transaction_config = TransactionConfig::default(); + // 
Fund the wallet with plenty of ERC20 tokens transfer_tokens( genesis_wallet, network, account, Amount::from(9999999999_u64), + &transaction_config, ) .await .unwrap(); From 0534346e71b8bf1ea202e5f3715e8d73d68f58b7 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 27 Feb 2025 15:47:21 +0100 Subject: [PATCH 66/69] feat(cli): add optional `--max-fee-per-gas` param to override transaction config --- ant-cli/src/commands.rs | 33 +++++++++++++++++++++++++------- ant-cli/src/commands/file.rs | 10 ++++++++-- ant-cli/src/commands/register.rs | 20 ++++++++++++++----- ant-cli/src/commands/vault.rs | 10 ++++++++-- ant-evm/src/lib.rs | 1 + autonomi/src/lib.rs | 1 + 6 files changed, 59 insertions(+), 16 deletions(-) diff --git a/ant-cli/src/commands.rs b/ant-cli/src/commands.rs index c98df9e8de..36db3506bd 100644 --- a/ant-cli/src/commands.rs +++ b/ant-cli/src/commands.rs @@ -63,6 +63,9 @@ pub enum FileCmd { /// Possible values are: "one", "majority", "all", n (where n is a number greater than 0) #[arg(short, long)] quorum: Option, + /// Optional: Specify the maximum fee per gas in u128. + #[arg(long)] + max_fee_per_gas: Option, }, /// Download a file from the given address. @@ -105,6 +108,9 @@ pub enum RegisterCmd { name: String, /// The value to store in the register. value: String, + /// Optional: Specify the maximum fee per gas in u128. + #[arg(long)] + max_fee_per_gas: Option, }, /// Edit an existing register. @@ -119,6 +125,9 @@ pub enum RegisterCmd { address: String, /// The new value to store in the register. value: String, + /// Optional: Specify the maximum fee per gas in u128. + #[arg(long)] + max_fee_per_gas: Option, }, /// Get the value of a register. @@ -147,7 +156,11 @@ pub enum VaultCmd { /// Create a vault at a deterministic address based on your `SECRET_KEY`. /// Pushing an encrypted backup of your local user data to the network - Create, + Create { + /// Optional: Specify the maximum fee per gas in u128. + #[arg(long)] + max_fee_per_gas: Option, + }, /// Load an existing vault from the network. /// Use this when loading your user data to a new device. 
@@ -207,7 +220,8 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { file, public, quorum, - } => file::upload(&file, public, peers.await?, quorum).await, + max_fee_per_gas, + } => file::upload(&file, public, peers.await?, quorum, max_fee_per_gas).await, FileCmd::Download { addr, dest_file, @@ -218,14 +232,17 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { Some(SubCmd::Register { command }) => match command { RegisterCmd::GenerateKey { overwrite } => register::generate_key(overwrite), RegisterCmd::Cost { name } => register::cost(&name, peers.await?).await, - RegisterCmd::Create { name, value } => { - register::create(&name, &value, peers.await?).await - } + RegisterCmd::Create { + name, + value, + max_fee_per_gas, + } => register::create(&name, &value, peers.await?, max_fee_per_gas).await, RegisterCmd::Edit { address, name, value, - } => register::edit(address, name, &value, peers.await?).await, + max_fee_per_gas + } => register::edit(address, name, &value, peers.await?, max_fee_per_gas).await, RegisterCmd::Get { address, name } => register::get(address, name, peers.await?).await, RegisterCmd::List => register::list(), }, @@ -233,7 +250,9 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { VaultCmd::Cost { expected_max_size } => { vault::cost(peers.await?, expected_max_size).await } - VaultCmd::Create => vault::create(peers.await?).await, + VaultCmd::Create { max_fee_per_gas } => { + vault::create(peers.await?, max_fee_per_gas).await + } VaultCmd::Load => vault::load(peers.await?).await, VaultCmd::Sync { force } => vault::sync(force, peers.await?).await, }, diff --git a/ant-cli/src/commands/file.rs b/ant-cli/src/commands/file.rs index fc934ebefc..ea8be8866b 100644 --- a/ant-cli/src/commands/file.rs +++ b/ant-cli/src/commands/file.rs @@ -10,8 +10,8 @@ use crate::network::NetworkPeers; use crate::utils::collect_upload_summary; use crate::wallet::load_wallet; use autonomi::client::payment::PaymentOption; -use autonomi::ClientOperatingStrategy; use autonomi::ResponseQuorum; +use autonomi::{ClientOperatingStrategy, TransactionConfig}; use color_eyre::eyre::Context; use color_eyre::eyre::Result; use color_eyre::Section; @@ -38,6 +38,7 @@ pub async fn upload( public: bool, peers: NetworkPeers, optional_verification_quorum: Option, + max_fee_per_gas: Option, ) -> Result<()> { let mut config = ClientOperatingStrategy::new(); if let Some(verification_quorum) = optional_verification_quorum { @@ -45,7 +46,12 @@ pub async fn upload( } let mut client = crate::actions::connect_to_network_with_config(peers, config).await?; - let wallet = load_wallet(client.evm_network())?; + let mut wallet = load_wallet(client.evm_network())?; + + if let Some(max_fee_per_gas) = max_fee_per_gas { + wallet.set_transaction_config(TransactionConfig::new(max_fee_per_gas)) + } + let payment = PaymentOption::Wallet(wallet); let event_receiver = client.enable_client_events(); let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); diff --git a/ant-cli/src/commands/register.rs b/ant-cli/src/commands/register.rs index cbd0c0c3ee..a2af234fe9 100644 --- a/ant-cli/src/commands/register.rs +++ b/ant-cli/src/commands/register.rs @@ -12,7 +12,7 @@ use crate::network::NetworkPeers; use crate::wallet::load_wallet; use autonomi::client::register::RegisterAddress; use autonomi::client::register::SecretKey as RegisterSecretKey; -use autonomi::Client; +use autonomi::{Client, TransactionConfig}; use color_eyre::eyre::eyre; use color_eyre::eyre::Context; use color_eyre::eyre::Result; 
@@ -52,11 +52,16 @@ pub async fn cost(name: &str, peers: NetworkPeers) -> Result<()> { Ok(()) } -pub async fn create(name: &str, value: &str, peers: NetworkPeers) -> Result<()> { +pub async fn create(name: &str, value: &str, peers: NetworkPeers, max_fee_per_gas: Option) -> Result<()> { let main_registers_key = crate::keys::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; let client = crate::actions::connect_to_network(peers).await?; - let wallet = load_wallet(client.evm_network())?; + let mut wallet = load_wallet(client.evm_network())?; + + if let Some(max_fee_per_gas) = max_fee_per_gas { + wallet.set_transaction_config(TransactionConfig::new(max_fee_per_gas)) + } + let register_key = Client::register_key_from_name(&main_registers_key, name); println!("Creating register with name: {name}"); @@ -81,11 +86,16 @@ pub async fn create(name: &str, value: &str, peers: NetworkPeers) -> Result<()> Ok(()) } -pub async fn edit(address: String, name: bool, value: &str, peers: NetworkPeers) -> Result<()> { +pub async fn edit(address: String, name: bool, value: &str, peers: NetworkPeers, max_fee_per_gas: Option,) -> Result<()> { let main_registers_key = crate::keys::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; let client = crate::actions::connect_to_network(peers).await?; - let wallet = load_wallet(client.evm_network())?; + let mut wallet = load_wallet(client.evm_network())?; + + if let Some(max_fee_per_gas) = max_fee_per_gas { + wallet.set_transaction_config(TransactionConfig::new(max_fee_per_gas)) + } + let value_bytes = Client::register_value_from_bytes(value.as_bytes())?; let register_key = if name { diff --git a/ant-cli/src/commands/vault.rs b/ant-cli/src/commands/vault.rs index 26ab592093..b535be587d 100644 --- a/ant-cli/src/commands/vault.rs +++ b/ant-cli/src/commands/vault.rs @@ -8,6 +8,7 @@ use crate::network::NetworkPeers; use crate::wallet::load_wallet; +use autonomi::TransactionConfig; use color_eyre::eyre::Context; use color_eyre::eyre::Result; use color_eyre::Section; @@ -27,9 +28,14 @@ pub async fn cost(peers: NetworkPeers, expected_max_size: u64) -> Result<()> { Ok(()) } -pub async fn create(peers: NetworkPeers) -> Result<()> { +pub async fn create(peers: NetworkPeers, max_fee_per_gas: Option) -> Result<()> { let client = crate::actions::connect_to_network(peers).await?; - let wallet = load_wallet(client.evm_network())?; + let mut wallet = load_wallet(client.evm_network())?; + + if let Some(max_fee_per_gas) = max_fee_per_gas { + wallet.set_transaction_config(TransactionConfig::new(max_fee_per_gas)) + } + let vault_sk = crate::keys::get_vault_secret_key()?; println!("Retrieving local user data..."); diff --git a/ant-evm/src/lib.rs b/ant-evm/src/lib.rs index e8d5e92784..2c173013bc 100644 --- a/ant-evm/src/lib.rs +++ b/ant-evm/src/lib.rs @@ -18,6 +18,7 @@ pub use evmlib::contract::payment_vault; pub use evmlib::cryptography; #[cfg(feature = "external-signer")] pub use evmlib::external_signer; +pub use evmlib::transaction_config::TransactionConfig; pub use evmlib::utils; pub use evmlib::utils::get_evm_network; pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL}; diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index b16b23ca4c..1b728a9551 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -78,6 +78,7 @@ pub use ant_evm::EvmNetwork as Network; pub use ant_evm::EvmWallet as Wallet; pub use ant_evm::QuoteHash; pub use ant_evm::RewardsAddress; +pub use 
ant_evm::TransactionConfig; pub use ant_evm::{Amount, AttoTokens}; // Re-exports of address related types From c414308718a968e3011b28b6eb226c07036bc18c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 27 Feb 2025 16:13:53 +0100 Subject: [PATCH 67/69] refactor: increase default max fee per gas --- evmlib/src/transaction_config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evmlib/src/transaction_config.rs b/evmlib/src/transaction_config.rs index 36ce5cb8b1..8e057daf41 100644 --- a/evmlib/src/transaction_config.rs +++ b/evmlib/src/transaction_config.rs @@ -1,4 +1,4 @@ -const DEFAULT_MAX_FEE_PER_GAS: u128 = 40_000_000; +const DEFAULT_MAX_FEE_PER_GAS: u128 = 200_000_000; // 0.2 Gwei #[derive(Clone, Debug)] pub struct TransactionConfig { From e34e78ab9fae20eb13230aec844e99ae3cb09340 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 27 Feb 2025 16:44:46 +0100 Subject: [PATCH 68/69] chore: cargo fmt --- ant-cli/src/commands.rs | 2 +- ant-cli/src/commands/register.rs | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/ant-cli/src/commands.rs b/ant-cli/src/commands.rs index 36db3506bd..49ef818cd8 100644 --- a/ant-cli/src/commands.rs +++ b/ant-cli/src/commands.rs @@ -241,7 +241,7 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { address, name, value, - max_fee_per_gas + max_fee_per_gas, } => register::edit(address, name, &value, peers.await?, max_fee_per_gas).await, RegisterCmd::Get { address, name } => register::get(address, name, peers.await?).await, RegisterCmd::List => register::list(), diff --git a/ant-cli/src/commands/register.rs b/ant-cli/src/commands/register.rs index a2af234fe9..23ce2417f6 100644 --- a/ant-cli/src/commands/register.rs +++ b/ant-cli/src/commands/register.rs @@ -52,7 +52,12 @@ pub async fn cost(name: &str, peers: NetworkPeers) -> Result<()> { Ok(()) } -pub async fn create(name: &str, value: &str, peers: NetworkPeers, max_fee_per_gas: Option) -> Result<()> { +pub async fn create( + name: &str, + value: &str, + peers: NetworkPeers, + max_fee_per_gas: Option, +) -> Result<()> { let main_registers_key = crate::keys::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; let client = crate::actions::connect_to_network(peers).await?; @@ -86,7 +91,13 @@ pub async fn create(name: &str, value: &str, peers: NetworkPeers, max_fee_per_ga Ok(()) } -pub async fn edit(address: String, name: bool, value: &str, peers: NetworkPeers, max_fee_per_gas: Option,) -> Result<()> { +pub async fn edit( + address: String, + name: bool, + value: &str, + peers: NetworkPeers, + max_fee_per_gas: Option, +) -> Result<()> { let main_registers_key = crate::keys::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; let client = crate::actions::connect_to_network(peers).await?; From 1a4b84cb19428ef90c989b501f843083b4a97252 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Thu, 27 Feb 2025 16:57:37 +0000 Subject: [PATCH 69/69] chore(release): stable release 2025.1.2.6 ================== Crate Versions ================== ant-bootstrap: 0.1.6 ant-build-info: 0.1.25 ant-cli: 0.3.8 ant-evm: 0.1.10 ant-logging: 0.2.47 ant-metrics: 0.1.26 ant-networking: 0.3.6 ant-node: 0.3.7 ant-node-manager: 0.12.0 ant-node-rpc-client: 0.6.43 ant-protocol: 1.0.1 ant-service-management: 0.4.9 ant-token-supplies: 0.1.64 autonomi: 0.4.0 evmlib: 0.2.0 evm-testnet: 0.1.10 nat-detection: 0.2.17 node-launchpad: 0.5.5 test-utils: 0.4.17 =================== Binary Versions =================== 
ant: 0.3.8 antctl: 0.12.0 antctld: 0.12.0 antnode: 0.3.7 antnode_rpc_client: 0.6.43 nat-detection: 0.2.17 node-launchpad: 0.5.5 --- CHANGELOG.md | 110 ++++++++++++++++++++++++++++++ Cargo.lock | 38 +++++------ ant-bootstrap/Cargo.toml | 6 +- ant-build-info/Cargo.toml | 2 +- ant-cli/Cargo.toml | 14 ++-- ant-evm/Cargo.toml | 4 +- ant-logging/Cargo.toml | 2 +- ant-metrics/Cargo.toml | 2 +- ant-networking/Cargo.toml | 10 +-- ant-node-manager/Cargo.toml | 14 ++-- ant-node-rpc-client/Cargo.toml | 12 ++-- ant-node/Cargo.toml | 22 +++--- ant-protocol/Cargo.toml | 6 +- ant-service-management/Cargo.toml | 10 +-- ant-token-supplies/Cargo.toml | 2 +- autonomi/Cargo.toml | 12 ++-- evm-testnet/Cargo.toml | 6 +- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +-- node-launchpad/Cargo.toml | 14 ++-- test-utils/Cargo.toml | 4 +- 21 files changed, 205 insertions(+), 95 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 081aed11e6..5f25634293 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,116 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* +## 2025-02-28 + +### Network + +#### Added + +- The node outputs critical startup and runtime failures to a `critical_failure.log` file. This is + to help `antctl` feed back failure information to the user, but it should hopefully be generally + useful to more advanced users. +- New metrics: + + `connected_relay_clients` + + `relay_peers_in_routing_table` + + `peers_in_non_full_buckets` + + `relay_peers_in_non_full_buckets` + + `percentage_of_relay_peers` +- We also add a `node_versions` metric. This will be used to help us gauge what versions of nodes + are present in the network and how many nodes have upgraded to the latest releases. It will also + assist us in ensuring backward compatibility. + +#### Changed + +- The network bootstrapping process is changed to dial three of the initial peer addresses rather + than all of them concurrently. When the routing table reaches five peers, network discovery takes + over the rest of the bootstrapping process, and no more peers are dialled. This mechanism is much + more efficient and avoids overloading the peers in the bootstrap cache. +- Network discovery rate has been increased during the startup phase, but it should slow down + exponentially as more peers are added to the routing table. +- Several items aim to address uploading issues: + + Avoid deadlocks on record store cache access + + Do not fetch from the network when a replication fetch failed + + Lower the number of parallel replication fetches + + Issues that come in too quickly will not trigger an extra action + + Disable the current blacklist (possibly to be re-enabled when we have more data) + They may also help reduce open connections and `libp2p` identify attempts +- Remove relay clients from the swarm driver tracker if the reservation has been closed. +- The `peers_in_rt` metric is improved by calculating it directly from kbuckets rather than using + `libp2p` events.
+ +### Autonomi API + +#### Added + +- Support uploading files with empty metadata + +#### Changed + +- Several file-related functions were renamed [BREAKING]: + + `dir_upload` to `dir_content_upload` + + `dir_and_archive_upload` to `dir_upload` + + `file_upload` to `file_content_upload` + + `dir_upload_public` to `dir_content_upload_public` + + `dir_and_archive_upload_public` to `dir_upload_public` + + `file_upload_public` to `file_content_upload_public` +- Improved address management to make it easier to use [BREAKING]: + + All address types have the same methods: `to_hex` and `from_hex`. + + All public-key addressed data types have the public key in their address. + + High level `DataAddress` shares the values above instead of the low-level `XorName` that can't + be constructed from hex. + + Python now uses accurate addresses instead of clunky hex strings, and addresses for other + types. + + Fix inaccurate/missing python bindings for addresses: now all have `to_hex` and `from_hex`. + +### Client + +#### Added + +- Support merging one archive into another. +- Introduce a maximum limit of 0.2 Gwei on the gas price when uploading files or creating/editing + registers. If the gas price exceeds this value, operations will be aborted. The commands provide a + `--max-fee-per-gas` argument to override the value. This measure has been taken to avoid + involuntarily paying excessive fees when the gas price fluctuates. + +#### Changed + +- The `ant file download` command can download directly from a `XorName`. +- The `ant file download` command can download data directly from a `DataMapChunk` to a file. + +### Antctl + +#### Added + +- A `--no-upnp` flag to disable launching nodes with UPnP. +- A failure column is added to the `status` command. + +#### Changed + +- The `add` command will create services that will launch the node with `--upnp` by default. For + home networking we want to try to encourage people to use UPnP rather than relaying. +- The `add` command does not apply the 'on failure' restart policy to services. This is to prevent + the node from continually restarting if UPnP is not working. +- The `--home-network` argument has been renamed `--relay` [BREAKING]. + +#### Fixed + +- A debug logging statement used during the upgrade process caused an error if there were no nodes + in the node registry. + +### Launchpad + +#### Added + +- New column in the nodes panel for node failure reason. +- New column in the nodes panel to indicate UPnP support. +- New column in the nodes panel to show the connection mode chosen by `Automatic`. + +#### Changed + +- Remove `Home Network` from the connection modes. Relay can only be selected by using `Automatic` + in the case where UPnP fails. We are trying to avoid the use of relays when UPnP is available.
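For context, here is how the `--max-fee-per-gas` override described in the Client section above maps onto the `TransactionConfig` API introduced in these patches. This is a minimal editorial sketch, not part of the patch: the `Option<u128>` parameter type and the standalone helper function are assumptions for illustration, while `set_transaction_config` and `TransactionConfig::new` are taken directly from the diffs above.

```rust
// Sketch only: mirrors the pattern used in ant-cli/src/commands/file.rs above.
use autonomi::{TransactionConfig, Wallet};

// Applies an optional user-supplied gas cap to the wallet before paying for operations.
// The Option<u128> type and this helper function are assumptions for illustration.
fn apply_max_fee_override(wallet: &mut Wallet, max_fee_per_gas: Option<u128>) {
    if let Some(max_fee_per_gas) = max_fee_per_gas {
        // Replaces the default cap (0.2 Gwei after PATCH 67/69) with the user's value.
        wallet.set_transaction_config(TransactionConfig::new(max_fee_per_gas));
    }
}
```

On the command line this corresponds to passing, for example, `--max-fee-per-gas 100000000` (an illustrative value) to `ant file upload`, `ant register create`, `ant register edit`, or `ant vault create`.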
+ ## 2025-02-11 ### Network diff --git a/Cargo.lock b/Cargo.lock index 3fcf70678c..ef49e0c348 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -772,7 +772,7 @@ dependencies = [ [[package]] name = "ant-bootstrap" -version = "0.1.6-rc.3" +version = "0.1.6" dependencies = [ "ant-logging", "ant-protocol", @@ -796,7 +796,7 @@ dependencies = [ [[package]] name = "ant-build-info" -version = "0.1.25-rc.3" +version = "0.1.25" dependencies = [ "chrono", "tracing", @@ -805,7 +805,7 @@ dependencies = [ [[package]] name = "ant-cli" -version = "0.3.8-rc.3" +version = "0.3.8" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -836,7 +836,7 @@ dependencies = [ [[package]] name = "ant-evm" -version = "0.1.10-rc.3" +version = "0.1.10" dependencies = [ "custom_debug", "evmlib", @@ -858,7 +858,7 @@ dependencies = [ [[package]] name = "ant-logging" -version = "0.2.47-rc.3" +version = "0.2.47" dependencies = [ "chrono", "color-eyre", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "ant-metrics" -version = "0.1.26-rc.3" +version = "0.1.26" dependencies = [ "clap", "color-eyre", @@ -897,7 +897,7 @@ dependencies = [ [[package]] name = "ant-networking" -version = "0.3.6-rc.3" +version = "0.3.6" dependencies = [ "aes-gcm-siv", "ant-bootstrap", @@ -938,7 +938,7 @@ dependencies = [ [[package]] name = "ant-node" -version = "0.3.7-rc.3" +version = "0.3.7" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -994,7 +994,7 @@ dependencies = [ [[package]] name = "ant-node-manager" -version = "0.12.0-rc.3" +version = "0.12.0" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -1037,7 +1037,7 @@ dependencies = [ [[package]] name = "ant-node-rpc-client" -version = "0.6.43-rc.3" +version = "0.6.43" dependencies = [ "ant-build-info", "ant-logging", @@ -1061,7 +1061,7 @@ dependencies = [ [[package]] name = "ant-protocol" -version = "1.0.1-rc.3" +version = "1.0.1" dependencies = [ "ant-build-info", "ant-evm", @@ -1112,7 +1112,7 @@ dependencies = [ [[package]] name = "ant-service-management" -version = "0.4.9-rc.3" +version = "0.4.9" dependencies = [ "ant-bootstrap", "ant-evm", @@ -1140,7 +1140,7 @@ dependencies = [ [[package]] name = "ant-token-supplies" -version = "0.1.64-rc.3" +version = "0.1.64" dependencies = [ "dirs-next", "reqwest 0.11.27", @@ -1578,7 +1578,7 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "autonomi" -version = "0.4.0-rc.3" +version = "0.4.0" dependencies = [ "alloy", "ant-bootstrap", @@ -3187,7 +3187,7 @@ dependencies = [ [[package]] name = "evm-testnet" -version = "0.1.10-rc.3" +version = "0.1.10" dependencies = [ "ant-evm", "clap", @@ -3198,7 +3198,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.10-rc.3" +version = "0.2.0" dependencies = [ "alloy", "dirs-next", @@ -6047,7 +6047,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.17-rc.3" +version = "0.2.17" dependencies = [ "ant-build-info", "ant-networking", @@ -6188,7 +6188,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.5.5-rc.3" +version = "0.5.5" dependencies = [ "ant-bootstrap", "ant-build-info", @@ -8802,7 +8802,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "test-utils" -version = "0.4.17-rc.3" +version = "0.4.17" dependencies = [ "bytes", "color-eyre", diff --git a/ant-bootstrap/Cargo.toml b/ant-bootstrap/Cargo.toml index 652956c320..3460ea30ae 100644 --- a/ant-bootstrap/Cargo.toml +++ b/ant-bootstrap/Cargo.toml @@ -7,14 +7,14 @@ license = "GPL-3.0" name = 
"ant-bootstrap" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.6-rc.3" +version = "0.1.6" [features] local = [] [dependencies] -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +ant-logging = { path = "../ant-logging", version = "0.2.47" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } atomic-write-file = "0.2.2" chrono = { version = "0.4", features = ["serde"] } clap = { version = "4.2.1", features = ["derive", "env"] } diff --git a/ant-build-info/Cargo.toml b/ant-build-info/Cargo.toml index aca918b4bd..6e22aa3a66 100644 --- a/ant-build-info/Cargo.toml +++ b/ant-build-info/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-build-info" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.25-rc.3" +version = "0.1.25" build = "build.rs" include = ["Cargo.toml", "src/**/*", "build.rs"] diff --git a/ant-cli/Cargo.toml b/ant-cli/Cargo.toml index d60d6b5c9a..314722d630 100644 --- a/ant-cli/Cargo.toml +++ b/ant-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] name = "ant-cli" description = "CLI client for the Autonomi network" license = "GPL-3.0" -version = "0.3.8-rc.3" +version = "0.3.8" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -23,11 +23,11 @@ name = "files" harness = false [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } -autonomi = { path = "../autonomi", version = "0.4.0-rc.3", features = [ "loud" ] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } +ant-logging = { path = "../ant-logging", version = "0.2.47" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } +autonomi = { path = "../autonomi", version = "0.4.0", features = [ "loud" ] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "0.6.3" const-hex = "1.13.1" @@ -54,7 +54,7 @@ tracing = { version = "~0.1.26" } walkdir = "2.5.0" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.4.0-rc.3" } +autonomi = { path = "../autonomi", version = "0.4.0" } criterion = "0.5.1" eyre = "0.6.8" rand = { version = "~0.8.5", features = ["small_rng"] } diff --git a/ant-evm/Cargo.toml b/ant-evm/Cargo.toml index fa650d52d8..c3310b7625 100644 --- a/ant-evm/Cargo.toml +++ b/ant-evm/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-evm" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.3" +version = "0.1.10" [features] external-signer = ["evmlib/external-signer"] @@ -15,7 +15,7 @@ test-utils = [] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.10-rc.3" } +evmlib = { path = "../evmlib", version = "0.2.0" } hex = "~0.4.3" lazy_static = "1.4.0" libp2p = { version = "0.55.0", features = ["identify", "kad"] } diff --git a/ant-logging/Cargo.toml b/ant-logging/Cargo.toml index fa019ffbba..de51b24f1c 100644 --- a/ant-logging/Cargo.toml +++ b/ant-logging/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-logging" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.2.47-rc.3" +version = "0.2.47" [dependencies] chrono = 
"~0.4.19" diff --git a/ant-metrics/Cargo.toml b/ant-metrics/Cargo.toml index 8e7a511427..cb405b1f09 100644 --- a/ant-metrics/Cargo.toml +++ b/ant-metrics/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-metrics" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.26-rc.3" +version = "0.1.26" [[bin]] path = "src/main.rs" diff --git a/ant-networking/Cargo.toml b/ant-networking/Cargo.toml index db292ad6be..6ee454fe93 100644 --- a/ant-networking/Cargo.toml +++ b/ant-networking/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-networking" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.3.6-rc.3" +version = "0.3.6" [features] default = [] @@ -16,10 +16,10 @@ open-metrics = ["libp2p/metrics", "prometheus-client", "hyper", "sysinfo"] [dependencies] aes-gcm-siv = "0.11.1" -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } +ant-evm = { path = "../ant-evm", version = "0.1.10" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.2" } bytes = { version = "1.0.1", features = ["serde"] } diff --git a/ant-node-manager/Cargo.toml b/ant-node-manager/Cargo.toml index d34deb753c..75de468b88 100644 --- a/ant-node-manager/Cargo.toml +++ b/ant-node-manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.12.0-rc.3" +version = "0.12.0" [[bin]] name = "antctl" @@ -29,13 +29,13 @@ tcp = [] websockets = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } +ant-evm = { path = "../ant-evm", version = "0.1.10" } +ant-logging = { path = "../ant-logging", version = "0.2.47" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } ant-releases = { version = "0.4.0" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.3" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9" } chrono = "~0.4.19" clap = { version = "4.4.6", features = ["derive", "env"] } colored = "2.0.4" diff --git a/ant-node-rpc-client/Cargo.toml b/ant-node-rpc-client/Cargo.toml index 3146b4e318..dc1a2952e8 100644 --- a/ant-node-rpc-client/Cargo.toml +++ b/ant-node-rpc-client/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-node-rpc-client" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.6.43-rc.3" +version = "0.6.43" [[bin]] name = "antnode_rpc_client" @@ -17,11 +17,11 @@ path = "src/main.rs" nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } -ant-protocol = 
{ path = "../ant-protocol", version = "1.0.1-rc.3", features=["rpc"] } -ant-node = { path = "../ant-node", version = "0.3.7-rc.3" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } +ant-logging = { path = "../ant-logging", version = "0.2.47" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1", features=["rpc"] } +ant-node = { path = "../ant-node", version = "0.3.7" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } clap = { version = "4.2.1", features = ["derive"] } diff --git a/ant-node/Cargo.toml b/ant-node/Cargo.toml index 5c77f41a18..52fd401a94 100644 --- a/ant-node/Cargo.toml +++ b/ant-node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "The Autonomi node binary" name = "ant-node" -version = "0.3.7-rc.3" +version = "0.3.7" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -22,13 +22,13 @@ open-metrics = ["ant-networking/open-metrics", "prometheus-client"] otlp = ["ant-logging/otlp"] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3", features = ["process-metrics"] } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } -ant-service-management = { path = "../ant-service-management", version = "0.4.9-rc.3" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } +ant-evm = { path = "../ant-evm", version = "0.1.10" } +ant-logging = { path = "../ant-logging", version = "0.2.47", features = ["process-metrics"] } +ant-networking = { path = "../ant-networking", version = "0.3.6" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } +ant-service-management = { path = "../ant-service-management", version = "0.4.9" } async-trait = "0.1" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } @@ -77,10 +77,10 @@ walkdir = "~2.5.0" xor_name = "5.0.0" [dev-dependencies] -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3", features = ["rpc"] } +ant-protocol = { path = "../ant-protocol", version = "1.0.1", features = ["rpc"] } assert_fs = "1.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.3" } -autonomi = { path = "../autonomi", version = "0.4.0-rc.3" } +evmlib = { path = "../evmlib", version = "0.2.0" } +autonomi = { path = "../autonomi", version = "0.4.0" } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } diff --git a/ant-protocol/Cargo.toml b/ant-protocol/Cargo.toml index e53386a3e1..412388d932 100644 --- a/ant-protocol/Cargo.toml +++ b/ant-protocol/Cargo.toml @@ -7,15 +7,15 @@ license = "GPL-3.0" name = "ant-protocol" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "1.0.1-rc.3" +version = "1.0.1" [features] default = [] rpc = ["tonic", "prost"] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } 
+ant-evm = { path = "../ant-evm", version = "0.1.10" } bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" diff --git a/ant-service-management/Cargo.toml b/ant-service-management/Cargo.toml index 03c9114cac..88054f6f86 100644 --- a/ant-service-management/Cargo.toml +++ b/ant-service-management/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "ant-service-management" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.9-rc.3" +version = "0.4.9" [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3", features = ["rpc"] } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6" } +ant-evm = { path = "../ant-evm", version = "0.1.10" } +ant-logging = { path = "../ant-logging", version = "0.2.47" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1", features = ["rpc"] } async-trait = "0.1" dirs-next = "2.0.0" libp2p = { version = "0.55.0", features = ["kad"] } diff --git a/ant-token-supplies/Cargo.toml b/ant-token-supplies/Cargo.toml index b2551701aa..29ea007dc0 100644 --- a/ant-token-supplies/Cargo.toml +++ b/ant-token-supplies/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "ant-token-supplies" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.64-rc.3" +version = "0.1.64" [dependencies] diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 29e78a50c4..2eb5ad3547 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.4.0-rc.3" +version = "0.4.0" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -26,10 +26,10 @@ extension-module = ["pyo3/extension-module", "pyo3-async-runtimes"] loud = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6" } +ant-evm = { path = "../ant-evm", version = "0.1.10" } +ant-networking = { path = "../ant-networking", version = "0.3.6" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } bip39 = "2.0.0" blst = "0.3.13" blstrs = "0.7.1" @@ -56,7 +56,7 @@ xor_name = "5.0.0" [dev-dependencies] alloy = { version = "0.7.3", default-features = false, features = ["contract", "json-rpc", "network", "node-bindings", "provider-http", "reqwest-rustls-tls", "rpc-client", "rpc-types", "signer-local", "std"] } -ant-logging = { path = "../ant-logging", version = "0.2.47-rc.3" } +ant-logging = { path = "../ant-logging", version = "0.2.47" } eyre = "0.6.5" serial_test = "3.2.0" sha2 = "0.10.6" diff --git a/evm-testnet/Cargo.toml b/evm-testnet/Cargo.toml index 5859f35d78..cb32a71fef 100644 --- a/evm-testnet/Cargo.toml +++ b/evm-testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm-testnet" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.3" +version = "0.1.10" [dependencies] -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } +ant-evm 
= { path = "../ant-evm", version = "0.1.10" } clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.10-rc.3" } +evmlib = { path = "../evmlib", version = "0.2.0" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 4701fbefda..79a5e367da 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/autonomi" -version = "0.1.10-rc.3" +version = "0.2.0" [features] external-signer = [] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 304c62b38f..b8459f9e49 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.2.17-rc.3" +version = "0.2.17" [[bin]] name = "nat-detection" @@ -17,9 +17,9 @@ path = "src/main.rs" nightly = [] [dependencies] -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-networking = { path = "../ant-networking", version = "0.3.6-rc.3" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } +ant-networking = { path = "../ant-networking", version = "0.3.6" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } clap = { version = "4.5.4", features = ["derive"] } clap-verbosity-flag = "2.2.0" color-eyre = { version = "0.6", default-features = false } diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index e609d4f9aa..ba737f1b04 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "TUI for running nodes on the Autonomi network" name = "node-launchpad" -version = "0.5.5-rc.3" +version = "0.5.5" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -18,13 +18,13 @@ path = "src/bin/tui/main.rs" nightly = [] [dependencies] -ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6-rc.3" } -ant-build-info = { path = "../ant-build-info", version = "0.1.25-rc.3" } -ant-evm = { path = "../ant-evm", version = "0.1.10-rc.3" } -ant-node-manager = { version = "0.12.0-rc.3", path = "../ant-node-manager" } -ant-protocol = { path = "../ant-protocol", version = "1.0.1-rc.3" } +ant-bootstrap = { path = "../ant-bootstrap", version = "0.1.6" } +ant-build-info = { path = "../ant-build-info", version = "0.1.25" } +ant-evm = { path = "../ant-evm", version = "0.1.10" } +ant-node-manager = { version = "0.12.0", path = "../ant-node-manager" } +ant-protocol = { path = "../ant-protocol", version = "1.0.1" } ant-releases = { version = "0.4.0" } -ant-service-management = { version = "0.4.9-rc.3", path = "../ant-service-management" } +ant-service-management = { version = "0.4.9", path = "../ant-service-management" } arboard = "3.4.1" atty = "0.2.14" better-panic = "0.3.0" diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index ae5b854b4f..ef5ae9b1f0 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0" name = "test-utils" readme = "README.md" repository = "https://github.com/maidsafe/autonomi" -version = "0.4.17-rc.3" +version = "0.4.17" [dependencies] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "0.6.3" dirs-next = "~2.0.0" -evmlib = { path = 
"../evmlib", version = "0.1.10-rc.3" } +evmlib = { path = "../evmlib", version = "0.2.0" } libp2p = { version = "0.55.0", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] }