Merge pull request maidsafe#2759 from grumbach/archive_and_files_improvements

feat: archive and files improvements
grumbach authored Feb 20, 2025
2 parents c35ee9a + 9487ecb commit ff5d40e
Showing 4 changed files with 190 additions and 18 deletions.
100 changes: 82 additions & 18 deletions ant-cli/src/actions/download.rs
@@ -8,10 +8,13 @@

use super::get_progress_bar;
use autonomi::{
chunk::DataMapChunk,
client::{
address::str_to_addr, files::archive_private::PrivateArchiveAccess,
files::archive_public::ArchiveAddr,
address::str_to_addr,
files::{archive_private::PrivateArchiveAccess, archive_public::ArchiveAddr},
GetError,
},
data::DataAddr,
Client,
};
use color_eyre::{
@@ -21,19 +24,26 @@ use color_eyre::{
use std::path::PathBuf;

pub async fn download(addr: &str, dest_path: &str, client: &Client) -> Result<()> {
let public_address = str_to_addr(addr).ok();
let private_address = crate::user_data::get_local_private_archive_access(addr)
.inspect_err(|e| error!("Failed to get private archive access: {e}"))
.ok();

match (public_address, private_address) {
(Some(public_address), _) => download_public(addr, public_address, dest_path, client).await,
(_, Some(private_address)) => download_private(addr, private_address, dest_path, client).await,
_ => Err(eyre!("Failed to parse data address {addr}"))
let try_public_address = str_to_addr(addr).ok();
if let Some(public_address) = try_public_address {
return download_public(addr, public_address, dest_path, client).await;
}

let try_private_address = crate::user_data::get_local_private_archive_access(addr).ok();
if let Some(private_address) = try_private_address {
return download_private(addr, private_address, dest_path, client).await;
}

let try_datamap = DataMapChunk::from_hex(addr).ok();
if let Some(datamap) = try_datamap {
return download_from_datamap(addr, datamap, dest_path, client).await;
}

Err(eyre!("Failed to parse data address {addr}"))
.with_suggestion(|| "Public addresses look like this: 0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7")
.with_suggestion(|| "Private addresses look like this: 1358645341480028172")
.with_suggestion(|| "Try the `file list` command to get addresses you have access to"),
}
.with_suggestion(|| "You can also use a hex encoded DataMap directly here")
.with_suggestion(|| "Try the `file list` command to get addresses you have access to")
}

async fn download_private(
@@ -45,7 +55,7 @@ async fn download_private(
let archive = client
.archive_get(&private_address)
.await
.wrap_err("Failed to fetch data from address")?;
.wrap_err("Failed to fetch Private Archive from address")?;

let progress_bar = get_progress_bar(archive.iter().count() as u64)?;
let mut all_errs = vec![];
@@ -88,10 +98,16 @@ async fn download_public(
dest_path: &str,
client: &Client,
) -> Result<()> {
let archive = client
.archive_get_public(&address)
.await
.wrap_err("Failed to fetch data from address")?;
let archive = match client.archive_get_public(&address).await {
Ok(archive) => archive,
Err(GetError::Deserialization(_)) => {
info!("Failed to deserialize Public Archive from address {addr}, trying to fetch data assuming it is a single file instead");
return download_public_single_file(addr, address, dest_path, client)
.await
.wrap_err("Failed to fetch public file from address");
}
Err(e) => return Err(e).wrap_err("Failed to fetch Public Archive from address")?,
};

let progress_bar = get_progress_bar(archive.iter().count() as u64)?;
let mut all_errs = vec![];
@@ -127,3 +143,51 @@
Err(eyre!("Errors while downloading data"))
}
}

async fn download_public_single_file(
addr: &str,
address: DataAddr,
dest_path: &str,
client: &Client,
) -> Result<()> {
let bytes = match client.data_get_public(&address).await {
Ok(bytes) => bytes,
Err(e) => {
let err = format!("Failed to fetch file at {addr:?}: {e}");
return Err(eyre!(err)).wrap_err("Failed to fetch file content from address");
}
};

let path = PathBuf::from(dest_path);
let here = PathBuf::from(".");
let parent = path.parent().unwrap_or_else(|| &here);
std::fs::create_dir_all(parent)?;
std::fs::write(path, bytes)?;
info!("Successfully downloaded file at: {addr}");
println!("Successfully downloaded file at: {addr}");
Ok(())
}

async fn download_from_datamap(
addr: &str,
datamap: DataMapChunk,
dest_path: &str,
client: &Client,
) -> Result<()> {
let bytes = match client.data_get(&datamap).await {
Ok(bytes) => bytes,
Err(e) => {
let err = format!("Failed to fetch file {addr:?}: {e}");
return Err(eyre!(err)).wrap_err("Failed to fetch file content from address");
}
};

let path = PathBuf::from(dest_path);
let here = PathBuf::from(".");
let parent = path.parent().unwrap_or_else(|| &here);
std::fs::create_dir_all(parent)?;
std::fs::write(path, bytes)?;
info!("Successfully downloaded file from datamap at: {addr}");
println!("Successfully downloaded file from datamap at: {addr}");
Ok(())
}
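
Usage sketch (not part of this commit): calling the reworked download entry point with each of the three address forms it now resolves, in order of precedence. The snippet assumes it sits in the same module, so download, Client and Result are already in scope; the destination directories and the <datamap-hex> placeholder are illustrative, and the example addresses come from the suggestion strings above.

// Minimal sketch, assuming a connected `Client` is available.
async fn demo(client: &Client) -> Result<()> {
    // 1. Public address: a Public Archive, or (new in this commit) a single public file.
    download(
        "0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7",
        "downloads/public",
        client,
    )
    .await?;

    // 2. Private archive reference stored in local user data (see `file list`).
    download("1358645341480028172", "downloads/private", client).await?;

    // 3. Hex-encoded DataMap pasted directly (placeholder value).
    download("<datamap-hex>", "downloads/from-datamap", client).await?;

    Ok(())
}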
46 changes: 46 additions & 0 deletions autonomi/src/client/high_level/files/archive_private.rs
@@ -129,6 +129,13 @@ impl PrivateArchive {

Ok(root_serialized)
}

/// Merge with another archive
///
/// Note that if there are duplicate entries for the same filename, the files from the other archive will be the ones that are kept.
pub fn merge(&mut self, other: &PrivateArchive) {
self.map.extend(other.map.clone());
}
}

impl Client {
@@ -162,3 +169,42 @@ impl Client {
result
}
}

#[cfg(test)]
mod tests {
use super::*;
use std::str::FromStr;

#[test]
fn test_private_archive_merge() {
let mut arch = PrivateArchive::new();
let file1 = PathBuf::from_str("file1").unwrap();
let file2 = PathBuf::from_str("file2").unwrap();
arch.add_file(
file1.clone(),
DataMapChunk::from_hex("1111").unwrap(),
Metadata::new_with_size(1),
);
let mut other_arch = PrivateArchive::new();
other_arch.add_file(
file2.clone(),
DataMapChunk::from_hex("AAAA").unwrap(),
Metadata::new_with_size(2),
);
arch.merge(&other_arch);
assert_eq!(arch.map().len(), 2);
assert_eq!(arch.map().get(&file1).unwrap().1.size, 1);
assert_eq!(arch.map().get(&file2).unwrap().1.size, 2);

let mut arch_with_duplicate = PrivateArchive::new();
arch_with_duplicate.add_file(
file1.clone(),
DataMapChunk::from_hex("BBBB").unwrap(),
Metadata::new_with_size(5),
);
arch.merge(&arch_with_duplicate);
assert_eq!(arch.map().len(), 2);
assert_eq!(arch.map().get(&file1).unwrap().1.size, 5);
assert_eq!(arch.map().get(&file2).unwrap().1.size, 2);
}
}
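
A possible use of the new merge on the private side, mirroring the client calls in download.rs above. This is only a sketch: the combine name is made up, and the error type of archive_get is assumed to convert into color_eyre's Report, as the CLI's usage suggests.

use autonomi::{
    client::files::archive_private::{PrivateArchive, PrivateArchiveAccess},
    Client,
};
use color_eyre::Result;

/// Fetch two Private Archives and combine them into one. Entries from `b`
/// overwrite entries from `a` when filenames collide, per the merge doc comment.
async fn combine(
    client: &Client,
    a: PrivateArchiveAccess,
    b: PrivateArchiveAccess,
) -> Result<PrivateArchive> {
    let mut merged = client.archive_get(&a).await?;
    let other = client.archive_get(&b).await?;
    merged.merge(&other);
    Ok(merged)
}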
40 changes: 40 additions & 0 deletions autonomi/src/client/high_level/files/archive_public.rs
@@ -128,6 +128,13 @@ impl PublicArchive {

Ok(root_serialized)
}

/// Merge with another archive
///
/// Note that if there are duplicate entries for the same filename, the files from the other archive will be the ones that are kept.
pub fn merge(&mut self, other: &PublicArchive) {
self.map.extend(other.map.clone());
}
}

impl Client {
@@ -289,4 +296,37 @@ mod test {
// Our old data structure should be forward compatible with the new one.
assert!(PublicArchive::from_bytes(Bytes::from(arch_p1_ser)).is_ok());
}

#[test]
fn test_archive_merge() {
let mut arch = PublicArchive::new();
let file1 = PathBuf::from_str("file1").unwrap();
let file2 = PathBuf::from_str("file2").unwrap();
arch.add_file(
file1.clone(),
DataAddr::random(&mut rand::thread_rng()),
Metadata::new_with_size(1),
);
let mut other_arch = PublicArchive::new();
other_arch.add_file(
file2.clone(),
DataAddr::random(&mut rand::thread_rng()),
Metadata::new_with_size(2),
);
arch.merge(&other_arch);
assert_eq!(arch.map().len(), 2);
assert_eq!(arch.map().get(&file1).unwrap().1.size, 1);
assert_eq!(arch.map().get(&file2).unwrap().1.size, 2);

let mut arch_with_duplicate = PublicArchive::new();
arch_with_duplicate.add_file(
file1.clone(),
DataAddr::random(&mut rand::thread_rng()),
Metadata::new_with_size(5),
);
arch.merge(&arch_with_duplicate);
assert_eq!(arch.map().len(), 2);
assert_eq!(arch.map().get(&file1).unwrap().1.size, 5);
assert_eq!(arch.map().get(&file2).unwrap().1.size, 2);
}
}
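
Sketch of how the public-side merge could pair with the client API used in download.rs above. The extend_archive name and the re-export paths in the use statement are assumptions; re-uploading the merged archive is left out.

use autonomi::{
    client::{
        files::archive_public::{ArchiveAddr, PublicArchive},
        GetError,
    },
    Client,
};

/// Fetch an existing Public Archive, fold locally-built entries into it, and
/// hand it back for re-upload. On filename clashes the entries from
/// `new_files` win, per the merge doc comment above.
async fn extend_archive(
    client: &Client,
    addr: ArchiveAddr,
    new_files: &PublicArchive,
) -> Result<PublicArchive, GetError> {
    let mut archive = client.archive_get_public(&addr).await?;
    archive.merge(new_files);
    Ok(archive)
}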
22 changes: 22 additions & 0 deletions autonomi/src/client/high_level/files/mod.rs
@@ -35,6 +35,12 @@ pub static FILE_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
});

/// Metadata for a file in an archive. Time values are UNIX timestamps.
///
/// The recommended way to create a new [`Metadata`] is to use [`Metadata::new_with_size`].
///
/// The [`Metadata::default`] method creates a new [`Metadata`] with 0 as size and the current time for created and modified.
///
/// The [`Metadata::empty`] method creates a new [`Metadata`] filled with 0s. Use this if you don't want to reveal any metadata.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Metadata {
/// File creation time on local file system. See [`std::fs::Metadata::created`] for details per OS.
@@ -48,6 +54,12 @@ pub struct Metadata {
pub extra: Option<String>,
}

impl Default for Metadata {
fn default() -> Self {
Self::new_with_size(0)
}
}

impl Metadata {
/// Create a new metadata struct with the current time as uploaded, created and modified.
pub fn new_with_size(size: u64) -> Self {
@@ -63,6 +75,16 @@ impl Metadata {
extra: None,
}
}

/// Create a new empty metadata struct
pub fn empty() -> Self {
Self {
created: 0,
modified: 0,
size: 0,
extra: None,
}
}
}

#[derive(Error, Debug, PartialEq, Eq)]

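A short sketch of the three construction paths described by the new doc comment on Metadata. The use path is an assumption based on the re-exports seen in download.rs; adjust it to the crate's actual module layout.

use autonomi::client::files::Metadata;

fn main() {
    // Size known up front; created and modified are stamped with the current time.
    let sized = Metadata::new_with_size(1024);
    // Default: same as new_with_size(0).
    let defaulted = Metadata::default();
    // Everything zeroed: nothing about the file is revealed.
    let blank = Metadata::empty();

    // Metadata derives Debug, so the three variants can be inspected directly.
    println!("{sized:?}\n{defaulted:?}\n{blank:?}");
}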