diff --git a/src/error.rs b/src/error.rs
index cf709702..afabbab6 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -16,13 +16,15 @@ pub enum Error {
     #[error("Could not determine content length for asset")]
     AssetContentLengthUndetermined,
     #[error(transparent)]
-    AwsS3Error(#[from] aws_sdk_s3::Error),
+    AwsS3Error(#[from] Box<aws_sdk_s3::Error>),
     #[error("The {0} environment variable must be set to use your cloud provider")]
     CloudProviderCredentialsNotSupplied(String),
     #[error("The {0} cloud provider is not supported yet")]
     CloudProviderNotSupported(String),
     #[error("Both the repository owner and branch name must be supplied if either are used")]
     CustomBinConfigError,
+    #[error("Failed to delete '{0}' from '{1}'")]
+    DeleteS3ObjectError(String, String),
     #[error("The '{0}' environment does not exist")]
     EnvironmentDoesNotExist(String),
     #[error("Command executed with {0} failed. See output for details.")]
@@ -39,6 +41,8 @@ pub enum Error {
     Io(#[from] std::io::Error),
     #[error("Failed to list objects in S3 bucket with '{0}' prefix")]
     ListS3ObjectsError(String),
+    #[error("Logs for a '{0}' testnet already exist")]
+    LogsForPreviousTestnetExist(String),
     #[error("Logs have not been retrieved for the '{0}' environment.")]
     LogsNotRetrievedError(String),
     #[error("Error in byte stream when attempting to retrieve S3 object")]
diff --git a/src/lib.rs b/src/lib.rs
index 87aec330..cabe51c4 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -220,6 +220,14 @@ impl TestnetDeploy {
     }
 
     pub async fn init(&self, name: &str) -> Result<()> {
+        if self
+            .s3_repository
+            .folder_exists(&format!("testnet-logs/{name}"))
+            .await?
+        {
+            return Err(Error::LogsForPreviousTestnetExist(name.to_string()));
+        }
+
         self.terraform_runner.init()?;
         let workspaces = self.terraform_runner.workspace_list()?;
         if !workspaces.contains(&name.to_string()) {
diff --git a/src/logs.rs b/src/logs.rs
index a6f4ddf6..b6b97a7a 100644
--- a/src/logs.rs
+++ b/src/logs.rs
@@ -1,3 +1,9 @@
+// Copyright (c) 2023, MaidSafe.
+// All rights reserved.
+//
+// This SAFE Network Software is licensed under the BSD-3-Clause license.
+// Please see the LICENSE file for more details.
+
 use crate::error::{Error, Result};
 use crate::s3::{S3Repository, S3RepositoryInterface};
 use crate::TestnetDeploy;
@@ -12,7 +18,7 @@ impl TestnetDeploy {
     /// It needs to be part of `TestnetDeploy` because the Ansible runner is already setup in that
     /// context.
     pub async fn copy_logs(&self, name: &str) -> Result<()> {
-        let dest = PathBuf::from(".").join("logs").join(format!("{name}"));
+        let dest = PathBuf::from(".").join("logs").join(name);
         if dest.exists() {
             println!("Removing existing {} directory", dest.to_string_lossy());
             remove(dest.clone())?;
@@ -62,6 +68,14 @@ pub async fn reassemble_logs(name: &str) -> Result<()> {
     Ok(())
 }
 
+pub async fn rm_logs(name: &str) -> Result<()> {
+    let s3_repository = S3Repository::new("sn-testnet");
+    s3_repository
+        .delete_folder(&format!("testnet-logs/{name}"))
+        .await?;
+    Ok(())
+}
+
 fn process_part_files(dir_path: &Path, source_root: &PathBuf, dest_root: &PathBuf) -> Result<()> {
     let reassembled_dir_path = if dir_path == dest_root {
         dest_root.clone()
diff --git a/src/main.rs b/src/main.rs
index a9dd88d5..31c69517 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -5,8 +5,9 @@
 // Please see the LICENSE file for more details.
 
 use clap::{Parser, Subcommand};
-use color_eyre::{eyre::eyre, Result};
+use color_eyre::{eyre::eyre, Help, Result};
 use dotenv::dotenv;
+use sn_testnet_deploy::error::Error;
 use sn_testnet_deploy::setup::setup_dotenv_file;
 use sn_testnet_deploy::CloudProvider;
 use sn_testnet_deploy::TestnetDeployBuilder;
@@ -117,10 +118,17 @@ enum LogCommands {
         #[arg(short = 'n', long)]
         name: String,
     },
+    /// Remove the logs for a given environment from the bucket on S3.
+    Rm {
+        /// The name of the environment for which logs should be removed
+        #[arg(short = 'n', long)]
+        name: String,
+    },
 }
 
 #[tokio::main]
 async fn main() -> Result<()> {
+    color_eyre::install()?;
     dotenv().ok();
     env_logger::init();
 
@@ -140,7 +148,29 @@ async fn main() -> Result<()> {
             vm_count,
         }) => {
             let testnet_deploy = TestnetDeployBuilder::default().provider(provider).build()?;
-            testnet_deploy.init(&name).await?;
+            let result = testnet_deploy.init(&name).await;
+            match result {
+                Ok(_) => {}
+                Err(e) => match e {
+                    Error::LogsForPreviousTestnetExist(_) => {
+                        return Err(eyre!(e)
+                            .wrap_err(format!(
+                                "Logs already exist for a previous testnet with the \
+                                name '{name}'"
+                            ))
+                            .suggestion(
+                                "If you wish to keep them, retrieve the logs with the 'logs get' \
+                                command, then remove them with 'logs rm'. If you don't need them, \
+                                simply run 'logs rm'. Then you can proceed with deploying your \
+                                new testnet.",
+                            ));
+                    }
+                    _ => {
+                        return Err(eyre!(e));
+                    }
+                },
+            }
+
             testnet_deploy
                 .deploy(&name, vm_count, node_count, repo_owner, branch)
                 .await?;
@@ -165,6 +195,10 @@ async fn main() -> Result<()> {
             sn_testnet_deploy::logs::reassemble_logs(&name).await?;
             Ok(())
         }
+        LogCommands::Rm { name } => {
+            sn_testnet_deploy::logs::rm_logs(&name).await?;
+            Ok(())
+        }
     },
     Some(Commands::Setup {}) => {
         setup_dotenv_file()?;
diff --git a/src/s3.rs b/src/s3.rs
index 725bbb61..f61071d9 100644
--- a/src/s3.rs
+++ b/src/s3.rs
@@ -22,7 +22,9 @@ use tokio_stream::StreamExt;
 #[async_trait]
 pub trait S3RepositoryInterface {
     async fn download_object(&self, object_key: &str, dest_path: &Path) -> Result<()>;
-    async fn download_folder(&self, folder_path: &str, dest_path: &PathBuf) -> Result<()>;
+    async fn download_folder(&self, folder_path: &str, dest_path: &Path) -> Result<()>;
+    async fn delete_folder(&self, folder_path: &str) -> Result<()>;
+    async fn folder_exists(&self, folder_path: &str) -> Result<bool>;
 }
 
 pub struct S3Repository {
@@ -34,46 +36,45 @@ impl S3RepositoryInterface for S3Repository {
     async fn download_object(&self, object_key: &str, dest_path: &Path) -> Result<()> {
         let conf = aws_config::from_env().region("eu-west-2").load().await;
         let client = Client::new(&conf);
-
-        println!("Retrieving {object_key} from S3...");
-        let mut resp = client
-            .get_object()
-            .bucket(self.bucket_name.clone())
-            .key(object_key)
-            .send()
-            .await
-            .map_err(|_| {
-                Error::GetS3ObjectError(object_key.to_string(), self.bucket_name.clone())
-            })?;
-
-        if let Some(parent) = dest_path.parent() {
-            if !parent.exists() {
-                tokio::fs::create_dir_all(parent).await?;
-            }
-        }
-
-        let mut file = tokio::fs::File::create(&dest_path).await?;
-        while let Some(bytes) = resp
-            .body
-            .try_next()
-            .await
-            .map_err(|_| Error::S3ByteStreamError)?
-        {
-            file.write_all(&bytes).await?;
-        }
-
-        println!("Saved at {}", dest_path.to_string_lossy());
+        self.retrieve_object(&client, object_key, &dest_path.to_path_buf())
+            .await?;
         Ok(())
     }
 
-    async fn download_folder(&self, folder_path: &str, dest_path: &PathBuf) -> Result<()> {
+    async fn download_folder(&self, folder_path: &str, dest_path: &Path) -> Result<()> {
         let conf = aws_config::from_env().region("eu-west-2").load().await;
         let client = Client::new(&conf);
         tokio::fs::create_dir_all(dest_path).await?;
-        self.list_and_retrieve(&client, folder_path, dest_path)
+        self.list_and_retrieve(&client, folder_path, &dest_path.to_path_buf())
             .await?;
         Ok(())
     }
+
+    async fn delete_folder(&self, folder_path: &str) -> Result<()> {
+        let conf = aws_config::from_env().region("eu-west-2").load().await;
+        let client = Client::new(&conf);
+        self.list_and_delete(&client, folder_path).await?;
+        Ok(())
+    }
+
+    async fn folder_exists(&self, folder_path: &str) -> Result<bool> {
+        let conf = aws_config::from_env().region("eu-west-2").load().await;
+        let client = Client::new(&conf);
+        let folder = if folder_path.ends_with('/') {
+            folder_path.to_string()
+        } else {
+            format!("{}/", folder_path)
+        };
+        let output = client
+            .list_objects_v2()
+            .bucket(self.bucket_name.clone())
+            .prefix(folder)
+            .delimiter("/".to_string())
+            .send()
+            .await
+            .map_err(|_| Error::ListS3ObjectsError(folder_path.to_string()))?;
+        Ok(!output.contents().unwrap_or_default().is_empty())
+    }
 }
 
 impl S3Repository {
@@ -103,7 +104,7 @@ impl S3Repository {
         if let Some(common_prefixes) = output.common_prefixes {
             for cp in common_prefixes {
                 let next_prefix = cp.prefix.unwrap();
-                self.list_and_retrieve(client, &next_prefix, &root_path)
+                self.list_and_retrieve(client, &next_prefix, root_path)
                     .await?;
             }
         }
@@ -118,38 +119,91 @@ impl S3Repository {
                     continue;
                 }
 
-                println!("Retrieving {object_key}...");
-                let mut resp = client
-                    .get_object()
-                    .bucket(self.bucket_name.clone())
-                    .key(&object_key)
-                    .send()
-                    .await
-                    .map_err(|_| {
-                        Error::GetS3ObjectError(
-                            root_path.to_string_lossy().to_string(),
-                            self.bucket_name.clone(),
-                        )
-                    })?;
-
-                if let Some(parent) = dest_file_path.parent() {
-                    if !parent.exists() {
-                        tokio::fs::create_dir_all(parent).await?;
-                    }
-                }
+                self.retrieve_object(client, &object_key, &dest_file_path)
+                    .await?;
+            }
+        }
-
-                let mut file = tokio::fs::File::create(&dest_file_path).await?;
-                while let Some(bytes) = resp
-                    .body
-                    .try_next()
-                    .await
-                    .map_err(|_| Error::S3ByteStreamError)?
-                {
-                    file.write_all(&bytes).await?;
-                }
-            }
-        }
 
         Ok(())
     }
+
+    #[async_recursion]
+    async fn list_and_delete(&self, client: &Client, prefix: &str) -> Result<(), Error> {
+        let output = client
+            .list_objects_v2()
+            .bucket(self.bucket_name.clone())
+            .prefix(prefix)
+            .delimiter("/".to_string())
+            .send()
+            .await
+            .map_err(|_| Error::ListS3ObjectsError(prefix.to_string()))?;
+
+        // So-called 'common prefixes' are subdirectories.
+        if let Some(common_prefixes) = output.common_prefixes {
+            for cp in common_prefixes {
+                let next_prefix = cp.prefix.unwrap();
+                self.list_and_delete(client, &next_prefix).await?;
+            }
+        }
+
+        if let Some(objects) = output.contents {
+            for object in objects {
+                let object_key = object.key.unwrap();
+                self.delete_object(client, &object_key).await?;
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn retrieve_object(
+        &self,
+        client: &Client,
+        object_key: &str,
+        dest_path: &PathBuf,
+    ) -> Result<()> {
+        println!("Retrieving {object_key} from S3...");
+        let mut resp = client
+            .get_object()
+            .bucket(self.bucket_name.clone())
+            .key(object_key)
+            .send()
+            .await
+            .map_err(|_| {
+                Error::GetS3ObjectError(object_key.to_string(), self.bucket_name.clone())
+            })?;
+
+        if let Some(parent) = dest_path.parent() {
+            if !parent.exists() {
+                tokio::fs::create_dir_all(parent).await?;
+            }
+        }
+
+        let mut file = tokio::fs::File::create(&dest_path).await?;
+        while let Some(bytes) = resp
+            .body
+            .try_next()
+            .await
+            .map_err(|_| Error::S3ByteStreamError)?
+        {
+            file.write_all(&bytes).await?;
+        }
+
+        println!("Saved at {}", dest_path.to_string_lossy());
+        Ok(())
+    }
+
+    async fn delete_object(&self, client: &Client, object_key: &str) -> Result<()> {
+        println!("Deleting {object_key} from S3...");
+        client
+            .delete_object()
+            .bucket(self.bucket_name.clone())
+            .key(object_key)
+            .send()
+            .await
+            .map_err(|_| {
+                Error::DeleteS3ObjectError(object_key.to_string(), self.bucket_name.clone())
+            })?;
+        Ok(())
+    }
 }
diff --git a/src/tests/build_safe_network_binaries.rs b/src/tests/build_safe_network_binaries.rs
index 4e2bf36b..243c91e8 100644
--- a/src/tests/build_safe_network_binaries.rs
+++ b/src/tests/build_safe_network_binaries.rs
@@ -16,7 +16,7 @@ use std::path::PathBuf;
 #[tokio::test]
 async fn should_run_ansible_to_build_faucet() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
     let mut ansible_runner = MockAnsibleRunnerInterface::new();
     ansible_runner
         .expect_inventory_list()
@@ -67,7 +67,7 @@ async fn should_run_ansible_to_build_faucet() -> Result<()> {
 #[tokio::test]
 async fn should_run_ansible_to_build_faucet_and_custom_safenode_bin() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
     let mut ansible_runner = MockAnsibleRunnerInterface::new();
     ansible_runner
         .expect_inventory_list()
diff --git a/src/tests/clean.rs b/src/tests/clean.rs
index aa2e4015..f6183f94 100644
--- a/src/tests/clean.rs
+++ b/src/tests/clean.rs
@@ -20,7 +20,7 @@ use mockall::Sequence;
 async fn should_run_terraform_destroy_and_delete_workspace_and_delete_inventory_files() -> Result<()>
 {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("alpha", &working_dir)?;
     let mut terraform_runner = setup_default_terraform_runner("alpha");
     let mut seq = Sequence::new();
     terraform_runner
diff --git a/src/tests/init.rs b/src/tests/init.rs
index 62427f5a..166411ae 100644
--- a/src/tests/init.rs
+++ b/src/tests/init.rs
@@ -13,7 +13,7 @@ use crate::s3::MockS3RepositoryInterface;
 use crate::ssh::MockSshClientInterface;
 use crate::terraform::MockTerraformRunnerInterface;
 use assert_fs::prelude::*;
-use color_eyre::Result;
+use color_eyre::{eyre::eyre, Result};
 use mockall::predicate::*;
 use mockall::Sequence;
 use std::os::unix::fs::PermissionsExt;
@@ -32,7 +32,7 @@ async fn should_create_a_new_workspace() -> Result<()> {
         .times(1)
         .with(eq("beta".to_string()))
         .returning(|_| Ok(()));
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
     let testnet = TestnetDeploy::new(
         Box::new(terraform_runner),
         Box::new(MockAnsibleRunnerInterface::new()),
@@ -68,7 +68,7 @@ async fn should_not_create_a_new_workspace_when_one_with_the_same_name_exists()
         .with(eq("alpha".to_string()))
         .returning(|_| Ok(()));
 
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("alpha", &working_dir)?;
     let testnet = TestnetDeploy::new(
         Box::new(terraform_runner),
         Box::new(MockAnsibleRunnerInterface::new()),
@@ -90,7 +90,7 @@ async fn should_download_and_extract_the_rpc_client() -> Result<()> {
         working_dir.child("rpc_client-latest-x86_64-unknown-linux-musl.tar.gz");
     let extracted_rpc_client_bin = working_dir.child(RPC_CLIENT_BIN_NAME);
 
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("alpha", &working_dir)?;
     let terraform_runner = setup_default_terraform_runner("alpha");
     let testnet = TestnetDeploy::new(
         Box::new(terraform_runner),
@@ -121,6 +121,11 @@ async fn should_not_download_the_rpc_client_if_it_already_exists() -> Result<()>
     fake_rpc_client_bin.write_binary(b"fake code")?;
 
     let mut s3_repository = MockS3RepositoryInterface::new();
+    s3_repository
+        .expect_folder_exists()
+        .with(eq("testnet-logs/alpha".to_string()))
+        .times(1)
+        .returning(|_| Ok(false));
     s3_repository.expect_download_object().times(0);
     let terraform_runner = setup_default_terraform_runner("alpha");
 
@@ -142,7 +147,7 @@ async fn should_not_download_the_rpc_client_if_it_already_exists() -> Result<()>
 #[tokio::test]
 async fn should_generate_ansible_inventory_for_digital_ocean_for_the_new_testnet() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("alpha", &working_dir)?;
     let terraform_runner = setup_default_terraform_runner("alpha");
 
     let testnet = TestnetDeploy::new(
@@ -176,10 +181,31 @@ async fn should_generate_ansible_inventory_for_digital_ocean_for_the_new_testnet
 #[tokio::test]
 async fn should_not_overwrite_generated_inventory() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
     let mut terraform_runner = MockTerraformRunnerInterface::new();
     let mut seq = Sequence::new();
+
+    let saved_archive_path = working_dir
+        .to_path_buf()
+        .join("rpc_client-latest-x86_64-unknown-linux-musl.tar.gz");
+    let rpc_client_archive_path = create_fake_rpc_client_archive(&working_dir)?;
+    let mut s3_repository = MockS3RepositoryInterface::new();
+    s3_repository
+        .expect_download_object()
+        .with(
+            eq("rpc_client-latest-x86_64-unknown-linux-musl.tar.gz"),
+            eq(saved_archive_path),
+        )
+        .times(1)
+        .returning(move |_object_path, archive_path| {
+            std::fs::copy(&rpc_client_archive_path, archive_path)?;
+            Ok(())
+        });
+    s3_repository
+        .expect_folder_exists()
+        .with(eq("testnet-logs/alpha".to_string()))
+        .times(2)
+        .returning(|_| Ok(false));
 
     terraform_runner.expect_init().times(2).returning(|| Ok(()));
     terraform_runner
         .expect_workspace_list()
@@ -231,3 +257,55 @@ async fn should_not_overwrite_generated_inventory() -> Result<()> {
     drop(tmp_dir);
     Ok(())
 }
+
+#[tokio::test]
+async fn should_return_an_error_if_logs_already_exist_for_environment() -> Result<()> {
+    let (tmp_dir, working_dir) = setup_working_directory()?;
+    let mut terraform_runner = MockTerraformRunnerInterface::new();
+    terraform_runner.expect_init().times(0).returning(|| Ok(()));
+    terraform_runner
+        .expect_workspace_list()
+        .times(0)
+        .returning(|| {
+            Ok(vec![
+                "alpha".to_string(),
+                "default".to_string(),
+                "dev".to_string(),
+            ])
+        });
+    terraform_runner
+        .expect_workspace_new()
+        .times(0)
+        .with(eq("alpha".to_string()))
+        .returning(|_| Ok(()));
+
+    let mut s3_repository = MockS3RepositoryInterface::new();
+    s3_repository
+        .expect_folder_exists()
+        .with(eq("testnet-logs/alpha"))
+        .times(1)
+        .returning(|_| Ok(true));
+
+    let testnet = TestnetDeploy::new(
+        Box::new(terraform_runner),
+        Box::new(MockAnsibleRunnerInterface::new()),
+        Box::new(MockRpcClientInterface::new()),
+        Box::new(MockSshClientInterface::new()),
+        working_dir.to_path_buf(),
+        CloudProvider::DigitalOcean,
+        Box::new(s3_repository),
+    );
+
+    let result = testnet.init("alpha").await;
+    match result {
+        Ok(()) => {
+            drop(tmp_dir);
+            Err(eyre!("init should have returned an error"))
+        }
+        Err(e) => {
+            assert_eq!(e.to_string(), "Logs for a 'alpha' testnet already exist");
+            drop(tmp_dir);
+            Ok(())
+        }
+    }
+}
diff --git a/src/tests/provision_faucet.rs b/src/tests/provision_faucet.rs
index 983dfe20..78a4853c 100644
--- a/src/tests/provision_faucet.rs
+++ b/src/tests/provision_faucet.rs
@@ -16,7 +16,7 @@ use std::path::PathBuf;
 #[tokio::test]
 async fn should_run_ansible_against_the_remaining_nodes() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
     let mut ansible_runner = MockAnsibleRunnerInterface::new();
     ansible_runner
         .expect_run_playbook()
@@ -58,7 +58,7 @@ async fn should_run_ansible_against_the_remaining_nodes() -> Result<()> {
 #[tokio::test]
 async fn should_run_ansible_against_the_remaining_nodes_with_a_custom_binary() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
     let mut ansible_runner = MockAnsibleRunnerInterface::new();
     ansible_runner
         .expect_run_playbook()
diff --git a/src/tests/provision_genesis_node.rs b/src/tests/provision_genesis_node.rs
index e056e2f7..647f3784 100644
--- a/src/tests/provision_genesis_node.rs
+++ b/src/tests/provision_genesis_node.rs
@@ -18,7 +18,7 @@ const CUSTOM_BIN_URL: &str = "https://sn-node.s3.eu-west-2.amazonaws.com/maidsaf
 #[tokio::test]
 async fn should_run_ansible_against_genesis() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
+    let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
     let mut ansible_runner = MockAnsibleRunnerInterface::new();
     ansible_runner
         .expect_inventory_list()
@@ -67,7 +67,7 @@ async fn should_run_ansible_against_genesis() -> Result<()> {
 #[tokio::test]
 async fn should_run_ansible_against_genesis_with_a_custom_binary() -> Result<()> {
     let (tmp_dir, working_dir) = setup_working_directory()?;
-    let s3_repository = setup_default_s3_repository(&working_dir)?;
setup_default_s3_repository("beta", &working_dir)?; let mut ansible_runner = MockAnsibleRunnerInterface::new(); ansible_runner .expect_inventory_list() diff --git a/src/tests/provision_remaining_nodes.rs b/src/tests/provision_remaining_nodes.rs index 5c355ec8..899667ca 100644 --- a/src/tests/provision_remaining_nodes.rs +++ b/src/tests/provision_remaining_nodes.rs @@ -18,7 +18,7 @@ const CUSTOM_BIN_URL: &str = "https://sn-node.s3.eu-west-2.amazonaws.com/maidsaf #[tokio::test] async fn should_run_ansible_against_the_remaining_nodes() -> Result<()> { let (tmp_dir, working_dir) = setup_working_directory()?; - let s3_repository = setup_default_s3_repository(&working_dir)?; + let s3_repository = setup_default_s3_repository("beta", &working_dir)?; let mut ansible_runner = MockAnsibleRunnerInterface::new(); ansible_runner .expect_run_playbook() @@ -62,7 +62,7 @@ async fn should_run_ansible_against_the_remaining_nodes() -> Result<()> { #[tokio::test] async fn should_run_ansible_against_the_remaining_nodes_with_a_custom_binary() -> Result<()> { let (tmp_dir, working_dir) = setup_working_directory()?; - let s3_repository = setup_default_s3_repository(&working_dir)?; + let s3_repository = setup_default_s3_repository("beta", &working_dir)?; let mut ansible_runner = MockAnsibleRunnerInterface::new(); ansible_runner .expect_run_playbook() diff --git a/src/tests/setup.rs b/src/tests/setup.rs index a6086b68..b4e8254b 100644 --- a/src/tests/setup.rs +++ b/src/tests/setup.rs @@ -57,7 +57,10 @@ pub fn setup_default_terraform_runner(name: &str) -> MockTerraformRunnerInterfac terraform_runner } -pub fn setup_default_s3_repository(working_dir: &ChildPath) -> Result { +pub fn setup_default_s3_repository( + env_name: &str, + working_dir: &ChildPath, +) -> Result { let saved_archive_path = working_dir .to_path_buf() .join("rpc_client-latest-x86_64-unknown-linux-musl.tar.gz"); @@ -74,5 +77,10 @@ pub fn setup_default_s3_repository(working_dir: &ChildPath) -> Result