Skip to content

Commit

Permalink
feat: provide logs rm command
Browse files Browse the repository at this point in the history
Remove the logs from a previous testnet run by deleting the entire folder from S3.

When a new testnet is created we check to see if logs from a previous run with the same name already
exist. We won't proceed unless the user deletes those logs. We offer the choice to delete or
retrieve them before deletion.

If we did proceed, the logs from the two testnets would be intermingled with each other and this
would lead to a confusing situation.
  • Loading branch information
jacderida committed Aug 29, 2023
1 parent d2c4bd0 commit 49f2e11
Show file tree
Hide file tree
Showing 12 changed files with 281 additions and 81 deletions.
6 changes: 5 additions & 1 deletion src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,15 @@ pub enum Error {
#[error("Could not determine content length for asset")]
AssetContentLengthUndetermined,
#[error(transparent)]
AwsS3Error(#[from] aws_sdk_s3::Error),
AwsS3Error(#[from] Box<aws_sdk_s3::Error>),
#[error("The {0} environment variable must be set to use your cloud provider")]
CloudProviderCredentialsNotSupplied(String),
#[error("The {0} cloud provider is not supported yet")]
CloudProviderNotSupported(String),
#[error("Both the repository owner and branch name must be supplied if either are used")]
CustomBinConfigError,
#[error("Failed to delete '{0}' from '{1}'")]
DeleteS3ObjectError(String, String),
#[error("The '{0}' environment does not exist")]
EnvironmentDoesNotExist(String),
#[error("Command executed with {0} failed. See output for details.")]
Expand All @@ -39,6 +41,8 @@ pub enum Error {
Io(#[from] std::io::Error),
#[error("Failed to list objects in S3 bucket with '{0}' prefix")]
ListS3ObjectsError(String),
#[error("Logs for a '{0}' testnet already exist")]
LogsForPreviousTestnetExist(String),
#[error("Logs have not been retrieved for the '{0}' environment.")]
LogsNotRetrievedError(String),
#[error("Error in byte stream when attempting to retrieve S3 object")]
Expand Down
8 changes: 8 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,14 @@ impl TestnetDeploy {
}

pub async fn init(&self, name: &str) -> Result<()> {
if self
.s3_repository
.folder_exists(&format!("testnet-logs/{name}"))
.await?
{
return Err(Error::LogsForPreviousTestnetExist(name.to_string()));
}

self.terraform_runner.init()?;
let workspaces = self.terraform_runner.workspace_list()?;
if !workspaces.contains(&name.to_string()) {
Expand Down
16 changes: 15 additions & 1 deletion src/logs.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,9 @@
// Copyright (c) 2023, MaidSafe.
// All rights reserved.
//
// This SAFE Network Software is licensed under the BSD-3-Clause license.
// Please see the LICENSE file for more details.

use crate::error::{Error, Result};
use crate::s3::{S3Repository, S3RepositoryInterface};
use crate::TestnetDeploy;
Expand All @@ -12,7 +18,7 @@ impl TestnetDeploy {
/// It needs to be part of `TestnetDeploy` because the Ansible runner is already setup in that
/// context.
pub async fn copy_logs(&self, name: &str) -> Result<()> {
let dest = PathBuf::from(".").join("logs").join(format!("{name}"));
let dest = PathBuf::from(".").join("logs").join(name);
if dest.exists() {
println!("Removing existing {} directory", dest.to_string_lossy());
remove(dest.clone())?;
Expand Down Expand Up @@ -62,6 +68,14 @@ pub async fn reassemble_logs(name: &str) -> Result<()> {
Ok(())
}

/// Delete all stored logs for the given testnet environment.
///
/// Removes the entire `testnet-logs/<name>` folder from the `sn-testnet`
/// bucket on S3.
pub async fn rm_logs(name: &str) -> Result<()> {
    let folder = format!("testnet-logs/{name}");
    let repository = S3Repository::new("sn-testnet");
    repository.delete_folder(&folder).await?;
    Ok(())
}

fn process_part_files(dir_path: &Path, source_root: &PathBuf, dest_root: &PathBuf) -> Result<()> {
let reassembled_dir_path = if dir_path == dest_root {
dest_root.clone()
Expand Down
38 changes: 36 additions & 2 deletions src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,9 @@
// Please see the LICENSE file for more details.

use clap::{Parser, Subcommand};
use color_eyre::{eyre::eyre, Result};
use color_eyre::{eyre::eyre, Help, Result};
use dotenv::dotenv;
use sn_testnet_deploy::error::Error;
use sn_testnet_deploy::setup::setup_dotenv_file;
use sn_testnet_deploy::CloudProvider;
use sn_testnet_deploy::TestnetDeployBuilder;
Expand Down Expand Up @@ -117,10 +118,17 @@ enum LogCommands {
#[arg(short = 'n', long)]
name: String,
},
/// Remove the logs from a given environment from the bucket on S3.
Rm {
/// The name of the environment for which logs have already been retrieved
#[arg(short = 'n', long)]
name: String,
},
}

#[tokio::main]
async fn main() -> Result<()> {
color_eyre::install()?;
dotenv().ok();
env_logger::init();

Expand All @@ -140,7 +148,29 @@ async fn main() -> Result<()> {
vm_count,
}) => {
let testnet_deploy = TestnetDeployBuilder::default().provider(provider).build()?;
testnet_deploy.init(&name).await?;
let result = testnet_deploy.init(&name).await;
match result {
Ok(_) => {}
Err(e) => match e {
Error::LogsForPreviousTestnetExist(_) => {
return Err(eyre!(e)
.wrap_err(format!(
"Logs already exist for a previous testnet with the \
name '{name}'"
))
.suggestion(
"If you wish to keep them, retrieve the logs with the 'logs get' \
command, then remove them with 'logs rm'. If you don't need them, \
simply run 'logs rm'. Then you can proceed with deploying your \
new testnet.",
));
}
_ => {
return Err(eyre!(e));
}
},
}

testnet_deploy
.deploy(&name, vm_count, node_count, repo_owner, branch)
.await?;
Expand All @@ -165,6 +195,10 @@ async fn main() -> Result<()> {
sn_testnet_deploy::logs::reassemble_logs(&name).await?;
Ok(())
}
LogCommands::Rm { name } => {
sn_testnet_deploy::logs::rm_logs(&name).await?;
Ok(())
}
},
Some(Commands::Setup {}) => {
setup_dotenv_file()?;
Expand Down
176 changes: 115 additions & 61 deletions src/s3.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@ use tokio_stream::StreamExt;
#[async_trait]
pub trait S3RepositoryInterface {
async fn download_object(&self, object_key: &str, dest_path: &Path) -> Result<()>;
async fn download_folder(&self, folder_path: &str, dest_path: &PathBuf) -> Result<()>;
async fn download_folder(&self, folder_path: &str, dest_path: &Path) -> Result<()>;
async fn delete_folder(&self, folder_path: &str) -> Result<()>;
async fn folder_exists(&self, folder_path: &str) -> Result<bool>;
}

pub struct S3Repository {
Expand All @@ -34,46 +36,45 @@ impl S3RepositoryInterface for S3Repository {
async fn download_object(&self, object_key: &str, dest_path: &Path) -> Result<()> {
let conf = aws_config::from_env().region("eu-west-2").load().await;
let client = Client::new(&conf);

println!("Retrieving {object_key} from S3...");
let mut resp = client
.get_object()
.bucket(self.bucket_name.clone())
.key(object_key)
.send()
.await
.map_err(|_| {
Error::GetS3ObjectError(object_key.to_string(), self.bucket_name.clone())
})?;

if let Some(parent) = dest_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent).await?;
}
}

let mut file = tokio::fs::File::create(&dest_path).await?;
while let Some(bytes) = resp
.body
.try_next()
.await
.map_err(|_| Error::S3ByteStreamError)?
{
file.write_all(&bytes).await?;
}

println!("Saved at {}", dest_path.to_string_lossy());
self.retrieve_object(&client, object_key, &dest_path.to_path_buf())
.await?;
Ok(())
}

async fn download_folder(&self, folder_path: &str, dest_path: &PathBuf) -> Result<()> {
/// Download every object under `folder_path` in the bucket into `dest_path`,
/// creating the destination directory first.
async fn download_folder(&self, folder_path: &str, dest_path: &Path) -> Result<()> {
    let shared_config = aws_config::from_env().region("eu-west-2").load().await;
    let s3_client = Client::new(&shared_config);
    tokio::fs::create_dir_all(dest_path).await?;
    let root = dest_path.to_path_buf();
    self.list_and_retrieve(&s3_client, folder_path, &root).await?;
    Ok(())
}

/// Recursively delete every object beneath `folder_path` in the bucket.
async fn delete_folder(&self, folder_path: &str) -> Result<()> {
    let shared_config = aws_config::from_env().region("eu-west-2").load().await;
    let s3_client = Client::new(&shared_config);
    self.list_and_delete(&s3_client, folder_path).await?;
    Ok(())
}

/// Check whether any objects exist under the given folder prefix in the bucket.
///
/// A trailing slash is appended to the prefix if missing so that, e.g.,
/// "logs/foo" does not accidentally match "logs/foobar".
///
/// Returns `Ok(true)` if the folder has any direct child objects *or* any
/// nested subfolders.
async fn folder_exists(&self, folder_path: &str) -> Result<bool> {
    let conf = aws_config::from_env().region("eu-west-2").load().await;
    let client = Client::new(&conf);
    let folder = if folder_path.ends_with('/') {
        folder_path.to_string()
    } else {
        format!("{}/", folder_path)
    };
    let output = client
        .list_objects_v2()
        .bucket(self.bucket_name.clone())
        .prefix(folder)
        .delimiter("/".to_string())
        .send()
        .await
        .map_err(|_| Error::ListS3ObjectsError(folder_path.to_string()))?;
    // With a '/' delimiter, direct children are returned in `contents` while
    // nested subfolders are returned as `common_prefixes`. Checking only
    // `contents` would wrongly report a folder that contains nothing but
    // subfolders as nonexistent, so both collections must be inspected.
    let has_objects = !output.contents().unwrap_or_default().is_empty();
    let has_subfolders = !output.common_prefixes().unwrap_or_default().is_empty();
    Ok(has_objects || has_subfolders)
}
}

impl S3Repository {
Expand Down Expand Up @@ -103,7 +104,7 @@ impl S3Repository {
if let Some(common_prefixes) = output.common_prefixes {
for cp in common_prefixes {
let next_prefix = cp.prefix.unwrap();
self.list_and_retrieve(client, &next_prefix, &root_path)
self.list_and_retrieve(client, &next_prefix, root_path)
.await?;
}
}
Expand All @@ -118,38 +119,91 @@ impl S3Repository {
continue;
}

println!("Retrieving {object_key}...");
let mut resp = client
.get_object()
.bucket(self.bucket_name.clone())
.key(&object_key)
.send()
.await
.map_err(|_| {
Error::GetS3ObjectError(
root_path.to_string_lossy().to_string(),
self.bucket_name.clone(),
)
})?;

if let Some(parent) = dest_file_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent).await?;
}
}
self.retrieve_object(client, &object_key, &dest_file_path)
.await?;
}
}

let mut file = tokio::fs::File::create(&dest_file_path).await?;
while let Some(bytes) = resp
.body
.try_next()
.await
.map_err(|_| Error::S3ByteStreamError)?
{
file.write_all(&bytes).await?;
}
Ok(())
}

/// Recursively delete every object under `prefix` in the bucket.
///
/// Subfolders (returned as 'common prefixes' when listing with a '/'
/// delimiter) are descended into first, then the objects directly under the
/// prefix are deleted one by one.
///
/// Pagination is handled explicitly: `list_objects_v2` returns at most 1000
/// keys per call, so without following the continuation token a large folder
/// would only be partially deleted. Missing `prefix`/`key` fields are skipped
/// rather than unwrapped, so a malformed listing entry cannot panic.
#[async_recursion]
async fn list_and_delete(&self, client: &Client, prefix: &str) -> Result<(), Error> {
    let mut continuation_token: Option<String> = None;
    loop {
        let output = client
            .list_objects_v2()
            .bucket(self.bucket_name.clone())
            .prefix(prefix)
            .delimiter("/".to_string())
            .set_continuation_token(continuation_token.take())
            .send()
            .await
            .map_err(|_| Error::ListS3ObjectsError(prefix.to_string()))?;

        // So-called 'common prefixes' are subdirectories.
        if let Some(common_prefixes) = output.common_prefixes {
            for cp in common_prefixes {
                if let Some(next_prefix) = cp.prefix {
                    self.list_and_delete(client, &next_prefix).await?;
                }
            }
        }

        if let Some(objects) = output.contents {
            for object in objects {
                if let Some(object_key) = object.key {
                    self.delete_object(client, &object_key).await?;
                }
            }
        }

        match output.next_continuation_token {
            Some(token) => continuation_token = Some(token),
            None => break,
        }
    }

    Ok(())
}

async fn retrieve_object(
&self,
client: &Client,
object_key: &str,
dest_path: &PathBuf,
) -> Result<()> {
println!("Retrieving {object_key} from S3...");
let mut resp = client
.get_object()
.bucket(self.bucket_name.clone())
.key(object_key)
.send()
.await
.map_err(|_| {
Error::GetS3ObjectError(object_key.to_string(), self.bucket_name.clone())
})?;

if let Some(parent) = dest_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent).await?;
}
}

let mut file = tokio::fs::File::create(&dest_path).await?;
while let Some(bytes) = resp
.body
.try_next()
.await
.map_err(|_| Error::S3ByteStreamError)?
{
file.write_all(&bytes).await?;
}

println!("Saved at {}", dest_path.to_string_lossy());
Ok(())
}

/// Remove a single object from the bucket, reporting progress to stdout.
async fn delete_object(&self, client: &Client, object_key: &str) -> Result<()> {
    println!("Deleting {object_key} from S3...");
    let request = client
        .delete_object()
        .bucket(self.bucket_name.clone())
        .key(object_key);
    request.send().await.map_err(|_| {
        Error::DeleteS3ObjectError(object_key.to_string(), self.bucket_name.clone())
    })?;
    Ok(())
}
}
4 changes: 2 additions & 2 deletions src/tests/build_safe_network_binaries.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use std::path::PathBuf;
#[tokio::test]
async fn should_run_ansible_to_build_faucet() -> Result<()> {
let (tmp_dir, working_dir) = setup_working_directory()?;
let s3_repository = setup_default_s3_repository(&working_dir)?;
let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
let mut ansible_runner = MockAnsibleRunnerInterface::new();
ansible_runner
.expect_inventory_list()
Expand Down Expand Up @@ -67,7 +67,7 @@ async fn should_run_ansible_to_build_faucet() -> Result<()> {
#[tokio::test]
async fn should_run_ansible_to_build_faucet_and_custom_safenode_bin() -> Result<()> {
let (tmp_dir, working_dir) = setup_working_directory()?;
let s3_repository = setup_default_s3_repository(&working_dir)?;
let s3_repository = setup_default_s3_repository("beta", &working_dir)?;
let mut ansible_runner = MockAnsibleRunnerInterface::new();
ansible_runner
.expect_inventory_list()
Expand Down
2 changes: 1 addition & 1 deletion src/tests/clean.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ use mockall::Sequence;
async fn should_run_terraform_destroy_and_delete_workspace_and_delete_inventory_files() -> Result<()>
{
let (tmp_dir, working_dir) = setup_working_directory()?;
let s3_repository = setup_default_s3_repository(&working_dir)?;
let s3_repository = setup_default_s3_repository("alpha", &working_dir)?;
let mut terraform_runner = setup_default_terraform_runner("alpha");
let mut seq = Sequence::new();
terraform_runner
Expand Down
Loading

0 comments on commit 49f2e11

Please sign in to comment.