diff --git a/Dockerfile b/Dockerfile
index 7a33eded8..3b11948a3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -198,3 +198,34 @@ COPY --from=build-src /src/bottlerocket/agents/src/bin/migration-test-agent/ssm-
 COPY --from=build-src /usr/share/licenses/testsys /licenses/testsys
 
 ENTRYPOINT ["./migration-test-agent"]
+
+# =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
+# Builds the Kubernetes Workload test agent image
+FROM public.ecr.aws/amazonlinux/amazonlinux:2 AS k8s-workload-agent
+ARG ARCH
+
+# TODO remove unzip once aws-cli moves out
+RUN yum install -y unzip iproute && yum clean all
+ARG AWS_CLI_URL=https://awscli.amazonaws.com/awscli-exe-linux-${ARCH}.zip
+
+# Copy aws-iam-authenticator
+COPY --from=tools /aws-iam-authenticator /usr/bin/aws-iam-authenticator
+COPY --from=tools /licenses/aws-iam-authenticator /licenses/aws-iam-authenticator
+
+# TODO move this out, get hashes, and attribute licenses
+# Download aws-cli
+RUN temp_dir="$(mktemp -d --suffix aws-cli)" && \
+    curl -fsSL "${AWS_CLI_URL}" -o "${temp_dir}/awscliv2.zip" && \
+    unzip "${temp_dir}/awscliv2.zip" -d "${temp_dir}" && \
+    ${temp_dir}/aws/install && \
+    rm -rf ${temp_dir}
+
+# Copy sonobuoy
+COPY --from=tools /sonobuoy /usr/bin/sonobuoy
+COPY --from=tools /licenses/sonobuoy /licenses/sonobuoy
+
+# Copy k8s-workload-agent
+COPY --from=build-src /src/bottlerocket/agents/bin/k8s-workload-agent ./
+COPY --from=build-src /usr/share/licenses/testsys /licenses/testsys
+
+ENTRYPOINT ["./k8s-workload-agent"]
diff --git a/Makefile b/Makefile
index 31b539018..e83f2f3fb 100644
--- a/Makefile
+++ b/Makefile
@@ -22,7 +22,8 @@ TESTSYS_BUILD_GOPROXY ?= direct
 # The set of bottlerocket images to create. Add new artifacts here when added
 # to the project.
 IMAGES = controller sonobuoy-test-agent ec2-resource-agent eks-resource-agent ecs-resource-agent \
-	migration-test-agent vsphere-vm-resource-agent vsphere-k8s-cluster-resource-agent ecs-test-agent
+	migration-test-agent vsphere-vm-resource-agent vsphere-k8s-cluster-resource-agent ecs-test-agent \
+	k8s-workload-agent
 
 # Store targets for tagging images
 TAG_IMAGES = $(addprefix tag-, $(IMAGES))
@@ -138,7 +139,7 @@ tools: ./tools
 
 # Build the container image for a testsys agent
-eks-resource-agent ec2-resource-agent ecs-resource-agent vsphere-vm-resource-agent vsphere-k8s-cluster-resource-agent sonobuoy-test-agent migration-test-agent ecs-test-agent: show-variables fetch
+eks-resource-agent ec2-resource-agent ecs-resource-agent vsphere-vm-resource-agent vsphere-k8s-cluster-resource-agent sonobuoy-test-agent migration-test-agent ecs-test-agent k8s-workload-agent: show-variables fetch
 	docker build $(DOCKER_BUILD_FLAGS) \
 		--build-arg ARCH="$(TESTSYS_BUILD_HOST_UNAME_ARCH)" \
 		--build-arg BUILDER_IMAGE="$(BUILDER_IMAGE)" \
diff --git a/bottlerocket/agents/src/bin/k8s-workload-agent/main.rs b/bottlerocket/agents/src/bin/k8s-workload-agent/main.rs
new file mode 100644
index 000000000..01be5b264
--- /dev/null
+++ b/bottlerocket/agents/src/bin/k8s-workload-agent/main.rs
@@ -0,0 +1,116 @@
+/*!
+
+This is a test-agent for running workload tests on Kubernetes.
+It expects to be run in a pod launched by the TestSys controller.
+
+You can configure the workload agent to run different types of plugins and tests.
+See `WorkloadConfig` for the different configuration values.
+
+To build the container for the workload test agent, run `make k8s-workload-agent` from the
+root directory of this repository.
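+
+For example (a sketch; both targets come from the Makefile change above, via `IMAGES` and the
+`tag-` prefix in `TAG_IMAGES`):
+
+```sh
+# Build the agent image for the host architecture
+make k8s-workload-agent
+# Tag the image (exact tagging behavior depends on the Makefile's tag- rules)
+make tag-k8s-workload-agent
+```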
+
+Here is an example manifest for deploying the test definition for the workload test agent to a K8s cluster:
+
+```yaml
+apiVersion: testsys.system/v1
+kind: Test
+metadata:
+  name: workload-full
+  namespace: testsys
+spec:
+  agent:
+    configuration:
+      kubeconfigBase64: <base64-encoded kubeconfig for the test cluster>
+      plugins:
+        - name: nvidia-workload
+          image: testsys-nvidia-workload-test:v0.0.3
+    image: <your k8s-workload-agent image URI>
+    name: workload-test-agent
+    keep_running: true
+  resources: {}
+```
+
+!*/
+
+use agent_utils::{base64_decode_write_file, init_agent_logger};
+use async_trait::async_trait;
+use bottlerocket_agents::constants::TEST_CLUSTER_KUBECONFIG_PATH;
+use bottlerocket_agents::error::Error;
+use bottlerocket_agents::workload::{delete_workload, rerun_failed_workload, run_workload};
+use bottlerocket_types::agent_config::WorkloadConfig;
+use log::{debug, info};
+use model::TestResults;
+use std::path::PathBuf;
+use test_agent::{BootstrapData, ClientError, DefaultClient, Spec, TestAgent};
+
+struct WorkloadTestRunner {
+    config: WorkloadConfig,
+    results_dir: PathBuf,
+}
+
+#[async_trait]
+impl test_agent::Runner for WorkloadTestRunner {
+    type C = WorkloadConfig;
+    type E = Error;
+
+    async fn new(spec: Spec<Self::C>) -> Result<Self, Self::E> {
+        info!("Initializing Workload test agent...");
+        Ok(Self {
+            config: spec.configuration,
+            results_dir: spec.results_dir,
+        })
+    }
+
+    async fn run(&mut self) -> Result<TestResults, Self::E> {
+        debug!("Decoding kubeconfig for test cluster");
+        base64_decode_write_file(&self.config.kubeconfig_base64, TEST_CLUSTER_KUBECONFIG_PATH)
+            .await?;
+        info!("Stored kubeconfig in {}", TEST_CLUSTER_KUBECONFIG_PATH);
+
+        run_workload(
+            TEST_CLUSTER_KUBECONFIG_PATH,
+            &self.config,
+            &self.results_dir,
+        )
+        .await
+    }
+
+    async fn rerun_failed(&mut self, _prev_results: &TestResults) -> Result<TestResults, Self::E> {
+        delete_workload(TEST_CLUSTER_KUBECONFIG_PATH).await?;
+
+        debug!("Decoding kubeconfig for test cluster");
+        base64_decode_write_file(&self.config.kubeconfig_base64, TEST_CLUSTER_KUBECONFIG_PATH)
+            .await?;
+        info!("Stored kubeconfig in {}", TEST_CLUSTER_KUBECONFIG_PATH);
+
+        rerun_failed_workload(
+            TEST_CLUSTER_KUBECONFIG_PATH,
+            &self.config,
+            &self.results_dir,
+        )
+        .await
+    }
+
+    async fn terminate(&mut self) -> Result<(), Self::E> {
+        delete_workload(TEST_CLUSTER_KUBECONFIG_PATH).await
+    }
+}
+
+#[tokio::main]
+async fn main() {
+    init_agent_logger(env!("CARGO_CRATE_NAME"), None);
+    if let Err(e) = run().await {
+        eprintln!("{}", e);
+        std::process::exit(1);
+    }
+}
+
+async fn run() -> Result<(), test_agent::error::Error<ClientError, Error>> {
+    let mut agent = TestAgent::<DefaultClient, WorkloadTestRunner>::new(
+        BootstrapData::from_env().unwrap_or_else(|_| BootstrapData {
+            test_name: "workload_test".to_string(),
+        }),
+    )
+    .await?;
+    agent.run().await
+}
diff --git a/bottlerocket/agents/src/error.rs b/bottlerocket/agents/src/error.rs
index 52c6f340f..385f3d0ae 100644
--- a/bottlerocket/agents/src/error.rs
+++ b/bottlerocket/agents/src/error.rs
@@ -157,4 +157,25 @@ pub enum Error {
     #[snafu(context(false))]
     #[snafu(display("{}", source))]
     Utils { source: agent_utils::Error },
+
+    #[snafu(display("Failed to create workload process: {}", source))]
+    WorkloadProcess { source: std::io::Error },
+
+    #[snafu(display("Failed to run workload test"))]
+    WorkloadRun,
+
+    #[snafu(display("Failed to initialize workload test plugin: {}", plugin))]
+    WorkloadPlugin { plugin: String },
+
+    #[snafu(display(
+        "Failed to write workload test plugin configuration yaml for: {}",
+        plugin
+    ))]
+    WorkloadWritePlugin { plugin: String },
+
+    #[snafu(display("Failed to clean up workload resources"))]
+    WorkloadDelete,
+
+    #[snafu(display("Missing '{}' field from workload status", field))]
+    MissingWorkloadStatusField { field: String },
 }
diff --git a/bottlerocket/agents/src/lib.rs b/bottlerocket/agents/src/lib.rs
index be793aa45..a206be145 100644
--- a/bottlerocket/agents/src/lib.rs
+++ b/bottlerocket/agents/src/lib.rs
@@ -14,6 +14,7 @@ pub mod error;
 pub mod sonobuoy;
 pub mod tuf;
 pub mod vsphere;
+pub mod workload;
 
 /// Determines whether a cluster resource needs to be created given its creation policy
 pub async fn is_cluster_creation_required(
diff --git a/bottlerocket/agents/src/sonobuoy.rs b/bottlerocket/agents/src/sonobuoy.rs
index 1c91275c4..92894d82e 100644
--- a/bottlerocket/agents/src/sonobuoy.rs
+++ b/bottlerocket/agents/src/sonobuoy.rs
@@ -3,6 +3,7 @@ use bottlerocket_types::agent_config::{SonobuoyConfig, SONOBUOY_RESULTS_FILENAME
 use log::{error, info, trace};
 use model::{Outcome, TestResults};
 use snafu::{ensure, OptionExt, ResultExt};
+use std::collections::HashMap;
 use std::path::Path;
 use std::process::Command;
 
@@ -97,7 +98,7 @@ pub async fn rerun_failed_sonobuoy(
 /// Retrieve the results from a sonobuoy test and convert them into `TestResults`.
 pub fn results_sonobuoy(
     kubeconfig_path: &str,
-    sonobuoy_config: &SonobuoyConfig,
+    _: &SonobuoyConfig,
     results_dir: &Path,
 ) -> Result<TestResults, error::Error> {
     let kubeconfig_arg = vec!["--kubeconfig", kubeconfig_path];
@@ -143,72 +144,102 @@ pub fn results_sonobuoy(
         serde_json::from_str(&stdout).context(error::DeserializeJsonSnafu)?;
     trace!("The sonobuoy results are valid json");
 
-    let e2e_status = run_status
-        .get("plugins")
-        .context(error::MissingSonobuoyStatusFieldSnafu { field: "plugins" })?
-        .as_array()
-        .context(error::MissingSonobuoyStatusFieldSnafu { field: "plugins" })?
-        .first()
-        .context(error::MissingSonobuoyStatusFieldSnafu {
-            field: format!("plugins.{}", sonobuoy_config.plugin),
-        })?;
-
-    // Sometimes a helpful log is available in the progress field, but not always.
-    let progress_status = e2e_status
-        .get("progress")
-        .map(|value| value.to_string())
-        .unwrap_or_else(|| "".to_string());
-
-    let result_status = e2e_status
-        .get("result-status")
-        .context(error::MissingSonobuoyStatusFieldSnafu {
-            field: format!("plugins.{}.result-status", sonobuoy_config.plugin),
-        })?
-        .as_str()
-        .context(error::MissingSonobuoyStatusFieldSnafu {
-            field: format!("plugins.{}.result-status", sonobuoy_config.plugin),
-        })?;
-
-    let result_counts = run_status
+    process_sonobuoy_test_results(&run_status)
+}
+
+/// Parses `sonobuoy status --json` output and derives the overall outcome from the per-plugin
+/// results.
+pub(crate) fn process_sonobuoy_test_results(
+    run_status: &serde_json::Value,
+) -> Result<TestResults, error::Error> {
+    let mut num_passed: u64 = 0;
+    let mut num_failed: u64 = 0;
+    let mut num_skipped: u64 = 0;
+    let mut progress = Vec::new();
+    let mut outcome_summary = HashMap::from([
+        ("pass", 0),
+        ("passed", 0),
+        ("fail", 0),
+        ("failed", 0),
+        ("timeout", 0),
+        ("timed-out", 0),
+    ]);
+
+    let plugin_results = run_status
         .get("plugins")
         .context(error::MissingSonobuoyStatusFieldSnafu { field: "plugins" })?
         .as_array()
-        .context(error::MissingSonobuoyStatusFieldSnafu { field: "plugins" })?
-        .first()
-        .context(error::MissingSonobuoyStatusFieldSnafu {
-            field: format!("plugins.{}", sonobuoy_config.plugin),
-        })?
-        .get("result-counts")
-        .context(error::MissingSonobuoyStatusFieldSnafu {
-            field: format!("plugins.{}.result-counts", sonobuoy_config.plugin),
-        })?;
-
-    let num_passed = result_counts
-        .get("passed")
-        .map(|v| v.as_u64().unwrap_or(0))
-        .unwrap_or(0);
-
-    let num_failed = result_counts
-        .get("failed")
-        .map(|v| v.as_u64().unwrap_or(0))
-        .unwrap_or(0);
-
-    let num_skipped = result_counts
-        .get("skipped")
-        .map(|v| v.as_u64().unwrap_or(0))
-        .unwrap_or(0);
+        .context(error::MissingSonobuoyStatusFieldSnafu { field: "plugins" })?;
+
+    for result in plugin_results {
+        let plugin = result
+            .get("plugin")
+            .and_then(|value| value.as_str())
+            .unwrap_or("");
+
+        // Sometimes a helpful log is available in the progress field, but not always.
+        let progress_status = result
+            .get("progress")
+            .and_then(|value| value.as_str())
+            .unwrap_or("");
+        if !progress_status.is_empty() {
+            progress.push(format!("{}: {}", plugin, progress_status));
+        }
+
+        let result_status = result
+            .get("result-status")
+            .context(error::MissingSonobuoyStatusFieldSnafu {
+                field: format!("plugins.{}.result-status", plugin),
+            })?
+            .as_str()
+            .context(error::MissingSonobuoyStatusFieldSnafu {
+                field: format!("plugins.{}.result-status", plugin),
+            })?;
+        *outcome_summary.entry(result_status).or_default() += 1;
+
+        let result_counts = result
+            .get("result-counts")
+            .context(error::MissingSonobuoyStatusFieldSnafu {
+                field: format!("plugins.{}.result-counts", plugin),
+            })?;
+
+        let passed = result_counts
+            .get("passed")
+            .map(|v| v.as_u64().unwrap_or(0))
+            .unwrap_or(0);
+        let failed = result_counts
+            .get("failed")
+            .map(|v| v.as_u64().unwrap_or(0))
+            .unwrap_or(0);
+        let skipped = result_counts
+            .get("skipped")
+            .map(|v| v.as_u64().unwrap_or(0))
+            .unwrap_or(0);
+
+        num_passed += passed;
+        num_failed += failed;
+        num_skipped += skipped;
+    }
+
+    // Figure out what outcome to report based on what each plugin reported.
+    // Failure takes precedence over timeout, which takes precedence over passing.
+    let mut outcome = Outcome::Unknown;
+    if outcome_summary["pass"] > 0 || outcome_summary["passed"] > 0 {
+        outcome = Outcome::Pass;
+    }
+    if outcome_summary["timeout"] > 0 || outcome_summary["timed-out"] > 0 {
+        outcome = Outcome::Timeout;
+    }
+    if outcome_summary["fail"] > 0 || outcome_summary["failed"] > 0 {
+        outcome = Outcome::Fail;
+    }
 
     Ok(TestResults {
-        outcome: match result_status {
-            "pass" | "passed" => Outcome::Pass,
-            "fail" | "failed" => Outcome::Fail,
-            "timeout" | "timed-out" => Outcome::Timeout,
-            _ => Outcome::Unknown,
-        },
+        outcome,
         num_passed,
         num_failed,
         num_skipped,
-        other_info: Some(progress_status),
+        other_info: Some(progress.join(", ")),
     })
 }
 
@@ -227,3 +258,138 @@ pub async fn delete_sonobuoy(kubeconfig_path: &str) -> Result<(), error::Error>
 
     Ok(())
 }
+
+// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
+
+#[cfg(test)]
+mod test_sonobuoy {
+    use super::*;
+    use serde_json::json;
+
+    #[test]
+    fn test_process_results_pass() {
+        let result =
+            process_sonobuoy_test_results(
+                &json!({"plugins":[{"plugin":"e2e","node":"global","status":"complete","result-status":"passed","result-counts":{"passed":6}}]})).unwrap();
+        assert_eq!(result.num_passed, 6);
+        assert_eq!(result.num_failed, 0);
+        assert_eq!(result.num_skipped, 0);
+        assert_eq!(result.outcome, Outcome::Pass);
+        assert_eq!(result.other_info.unwrap(), "");
+    }
+
+    #[test]
+    fn test_process_results_failed() {
+        let result =
+            process_sonobuoy_test_results(
+                &json!({"plugins":[{"plugin":"e2e","node":"global","status":"complete","result-status":"failed","result-counts":{"failed":1}}]})).unwrap();
+        assert_eq!(result.num_passed, 0);
+        assert_eq!(result.num_failed, 1);
+        assert_eq!(result.num_skipped, 0);
+        assert_eq!(result.outcome, Outcome::Fail);
+        assert_eq!(result.other_info.unwrap(), "");
+    }
+
+    #[test]
+    fn test_process_results_timeout() {
+        let result =
+            process_sonobuoy_test_results(
+                &json!({"plugins":[{"plugin":"e2e","node":"global","status":"complete","result-status":"timed-out","result-counts":{"failed":1}}]})).unwrap();
+        assert_eq!(result.num_passed, 0);
+        assert_eq!(result.num_failed, 1);
+        assert_eq!(result.num_skipped, 0);
+        assert_eq!(result.outcome, Outcome::Timeout);
+        assert_eq!(result.other_info.unwrap(), "");
+    }
+
+    #[test]
+    fn test_process_results_multiple_pass() {
+        // All plugins must pass to report a passing status.
+        let result =
+            process_sonobuoy_test_results(
+                &json!({
+                "plugins":[
+                    {"plugin":"smoketest","node":"global","status":"complete","result-status":"pass","result-counts":{"passed":1}},
+                    {"plugin":"workload","node":"global","status":"complete","result-status":"pass","result-counts":{"passed":1,"skipped":1}},
+                ]})
+            ).unwrap();
+        assert_eq!(result.num_passed, 2);
+        assert_eq!(result.num_failed, 0);
+        assert_eq!(result.num_skipped, 1);
+        assert_eq!(result.outcome, Outcome::Pass);
+        assert_eq!(result.other_info.unwrap(), "");
+    }
+
+    #[test]
+    fn test_process_results_multiple_pass_and_fail() {
+        // Verify that if one plugin fails, the overall status is reported as a failure.
+        let result =
+            process_sonobuoy_test_results(
+                &json!({
+                "plugins":[
+                    {"plugin":"smoketest","node":"global","status":"complete","result-status":"pass","result-counts":{"passed":1}},
+                    {"plugin":"workload","node":"global","status":"complete","result-status":"fail","result-counts":{"failed":1,"skipped":1}},
+                ]})
+            ).unwrap();
+        assert_eq!(result.num_passed, 1);
+        assert_eq!(result.num_failed, 1);
+        assert_eq!(result.num_skipped, 1);
+        assert_eq!(result.outcome, Outcome::Fail);
+        assert_eq!(result.other_info.unwrap(), "");
+    }
+
+    #[test]
+    fn test_process_results_multiple_pass_and_timeout() {
+        // Timeout takes precedence over passing.
+        let result =
+            process_sonobuoy_test_results(
+                &json!({
+                "plugins":[
+                    {"plugin":"smoketest","node":"global","status":"complete","result-status":"pass","result-counts":{"passed":1}},
+                    {"plugin":"workload","node":"global","status":"complete","result-status":"timeout","result-counts":{"failed":1,"skipped":1}},
+                ]})
+            ).unwrap();
+        assert_eq!(result.num_passed, 1);
+        assert_eq!(result.num_failed, 1);
+        assert_eq!(result.num_skipped, 1);
+        assert_eq!(result.outcome, Outcome::Timeout);
+        assert_eq!(result.other_info.unwrap(), "");
+    }
+
+    #[test]
+    fn test_process_results_multiple_timeout_and_failure() {
+        // Failure takes precedence over timeout.
+        let result =
+            process_sonobuoy_test_results(
+                &json!({
+                "plugins":[
+                    {"plugin":"smoketest","node":"global","status":"complete","result-status":"failed","result-counts":{"failed":1}},
+                    {"plugin":"workload","node":"global","status":"complete","result-status":"timeout","result-counts":{"skipped":1}},
+                ]})
+            ).unwrap();
+        assert_eq!(result.num_passed, 0);
+        assert_eq!(result.num_failed, 1);
+        assert_eq!(result.num_skipped, 1);
+        assert_eq!(result.outcome, Outcome::Fail);
+        assert_eq!(result.other_info.unwrap(), "");
+    }
+
+    #[test]
+    fn test_process_results_other_info() {
+        // Progress logs from each plugin are collected into `other_info`.
+        let result =
+            process_sonobuoy_test_results(
+                &json!({
+                "plugins":[
+                    {"plugin":"smoketest","progress":"one","status":"complete","result-status":"pass","result-counts":{"passed":1}},
+                    {"plugin":"workload","progress":"two","status":"complete","result-status":"pass","result-counts":{"passed":1,"skipped":1}},
+                ]})
+            ).unwrap();
+        assert_eq!(result.num_passed, 2);
+        assert_eq!(result.num_failed, 0);
+        assert_eq!(result.num_skipped, 1);
+        assert_eq!(result.outcome, Outcome::Pass);
+        assert_eq!(result.other_info.unwrap(), "smoketest: one, workload: two");
+    }
+}
diff --git a/bottlerocket/agents/src/workload.rs b/bottlerocket/agents/src/workload.rs
new file mode 100644
index 000000000..ee7aca692
--- /dev/null
+++ b/bottlerocket/agents/src/workload.rs
@@ -0,0 +1,155 @@
+use crate::error;
+use crate::sonobuoy::process_sonobuoy_test_results;
+use bottlerocket_types::agent_config::{WorkloadConfig, WORKLOAD_RESULTS_FILENAME};
+use log::{info, trace};
+use model::TestResults;
+use snafu::{ensure, ResultExt};
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
+use std::process::Command;
+
+const SONOBUOY_BIN_PATH: &str = "/usr/bin/sonobuoy";
+
+/// Runs the workload conformance tests according to the provided configuration and returns a test
+/// result at the end.
+pub async fn run_workload(
+    kubeconfig_path: &str,
+    workload_config: &WorkloadConfig,
+    results_dir: &Path,
+) -> Result<TestResults, error::Error> {
+    info!("Processing workload test plugins");
+    let mut plugin_test_args: Vec<String> = Vec::new();
+    for (id, plugin) in workload_config.plugins.iter().enumerate() {
+        info!("Initializing test {}-{}", id, plugin.name);
+        let plugin_yaml = format!("{}-plugin.yaml", plugin.name);
+        let output = Command::new(SONOBUOY_BIN_PATH)
+            .arg("gen")
+            .arg("plugin")
+            .arg("--name")
+            .arg(plugin.name.clone())
+            .arg("--image")
+            .arg(plugin.image.clone())
+            .output()
+            .context(error::WorkloadProcessSnafu)?;
+        ensure!(
+            output.status.success(),
+            error::WorkloadPluginSnafu {
+                plugin: plugin.name.clone()
+            }
+        );
+
+        // Write out the output to a file we can reference later
+        let mut f = File::create(plugin_yaml.clone()).context(error::WorkloadProcessSnafu)?;
+        f.write_all(&output.stdout)
+            .context(error::WorkloadProcessSnafu)?;
+
+        // Add plugin to the arguments to be passed to sonobuoy run
+        plugin_test_args.append(&mut vec!["--plugin".to_string(), plugin_yaml.to_string()]);
+    }
+
+    info!("Running workload");
+    let kubeconfig_arg = vec!["--kubeconfig", kubeconfig_path];
+    let status = Command::new(SONOBUOY_BIN_PATH)
+        .args(kubeconfig_arg.to_owned())
+        .arg("run")
+        .arg("--wait")
+        .arg("--namespace")
+        .arg("testsys-workload")
+        .args(plugin_test_args)
+        .status()
+        .context(error::WorkloadProcessSnafu)?;
+    info!("Workload testing has completed, checking results");
+
+    // TODO - log something or check what happened?
+    ensure!(status.success(), error::WorkloadRunSnafu);
+
+    results_workload(kubeconfig_path, workload_config, results_dir)
+}
+
+/// Reruns the failed tests from a workload conformance run that was already started by this agent.
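+///
+/// A minimal calling sketch (hypothetical paths; in the agent these values come from the
+/// `Runner` implementation in `k8s-workload-agent`):
+///
+/// ```no_run
+/// # use bottlerocket_agents::error::Error;
+/// # use bottlerocket_agents::workload::rerun_failed_workload;
+/// # use bottlerocket_types::agent_config::WorkloadConfig;
+/// # use std::path::Path;
+/// # async fn example(config: &WorkloadConfig) -> Result<(), Error> {
+/// let results = rerun_failed_workload(
+///     "/local/test-cluster.kubeconfig",
+///     config,
+///     Path::new("/output"),
+/// )
+/// .await?;
+/// println!("{} tests still failing", results.num_failed);
+/// # Ok(())
+/// # }
+/// ```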
+pub async fn rerun_failed_workload(
+    kubeconfig_path: &str,
+    workload_config: &WorkloadConfig,
+    results_dir: &Path,
+) -> Result<TestResults, error::Error> {
+    let kubeconfig_arg = vec!["--kubeconfig", kubeconfig_path];
+    let results_filepath = results_dir.join(WORKLOAD_RESULTS_FILENAME);
+
+    info!("Rerunning workload");
+    let status = Command::new(SONOBUOY_BIN_PATH)
+        .args(kubeconfig_arg.to_owned())
+        .arg("run")
+        .arg("--wait")
+        .arg("--namespace")
+        .arg("testsys-workload")
+        .arg("--rerun-failed")
+        .arg(results_filepath.as_os_str())
+        .status()
+        .context(error::WorkloadProcessSnafu)?;
+    info!("Workload testing has completed, checking results");
+
+    // TODO - log something or check what happened?
+    ensure!(status.success(), error::WorkloadRunSnafu);
+
+    results_workload(kubeconfig_path, workload_config, results_dir)
+}
+
+/// Retrieve the results from a workload test and convert them into `TestResults`.
+pub fn results_workload(
+    kubeconfig_path: &str,
+    _: &WorkloadConfig,
+    results_dir: &Path,
+) -> Result<TestResults, error::Error> {
+    let kubeconfig_arg = vec!["--kubeconfig", kubeconfig_path];
+
+    info!("Running workload retrieve");
+    let results_filepath = results_dir.join(WORKLOAD_RESULTS_FILENAME);
+    let status = Command::new(SONOBUOY_BIN_PATH)
+        .args(kubeconfig_arg.to_owned())
+        .arg("retrieve")
+        .arg("--namespace")
+        .arg("testsys-workload")
+        .arg("--filename")
+        .arg(results_filepath.as_os_str())
+        .status()
+        .context(error::WorkloadProcessSnafu)?;
+    ensure!(status.success(), error::WorkloadRunSnafu);
+
+    info!("Getting workload status");
+    let run_result = Command::new(SONOBUOY_BIN_PATH)
+        .args(kubeconfig_arg)
+        .arg("status")
+        .arg("--json")
+        .arg("--namespace")
+        .arg("testsys-workload")
+        .output()
+        .context(error::WorkloadProcessSnafu)?;
+
+    let stdout = String::from_utf8_lossy(&run_result.stdout);
+    info!("Parsing the following workload results output:\n{}", stdout);
+
+    trace!("Parsing workload results as json");
+    let run_status: serde_json::Value =
+        serde_json::from_str(&stdout).context(error::DeserializeJsonSnafu)?;
+    trace!("The workload results are valid json");
+
+    process_sonobuoy_test_results(&run_status)
+}
+
+/// Deletes all workload namespaces and associated resources in the target K8s cluster
+pub async fn delete_workload(kubeconfig_path: &str) -> Result<(), error::Error> {
+    let kubeconfig_arg = vec!["--kubeconfig", kubeconfig_path];
+    info!("Deleting workload resources from cluster");
+    let status = Command::new(SONOBUOY_BIN_PATH)
+        .args(kubeconfig_arg)
+        .arg("delete")
+        .arg("--namespace")
+        .arg("testsys-workload")
+        .arg("--wait")
+        .status()
+        .context(error::WorkloadProcessSnafu)?;
+    ensure!(status.success(), error::WorkloadDeleteSnafu);
+
+    Ok(())
+}
diff --git a/bottlerocket/types/src/agent_config.rs b/bottlerocket/types/src/agent_config.rs
index ec5a81def..9fcb84a3f 100644
--- a/bottlerocket/types/src/agent_config.rs
+++ b/bottlerocket/types/src/agent_config.rs
@@ -12,6 +12,7 @@ use std::str::FromStr;
 pub const AWS_CREDENTIALS_SECRET_NAME: &str = "awsCredentials";
 pub const SONOBUOY_RESULTS_FILENAME: &str = "sonobuoy-results.tar.gz";
 pub const VSPHERE_CREDENTIALS_SECRET_NAME: &str = "vsphereCredentials";
+pub const WORKLOAD_RESULTS_FILENAME: &str = "workload-results.tar.gz";
 
 #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default)]
 #[serde(rename_all = "camelCase")]
@@ -477,3 +478,17 @@ fn k8s_version_valid() {
     assert_eq!("v1.21.3", k8s_version.full_version_with_v());
     assert_eq!("1.21.3", k8s_version.full_version_without_v());
 }
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct WorkloadTest {
+    pub name: String,
+    pub image: String,
+}
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize, Configuration, Builder)]
+#[serde(rename_all = "camelCase")]
+#[crd("Test")]
+pub struct WorkloadConfig {
+    pub kubeconfig_base64: String,
+    pub plugins: Vec<WorkloadTest>,
+}
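+
+// A serialization sketch (not part of the agent flow): `WorkloadConfig` renames its fields to
+// camelCase on the wire, matching the example manifest in the agent's docs. Assumes `serde_json`
+// is available to this crate's tests; the values are hypothetical.
+#[test]
+fn workload_config_camel_case() {
+    let config = WorkloadConfig {
+        kubeconfig_base64: "ZXhhbXBsZQo=".to_string(),
+        plugins: vec![WorkloadTest {
+            name: "nvidia-workload".to_string(),
+            image: "testsys-nvidia-workload-test:v0.0.3".to_string(),
+        }],
+    };
+    let json = serde_json::to_string(&config).unwrap();
+    assert!(json.contains("\"kubeconfigBase64\""));
+    assert!(json.contains("\"nvidia-workload\""));
+}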