From 6a982a129106b68edbf0aac18ff58aac9bdbf36c Mon Sep 17 00:00:00 2001
From: karencfv
Date: Thu, 7 Nov 2024 18:16:17 +1300
Subject: [PATCH 01/20] initial structure

---
 Cargo.lock                                 |  13 +-
 Cargo.toml                                 |   1 +
 clickhouse-admin/Cargo.toml                |   1 +
 clickhouse-admin/tests/integration_test.rs | 539 +++++++++++----------
 4 files changed, 302 insertions(+), 252 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 03a4c9fc0c..c1ec792eba 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1896,6 +1896,16 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "ctor"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f"
+dependencies = [
+ "quote",
+ "syn 2.0.79",
+]
+
 [[package]]
 name = "ctr"
 version = "0.9.2"
@@ -4837,7 +4847,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
 dependencies = [
  "cfg-if",
- "windows-targets 0.48.5",
+ "windows-targets 0.52.6",
 ]
 
 [[package]]
@@ -6305,6 +6315,7 @@ dependencies = [
  "clickhouse-admin-api",
  "clickhouse-admin-types",
  "clickward",
+ "ctor",
  "dropshot",
  "expectorate",
  "http 1.1.0",
diff --git a/Cargo.toml b/Cargo.toml
index ab82cbef54..e7d5eecfce 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -338,6 +338,7 @@ crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", re
 crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "b7b9d5660b28ca5e865242b2bdecd032c0852d40" }
 crucible-common = { git = "https://github.com/oxidecomputer/crucible", rev = "b7b9d5660b28ca5e865242b2bdecd032c0852d40" }
 csv = "1.3.0"
+ctor = "0.2.8"
 curve25519-dalek = "4"
 datatest-stable = "0.2.9"
 display-error-chain = "0.2.2"
diff --git a/clickhouse-admin/Cargo.toml b/clickhouse-admin/Cargo.toml
index 84b04f6caa..e7f253028b 100644
--- a/clickhouse-admin/Cargo.toml
+++ b/clickhouse-admin/Cargo.toml
@@ -31,6 +31,7 @@ omicron-workspace-hack.workspace = true
 [dev-dependencies]
 clickward.workspace = true
+ctor.workspace = true
 dropshot.workspace = true
 expectorate.workspace = true
 nexus-test-utils.workspace = true
diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index eb26bec668..4c91ee5d4a 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -18,264 +18,301 @@ use std::collections::BTreeSet;
 use std::net::{Ipv6Addr, SocketAddrV6};
 use std::str::FromStr;
 
-#[tokio::test]
-async fn test_lgif_parsing() -> anyhow::Result<()> {
-    let logctx = test_setup_log("test_lgif_parsing");
-    let log = logctx.log.clone();
+use ctor::{ctor, dtor};
+use std::sync::Once;
 
-    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
-    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
-    std::fs::create_dir(&path)?;
+//static TEARDOWN: Once = Once::new();
 
-    // We spin up several replicated clusters and must use a
-    // separate set of ports in case the tests run concurrently.
-    let base_ports = BasePorts {
-        keeper: 29000,
-        raft: 29100,
-        clickhouse_tcp: 29200,
-        clickhouse_http: 29300,
-        clickhouse_interserver_http: 29400,
-    };
+//#[ctor] // This is executed before any test function runs
+//fn before_tests() {
+//    println!("Before any tests");
+//}
 
-    let config = DeploymentConfig {
-        path: path.clone(),
-        base_ports,
-        cluster_name: "oximeter_cluster".to_string(),
-    };
-
-    let mut deployment = Deployment::new(config);
-
-    // We only need a single keeper to test the lgif command
-    let num_keepers = 1;
-    let num_replicas = 1;
-    deployment
-        .generate_config(num_keepers, num_replicas)
-        .context("failed to generate config")?;
-    deployment.deploy().context("failed to deploy")?;
-
-    wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
-
-    let clickhouse_cli = ClickhouseCli::new(
-        Utf8PathBuf::from_str("clickhouse").unwrap(),
-        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
-    )
-    .with_log(log.clone());
-
-    let lgif = clickhouse_cli.lgif().await.unwrap();
-
-    // The first log index from a newly created cluster should always be 1
-    assert_eq!(lgif.first_log_idx, 1);
-
-    info!(&log, "Cleaning up test");
-    deployment.teardown()?;
-    std::fs::remove_dir_all(path)?;
-    logctx.cleanup_successful();
-    Ok(())
+fn teardown() {
+    println!("Teardown after all tests: test");
 }
 
-#[tokio::test]
-async fn test_raft_config_parsing() -> anyhow::Result<()> {
-    let logctx = test_setup_log("test_raft_config_parsing");
-    let log = logctx.log.clone();
-
-    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
-    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
-    std::fs::create_dir(&path)?;
-
-    // We spin up several replicated clusters and must use a
-    // separate set of ports in case the tests run concurrently.
-    let base_ports = BasePorts {
-        keeper: 29500,
-        raft: 29600,
-        clickhouse_tcp: 29700,
-        clickhouse_http: 29800,
-        clickhouse_interserver_http: 29900,
-    };
-
-    let config = DeploymentConfig {
-        path: path.clone(),
-        base_ports,
-        cluster_name: "oximeter_cluster".to_string(),
-    };
-
-    let mut deployment = Deployment::new(config);
-
-    let num_keepers = 3;
-    let num_replicas = 1;
-    deployment
-        .generate_config(num_keepers, num_replicas)
-        .context("failed to generate config")?;
-    deployment.deploy().context("failed to deploy")?;
-
-    wait_for_keepers(
-        &log,
-        &deployment,
-        (1..=num_keepers).map(clickward::KeeperId).collect(),
-    )
-    .await?;
-
-    let clickhouse_cli = ClickhouseCli::new(
-        Utf8PathBuf::from_str("clickhouse").unwrap(),
-        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29501, 0, 0),
-    )
-    .with_log(log.clone());
-
-    let raft_config = clickhouse_cli.raft_config().await.unwrap();
-
-    let mut keeper_servers = BTreeSet::new();
-
-    for i in 1..=num_keepers {
-        let raft_port = u16::try_from(29600 + i).unwrap();
-        keeper_servers.insert(KeeperServerInfo {
-            server_id: clickhouse_admin_types::KeeperId(i),
-            host: ClickhouseHost::Ipv6("::1".parse().unwrap()),
-            raft_port,
-            server_type: KeeperServerType::Participant,
-            priority: 1,
-        });
-    }
-
-    let expected_raft_config = RaftConfig { keeper_servers };
-
-    assert_eq!(raft_config, expected_raft_config);
-
-    info!(&log, "Cleaning up test");
-    deployment.teardown()?;
-    std::fs::remove_dir_all(path)?;
-    logctx.cleanup_successful();
-    Ok(())
-}
-
-#[tokio::test]
-async fn test_keeper_conf_parsing() -> anyhow::Result<()> {
-    let logctx = test_setup_log("test_keeper_conf_parsing");
-    let log = logctx.log.clone();
-
-    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
-    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
-    std::fs::create_dir(&path)?;
-
-    // We spin up several replicated clusters and must use a
-    // separate set of ports in case the tests run concurrently.
-    let base_ports = BasePorts {
-        keeper: 30000,
-        raft: 30100,
-        clickhouse_tcp: 30200,
-        clickhouse_http: 30300,
-        clickhouse_interserver_http: 30400,
-    };
-
-    let config = DeploymentConfig {
-        path: path.clone(),
-        base_ports,
-        cluster_name: "oximeter_cluster".to_string(),
-    };
-
-    let mut deployment = Deployment::new(config);
-
-    // We only need a single keeper to test the conf command
-    let num_keepers = 1;
-    let num_replicas = 1;
-    deployment
-        .generate_config(num_keepers, num_replicas)
-        .context("failed to generate config")?;
-    deployment.deploy().context("failed to deploy")?;
-
-    wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
-
-    let clickhouse_cli = ClickhouseCli::new(
-        Utf8PathBuf::from_str("clickhouse").unwrap(),
-        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 30001, 0, 0),
-    )
-    .with_log(log.clone());
-
-    let conf = clickhouse_cli.keeper_conf().await.unwrap();
-
-    assert_eq!(conf.server_id, clickhouse_admin_types::KeeperId(1));
-
-    info!(&log, "Cleaning up test");
-    deployment.teardown()?;
-    std::fs::remove_dir_all(path)?;
-    logctx.cleanup_successful();
-    Ok(())
-}
-
-#[tokio::test]
-async fn test_keeper_cluster_membership() -> anyhow::Result<()> {
-    let logctx = test_setup_log("test_keeper_cluster_membership");
-    let log = logctx.log.clone();
-
-    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
-    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
-    std::fs::create_dir(&path)?;
-
-    // We spin up several replicated clusters and must use a
-    // separate set of ports in case the tests run concurrently.
-    let base_ports = BasePorts {
-        keeper: 30500,
-        raft: 30600,
-        clickhouse_tcp: 30700,
-        clickhouse_http: 30800,
-        clickhouse_interserver_http: 30900,
-    };
-
-    let config = DeploymentConfig {
-        path: path.clone(),
-        base_ports,
-        cluster_name: "oximeter_cluster".to_string(),
-    };
-
-    let mut deployment = Deployment::new(config);
-
-    let num_keepers = 3;
-    let num_replicas = 1;
-    deployment
-        .generate_config(num_keepers, num_replicas)
-        .context("failed to generate config")?;
-    deployment.deploy().context("failed to deploy")?;
-
-    wait_for_keepers(
-        &log,
-        &deployment,
-        (1..=num_keepers).map(clickward::KeeperId).collect(),
-    )
-    .await?;
-
-    let clickhouse_cli = ClickhouseCli::new(
-        Utf8PathBuf::from_str("clickhouse").unwrap(),
-        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 30501, 0, 0),
-    )
-    .with_log(log.clone());
-
-    let keeper_cluster_membership =
-        clickhouse_cli.keeper_cluster_membership().await.unwrap();
-
-    let mut raft_config = BTreeSet::new();
-
-    for i in 1..=num_keepers {
-        raft_config.insert(clickhouse_admin_types::KeeperId(i));
-    }
-
-    let expected_keeper_cluster_membership =
-        ClickhouseKeeperClusterMembership {
-            queried_keeper: KeeperId(1),
-            // This number is always different so we won't be testing it
-            leader_committed_log_index: 0,
-            raft_config,
-        };
-
-    assert_eq!(
-        keeper_cluster_membership.queried_keeper,
-        expected_keeper_cluster_membership.queried_keeper
-    );
-    assert_eq!(
-        keeper_cluster_membership.raft_config,
-        expected_keeper_cluster_membership.raft_config
-    );
-
-    info!(&log, "Cleaning up test");
-    deployment.teardown()?;
-    std::fs::remove_dir_all(path)?;
-    logctx.cleanup_successful();
-    Ok(())
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_1() {
+        println!("Running test 1...");
+    }
+
+    #[test]
+    fn test_2() {
+        println!("Running test 2...");
+    }
+
+    // Ensure teardown after all tests finish
+    #[cfg(test)]
+    #[dtor] // Cleanup to be run when tests finish
+    fn after_tests() {
+        println!("Cleanup after tests: test");
+        teardown();
+    }
 }
+
+//#[tokio::test]
+//async fn test_lgif_parsing() -> anyhow::Result<()> {
+//    let logctx = test_setup_log("test_lgif_parsing");
+//    let log = logctx.log.clone();
+//
+//    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
+//    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
+//    std::fs::create_dir(&path)?;
+//
+//    // We spin up several replicated clusters and must use a
+//    // separate set of ports in case the tests run concurrently.
+//    let base_ports = BasePorts {
+//        keeper: 29000,
+//        raft: 29100,
+//        clickhouse_tcp: 29200,
+//        clickhouse_http: 29300,
+//        clickhouse_interserver_http: 29400,
+//    };
+//
+//    let config = DeploymentConfig {
+//        path: path.clone(),
+//        base_ports,
+//        cluster_name: "oximeter_cluster".to_string(),
+//    };
+//
+//    let mut deployment = Deployment::new(config);
+//
+//    // We only need a single keeper to test the lgif command
+//    let num_keepers = 1;
+//    let num_replicas = 1;
+//    deployment
+//        .generate_config(num_keepers, num_replicas)
+//        .context("failed to generate config")?;
+//    deployment.deploy().context("failed to deploy")?;
+//
+//    wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
+//
+//    let clickhouse_cli = ClickhouseCli::new(
+//        Utf8PathBuf::from_str("clickhouse").unwrap(),
+//        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
+//    )
+//    .with_log(log.clone());
+//
+//    let lgif = clickhouse_cli.lgif().await.unwrap();
+//
+//    // The first log index from a newly created cluster should always be 1
+//    assert_eq!(lgif.first_log_idx, 1);
+//
+//    info!(&log, "Cleaning up test");
+//    deployment.teardown()?;
+//    std::fs::remove_dir_all(path)?;
+//    logctx.cleanup_successful();
+//    Ok(())
+//}
+
+//#[tokio::test]
+//async fn test_raft_config_parsing() -> anyhow::Result<()> {
+//    let logctx = test_setup_log("test_raft_config_parsing");
+//    let log = logctx.log.clone();
+//
+//    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
+//    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
+//    std::fs::create_dir(&path)?;
+//
+//    // We spin up several replicated clusters and must use a
+//    // separate set of ports in case the tests run concurrently.
+//    let base_ports = BasePorts {
+//        keeper: 29500,
+//        raft: 29600,
+//        clickhouse_tcp: 29700,
+//        clickhouse_http: 29800,
+//        clickhouse_interserver_http: 29900,
+//    };
+//
+//    let config = DeploymentConfig {
+//        path: path.clone(),
+//        base_ports,
+//        cluster_name: "oximeter_cluster".to_string(),
+//    };
+//
+//    let mut deployment = Deployment::new(config);
+//
+//    let num_keepers = 3;
+//    let num_replicas = 1;
+//    deployment
+//        .generate_config(num_keepers, num_replicas)
+//        .context("failed to generate config")?;
+//    deployment.deploy().context("failed to deploy")?;
+//
+//    wait_for_keepers(
+//        &log,
+//        &deployment,
+//        (1..=num_keepers).map(clickward::KeeperId).collect(),
+//    )
+//    .await?;
+//
+//    let clickhouse_cli = ClickhouseCli::new(
+//        Utf8PathBuf::from_str("clickhouse").unwrap(),
+//        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29501, 0, 0),
+//    )
+//    .with_log(log.clone());
+//
+//    let raft_config = clickhouse_cli.raft_config().await.unwrap();
+//
+//    let mut keeper_servers = BTreeSet::new();
+//
+//    for i in 1..=num_keepers {
+//        let raft_port = u16::try_from(29600 + i).unwrap();
+//        keeper_servers.insert(KeeperServerInfo {
+//            server_id: clickhouse_admin_types::KeeperId(i),
+//            host: ClickhouseHost::Ipv6("::1".parse().unwrap()),
+//            raft_port,
+//            server_type: KeeperServerType::Participant,
+//            priority: 1,
+//        });
+//    }
+//
+//    let expected_raft_config = RaftConfig { keeper_servers };
+//
+//    assert_eq!(raft_config, expected_raft_config);
+//
+//    info!(&log, "Cleaning up test");
+//    deployment.teardown()?;
+//    std::fs::remove_dir_all(path)?;
+//    logctx.cleanup_successful();
+//    Ok(())
+//}
+
+//#[tokio::test]
+//async fn test_keeper_conf_parsing() -> anyhow::Result<()> {
+//    let logctx = test_setup_log("test_keeper_conf_parsing");
+//    let log = logctx.log.clone();
+//
+//    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
+//    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
+//    std::fs::create_dir(&path)?;
+//
+//    // We spin up several replicated clusters and must use a
+//    // separate set of ports in case the tests run concurrently.
+//    let base_ports = BasePorts {
+//        keeper: 30000,
+//        raft: 30100,
+//        clickhouse_tcp: 30200,
+//        clickhouse_http: 30300,
+//        clickhouse_interserver_http: 30400,
+//    };
+//
+//    let config = DeploymentConfig {
+//        path: path.clone(),
+//        base_ports,
+//        cluster_name: "oximeter_cluster".to_string(),
+//    };
+//
+//    let mut deployment = Deployment::new(config);
+//
+//    // We only need a single keeper to test the conf command
+//    let num_keepers = 1;
+//    let num_replicas = 1;
+//    deployment
+//        .generate_config(num_keepers, num_replicas)
+//        .context("failed to generate config")?;
+//    deployment.deploy().context("failed to deploy")?;
+//
+//    wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
+//
+//    let clickhouse_cli = ClickhouseCli::new(
+//        Utf8PathBuf::from_str("clickhouse").unwrap(),
+//        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 30001, 0, 0),
+//    )
+//    .with_log(log.clone());
+//
+//    let conf = clickhouse_cli.keeper_conf().await.unwrap();
+//
+//    assert_eq!(conf.server_id, clickhouse_admin_types::KeeperId(1));
+//
+//    info!(&log, "Cleaning up test");
+//    deployment.teardown()?;
+//    std::fs::remove_dir_all(path)?;
+//    logctx.cleanup_successful();
+//    Ok(())
+//}
+
+//#[tokio::test]
+//async fn test_keeper_cluster_membership() -> anyhow::Result<()> {
+//    let logctx = test_setup_log("test_keeper_cluster_membership");
+//    let log = logctx.log.clone();
+//
+//    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
+//    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
+//    std::fs::create_dir(&path)?;
+//
+//    // We spin up several replicated clusters and must use a
+//    // separate set of ports in case the tests run concurrently.
+//    let base_ports = BasePorts {
+//        keeper: 30500,
+//        raft: 30600,
+//        clickhouse_tcp: 30700,
+//        clickhouse_http: 30800,
+//        clickhouse_interserver_http: 30900,
+//    };
+//
+//    let config = DeploymentConfig {
+//        path: path.clone(),
+//        base_ports,
+//        cluster_name: "oximeter_cluster".to_string(),
+//    };
+//
+//    let mut deployment = Deployment::new(config);
+//
+//    let num_keepers = 3;
+//    let num_replicas = 1;
+//    deployment
+//        .generate_config(num_keepers, num_replicas)
+//        .context("failed to generate config")?;
+//    deployment.deploy().context("failed to deploy")?;
+//
+//    wait_for_keepers(
+//        &log,
+//        &deployment,
+//        (1..=num_keepers).map(clickward::KeeperId).collect(),
+//    )
+//    .await?;
+//
+//    let clickhouse_cli = ClickhouseCli::new(
+//        Utf8PathBuf::from_str("clickhouse").unwrap(),
+//        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 30501, 0, 0),
+//    )
+//    .with_log(log.clone());
+//
+//    let keeper_cluster_membership =
+//        clickhouse_cli.keeper_cluster_membership().await.unwrap();
+//
+//    let mut raft_config = BTreeSet::new();
+//
+//    for i in 1..=num_keepers {
+//        raft_config.insert(clickhouse_admin_types::KeeperId(i));
+//    }
+//
+//    let expected_keeper_cluster_membership =
+//        ClickhouseKeeperClusterMembership {
+//            queried_keeper: KeeperId(1),
+//            // This number is always different so we won't be testing it
+//            leader_committed_log_index: 0,
+//            raft_config,
+//        };
+//
+//    assert_eq!(
+//        keeper_cluster_membership.queried_keeper,
+//        expected_keeper_cluster_membership.queried_keeper
+//    );
+//    assert_eq!(
+//        keeper_cluster_membership.raft_config,
+//        expected_keeper_cluster_membership.raft_config
+//    );
+//
+//    info!(&log, "Cleaning up test");
+//    deployment.teardown()?;
+//    std::fs::remove_dir_all(path)?;
+//    logctx.cleanup_successful();
+//    Ok(())
+//}
From 18162e824904a303b69b3cd1502f684ef4380a5f Mon Sep 17 00:00:00 2001
From: karencfv
Date: Thu, 7 Nov 2024 20:19:40 +1300
Subject: [PATCH 02/20] setup

---
 clickhouse-admin/tests/integration_test.rs | 23 +++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index 4c91ee5d4a..017b8a7ebb 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -21,12 +21,15 @@ use std::str::FromStr;
 use ctor::{ctor, dtor};
 use std::sync::Once;
 
-//static TEARDOWN: Once = Once::new();
+static SETUP: Once = Once::new();
 
-//#[ctor] // This is executed before any test function runs
-//fn before_tests() {
-//    println!("Before any tests");
-//}
+// TODO: Do setup with nextest. It does setup twice with nextest
+// for some reason, but with cargo test it only does setup once
+fn setup() {
+    SETUP.call_once(|| {
+        println!("Setup before tests: test");
+    });
+}
 
 fn teardown() {
     println!("Teardown after all tests: test");
@@ -36,13 +39,15 @@ fn teardown() {
 mod tests {
     use super::*;
 
-    #[test]
-    fn test_1() {
+    #[tokio::test]
+    async fn test_1() {
+        setup();
         println!("Running test 1...");
     }
 
-    #[test]
-    fn test_2() {
+    #[tokio::test]
+    async fn test_2() {
+        setup();
         println!("Running test 2...");
     }
 
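Patch 02 swaps the `ctor` hook for lazy setup guarded by `std::sync::Once`. A small self-contained sketch of that pattern: `call_once` runs its closure the first time any test reaches it, and every later call in the same process is a no-op — which also explains the TODO, since nextest's process-per-test model gives each test process its own `Once`:

```rust
// Sketch of the Once-guarded setup pattern from patch 02. Within one process
// the closure runs exactly once, no matter how many tests call setup().
use std::sync::Once;

static SETUP: Once = Once::new();

fn setup() {
    SETUP.call_once(|| {
        println!("expensive one-time setup runs here");
    });
}

#[test]
fn first() {
    setup();
}

#[test]
fn second() {
    setup(); // no-op if `first` already ran setup in this process
}
```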
From 6d8529404929db7d6d48778289884ac1bbd0dbbb Mon Sep 17 00:00:00 2001
From: karencfv
Date: Fri, 8 Nov 2024 10:46:25 +1300
Subject: [PATCH 03/20] setup nextest

---
 .config/nextest.toml                         |  7 +++++++
 Cargo.lock                                   | 11 ++++++++++
 Cargo.toml                                   |  2 ++
 dev-tools/clickhouse-cluster-dev/Cargo.toml  | 16 +++++++++++++++
 dev-tools/clickhouse-cluster-dev/src/main.rs | 21 ++++++++++++++++++++
 5 files changed, 57 insertions(+)
 create mode 100644 dev-tools/clickhouse-cluster-dev/Cargo.toml
 create mode 100644 dev-tools/clickhouse-cluster-dev/src/main.rs

diff --git a/.config/nextest.toml b/.config/nextest.toml
index ee7117506a..9442070be2 100644
--- a/.config/nextest.toml
+++ b/.config/nextest.toml
@@ -23,6 +23,13 @@ fail-fast = false
 # invocations of nextest happen.
 command = 'cargo run -p crdb-seed --profile test'
 
+[[profile.default.scripts]]
+filter = 'package(omicron-clickhouse-admin)'
+setup = 'clickhouse-cluster'
+
+[script.clickhouse-cluster]
+command = 'cargo run -p clickhouse-cluster-dev'
+
 [test-groups]
 # The ClickHouse cluster tests currently rely on a hard-coded set of ports for
 # the nodes in the cluster. We would like to relax this in the future, at which
diff --git a/Cargo.lock b/Cargo.lock
index c1ec792eba..335655cb11 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1369,6 +1369,17 @@ dependencies = [
  "slog-term",
 ]
 
+[[package]]
+name = "clickhouse-cluster-dev"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "dropshot",
+ "omicron-workspace-hack",
+ "slog",
+ "tokio",
+]
+
 [[package]]
 name = "clickward"
 version = "0.1.0"
diff --git a/Cargo.toml b/Cargo.toml
index e7d5eecfce..a68193ecaf 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -25,6 +25,7 @@ members = [
     "cockroach-admin/types",
     "common",
     "dev-tools/cert-dev",
+    "dev-tools/clickhouse-cluster-dev",
     "dev-tools/ch-dev",
     "dev-tools/crdb-seed",
     "dev-tools/db-dev",
@@ -150,6 +151,7 @@ default-members = [
     "cockroach-admin/types",
     "common",
     "dev-tools/cert-dev",
+    "dev-tools/clickhouse-cluster-dev",
     "dev-tools/ch-dev",
     "dev-tools/crdb-seed",
     "dev-tools/db-dev",
diff --git a/dev-tools/clickhouse-cluster-dev/Cargo.toml b/dev-tools/clickhouse-cluster-dev/Cargo.toml
new file mode 100644
index 0000000000..a369ea099e
--- /dev/null
+++ b/dev-tools/clickhouse-cluster-dev/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "clickhouse-cluster-dev"
+version = "0.1.0"
+edition = "2021"
+license = "MPL-2.0"
+readme = "README.md"
+
+[lints]
+workspace = true
+
+[dependencies]
+anyhow.workspace = true
+dropshot.workspace = true
+slog.workspace = true
+tokio.workspace = true
+omicron-workspace-hack.workspace = true
diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs
new file mode 100644
index 0000000000..599769088a
--- /dev/null
+++ b/dev-tools/clickhouse-cluster-dev/src/main.rs
@@ -0,0 +1,21 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use anyhow::{Context, Result};
+use dropshot::{test_util::LogContext, ConfigLogging, ConfigLoggingLevel};
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let logctx = LogContext::new(
+        "clickhouse-cluster",
+        &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
+    );
+
+    slog::info!(
+        logctx.log,
+        "Setting up a ClickHouse cluster"
+    );
+
+    Ok(())
+}
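Patch 03 only wires up the scaffolding: the nextest setup script runs `clickhouse-cluster-dev` once before any `omicron-clickhouse-admin` test. For the tests to find the cluster afterwards, both sides must agree on the port layout. Below is a hypothetical helper showing the convention the tests rely on — it assumes each node's port is the base port plus the node ID, which is implied by keeper 1 being dialed at 29001 against a keeper base of 29000; `node_port` is not part of clickward's API:

```rust
// Hypothetical illustration of the fixed-port convention used by these tests.
fn node_port(base: u16, id: u16) -> u16 {
    base + id
}

fn main() {
    let keeper_base = 29000;
    // Matches SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0) in the tests.
    assert_eq!(node_port(keeper_base, 1), 29001);
    println!("keeper 1 -> port {}", node_port(keeper_base, 1));
}
```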
From 97a747b7eda41029a47bd62fe0aa73a37d267b39 Mon Sep 17 00:00:00 2001
From: karencfv
Date: Fri, 8 Nov 2024 13:19:35 +1300
Subject: [PATCH 04/20] start cluster during set up

---
 Cargo.lock                                   |   2 +
 clickhouse-admin/tests/integration_test.rs   | 184 ++++++++++++-------
 dev-tools/clickhouse-cluster-dev/Cargo.toml  |   2 +
 dev-tools/clickhouse-cluster-dev/src/main.rs |  42 ++++-
 4 files changed, 162 insertions(+), 68 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 335655cb11..1b379ddfb2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1374,8 +1374,10 @@ name = "clickhouse-cluster-dev"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "clickward",
  "dropshot",
  "omicron-workspace-hack",
+ "oximeter-test-utils",
  "slog",
  "tokio",
 ]
diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index 017b8a7ebb..0670157844 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -9,7 +9,8 @@ use clickhouse_admin_types::{
     KeeperServerInfo, KeeperServerType, RaftConfig,
 };
 use clickward::{BasePorts, Deployment, DeploymentConfig};
-use dropshot::test_util::log_prefix_for_test;
+use dropshot::{ConfigLogging, ConfigLoggingLevel};
+use dropshot::test_util::{log_prefix_for_test, LogContext};
 use omicron_clickhouse_admin::ClickhouseCli;
 use omicron_test_utils::dev::test_setup_log;
 use oximeter_test_utils::wait_for_keepers;
@@ -21,15 +22,15 @@ use std::str::FromStr;
 use ctor::{ctor, dtor};
 use std::sync::Once;
 
-static SETUP: Once = Once::new();
-
-// TODO: Do setup with nextest. It does setup twice with nextest
-// for some reason, but with cargo test it only does setup once
-fn setup() {
-    SETUP.call_once(|| {
-        println!("Setup before tests: test");
-    });
-}
+// static SETUP: Once = Once::new();
+//
+// // TODO: Do setup with nextest. It does setup twice with nextest
+// // for some reason, but with cargo test it only does setup once
+// fn setup() {
+//     SETUP.call_once(|| {
+//         println!("Setup before tests: test");
+//     });
+// }
 
 fn teardown() {
     println!("Teardown after all tests: test");
@@ -41,16 +42,68 @@ mod tests {
 
     #[tokio::test]
     async fn test_1() {
-        setup();
+        // setup();
         println!("Running test 1...");
     }
 
     #[tokio::test]
     async fn test_2() {
-        setup();
+        // setup();
         println!("Running test 2...");
     }
 
+    #[tokio::test]
+    async fn test_lgif_parsing() -> anyhow::Result<()> {
+        // TODO: Fix up log setup
+        let logctx = LogContext::new(
+            "clickhouse_cluster",
+            &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
+        );
+
+        let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name());
+        // TODO: Switch to "{prefix}_clickward_test" ?
+        let path = parent_dir.join(format!("clickward_test"));
+
+        let clickhouse_cli = ClickhouseCli::new(
+            Utf8PathBuf::from_str("clickhouse").unwrap(),
+            SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
+        )
+        .with_log(logctx.log.clone());
+
+        let lgif = clickhouse_cli.lgif().await.unwrap();
+
+        // The first log index from a newly created cluster should always be 1
+        assert_eq!(lgif.first_log_idx, 1);
+
+        info!(&logctx.log, "Cleaning up test");
+
+        // TODO: Move this code to teardown function
+        // TODO: Find another way to retrieve deployment
+
+        // We spin up several replicated clusters and must use a
+        // separate set of ports in case the tests run concurrently.
+        let base_ports = BasePorts {
+            keeper: 29000,
+            raft: 29100,
+            clickhouse_tcp: 29200,
+            clickhouse_http: 29300,
+            clickhouse_interserver_http: 29400,
+        };
+
+        let config = DeploymentConfig {
+            path: path.clone(),
+            base_ports,
+            cluster_name: "oximeter_cluster".to_string(),
+        };
+
+        let mut deployment = Deployment::new(config);
+        deployment.teardown()?;
+        std::fs::remove_dir_all(path)?;
+        logctx.cleanup_successful();
+        Ok(())
+    }
+
     // Ensure teardown after all tests finish
     #[cfg(test)]
     #[dtor] // Cleanup to be run when tests finish
@@ -60,60 +112,60 @@ mod tests {
     }
 }
 
-//#[tokio::test]
-//async fn test_lgif_parsing() -> anyhow::Result<()> {
-//    let logctx = test_setup_log("test_lgif_parsing");
-//    let log = logctx.log.clone();
-//
-//    let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
-//    let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
-//    std::fs::create_dir(&path)?;
-//
-//    // We spin up several replicated clusters and must use a
-//    // separate set of ports in case the tests run concurrently.
-//    let base_ports = BasePorts {
-//        keeper: 29000,
-//        raft: 29100,
-//        clickhouse_tcp: 29200,
-//        clickhouse_http: 29300,
-//        clickhouse_interserver_http: 29400,
-//    };
-//
-//    let config = DeploymentConfig {
-//        path: path.clone(),
-//        base_ports,
-//        cluster_name: "oximeter_cluster".to_string(),
-//    };
-//
-//    let mut deployment = Deployment::new(config);
-//
-//    // We only need a single keeper to test the lgif command
-//    let num_keepers = 1;
-//    let num_replicas = 1;
-//    deployment
-//        .generate_config(num_keepers, num_replicas)
-//        .context("failed to generate config")?;
-//    deployment.deploy().context("failed to deploy")?;
-//
-//    wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
-//
-//    let clickhouse_cli = ClickhouseCli::new(
-//        Utf8PathBuf::from_str("clickhouse").unwrap(),
-//        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
-//    )
-//    .with_log(log.clone());
-//
-//    let lgif = clickhouse_cli.lgif().await.unwrap();
-//
-//    // The first log index from a newly created cluster should always be 1
-//    assert_eq!(lgif.first_log_idx, 1);
-//
-//    info!(&log, "Cleaning up test");
-//    deployment.teardown()?;
-//    std::fs::remove_dir_all(path)?;
-//    logctx.cleanup_successful();
-//    Ok(())
-//}
+// #[tokio::test]
+// async fn test_lgif_parsing() -> anyhow::Result<()> {
+//     let logctx = test_setup_log("test_lgif_parsing");
+//     let log = logctx.log.clone();
+//
+//     let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
+//     let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
+//     std::fs::create_dir(&path)?;
+//
+//     // We spin up several replicated clusters and must use a
+//     // separate set of ports in case the tests run concurrently.
+//     let base_ports = BasePorts {
+//         keeper: 29000,
+//         raft: 29100,
+//         clickhouse_tcp: 29200,
+//         clickhouse_http: 29300,
+//         clickhouse_interserver_http: 29400,
+//     };
+//
+//     let config = DeploymentConfig {
+//         path: path.clone(),
+//         base_ports,
+//         cluster_name: "oximeter_cluster".to_string(),
+//     };
+//
+//     let mut deployment = Deployment::new(config);
+//
+//     // We only need a single keeper to test the lgif command
+//     let num_keepers = 1;
+//     let num_replicas = 1;
+//     deployment
+//         .generate_config(num_keepers, num_replicas)
+//         .context("failed to generate config")?;
+//     deployment.deploy().context("failed to deploy")?;
+//
+//     wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
+//
+//     let clickhouse_cli = ClickhouseCli::new(
+//         Utf8PathBuf::from_str("clickhouse").unwrap(),
+//         SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
+//     )
+//     .with_log(log.clone());
+//
+//     let lgif = clickhouse_cli.lgif().await.unwrap();
+//
+//     // The first log index from a newly created cluster should always be 1
+//     assert_eq!(lgif.first_log_idx, 1);
+//
+//     info!(&log, "Cleaning up test");
+//     deployment.teardown()?;
+//     std::fs::remove_dir_all(path)?;
+//     logctx.cleanup_successful();
+//     Ok(())
+// }
 //
 //#[tokio::test]
 //async fn test_raft_config_parsing() -> anyhow::Result<()> {
diff --git a/dev-tools/clickhouse-cluster-dev/Cargo.toml b/dev-tools/clickhouse-cluster-dev/Cargo.toml
index a369ea099e..0bb27f9908 100644
--- a/dev-tools/clickhouse-cluster-dev/Cargo.toml
+++ b/dev-tools/clickhouse-cluster-dev/Cargo.toml
@@ -11,6 +11,8 @@ workspace = true
 [dependencies]
 anyhow.workspace = true
 dropshot.workspace = true
+clickward.workspace = true
 slog.workspace = true
 tokio.workspace = true
 omicron-workspace-hack.workspace = true
+oximeter-test-utils.workspace = true
diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs
index 599769088a..9ef53f5eb6 100644
--- a/dev-tools/clickhouse-cluster-dev/src/main.rs
+++ b/dev-tools/clickhouse-cluster-dev/src/main.rs
@@ -3,19 +3,57 @@
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
 
 use anyhow::{Context, Result};
-use dropshot::{test_util::LogContext, ConfigLogging, ConfigLoggingLevel};
+use clickward::{BasePorts, Deployment, DeploymentConfig};
+use dropshot::{ConfigLogging, ConfigLoggingLevel};
+use dropshot::test_util::{log_prefix_for_test, LogContext};
+use oximeter_test_utils::wait_for_keepers;
 
 #[tokio::main]
 async fn main() -> Result<()> {
     let logctx = LogContext::new(
-        "clickhouse-cluster",
+        "clickhouse_cluster",
         &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
     );
 
+    let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name());
+    // TODO: Switch to "{prefix}_clickward_test"?
+    let path = parent_dir.join(format!("clickward_test"));
+    std::fs::create_dir(&path)?;
+
     slog::info!(
         logctx.log,
         "Setting up a ClickHouse cluster"
     );
 
+    // We spin up several replicated clusters and must use a
+    // separate set of ports in case the tests run concurrently.
+    let base_ports = BasePorts {
+        keeper: 29000,
+        raft: 29100,
+        clickhouse_tcp: 29200,
+        clickhouse_http: 29300,
+        clickhouse_interserver_http: 29400,
+    };
+
+    let config = DeploymentConfig {
+        path: path.clone(),
+        base_ports,
+        cluster_name: "oximeter_cluster".to_string(),
+    };
+
+    let mut deployment = Deployment::new(config);
+
+    // TODO: Use 2 replicas and 3 keepers, for now I'm just testing
+    let num_keepers = 1;
+    let num_replicas = 1;
+    deployment
+        .generate_config(num_keepers, num_replicas)
+        .context("failed to generate config")?;
+    deployment.deploy().context("failed to deploy")?;
+
+    wait_for_keepers(&logctx.log, &deployment, vec![clickward::KeeperId(1)]).await?;
+
+    // TODO: Also wait for replicas
+
     Ok(())
 }

From fbdc52eec11dd8cbbd8c94fcb9e27fead8983cd5 Mon Sep 17 00:00:00 2001
From: karencfv
Date: Fri, 8 Nov 2024 13:47:18 +1300
Subject: [PATCH 05/20] Leave myself some notes

---
 clickhouse-admin/tests/integration_test.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index 0670157844..66f9441437 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -75,9 +75,11 @@ mod tests {
         // The first log index from a newly created cluster should always be 1
         assert_eq!(lgif.first_log_idx, 1);
 
+        // TODO: Move this code to teardown function.
+        // For now moving it to that function results in a PoisonError
+
         info!(&logctx.log, "Cleaning up test");
 
-        // TODO: Move this code to teardown function
         // TODO: Find another way to retrieve deployment
 
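Patch 05's note about a `PoisonError` is worth unpacking: in the standard library, a mutex becomes poisoned when a thread panics while holding its guard, and every later `lock()` then returns `Err(PoisonError)`. The patch doesn't say which lock was involved in the teardown path, so the following only illustrates the mechanism in isolation:

```rust
// Demonstrates mutex poisoning (illustrative only; not the lock that
// poisoned in the PR's teardown experiments).
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let shared = Arc::new(Mutex::new(0));
    let clone = Arc::clone(&shared);

    // This thread panics while holding the lock, poisoning the mutex.
    let _ = thread::spawn(move || {
        let _guard = clone.lock().unwrap();
        panic!("poison the mutex");
    })
    .join();

    // Subsequent lock() calls now surface a PoisonError.
    assert!(shared.lock().is_err());
}
```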
From 4b94c90886f5d982bab669ebc9006754a7d29709 Mon Sep 17 00:00:00 2001
From: karencfv
Date: Fri, 8 Nov 2024 13:51:00 +1300
Subject: [PATCH 06/20] fmt

---
 clickhouse-admin/tests/integration_test.rs   | 34 ++++++++++----------
 dev-tools/clickhouse-cluster-dev/src/main.rs | 12 +++----
 2 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index 66f9441437..2b3ee57889 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -9,8 +9,8 @@ use clickhouse_admin_types::{
     KeeperServerInfo, KeeperServerType, RaftConfig,
 };
 use clickward::{BasePorts, Deployment, DeploymentConfig};
-use dropshot::{ConfigLogging, ConfigLoggingLevel};
 use dropshot::test_util::{log_prefix_for_test, LogContext};
+use dropshot::{ConfigLogging, ConfigLoggingLevel};
 use omicron_clickhouse_admin::ClickhouseCli;
 use omicron_test_utils::dev::test_setup_log;
 use oximeter_test_utils::wait_for_keepers;
@@ -23,12 +23,12 @@ use ctor::{ctor, dtor};
 use std::sync::Once;
 
 // static SETUP: Once = Once::new();
-// 
+//
 // // TODO: Do setup with nextest. It does setup twice with nextest
 // // for some reason, but with cargo test it only does setup once
 // fn setup() {
 //     SETUP.call_once(|| {
-//         println!("Setup before tests: test"); 
+//         println!("Setup before tests: test");
 //     });
 // }
 
@@ -39,7 +39,7 @@ fn teardown() {
 #[cfg(test)]
 mod tests {
     use super::*;
-    
+
     #[tokio::test]
     async fn test_1() {
         // setup();
@@ -59,7 +59,7 @@ mod tests {
             "clickhouse_cluster",
             &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
         );
-        
+
         let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name());
         // TODO: Switch to "{prefix}_clickward_test" ?
         let path = parent_dir.join(format!("clickward_test"));
@@ -91,13 +91,13 @@ mod tests {
             clickhouse_http: 29300,
             clickhouse_interserver_http: 29400,
         };
-        
+
         let config = DeploymentConfig {
             path: path.clone(),
             base_ports,
             cluster_name: "oximeter_cluster".to_string(),
         };
-        
+
         let mut deployment = Deployment::new(config);
         deployment.teardown()?;
         std::fs::remove_dir_all(path)?;
@@ -118,11 +118,11 @@ mod tests {
 // async fn test_lgif_parsing() -> anyhow::Result<()> {
 //     let logctx = test_setup_log("test_lgif_parsing");
 //     let log = logctx.log.clone();
-// 
+//
 //     let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
 //     let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
 //     std::fs::create_dir(&path)?;
-// 
+//
 //     // We spin up several replicated clusters and must use a
 //     // separate set of ports in case the tests run concurrently.
 //     let base_ports = BasePorts {
@@ -132,15 +132,15 @@ mod tests {
 //         clickhouse_http: 29300,
 //         clickhouse_interserver_http: 29400,
 //     };
-// 
+//
 //     let config = DeploymentConfig {
 //         path: path.clone(),
 //         base_ports,
 //         cluster_name: "oximeter_cluster".to_string(),
 //     };
-// 
+//
 //     let mut deployment = Deployment::new(config);
-// 
+//
 //     // We only need a single keeper to test the lgif command
 //     let num_keepers = 1;
 //     let num_replicas = 1;
@@ -148,20 +148,20 @@ mod tests {
 //         .generate_config(num_keepers, num_replicas)
 //         .context("failed to generate config")?;
 //     deployment.deploy().context("failed to deploy")?;
-// 
+//
 //     wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
-// 
+//
 //     let clickhouse_cli = ClickhouseCli::new(
 //         Utf8PathBuf::from_str("clickhouse").unwrap(),
 //         SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
 //     )
 //     .with_log(log.clone());
-// 
+//
 //     let lgif = clickhouse_cli.lgif().await.unwrap();
-// 
+//
 //     // The first log index from a newly created cluster should always be 1
 //     assert_eq!(lgif.first_log_idx, 1);
-// 
+//
 //     info!(&log, "Cleaning up test");
 //     deployment.teardown()?;
 //     std::fs::remove_dir_all(path)?;
diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs
index 9ef53f5eb6..198ca42fc8 100644
--- a/dev-tools/clickhouse-cluster-dev/src/main.rs
+++ b/dev-tools/clickhouse-cluster-dev/src/main.rs
@@ -4,8 +4,8 @@
 
 use anyhow::{Context, Result};
 use clickward::{BasePorts, Deployment, DeploymentConfig};
-use dropshot::{ConfigLogging, ConfigLoggingLevel};
 use dropshot::test_util::{log_prefix_for_test, LogContext};
+use dropshot::{ConfigLogging, ConfigLoggingLevel};
 use oximeter_test_utils::wait_for_keepers;
 
 #[tokio::main]
@@ -20,12 +20,9 @@ async fn main() -> Result<()> {
     let path = parent_dir.join(format!("clickward_test"));
     std::fs::create_dir(&path)?;
 
-    slog::info!(
-        logctx.log,
-        "Setting up a ClickHouse cluster"
-    );
+    slog::info!(logctx.log, "Setting up a ClickHouse cluster");
 
-    // We spin up several replicated clusters and must use a 
+    // We spin up several replicated clusters and must use a
     // separate set of ports in case the tests run concurrently.
     let base_ports = BasePorts {
         keeper: 29000,
@@ -51,7 +48,8 @@ async fn main() -> Result<()> {
         .context("failed to generate config")?;
     deployment.deploy().context("failed to deploy")?;
 
-    wait_for_keepers(&logctx.log, &deployment, vec![clickward::KeeperId(1)]).await?;
+    wait_for_keepers(&logctx.log, &deployment, vec![clickward::KeeperId(1)])
+        .await?;
 
     // TODO: Also wait for replicas
 
From 94792e23498906a0b6d2df838984c22128b2a957 Mon Sep 17 00:00:00 2001
From: karencfv
Date: Tue, 12 Nov 2024 17:28:36 +1300
Subject: [PATCH 07/20] this works I guess

---
 clickhouse-admin/tests/integration_test.rs | 211 +++++++++++++++++----
 1 file changed, 179 insertions(+), 32 deletions(-)

diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index 2b3ee57889..20a9fc4751 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -32,10 +32,94 @@ use std::sync::Once;
 //     });
 // }
 
-fn teardown() {
-    println!("Teardown after all tests: test");
+// static TEARDOWN: Once = Once::new();
+
+struct Teardown;
+
+impl Drop for Teardown {
+    fn drop(&mut self) {
+        println!("Teardown after all tests: test");
+        // let logctx = LogContext::new(
+        //     "clickhouse_cluster",
+        //     &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
+        // );
+        //
+        let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
+        // TODO: Switch to "{prefix}_clickward_test" ?
+        let path = parent_dir.join(format!("clickward_test"));
+
+        println!("{}: test", path);
+        //
+        // info!(&logctx.log, "Cleaning up test");
+        //
+        // // TODO: Find another way to retrieve deployment
+        //
+        // We spin up several replicated clusters and must use a
+        // separate set of ports in case the tests run concurrently.
+        let base_ports = BasePorts {
+            keeper: 29000,
+            raft: 29100,
+            clickhouse_tcp: 29200,
+            clickhouse_http: 29300,
+            clickhouse_interserver_http: 29400,
+        };
+
+        let config = DeploymentConfig {
+            path: path.clone(),
+            base_ports,
+            cluster_name: "oximeter_cluster".to_string(),
+        };
+
+        let mut deployment = Deployment::new(config);
+        deployment.teardown().unwrap();
+        std::fs::remove_dir_all(path).unwrap();
+        // logctx.cleanup_successful();
+
+        //Ok(())
+    }
 }
 
+// fn teardown() {
+//     println!("Teardown after all tests: test");
+//     // let logctx = LogContext::new(
+//     //     "clickhouse_cluster",
+//     //     &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
+//     // );
+//     //
+//     let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
+//     // TODO: Switch to "{prefix}_clickward_test" ?
+//     let path = parent_dir.join(format!("clickward_test"));
+//
+//     println!("{}: test", path);
+//     //
+//     // info!(&logctx.log, "Cleaning up test");
+//     //
+//     // // TODO: Find another way to retrieve deployment
+//     //
+//     // We spin up several replicated clusters and must use a
+//     // separate set of ports in case the tests run concurrently.
+//     let base_ports = BasePorts {
+//         keeper: 29000,
+//         raft: 29100,
+//         clickhouse_tcp: 29200,
+//         clickhouse_http: 29300,
+//         clickhouse_interserver_http: 29400,
+//     };
+//
+//     let config = DeploymentConfig {
+//         path: path.clone(),
+//         base_ports,
+//         cluster_name: "oximeter_cluster".to_string(),
+//     };
+//
+//     let mut deployment = Deployment::new(config);
+//     deployment.teardown().unwrap();
+//     std::fs::remove_dir_all(path).unwrap();
+//     // logctx.cleanup_successful();
+//
+//     //Ok(())
+// }
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -127,15 +211,78 @@ mod tests {
     #[tokio::test]
     async fn test_1() {
         // setup();
+        // let _teardown = Teardown;
         println!("Running test 1...");
     }
 
     #[tokio::test]
     async fn test_2() {
         // setup();
+        //let _teardown = Teardown;
         println!("Running test 2...");
+        // assert_eq!(1, 2);
     }
 
     #[tokio::test]
     async fn test_lgif_parsing() -> anyhow::Result<()> {
         // TODO: Fix up log setup
+        // let _teardown = Teardown;
         let logctx = LogContext::new(
             "clickhouse_cluster",
             &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
         );
 
-        let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name());
-        // TODO: Switch to "{prefix}_clickward_test" ?
-        let path = parent_dir.join(format!("clickward_test"));
+        // let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name());
+        // // TODO: Switch to "{prefix}_clickward_test" ?
+        // let path = parent_dir.join(format!("clickward_test"));
 
         let clickhouse_cli = ClickhouseCli::new(
             Utf8PathBuf::from_str("clickhouse").unwrap(),
@@ -166,42 +313,101 @@ mod tests {
         // TODO: Move this code to teardown function.
         // For now moving it to that function results in a PoisonError
 
-        info!(&logctx.log, "Cleaning up test");
-
-        // TODO: Find another way to retrieve deployment
-
-        // We spin up several replicated clusters and must use a
-        // separate set of ports in case the tests run concurrently.
-        let base_ports = BasePorts {
-            keeper: 29000,
-            raft: 29100,
-            clickhouse_tcp: 29200,
-            clickhouse_http: 29300,
-            clickhouse_interserver_http: 29400,
-        };
-
-        let config = DeploymentConfig {
-            path: path.clone(),
-            base_ports,
-            cluster_name: "oximeter_cluster".to_string(),
-        };
-
-        let mut deployment = Deployment::new(config);
-        deployment.teardown()?;
-        std::fs::remove_dir_all(path)?;
-        logctx.cleanup_successful();
+        // info!(&logctx.log, "Cleaning up test");
+        //
+        // // TODO: Find another way to retrieve deployment
+        //
+        // // We spin up several replicated clusters and must use a
+        // // separate set of ports in case the tests run concurrently.
+        // let base_ports = BasePorts {
+        //     keeper: 29000,
+        //     raft: 29100,
+        //     clickhouse_tcp: 29200,
+        //     clickhouse_http: 29300,
+        //     clickhouse_interserver_http: 29400,
+        // };
+        //
+        // let config = DeploymentConfig {
+        //     path: path.clone(),
+        //     base_ports,
+        //     cluster_name: "oximeter_cluster".to_string(),
+        // };
+        //
+        // let mut deployment = Deployment::new(config);
+        // deployment.teardown()?;
+        // std::fs::remove_dir_all(path)?;
+        // logctx.cleanup_successful();
         Ok(())
     }
 
+    #[tokio::test]
+    async fn test_teardown() -> anyhow::Result<()> {
+        // println!("Teardown after all tests: test");
+        // let logctx = LogContext::new(
+        //     "clickhouse_cluster",
+        //     &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
+        // );
+        //
+        let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
+        // TODO: Switch to "{prefix}_clickward_test" ?
+        let path = parent_dir.join(format!("clickward_test"));
+
+        println!("{}: test", path);
+        //
+        // info!(&logctx.log, "Cleaning up test");
+        //
+        // // TODO: Find another way to retrieve deployment
+        //
+        // We spin up several replicated clusters and must use a
+        // separate set of ports in case the tests run concurrently.
+        let base_ports = BasePorts {
+            keeper: 29000,
+            raft: 29100,
+            clickhouse_tcp: 29200,
+            clickhouse_http: 29300,
+            clickhouse_interserver_http: 29400,
+        };
+
+        let config = DeploymentConfig {
+            path: path.clone(),
+            base_ports,
+            cluster_name: "oximeter_cluster".to_string(),
+        };
+
+        let mut deployment = Deployment::new(config);
+        deployment.teardown().unwrap();
+        std::fs::remove_dir_all(path).unwrap();
+        // logctx.cleanup_successful();
+
+        Ok(())
+    }
+
     // Ensure teardown after all tests finish
-    #[cfg(test)]
-    #[dtor] // Cleanup to be run when tests finish
-    fn after_tests() {
-        println!("Cleanup after tests: test");
-        teardown();
-    }
+    // #[cfg(test)]
+    // #[ctor] // Cleanup to be run when tests finish
+    // fn after_tests() {
+    //     println!("Cleanup after tests: test");
+    //     TEARDOWN.call_once(|| {
+    //         teardown();
+    //     });
+
+    //     teardown();
+    // }
 }
 
+//#[test]
+//fn after_tests() {
+//    println!("Cleanup after tests: test");
+//    TEARDOWN.call_once(|| {
+//        teardown();
+//    });
+//}
+//
+//fn main() {
+//    // The `run_teardown` ensures the teardown happens after all tests.
+//    after_tests();
+//}
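Patch 07 reaches for RAII: a `Teardown` value whose `Drop` impl does the cleanup. The catch, visible in the commented-out `let _teardown = Teardown;` lines, is that a guard bound inside a test runs when *that test's* scope ends, so this yields per-test teardown rather than the per-suite teardown the PR needs. A distilled sketch of the pattern:

```rust
// Sketch of the Drop-guard pattern patch 07 experiments with. Cleanup runs
// when the guard goes out of scope, even if the test panics.
struct TeardownGuard;

impl Drop for TeardownGuard {
    fn drop(&mut self) {
        println!("cleanup for this one test");
    }
}

#[test]
fn uses_guard() {
    let _guard = TeardownGuard;
    // test body; drop() runs when this scope unwinds
}
```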
From 4b5c2038f9a0f9f980ae900c8eb93a5a28db67c4 Mon Sep 17 00:00:00 2001
From: karencfv
Date: Tue, 12 Nov 2024 17:54:02 +1300
Subject: [PATCH 08/20] clean up a bit

---
 .config/nextest.toml                       |   2 +-
 Cargo.lock                                 |  11 -
 Cargo.toml                                 |   1 -
 clickhouse-admin/Cargo.toml                |   1 -
 clickhouse-admin/tests/integration_test.rs | 334 ++++-----------------
 5 files changed, 55 insertions(+), 294 deletions(-)

diff --git a/.config/nextest.toml b/.config/nextest.toml
index 9442070be2..f5c58a5224 100644
--- a/.config/nextest.toml
+++ b/.config/nextest.toml
@@ -46,7 +46,7 @@ live-tests = { max-threads = 1 }
 default-filter = 'all() - package(omicron-live-tests) - package(end-to-end-tests)'
 
 [[profile.default.overrides]]
-filter = 'package(oximeter-db) and test(replicated)'
+filter = 'package(oximeter-db) and test(replicated) + package(omicron-clickhouse-admin)'
 test-group = 'clickhouse-cluster'
 
 [[profile.default.overrides]]
diff --git a/Cargo.lock b/Cargo.lock
index 1b379ddfb2..9748395c23 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1909,16 +1909,6 @@ dependencies = [
  "memchr",
 ]
 
-[[package]]
-name = "ctor"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f"
-dependencies = [
- "quote",
- "syn 2.0.79",
-]
-
 [[package]]
 name = "ctr"
 version = "0.9.2"
@@ -6328,7 +6318,6 @@ dependencies = [
  "clickhouse-admin-api",
  "clickhouse-admin-types",
  "clickward",
- "ctor",
  "dropshot",
  "expectorate",
  "http 1.1.0",
diff --git a/Cargo.toml b/Cargo.toml
index a68193ecaf..fcfac3dad1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -340,7 +340,6 @@ crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", re
 crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "b7b9d5660b28ca5e865242b2bdecd032c0852d40" }
 crucible-common = { git = "https://github.com/oxidecomputer/crucible", rev = "b7b9d5660b28ca5e865242b2bdecd032c0852d40" }
 csv = "1.3.0"
-ctor = "0.2.8"
 curve25519-dalek = "4"
 datatest-stable = "0.2.9"
 display-error-chain = "0.2.2"
diff --git a/clickhouse-admin/Cargo.toml b/clickhouse-admin/Cargo.toml
index e7f253028b..84b04f6caa 100644
--- a/clickhouse-admin/Cargo.toml
+++ b/clickhouse-admin/Cargo.toml
@@ -31,7 +31,6 @@ omicron-workspace-hack.workspace = true
 [dev-dependencies]
 clickward.workspace = true
-ctor.workspace = true
 dropshot.workspace = true
 expectorate.workspace = true
 nexus-test-utils.workspace = true
diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index 20a9fc4751..a25c87e77a 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -19,303 +19,77 @@ use std::collections::BTreeSet;
 use std::net::{Ipv6Addr, SocketAddrV6};
 use std::str::FromStr;
 
-use ctor::{ctor, dtor};
-use std::sync::Once;
-
-// static SETUP: Once = Once::new();
-//
-// // TODO: Do setup with nextest. It does setup twice with nextest
-// // for some reason, but with cargo test it only does setup once
-// fn setup() {
-//     SETUP.call_once(|| {
-//         println!("Setup before tests: test");
-//     });
-// }
-
-// static TEARDOWN: Once = Once::new();
-
-struct Teardown;
-
-impl Drop for Teardown {
-    fn drop(&mut self) {
-        println!("Teardown after all tests: test");
-        // let logctx = LogContext::new(
-        //     "clickhouse_cluster",
-        //     &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
-        // );
-        //
-        let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
-        // TODO: Switch to "{prefix}_clickward_test" ?
-        let path = parent_dir.join(format!("clickward_test"));
-
-        println!("{}: test", path);
-        //
-        // info!(&logctx.log, "Cleaning up test");
-        //
-        // // TODO: Find another way to retrieve deployment
-        //
-        // We spin up several replicated clusters and must use a
-        // separate set of ports in case the tests run concurrently.
-        let base_ports = BasePorts {
-            keeper: 29000,
-            raft: 29100,
-            clickhouse_tcp: 29200,
-            clickhouse_http: 29300,
-            clickhouse_interserver_http: 29400,
-        };
-
-        let config = DeploymentConfig {
-            path: path.clone(),
-            base_ports,
-            cluster_name: "oximeter_cluster".to_string(),
-        };
-
-        let mut deployment = Deployment::new(config);
-        deployment.teardown().unwrap();
-        std::fs::remove_dir_all(path).unwrap();
-        // logctx.cleanup_successful();
-
-        //Ok(())
-    }
+#[tokio::test]
+async fn test_1() {
+    println!("Running test 1...");
 }
 
-// fn teardown() {
-//     println!("Teardown after all tests: test");
-//     // let logctx = LogContext::new(
-//     //     "clickhouse_cluster",
-//     //     &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
-//     // );
-//     //
-//     let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
-//     // TODO: Switch to "{prefix}_clickward_test" ?
-//     let path = parent_dir.join(format!("clickward_test"));
-//
-//     println!("{}: test", path);
-//     //
-//     // info!(&logctx.log, "Cleaning up test");
-//     //
-//     // // TODO: Find another way to retrieve deployment
-//     //
-//     // We spin up several replicated clusters and must use a
-//     // separate set of ports in case the tests run concurrently.
-//     let base_ports = BasePorts {
-//         keeper: 29000,
-//         raft: 29100,
-//         clickhouse_tcp: 29200,
-//         clickhouse_http: 29300,
-//         clickhouse_interserver_http: 29400,
-//     };
-//
-//     let config = DeploymentConfig {
-//         path: path.clone(),
-//         base_ports,
-//         cluster_name: "oximeter_cluster".to_string(),
-//     };
-//
-//     let mut deployment = Deployment::new(config);
-//     deployment.teardown().unwrap();
-//     std::fs::remove_dir_all(path).unwrap();
-//     // logctx.cleanup_successful();
-//
-//     //Ok(())
-// }
+#[tokio::test]
+async fn test_2() {
+    println!("Running test 2...");
+    // assert_eq!(1, 2);
+}
 
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[tokio::test]
-    async fn test_1() {
-        // setup();
-        // let _teardown = Teardown;
-        println!("Running test 1...");
-    }
-
-    #[tokio::test]
-    async fn test_2() {
-        // setup();
-        //let _teardown = Teardown;
-        println!("Running test 2...");
-        // assert_eq!(1, 2);
-    }
-
-    #[tokio::test]
-    async fn test_lgif_parsing() -> anyhow::Result<()> {
-        // TODO: Fix up log setup
-        // let _teardown = Teardown;
-        let logctx = LogContext::new(
-            "clickhouse_cluster",
-            &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
-        );
-
-        // let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name());
-        // // TODO: Switch to "{prefix}_clickward_test" ?
-        // let path = parent_dir.join(format!("clickward_test"));
-
-        let clickhouse_cli = ClickhouseCli::new(
-            Utf8PathBuf::from_str("clickhouse").unwrap(),
-            SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
-        )
-        .with_log(logctx.log.clone());
-
-        let lgif = clickhouse_cli.lgif().await.unwrap();
-
-        // The first log index from a newly created cluster should always be 1
-        assert_eq!(lgif.first_log_idx, 1);
-
-        // TODO: Move this code to teardown function.
-        // For now moving it to that function results in a PoisonError
-
-        // info!(&logctx.log, "Cleaning up test");
-        //
-        // // TODO: Find another way to retrieve deployment
-        //
-        // // We spin up several replicated clusters and must use a
-        // // separate set of ports in case the tests run concurrently.
-        // let base_ports = BasePorts {
-        //     keeper: 29000,
-        //     raft: 29100,
-        //     clickhouse_tcp: 29200,
-        //     clickhouse_http: 29300,
-        //     clickhouse_interserver_http: 29400,
-        // };
-        //
-        // let config = DeploymentConfig {
-        //     path: path.clone(),
-        //     base_ports,
-        //     cluster_name: "oximeter_cluster".to_string(),
-        // };
-        //
-        // let mut deployment = Deployment::new(config);
-        // deployment.teardown()?;
-        // std::fs::remove_dir_all(path)?;
-        // logctx.cleanup_successful();
-        Ok(())
-    }
+#[tokio::test]
+async fn test_lgif_parsing() -> anyhow::Result<()> {
+    let logctx = LogContext::new(
+        "clickhouse_cluster",
+        &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
+    );
 
-    #[tokio::test]
-    async fn test_teardown() -> anyhow::Result<()> {
-        // println!("Teardown after all tests: test");
-        // let logctx = LogContext::new(
-        //     "clickhouse_cluster",
-        //     &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
-        // );
-        //
-        let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
-        // TODO: Switch to "{prefix}_clickward_test" ?
-        let path = parent_dir.join(format!("clickward_test"));
-
-        println!("{}: test", path);
-        //
-        // info!(&logctx.log, "Cleaning up test");
-        //
-        // // TODO: Find another way to retrieve deployment
-        //
-        // We spin up several replicated clusters and must use a
-        // separate set of ports in case the tests run concurrently.
-        let base_ports = BasePorts {
-            keeper: 29000,
-            raft: 29100,
-            clickhouse_tcp: 29200,
-            clickhouse_http: 29300,
-            clickhouse_interserver_http: 29400,
-        };
-
-        let config = DeploymentConfig {
-            path: path.clone(),
-            base_ports,
-            cluster_name: "oximeter_cluster".to_string(),
-        };
-
-        let mut deployment = Deployment::new(config);
-        deployment.teardown().unwrap();
-        std::fs::remove_dir_all(path).unwrap();
-        // logctx.cleanup_successful();
-
-        Ok(())
-    }
+    let clickhouse_cli = ClickhouseCli::new(
+        Utf8PathBuf::from_str("clickhouse").unwrap(),
+        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
+    )
+    .with_log(logctx.log.clone());
 
-    // Ensure teardown after all tests finish
-    // #[cfg(test)]
-    // #[ctor] // Cleanup to be run when tests finish
-    // fn after_tests() {
-    //     println!("Cleanup after tests: test");
-    //     TEARDOWN.call_once(|| {
-    //         teardown();
-    //     });
-
-    //     teardown();
-    // }
-}
+    let lgif = clickhouse_cli.lgif().await.unwrap();
 
-//#[test]
-//fn after_tests() {
-//    println!("Cleanup after tests: test");
-//    TEARDOWN.call_once(|| {
-//        teardown();
-//    });
-//}
-//
-//fn main() {
-//    // The `run_teardown` ensures the teardown happens after all tests.
-//    after_tests();
-//}
+    // The first log index from a newly created cluster should always be 1
+    assert_eq!(lgif.first_log_idx, 1);
 
-// #[tokio::test]
-// async fn test_lgif_parsing() -> anyhow::Result<()> {
-//     let logctx = test_setup_log("test_lgif_parsing");
-//     let log = logctx.log.clone();
-//
-//     let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name());
-//     let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test"));
-//     std::fs::create_dir(&path)?;
-//
-//     // We spin up several replicated clusters and must use a
-//     // separate set of ports in case the tests run concurrently.
-//     let base_ports = BasePorts {
-//         keeper: 29000,
-//         raft: 29100,
-//         clickhouse_tcp: 29200,
-//         clickhouse_http: 29300,
-//         clickhouse_interserver_http: 29400,
-//     };
-//
-//     let config = DeploymentConfig {
-//         path: path.clone(),
-//         base_ports,
-//         cluster_name: "oximeter_cluster".to_string(),
-//     };
-//
-//     let mut deployment = Deployment::new(config);
-//
-//     // We only need a single keeper to test the lgif command
-//     let num_keepers = 1;
-//     let num_replicas = 1;
-//     deployment
-//         .generate_config(num_keepers, num_replicas)
-//         .context("failed to generate config")?;
-//     deployment.deploy().context("failed to deploy")?;
-//
-//     wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?;
-//
-//     let clickhouse_cli = ClickhouseCli::new(
-//         Utf8PathBuf::from_str("clickhouse").unwrap(),
-//         SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
-//     )
-//     .with_log(log.clone());
-//
-//     let lgif = clickhouse_cli.lgif().await.unwrap();
-//
-//     // The first log index from a newly created cluster should always be 1
-//     assert_eq!(lgif.first_log_idx, 1);
-//
-//     info!(&log, "Cleaning up test");
-//     deployment.teardown()?;
-//     std::fs::remove_dir_all(path)?;
-//     logctx.cleanup_successful();
-//     Ok(())
-// }
+#[tokio::test]
+async fn test_teardown() -> anyhow::Result<()> {
+    let logctx = LogContext::new(
+        "clickhouse_cluster",
+        &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
+    );
+
+    let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
+    // TODO: Switch to "{prefix}_clickward_test" ?
+    let path = parent_dir.join(format!("clickward_test"));
+
+    info!(&logctx.log, "Tearing down ClickHouse cluster"; "path" => ?path);
+
+    // TODO: Find another way to retrieve deployment
+
+    // We spin up several replicated clusters and must use a
+    // separate set of ports in case the tests run concurrently.
+    let base_ports = BasePorts {
+        keeper: 29000,
+        raft: 29100,
+        clickhouse_tcp: 29200,
+        clickhouse_http: 29300,
+        clickhouse_interserver_http: 29400,
+    };
+
+    let config = DeploymentConfig {
+        path: path.clone(),
+        base_ports,
+        cluster_name: "oximeter_cluster".to_string(),
+    };
+
+    let deployment = Deployment::new(config);
+    deployment.teardown()?;
+    std::fs::remove_dir_all(path)?;
+    logctx.cleanup_successful();
+
+    Ok(())
+}
 
 //#[tokio::test]
 //async fn test_raft_config_parsing() -> anyhow::Result<()> {

From 789cbd929685b997d68ae68ed3b49c0df4a63496 Mon Sep 17 00:00:00 2001
From: karencfv
Date: Tue, 12 Nov 2024 17:54:53 +1300
Subject: [PATCH 09/20] clean up a bit

---
 clickhouse-admin/tests/integration_test.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index a25c87e77a..6c06e7be93 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -38,12 +38,12 @@ async fn test_lgif_parsing() -> anyhow::Result<()> {
     );
 
     let clickhouse_cli = ClickhouseCli::new(
-        Utf8PathBuf::from_str("clickhouse").unwrap(),
+        Utf8PathBuf::from_str("clickhouse")?,
         SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
     )
     .with_log(logctx.log.clone());
 
-    let lgif = clickhouse_cli.lgif().await.unwrap();
+    let lgif = clickhouse_cli.lgif().await?;
 
     // The first log index from a newly created cluster should always be 1
     assert_eq!(lgif.first_log_idx, 1);
"oximeter-test-utils", "slog", "tokio", @@ -7700,6 +7701,7 @@ dependencies = [ "clickward", "omicron-test-utils", "omicron-workspace-hack", + "oximeter-db", "oximeter-macro-impl", "oximeter-types", "slog", diff --git a/dev-tools/clickhouse-cluster-dev/Cargo.toml b/dev-tools/clickhouse-cluster-dev/Cargo.toml index 0bb27f9908..49623534d9 100644 --- a/dev-tools/clickhouse-cluster-dev/Cargo.toml +++ b/dev-tools/clickhouse-cluster-dev/Cargo.toml @@ -15,4 +15,5 @@ clickward.workspace = true slog.workspace = true tokio.workspace = true omicron-workspace-hack.workspace = true +oximeter-db.workspace = true oximeter-test-utils.workspace = true diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs index 198ca42fc8..acc9ba77b7 100644 --- a/dev-tools/clickhouse-cluster-dev/src/main.rs +++ b/dev-tools/clickhouse-cluster-dev/src/main.rs @@ -3,13 +3,16 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use anyhow::{Context, Result}; -use clickward::{BasePorts, Deployment, DeploymentConfig}; +use clickward::{BasePorts, Deployment, DeploymentConfig, KeeperId}; use dropshot::test_util::{log_prefix_for_test, LogContext}; use dropshot::{ConfigLogging, ConfigLoggingLevel}; -use oximeter_test_utils::wait_for_keepers; +use oximeter_db::Client; +use oximeter_test_utils::{wait_for_keepers, wait_for_ping}; +use std::time::Duration; #[tokio::main] async fn main() -> Result<()> { + let request_timeout = Duration::from_secs(15); let logctx = LogContext::new( "clickhouse_cluster", &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, @@ -41,17 +44,34 @@ async fn main() -> Result<()> { let mut deployment = Deployment::new(config); // TODO: Use 2 replicas and 3 keepers, for now I'm just testing - let num_keepers = 1; - let num_replicas = 1; + let num_keepers = 3; + let num_replicas = 2; deployment .generate_config(num_keepers, num_replicas) .context("failed to generate config")?; deployment.deploy().context("failed to deploy")?; - wait_for_keepers(&logctx.log, &deployment, vec![clickward::KeeperId(1)]) - .await?; + let client1 = Client::new_with_request_timeout( + deployment.http_addr(1.into()), + deployment.native_addr(1.into()), + &logctx.log, + request_timeout, + ); + let client2 = Client::new_with_request_timeout( + deployment.http_addr(2.into()), + deployment.native_addr(2.into()), + &logctx.log, + request_timeout, + ); - // TODO: Also wait for replicas + wait_for_ping(&logctx.log, &client1).await?; + wait_for_ping(&logctx.log, &client2).await?; + wait_for_keepers( + &logctx.log, + &deployment, + (1..=num_keepers).map(KeeperId).collect(), + ) + .await?; Ok(()) } diff --git a/oximeter/test-utils/Cargo.toml b/oximeter/test-utils/Cargo.toml index 0bff56583e..adca1a51c8 100644 --- a/oximeter/test-utils/Cargo.toml +++ b/oximeter/test-utils/Cargo.toml @@ -13,6 +13,7 @@ chrono.workspace = true clickward.workspace = true omicron-workspace-hack.workspace = true omicron-test-utils.workspace = true +oximeter-db.workspace = true oximeter-macro-impl.workspace = true oximeter-types.workspace = true slog.workspace =true diff --git a/oximeter/test-utils/src/lib.rs b/oximeter/test-utils/src/lib.rs index 02f928abc0..8af8bc4fad 100644 --- a/oximeter/test-utils/src/lib.rs +++ b/oximeter/test-utils/src/lib.rs @@ -17,6 +17,7 @@ extern crate self as oximeter; use anyhow::Context; use clickward::{Deployment, KeeperClient, KeeperError, KeeperId}; use omicron_test_utils::dev::poll; +use oximeter_db::Client; use oximeter_macro_impl::{Metric, Target}; use 
oximeter_types::histogram;
 use oximeter_types::histogram::{Histogram, Record};
@@ -185,6 +186,26 @@ pub async fn wait_for_keepers(
     Ok(())
 }
 
+/// Try to ping the server until it responds.
+pub async fn wait_for_ping(log: &Logger, client: &Client) -> anyhow::Result<()> {
+    poll::wait_for_condition(
+        || async {
+            client
+                .ping()
+                .await
+                .map_err(|_| poll::CondCheckError::<oximeter_db::Error>::NotYet)
+        },
+        &Duration::from_millis(100),
+        &Duration::from_secs(30),
+    )
+    .await
+    .with_context(|| {
+        format!("failed to ping clickhouse server: {}", client.url())
+    })?;
+    info!(log, "Clickhouse server ready: {}", client.url());
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use chrono::Utc;

From 358feb34915185f0bfccd8297a881c66b796a06e Mon Sep 17 00:00:00 2001
From: karencfv
Date: Tue, 12 Nov 2024 18:43:30 +1300
Subject: [PATCH 11/20] start adding tests

---
 clickhouse-admin/tests/integration_test.rs | 158 ++++++++------------
 1 file changed, 61 insertions(+), 97 deletions(-)

diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs
index 6c06e7be93..306fb415ac 100644
--- a/clickhouse-admin/tests/integration_test.rs
+++ b/clickhouse-admin/tests/integration_test.rs
@@ -41,7 +41,7 @@ async fn test_lgif_parsing() -> anyhow::Result<()> {
         Utf8PathBuf::from_str("clickhouse")?,
         SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
     )
-    .with_log(logctx.log.clone());
+    .with_log(logctx.log);
 
     let lgif = clickhouse_cli.lgif().await?;
 
@@ -52,116 +52,41 @@ async fn test_lgif_parsing() -> anyhow::Result<()> {
 }
 
 #[tokio::test]
-async fn test_teardown() -> anyhow::Result<()> {
+async fn test_raft_config_parsing() -> anyhow::Result<()> {
     let logctx = LogContext::new(
         "clickhouse_cluster",
         &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info },
     );
 
-    let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster");
-    // TODO: Switch to "{prefix}_clickward_test" ?
-    let path = parent_dir.join(format!("clickward_test"));
+    let clickhouse_cli = ClickhouseCli::new(
+        Utf8PathBuf::from_str("clickhouse").unwrap(),
+        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0),
+    )
+    .with_log(logctx.log);
 
-    info!(&logctx.log, "Tearing down ClickHouse cluster"; "path" => ?path);
+    let raft_config = clickhouse_cli.raft_config().await.unwrap();
 
-    // TODO: Find another way to retrieve deployment
+    let mut keeper_servers = BTreeSet::new();
+    let num_keepers = 3;
 
-    // We spin up several replicated clusters and must use a
-    // separate set of ports in case the tests run concurrently.
- let base_ports = BasePorts { - keeper: 29000, - raft: 29100, - clickhouse_tcp: 29200, - clickhouse_http: 29300, - clickhouse_interserver_http: 29400, - }; + for i in 1..=num_keepers { + let raft_port = u16::try_from(29100 + i).unwrap(); + keeper_servers.insert(KeeperServerInfo { + server_id: clickhouse_admin_types::KeeperId(i), + host: ClickhouseHost::Ipv6("::1".parse().unwrap()), + raft_port, + server_type: KeeperServerType::Participant, + priority: 1, + }); + } - let config = DeploymentConfig { - path: path.clone(), - base_ports, - cluster_name: "oximeter_cluster".to_string(), - }; + let expected_raft_config = RaftConfig { keeper_servers }; - let deployment = Deployment::new(config); - deployment.teardown()?; - std::fs::remove_dir_all(path)?; - logctx.cleanup_successful(); + assert_eq!(raft_config, expected_raft_config); Ok(()) } -//#[tokio::test] -//async fn test_raft_config_parsing() -> anyhow::Result<()> { -// let logctx = test_setup_log("test_raft_config_parsing"); -// let log = logctx.log.clone(); -// -// let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name()); -// let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test")); -// std::fs::create_dir(&path)?; -// -// // We spin up several replicated clusters and must use a -// // separate set of ports in case the tests run concurrently. -// let base_ports = BasePorts { -// keeper: 29500, -// raft: 29600, -// clickhouse_tcp: 29700, -// clickhouse_http: 29800, -// clickhouse_interserver_http: 29900, -// }; -// -// let config = DeploymentConfig { -// path: path.clone(), -// base_ports, -// cluster_name: "oximeter_cluster".to_string(), -// }; -// -// let mut deployment = Deployment::new(config); -// -// let num_keepers = 3; -// let num_replicas = 1; -// deployment -// .generate_config(num_keepers, num_replicas) -// .context("failed to generate config")?; -// deployment.deploy().context("failed to deploy")?; -// -// wait_for_keepers( -// &log, -// &deployment, -// (1..=num_keepers).map(clickward::KeeperId).collect(), -// ) -// .await?; -// -// let clickhouse_cli = ClickhouseCli::new( -// Utf8PathBuf::from_str("clickhouse").unwrap(), -// SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29501, 0, 0), -// ) -// .with_log(log.clone()); -// -// let raft_config = clickhouse_cli.raft_config().await.unwrap(); -// -// let mut keeper_servers = BTreeSet::new(); -// -// for i in 1..=num_keepers { -// let raft_port = u16::try_from(29600 + i).unwrap(); -// keeper_servers.insert(KeeperServerInfo { -// server_id: clickhouse_admin_types::KeeperId(i), -// host: ClickhouseHost::Ipv6("::1".parse().unwrap()), -// raft_port, -// server_type: KeeperServerType::Participant, -// priority: 1, -// }); -// } -// -// let expected_raft_config = RaftConfig { keeper_servers }; -// -// assert_eq!(raft_config, expected_raft_config); -// -// info!(&log, "Cleaning up test"); -// deployment.teardown()?; -// std::fs::remove_dir_all(path)?; -// logctx.cleanup_successful(); -// Ok(()) -//} // //#[tokio::test] //async fn test_keeper_conf_parsing() -> anyhow::Result<()> { @@ -296,3 +221,42 @@ async fn test_teardown() -> anyhow::Result<()> { // logctx.cleanup_successful(); // Ok(()) //} + +#[tokio::test] +async fn test_teardown() -> anyhow::Result<()> { + let logctx = LogContext::new( + "clickhouse_cluster", + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, + ); + + let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster"); + // TODO: Switch to "{prefix}_clickward_test" ? 
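+    // NB: the fixed directory name below must match the one the
+    // clickhouse-cluster-dev setup binary creates, so that this teardown
+    // test can locate the same deployment.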
+ let path = parent_dir.join(format!("clickward_test")); + + info!(&logctx.log, "Tearing down ClickHouse cluster"; "path" => ?path); + + // TODO: Find another way to retrieve deployment + + // We spin up several replicated clusters and must use a + // separate set of ports in case the tests run concurrently. + let base_ports = BasePorts { + keeper: 29000, + raft: 29100, + clickhouse_tcp: 29200, + clickhouse_http: 29300, + clickhouse_interserver_http: 29400, + }; + + let config = DeploymentConfig { + path: path.clone(), + base_ports, + cluster_name: "oximeter_cluster".to_string(), + }; + + let deployment = Deployment::new(config); + deployment.teardown()?; + std::fs::remove_dir_all(path)?; + logctx.cleanup_successful(); + + Ok(()) +} \ No newline at end of file From 288de54d71d78da111a980c3024d3cd559dc703e Mon Sep 17 00:00:00 2001 From: karencfv Date: Tue, 12 Nov 2024 18:45:55 +1300 Subject: [PATCH 12/20] start adding tests --- clickhouse-admin/tests/integration_test.rs | 76 ++++++---------------- 1 file changed, 21 insertions(+), 55 deletions(-) diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index 306fb415ac..4b5ecee68b 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -87,61 +87,27 @@ async fn test_raft_config_parsing() -> anyhow::Result<()> { Ok(()) } -// -//#[tokio::test] -//async fn test_keeper_conf_parsing() -> anyhow::Result<()> { -// let logctx = test_setup_log("test_keeper_conf_parsing"); -// let log = logctx.log.clone(); -// -// let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name()); -// let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test")); -// std::fs::create_dir(&path)?; -// -// // We spin up several replicated clusters and must use a -// // separate set of ports in case the tests run concurrently. 
-// let base_ports = BasePorts { -// keeper: 30000, -// raft: 30100, -// clickhouse_tcp: 30200, -// clickhouse_http: 30300, -// clickhouse_interserver_http: 30400, -// }; -// -// let config = DeploymentConfig { -// path: path.clone(), -// base_ports, -// cluster_name: "oximeter_cluster".to_string(), -// }; -// -// let mut deployment = Deployment::new(config); -// -// // We only need a single keeper to test the conf command -// let num_keepers = 1; -// let num_replicas = 1; -// deployment -// .generate_config(num_keepers, num_replicas) -// .context("failed to generate config")?; -// deployment.deploy().context("failed to deploy")?; -// -// wait_for_keepers(&log, &deployment, vec![clickward::KeeperId(1)]).await?; -// -// let clickhouse_cli = ClickhouseCli::new( -// Utf8PathBuf::from_str("clickhouse").unwrap(), -// SocketAddrV6::new(Ipv6Addr::LOCALHOST, 30001, 0, 0), -// ) -// .with_log(log.clone()); -// -// let conf = clickhouse_cli.keeper_conf().await.unwrap(); -// -// assert_eq!(conf.server_id, clickhouse_admin_types::KeeperId(1)); -// -// info!(&log, "Cleaning up test"); -// deployment.teardown()?; -// std::fs::remove_dir_all(path)?; -// logctx.cleanup_successful(); -// Ok(()) -//} -// + +#[tokio::test] +async fn test_keeper_conf_parsing() -> anyhow::Result<()> { + let logctx = LogContext::new( + "clickhouse_cluster", + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, + ); + + let clickhouse_cli = ClickhouseCli::new( + Utf8PathBuf::from_str("clickhouse").unwrap(), + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), + ) + .with_log(log.clone()); + + let conf = clickhouse_cli.keeper_conf().await.unwrap(); + + assert_eq!(conf.server_id, clickhouse_admin_types::KeeperId(1)); + + Ok(()) +} + //#[tokio::test] //async fn test_keeper_cluster_membership() -> anyhow::Result<()> { // let logctx = test_setup_log("test_keeper_cluster_membership"); From 6186d816e2dd6be5268b078ed5c87e4049f11950 Mon Sep 17 00:00:00 2001 From: karencfv Date: Tue, 12 Nov 2024 18:57:33 +1300 Subject: [PATCH 13/20] add all the tests --- clickhouse-admin/tests/integration_test.rs | 124 ++++++++------------- 1 file changed, 44 insertions(+), 80 deletions(-) diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index 4b5ecee68b..722b322f77 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -32,6 +32,7 @@ async fn test_2() { #[tokio::test] async fn test_lgif_parsing() -> anyhow::Result<()> { + // TODO: Do we want these to have their own log dirs or not? 
let logctx = LogContext::new( "clickhouse_cluster", &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, @@ -99,7 +100,7 @@ async fn test_keeper_conf_parsing() -> anyhow::Result<()> { Utf8PathBuf::from_str("clickhouse").unwrap(), SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), ) - .with_log(log.clone()); + .with_log(logctx.log); let conf = clickhouse_cli.keeper_conf().await.unwrap(); @@ -108,85 +109,48 @@ async fn test_keeper_conf_parsing() -> anyhow::Result<()> { Ok(()) } -//#[tokio::test] -//async fn test_keeper_cluster_membership() -> anyhow::Result<()> { -// let logctx = test_setup_log("test_keeper_cluster_membership"); -// let log = logctx.log.clone(); -// -// let (parent_dir, prefix) = log_prefix_for_test(logctx.test_name()); -// let path = parent_dir.join(format!("{prefix}-oximeter-clickward-test")); -// std::fs::create_dir(&path)?; -// -// // We spin up several replicated clusters and must use a -// // separate set of ports in case the tests run concurrently. -// let base_ports = BasePorts { -// keeper: 30500, -// raft: 30600, -// clickhouse_tcp: 30700, -// clickhouse_http: 30800, -// clickhouse_interserver_http: 30900, -// }; -// -// let config = DeploymentConfig { -// path: path.clone(), -// base_ports, -// cluster_name: "oximeter_cluster".to_string(), -// }; -// -// let mut deployment = Deployment::new(config); -// -// let num_keepers = 3; -// let num_replicas = 1; -// deployment -// .generate_config(num_keepers, num_replicas) -// .context("failed to generate config")?; -// deployment.deploy().context("failed to deploy")?; -// -// wait_for_keepers( -// &log, -// &deployment, -// (1..=num_keepers).map(clickward::KeeperId).collect(), -// ) -// .await?; -// -// let clickhouse_cli = ClickhouseCli::new( -// Utf8PathBuf::from_str("clickhouse").unwrap(), -// SocketAddrV6::new(Ipv6Addr::LOCALHOST, 30501, 0, 0), -// ) -// .with_log(log.clone()); -// -// let keeper_cluster_membership = -// clickhouse_cli.keeper_cluster_membership().await.unwrap(); -// -// let mut raft_config = BTreeSet::new(); -// -// for i in 1..=num_keepers { -// raft_config.insert(clickhouse_admin_types::KeeperId(i)); -// } -// -// let expected_keeper_cluster_membership = -// ClickhouseKeeperClusterMembership { -// queried_keeper: KeeperId(1), -// // This number is always different so we won't be testing it -// leader_committed_log_index: 0, -// raft_config, -// }; -// -// assert_eq!( -// keeper_cluster_membership.queried_keeper, -// expected_keeper_cluster_membership.queried_keeper -// ); -// assert_eq!( -// keeper_cluster_membership.raft_config, -// expected_keeper_cluster_membership.raft_config -// ); -// -// info!(&log, "Cleaning up test"); -// deployment.teardown()?; -// std::fs::remove_dir_all(path)?; -// logctx.cleanup_successful(); -// Ok(()) -//} +#[tokio::test] +async fn test_keeper_cluster_membership() -> anyhow::Result<()> { + let logctx = LogContext::new( + "clickhouse_cluster", + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, + ); + + let clickhouse_cli = ClickhouseCli::new( + Utf8PathBuf::from_str("clickhouse").unwrap(), + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), + ) + .with_log(logctx.log); + + let keeper_cluster_membership = + clickhouse_cli.keeper_cluster_membership().await.unwrap(); + + let mut raft_config = BTreeSet::new(); + + let num_keepers = 3; + for i in 1..=num_keepers { + raft_config.insert(clickhouse_admin_types::KeeperId(i)); + } + + let expected_keeper_cluster_membership = + ClickhouseKeeperClusterMembership { + queried_keeper: 
KeeperId(1), + // This number is always different so we won't be testing it + leader_committed_log_index: 0, + raft_config, + }; + + assert_eq!( + keeper_cluster_membership.queried_keeper, + expected_keeper_cluster_membership.queried_keeper + ); + assert_eq!( + keeper_cluster_membership.raft_config, + expected_keeper_cluster_membership.raft_config + ); + + Ok(()) +} #[tokio::test] async fn test_teardown() -> anyhow::Result<()> { From d8a67b81b4417b4fc2719f467d1836159a35889d Mon Sep 17 00:00:00 2001 From: karencfv Date: Tue, 12 Nov 2024 19:02:13 +1300 Subject: [PATCH 14/20] clean up --- clickhouse-admin/tests/integration_test.rs | 19 ++----------------- oximeter/test-utils/src/lib.rs | 5 ++++- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index 722b322f77..88b1c3ce22 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -2,7 +2,6 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use anyhow::Context; use camino::Utf8PathBuf; use clickhouse_admin_types::{ ClickhouseHost, ClickhouseKeeperClusterMembership, KeeperId, @@ -12,24 +11,11 @@ use clickward::{BasePorts, Deployment, DeploymentConfig}; use dropshot::test_util::{log_prefix_for_test, LogContext}; use dropshot::{ConfigLogging, ConfigLoggingLevel}; use omicron_clickhouse_admin::ClickhouseCli; -use omicron_test_utils::dev::test_setup_log; -use oximeter_test_utils::wait_for_keepers; use slog::info; use std::collections::BTreeSet; use std::net::{Ipv6Addr, SocketAddrV6}; use std::str::FromStr; -#[tokio::test] -async fn test_1() { - println!("Running test 1..."); -} - -#[tokio::test] -async fn test_2() { - println!("Running test 2..."); - // assert_eq!(1, 2); -} - #[tokio::test] async fn test_lgif_parsing() -> anyhow::Result<()> { // TODO: Do we want these to have their own log dirs or not? @@ -88,7 +74,6 @@ async fn test_raft_config_parsing() -> anyhow::Result<()> { Ok(()) } - #[tokio::test] async fn test_keeper_conf_parsing() -> anyhow::Result<()> { let logctx = LogContext::new( @@ -105,7 +90,7 @@ async fn test_keeper_conf_parsing() -> anyhow::Result<()> { let conf = clickhouse_cli.keeper_conf().await.unwrap(); assert_eq!(conf.server_id, clickhouse_admin_types::KeeperId(1)); - + Ok(()) } @@ -189,4 +174,4 @@ async fn test_teardown() -> anyhow::Result<()> { logctx.cleanup_successful(); Ok(()) -} \ No newline at end of file +} diff --git a/oximeter/test-utils/src/lib.rs b/oximeter/test-utils/src/lib.rs index 8af8bc4fad..01d32576b0 100644 --- a/oximeter/test-utils/src/lib.rs +++ b/oximeter/test-utils/src/lib.rs @@ -187,7 +187,10 @@ pub async fn wait_for_keepers( } /// Try to ping the server until it responds. 
-pub async fn wait_for_ping(log: &Logger, client: &Client) -> anyhow::Result<()> { +pub async fn wait_for_ping( + log: &Logger, + client: &Client, +) -> anyhow::Result<()> { poll::wait_for_condition( || async { client From 1e5c3d22e740e802aed6316cf420511f67ca04e4 Mon Sep 17 00:00:00 2001 From: karencfv Date: Tue, 12 Nov 2024 19:20:04 +1300 Subject: [PATCH 15/20] clippy --- clickhouse-admin/tests/integration_test.rs | 2 +- dev-tools/clickhouse-cluster-dev/src/main.rs | 3 +-- oximeter/db/tests/integration_test.rs | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index 88b1c3ce22..6349eaea9b 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -146,7 +146,7 @@ async fn test_teardown() -> anyhow::Result<()> { let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster"); // TODO: Switch to "{prefix}_clickward_test" ? - let path = parent_dir.join(format!("clickward_test")); + let path = parent_dir.join("clickward_test"); info!(&logctx.log, "Tearing down ClickHouse cluster"; "path" => ?path); diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs index acc9ba77b7..6c7bfb4db9 100644 --- a/dev-tools/clickhouse-cluster-dev/src/main.rs +++ b/dev-tools/clickhouse-cluster-dev/src/main.rs @@ -20,7 +20,7 @@ async fn main() -> Result<()> { let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name()); // TODO: Switch to "{prefix}_clickward_test"? - let path = parent_dir.join(format!("clickward_test")); + let path = parent_dir.join("clickward_test"); std::fs::create_dir(&path)?; slog::info!(logctx.log, "Setting up a ClickHouse cluster"); @@ -43,7 +43,6 @@ async fn main() -> Result<()> { let mut deployment = Deployment::new(config); - // TODO: Use 2 replicas and 3 keepers, for now I'm just testing let num_keepers = 3; let num_replicas = 2; deployment diff --git a/oximeter/db/tests/integration_test.rs b/oximeter/db/tests/integration_test.rs index b34c962881..14246b6345 100644 --- a/oximeter/db/tests/integration_test.rs +++ b/oximeter/db/tests/integration_test.rs @@ -455,6 +455,7 @@ async fn wait_for_num_points( Ok(()) } +// TODO: Use the function in the other package /// Try to ping the server until it responds. 
async fn wait_for_ping(log: &Logger, client: &Client) -> anyhow::Result<()> { poll::wait_for_condition( From 4a41f1b97a3f50695af134bb4a75d48a09f3036f Mon Sep 17 00:00:00 2001 From: karencfv Date: Wed, 13 Nov 2024 15:53:39 +1300 Subject: [PATCH 16/20] clean up --- Cargo.lock | 11 ++++- Cargo.toml | 1 + clickhouse-admin/Cargo.toml | 3 +- clickhouse-admin/test-utils/Cargo.toml | 11 +++++ clickhouse-admin/test-utils/src/lib.rs | 15 ++++++ clickhouse-admin/tests/integration_test.rs | 48 ++++++++------------ dev-tools/clickhouse-cluster-dev/Cargo.toml | 1 + dev-tools/clickhouse-cluster-dev/src/main.rs | 11 ++--- 8 files changed, 62 insertions(+), 39 deletions(-) create mode 100644 clickhouse-admin/test-utils/Cargo.toml create mode 100644 clickhouse-admin/test-utils/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 3398709ca1..6b11a584d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1347,6 +1347,13 @@ dependencies = [ "slog", ] +[[package]] +name = "clickhouse-admin-test-utils" +version = "0.1.0" +dependencies = [ + "clickward", +] + [[package]] name = "clickhouse-admin-types" version = "0.1.0" @@ -1374,6 +1381,7 @@ name = "clickhouse-cluster-dev" version = "0.1.0" dependencies = [ "anyhow", + "clickhouse-admin-test-utils", "clickward", "dropshot", "omicron-workspace-hack", @@ -6317,13 +6325,13 @@ dependencies = [ "chrono", "clap", "clickhouse-admin-api", + "clickhouse-admin-test-utils", "clickhouse-admin-types", "clickward", "dropshot", "expectorate", "http 1.1.0", "illumos-utils", - "nexus-test-utils", "omicron-common", "omicron-test-utils", "omicron-uuid-kinds", @@ -6339,6 +6347,7 @@ dependencies = [ "slog-async", "slog-dtrace", "slog-error-chain", + "slog-term", "subprocess", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index fcfac3dad1..9849daf98c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -325,6 +325,7 @@ clickhouse-admin-api = { path = "clickhouse-admin/api" } clickhouse-admin-keeper-client = { path = "clients/clickhouse-admin-keeper-client" } clickhouse-admin-server-client = { path = "clients/clickhouse-admin-server-client" } clickhouse-admin-types = { path = "clickhouse-admin/types" } +clickhouse-admin-test-utils = { path = "clickhouse-admin/test-utils" } clickward = { git = "https://github.com/oxidecomputer/clickward", rev = "a1b342c2558e835d09e6e39a40d3de798a29c2f" } cockroach-admin-api = { path = "cockroach-admin/api" } cockroach-admin-client = { path = "clients/cockroach-admin-client" } diff --git a/clickhouse-admin/Cargo.toml b/clickhouse-admin/Cargo.toml index 84b04f6caa..3a8d45dd64 100644 --- a/clickhouse-admin/Cargo.toml +++ b/clickhouse-admin/Cargo.toml @@ -31,15 +31,16 @@ omicron-workspace-hack.workspace = true [dev-dependencies] clickward.workspace = true +clickhouse-admin-test-utils.workspace = true dropshot.workspace = true expectorate.workspace = true -nexus-test-utils.workspace = true omicron-test-utils.workspace = true oximeter-db.workspace = true oximeter-test-utils.workspace = true openapi-lint.workspace = true openapiv3.workspace = true serde_json.workspace = true +slog-term.workspace = true subprocess.workspace = true url.workspace = true diff --git a/clickhouse-admin/test-utils/Cargo.toml b/clickhouse-admin/test-utils/Cargo.toml new file mode 100644 index 0000000000..65b474787d --- /dev/null +++ b/clickhouse-admin/test-utils/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "clickhouse-admin-test-utils" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +clickward.workspace = true \ No newline at 
end of file diff --git a/clickhouse-admin/test-utils/src/lib.rs b/clickhouse-admin/test-utils/src/lib.rs new file mode 100644 index 0000000000..4924fb46e4 --- /dev/null +++ b/clickhouse-admin/test-utils/src/lib.rs @@ -0,0 +1,15 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Integration testing facilities for clickhouse-admin + +use clickward::BasePorts; + +pub const DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS: BasePorts = BasePorts { + keeper: 29000, + raft: 29100, + clickhouse_tcp: 29200, + clickhouse_http: 29300, + clickhouse_interserver_http: 29400, +}; \ No newline at end of file diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index 6349eaea9b..48207c1241 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -7,28 +7,33 @@ use clickhouse_admin_types::{ ClickhouseHost, ClickhouseKeeperClusterMembership, KeeperId, KeeperServerInfo, KeeperServerType, RaftConfig, }; +use clickhouse_admin_test_utils::DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; use clickward::{BasePorts, Deployment, DeploymentConfig}; use dropshot::test_util::{log_prefix_for_test, LogContext}; use dropshot::{ConfigLogging, ConfigLoggingLevel}; use omicron_clickhouse_admin::ClickhouseCli; -use slog::info; +use slog::{info, o, Drain}; +use slog_term::{FullFormat, PlainDecorator, TestStdoutWriter}; use std::collections::BTreeSet; use std::net::{Ipv6Addr, SocketAddrV6}; use std::str::FromStr; +fn log() -> slog::Logger { + let decorator = PlainDecorator::new(TestStdoutWriter); + let drain = FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + slog::Logger::root(drain, o!()) +} + #[tokio::test] async fn test_lgif_parsing() -> anyhow::Result<()> { - // TODO: Do we want these to have their own log dirs or not? 
- let logctx = LogContext::new( - "clickhouse_cluster", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); + let log = log(); let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse")?, SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), ) - .with_log(logctx.log); + .with_log(log); let lgif = clickhouse_cli.lgif().await?; @@ -40,16 +45,13 @@ async fn test_lgif_parsing() -> anyhow::Result<()> { #[tokio::test] async fn test_raft_config_parsing() -> anyhow::Result<()> { - let logctx = LogContext::new( - "clickhouse_cluster", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); + let log = log(); let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse").unwrap(), SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), ) - .with_log(logctx.log); + .with_log(log); let raft_config = clickhouse_cli.raft_config().await.unwrap(); @@ -76,16 +78,13 @@ async fn test_raft_config_parsing() -> anyhow::Result<()> { #[tokio::test] async fn test_keeper_conf_parsing() -> anyhow::Result<()> { - let logctx = LogContext::new( - "clickhouse_cluster", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); + let log = log(); let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse").unwrap(), SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), ) - .with_log(logctx.log); + .with_log(log); let conf = clickhouse_cli.keeper_conf().await.unwrap(); @@ -96,16 +95,13 @@ async fn test_keeper_conf_parsing() -> anyhow::Result<()> { #[tokio::test] async fn test_keeper_cluster_membership() -> anyhow::Result<()> { - let logctx = LogContext::new( - "clickhouse_cluster", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); + let log = log(); let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse").unwrap(), SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), ) - .with_log(logctx.log); + .with_log(log); let keeper_cluster_membership = clickhouse_cli.keeper_cluster_membership().await.unwrap(); @@ -154,13 +150,7 @@ async fn test_teardown() -> anyhow::Result<()> { // We spin up several replicated clusters and must use a // separate set of ports in case the tests run concurrently. - let base_ports = BasePorts { - keeper: 29000, - raft: 29100, - clickhouse_tcp: 29200, - clickhouse_http: 29300, - clickhouse_interserver_http: 29400, - }; + let base_ports = DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; let config = DeploymentConfig { path: path.clone(), diff --git a/dev-tools/clickhouse-cluster-dev/Cargo.toml b/dev-tools/clickhouse-cluster-dev/Cargo.toml index 49623534d9..cb5a6a90f2 100644 --- a/dev-tools/clickhouse-cluster-dev/Cargo.toml +++ b/dev-tools/clickhouse-cluster-dev/Cargo.toml @@ -11,6 +11,7 @@ workspace = true [dependencies] anyhow.workspace = true dropshot.workspace = true +clickhouse-admin-test-utils.workspace = true clickward.workspace = true slog.workspace = true tokio.workspace = true diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs index 6c7bfb4db9..07d5d84a25 100644 --- a/dev-tools/clickhouse-cluster-dev/src/main.rs +++ b/dev-tools/clickhouse-cluster-dev/src/main.rs @@ -3,7 +3,8 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use anyhow::{Context, Result}; -use clickward::{BasePorts, Deployment, DeploymentConfig, KeeperId}; +use clickhouse_admin_test_utils::DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; +use clickward::{Deployment, DeploymentConfig, KeeperId}; use dropshot::test_util::{log_prefix_for_test, LogContext}; use dropshot::{ConfigLogging, ConfigLoggingLevel}; use oximeter_db::Client; @@ -27,13 +28,7 @@ async fn main() -> Result<()> { // We spin up several replicated clusters and must use a // separate set of ports in case the tests run concurrently. - let base_ports = BasePorts { - keeper: 29000, - raft: 29100, - clickhouse_tcp: 29200, - clickhouse_http: 29300, - clickhouse_interserver_http: 29400, - }; + let base_ports = DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; let config = DeploymentConfig { path: path.clone(), From 1786875f7c609fe05cf02a4ab67705157d3d8da1 Mon Sep 17 00:00:00 2001 From: karencfv Date: Wed, 13 Nov 2024 17:27:00 +1300 Subject: [PATCH 17/20] Set default base ports --- clickhouse-admin/test-utils/src/lib.rs | 8 +++- clickhouse-admin/tests/integration_test.rs | 55 ++++++++++++++++++---- 2 files changed, 51 insertions(+), 12 deletions(-) diff --git a/clickhouse-admin/test-utils/src/lib.rs b/clickhouse-admin/test-utils/src/lib.rs index 4924fb46e4..17c985319e 100644 --- a/clickhouse-admin/test-utils/src/lib.rs +++ b/clickhouse-admin/test-utils/src/lib.rs @@ -4,7 +4,7 @@ //! Integration testing facilities for clickhouse-admin -use clickward::BasePorts; +use clickward::{BasePorts, Deployment}; pub const DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS: BasePorts = BasePorts { keeper: 29000, @@ -12,4 +12,8 @@ pub const DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS: BasePorts = BasePorts { clickhouse_tcp: 29200, clickhouse_http: 29300, clickhouse_interserver_http: 29400, -}; \ No newline at end of file +}; + +//pub fn default_clickhouse_cluster_test_deployment() -> Deployment { +// +//} diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index 48207c1241..becd8d508c 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -3,12 +3,12 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use camino::Utf8PathBuf; +use clickhouse_admin_test_utils::DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; use clickhouse_admin_types::{ ClickhouseHost, ClickhouseKeeperClusterMembership, KeeperId, KeeperServerInfo, KeeperServerType, RaftConfig, }; -use clickhouse_admin_test_utils::DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; -use clickward::{BasePorts, Deployment, DeploymentConfig}; +use clickward::{Deployment, DeploymentConfig}; use dropshot::test_util::{log_prefix_for_test, LogContext}; use dropshot::{ConfigLogging, ConfigLoggingLevel}; use omicron_clickhouse_admin::ClickhouseCli; @@ -25,13 +25,33 @@ fn log() -> slog::Logger { slog::Logger::root(drain, o!()) } +// In Clickward, keeper server ports are assigned by adding i to each +// base port. Keeper IDs are also assigned with consecutive numbers +// starting with 1. 
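+//
+// For example, with DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS (keeper: 29000,
+// raft: 29100), keeper 1 serves on port 29001 with raft port 29101, and
+// keeper 3 serves on port 29003 with raft port 29103.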
+fn get_keeper_server_port(keeper_id: KeeperId) -> u16 { + let raw_id = keeper_id.0; + // We can safely unwrap raw_id as the Keeper IDs we use for testing are + // all in the single digits + DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS.keeper + u16::try_from(raw_id).unwrap() +} + +fn get_keeper_raft_port(keeper_id: KeeperId) -> u16 { + let raw_id = keeper_id.0; + DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS.raft + u16::try_from(raw_id).unwrap() +} + #[tokio::test] async fn test_lgif_parsing() -> anyhow::Result<()> { let log = log(); let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse")?, - SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), + SocketAddrV6::new( + Ipv6Addr::LOCALHOST, + get_keeper_server_port(KeeperId(1)), + 0, + 0, + ), ) .with_log(log); @@ -49,7 +69,12 @@ async fn test_raft_config_parsing() -> anyhow::Result<()> { let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse").unwrap(), - SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), + SocketAddrV6::new( + Ipv6Addr::LOCALHOST, + get_keeper_server_port(KeeperId(1)), + 0, + 0, + ), ) .with_log(log); @@ -59,9 +84,9 @@ async fn test_raft_config_parsing() -> anyhow::Result<()> { let num_keepers = 3; for i in 1..=num_keepers { - let raft_port = u16::try_from(29100 + i).unwrap(); + let raft_port = get_keeper_raft_port(KeeperId(i)); keeper_servers.insert(KeeperServerInfo { - server_id: clickhouse_admin_types::KeeperId(i), + server_id: clickhouse_admin_types::KeeperId(i.into()), host: ClickhouseHost::Ipv6("::1".parse().unwrap()), raft_port, server_type: KeeperServerType::Participant, @@ -82,13 +107,18 @@ async fn test_keeper_conf_parsing() -> anyhow::Result<()> { let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse").unwrap(), - SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), + SocketAddrV6::new( + Ipv6Addr::LOCALHOST, + get_keeper_server_port(KeeperId(1)), + 0, + 0, + ), ) .with_log(log); let conf = clickhouse_cli.keeper_conf().await.unwrap(); - assert_eq!(conf.server_id, clickhouse_admin_types::KeeperId(1)); + assert_eq!(conf.server_id, KeeperId(1)); Ok(()) } @@ -99,7 +129,12 @@ async fn test_keeper_cluster_membership() -> anyhow::Result<()> { let clickhouse_cli = ClickhouseCli::new( Utf8PathBuf::from_str("clickhouse").unwrap(), - SocketAddrV6::new(Ipv6Addr::LOCALHOST, 29001, 0, 0), + SocketAddrV6::new( + Ipv6Addr::LOCALHOST, + get_keeper_server_port(KeeperId(1)), + 0, + 0, + ), ) .with_log(log); @@ -110,7 +145,7 @@ async fn test_keeper_cluster_membership() -> anyhow::Result<()> { let num_keepers = 3; for i in 1..=num_keepers { - raft_config.insert(clickhouse_admin_types::KeeperId(i)); + raft_config.insert(KeeperId(i)); } let expected_keeper_cluster_membership = From aef271828c564b7b8878084cc81ada467142c14c Mon Sep 17 00:00:00 2001 From: karencfv Date: Wed, 13 Nov 2024 18:00:38 +1300 Subject: [PATCH 18/20] Clean up --- Cargo.lock | 3 +- clickhouse-admin/test-utils/Cargo.toml | 4 ++- clickhouse-admin/test-utils/src/lib.rs | 33 +++++++++++++++++--- clickhouse-admin/tests/integration_test.rs | 33 +++++--------------- dev-tools/clickhouse-cluster-dev/Cargo.toml | 1 - dev-tools/clickhouse-cluster-dev/src/main.rs | 31 +++++------------- 6 files changed, 49 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b11a584d6..c4c8a6dcd9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1351,7 +1351,9 @@ dependencies = [ name = "clickhouse-admin-test-utils" version = "0.1.0" dependencies = [ + "camino", "clickward", + "dropshot", ] [[package]] @@ -1383,7 +1385,6 @@ 
dependencies = [ "anyhow", "clickhouse-admin-test-utils", "clickward", - "dropshot", "omicron-workspace-hack", "oximeter-db", "oximeter-test-utils", diff --git a/clickhouse-admin/test-utils/Cargo.toml b/clickhouse-admin/test-utils/Cargo.toml index 65b474787d..37010dcd5e 100644 --- a/clickhouse-admin/test-utils/Cargo.toml +++ b/clickhouse-admin/test-utils/Cargo.toml @@ -8,4 +8,6 @@ license = "MPL-2.0" workspace = true [dependencies] -clickward.workspace = true \ No newline at end of file +camino.workspace = true +clickward.workspace = true +dropshot.workspace = true \ No newline at end of file diff --git a/clickhouse-admin/test-utils/src/lib.rs b/clickhouse-admin/test-utils/src/lib.rs index 17c985319e..cc7ff5e9c8 100644 --- a/clickhouse-admin/test-utils/src/lib.rs +++ b/clickhouse-admin/test-utils/src/lib.rs @@ -4,7 +4,10 @@ //! Integration testing facilities for clickhouse-admin -use clickward::{BasePorts, Deployment}; +use camino::Utf8PathBuf; +use clickward::{BasePorts, Deployment, DeploymentConfig}; +use dropshot::test_util::{log_prefix_for_test, LogContext}; +use dropshot::{ConfigLogging, ConfigLoggingLevel}; pub const DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS: BasePorts = BasePorts { keeper: 29000, @@ -14,6 +17,28 @@ pub const DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS: BasePorts = BasePorts { clickhouse_interserver_http: 29400, }; -//pub fn default_clickhouse_cluster_test_deployment() -> Deployment { -// -//} +pub fn default_clickhouse_cluster_test_deployment( + path: Utf8PathBuf, +) -> Deployment { + let base_ports = DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; + + let config = DeploymentConfig { + path, + base_ports, + cluster_name: "oximeter_cluster".to_string(), + }; + + Deployment::new(config) +} + +pub fn default_clickhouse_log_ctx_and_path() -> (LogContext, Utf8PathBuf) { + let logctx = LogContext::new( + "clickhouse_cluster", + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, + ); + + let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster"); + let path = parent_dir.join("clickward_test"); + + (logctx, path) +} diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index becd8d508c..a11a697e25 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -3,14 +3,14 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use camino::Utf8PathBuf; -use clickhouse_admin_test_utils::DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; +use clickhouse_admin_test_utils::{ + default_clickhouse_cluster_test_deployment, + default_clickhouse_log_ctx_and_path, DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS, +}; use clickhouse_admin_types::{ ClickhouseHost, ClickhouseKeeperClusterMembership, KeeperId, KeeperServerInfo, KeeperServerType, RaftConfig, }; -use clickward::{Deployment, DeploymentConfig}; -use dropshot::test_util::{log_prefix_for_test, LogContext}; -use dropshot::{ConfigLogging, ConfigLoggingLevel}; use omicron_clickhouse_admin::ClickhouseCli; use slog::{info, o, Drain}; use slog_term::{FullFormat, PlainDecorator, TestStdoutWriter}; @@ -86,7 +86,7 @@ async fn test_raft_config_parsing() -> anyhow::Result<()> { for i in 1..=num_keepers { let raft_port = get_keeper_raft_port(KeeperId(i)); keeper_servers.insert(KeeperServerInfo { - server_id: clickhouse_admin_types::KeeperId(i.into()), + server_id: clickhouse_admin_types::KeeperId(i), host: ClickhouseHost::Ipv6("::1".parse().unwrap()), raft_port, server_type: KeeperServerType::Participant, @@ -170,30 +170,11 @@ async fn test_keeper_cluster_membership() -> anyhow::Result<()> { #[tokio::test] async fn test_teardown() -> anyhow::Result<()> { - let logctx = LogContext::new( - "clickhouse_cluster", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); - - let (parent_dir, _prefix) = log_prefix_for_test("clickhouse_cluster"); - // TODO: Switch to "{prefix}_clickward_test" ? - let path = parent_dir.join("clickward_test"); + let (logctx, path) = default_clickhouse_log_ctx_and_path(); info!(&logctx.log, "Tearing down ClickHouse cluster"; "path" => ?path); - // TODO: Find another way to retrieve deployment - - // We spin up several replicated clusters and must use a - // separate set of ports in case the tests run concurrently. - let base_ports = DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; - - let config = DeploymentConfig { - path: path.clone(), - base_ports, - cluster_name: "oximeter_cluster".to_string(), - }; - - let deployment = Deployment::new(config); + let deployment = default_clickhouse_cluster_test_deployment(path.clone()); deployment.teardown()?; std::fs::remove_dir_all(path)?; logctx.cleanup_successful(); diff --git a/dev-tools/clickhouse-cluster-dev/Cargo.toml b/dev-tools/clickhouse-cluster-dev/Cargo.toml index cb5a6a90f2..2e23b7d52c 100644 --- a/dev-tools/clickhouse-cluster-dev/Cargo.toml +++ b/dev-tools/clickhouse-cluster-dev/Cargo.toml @@ -10,7 +10,6 @@ workspace = true [dependencies] anyhow.workspace = true -dropshot.workspace = true clickhouse-admin-test-utils.workspace = true clickward.workspace = true slog.workspace = true diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs index 07d5d84a25..080101e6fc 100644 --- a/dev-tools/clickhouse-cluster-dev/src/main.rs +++ b/dev-tools/clickhouse-cluster-dev/src/main.rs @@ -3,10 +3,11 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use anyhow::{Context, Result}; -use clickhouse_admin_test_utils::DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; -use clickward::{Deployment, DeploymentConfig, KeeperId}; -use dropshot::test_util::{log_prefix_for_test, LogContext}; -use dropshot::{ConfigLogging, ConfigLoggingLevel}; +use clickhouse_admin_test_utils::{ + default_clickhouse_cluster_test_deployment, + default_clickhouse_log_ctx_and_path, +}; +use clickward::KeeperId; use oximeter_db::Client; use oximeter_test_utils::{wait_for_keepers, wait_for_ping}; use std::time::Duration; @@ -14,29 +15,13 @@ use std::time::Duration; #[tokio::main] async fn main() -> Result<()> { let request_timeout = Duration::from_secs(15); - let logctx = LogContext::new( - "clickhouse_cluster", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); - - let (parent_dir, _prefix) = log_prefix_for_test(logctx.test_name()); - // TODO: Switch to "{prefix}_clickward_test"? - let path = parent_dir.join("clickward_test"); + let (logctx, path) = default_clickhouse_log_ctx_and_path(); std::fs::create_dir(&path)?; slog::info!(logctx.log, "Setting up a ClickHouse cluster"); - // We spin up several replicated clusters and must use a - // separate set of ports in case the tests run concurrently. - let base_ports = DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; - - let config = DeploymentConfig { - path: path.clone(), - base_ports, - cluster_name: "oximeter_cluster".to_string(), - }; - - let mut deployment = Deployment::new(config); + let mut deployment = + default_clickhouse_cluster_test_deployment(path.clone()); let num_keepers = 3; let num_replicas = 2; From 21305b94d7247338e5c146f0bd51820058615b05 Mon Sep 17 00:00:00 2001 From: karencfv Date: Wed, 13 Nov 2024 21:13:35 +1300 Subject: [PATCH 19/20] make linter happy --- Cargo.lock | 1 + Cargo.toml | 2 ++ clickhouse-admin/test-utils/Cargo.toml | 4 +++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index c4c8a6dcd9..84db2cfe58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1354,6 +1354,7 @@ dependencies = [ "camino", "clickward", "dropshot", + "omicron-workspace-hack", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 9849daf98c..249a1244eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "certificates", "clickhouse-admin", "clickhouse-admin/api", + "clickhouse-admin/test-utils", "clients/bootstrap-agent-client", "clients/clickhouse-admin-keeper-client", "clients/clickhouse-admin-server-client", @@ -131,6 +132,7 @@ default-members = [ "clickhouse-admin", "clickhouse-admin/api", "clickhouse-admin/types", + "clickhouse-admin/test-utils", "clients/bootstrap-agent-client", "clients/clickhouse-admin-keeper-client", "clients/clickhouse-admin-server-client", diff --git a/clickhouse-admin/test-utils/Cargo.toml b/clickhouse-admin/test-utils/Cargo.toml index 37010dcd5e..617dd94b9f 100644 --- a/clickhouse-admin/test-utils/Cargo.toml +++ b/clickhouse-admin/test-utils/Cargo.toml @@ -10,4 +10,6 @@ workspace = true [dependencies] camino.workspace = true clickward.workspace = true -dropshot.workspace = true \ No newline at end of file +dropshot.workspace = true + +omicron-workspace-hack.workspace = true \ No newline at end of file From 8163b74326dbfd365c22655dc0e174913a849674 Mon Sep 17 00:00:00 2001 From: karencfv Date: Thu, 14 Nov 2024 11:15:02 +1300 Subject: [PATCH 20/20] Address comments --- Cargo.lock | 1 + clickhouse-admin/test-utils/Cargo.toml | 1 + clickhouse-admin/test-utils/src/lib.rs | 7 +++---- clickhouse-admin/tests/integration_test.rs | 2 +- 
dev-tools/clickhouse-cluster-dev/src/main.rs | 6 ++++++ 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84db2cfe58..f14467ac36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1352,6 +1352,7 @@ name = "clickhouse-admin-test-utils" version = "0.1.0" dependencies = [ "camino", + "clickhouse-admin-types", "clickward", "dropshot", "omicron-workspace-hack", diff --git a/clickhouse-admin/test-utils/Cargo.toml b/clickhouse-admin/test-utils/Cargo.toml index 617dd94b9f..8ce2966206 100644 --- a/clickhouse-admin/test-utils/Cargo.toml +++ b/clickhouse-admin/test-utils/Cargo.toml @@ -9,6 +9,7 @@ workspace = true [dependencies] camino.workspace = true +clickhouse-admin-types.workspace = true clickward.workspace = true dropshot.workspace = true diff --git a/clickhouse-admin/test-utils/src/lib.rs b/clickhouse-admin/test-utils/src/lib.rs index cc7ff5e9c8..7bc73a43a8 100644 --- a/clickhouse-admin/test-utils/src/lib.rs +++ b/clickhouse-admin/test-utils/src/lib.rs @@ -5,6 +5,7 @@ //! Integration testing facilities for clickhouse-admin use camino::Utf8PathBuf; +use clickhouse_admin_types::OXIMETER_CLUSTER; use clickward::{BasePorts, Deployment, DeploymentConfig}; use dropshot::test_util::{log_prefix_for_test, LogContext}; use dropshot::{ConfigLogging, ConfigLoggingLevel}; @@ -20,12 +21,10 @@ pub const DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS: BasePorts = BasePorts { pub fn default_clickhouse_cluster_test_deployment( path: Utf8PathBuf, ) -> Deployment { - let base_ports = DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS; - let config = DeploymentConfig { path, - base_ports, - cluster_name: "oximeter_cluster".to_string(), + base_ports: DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS, + cluster_name: OXIMETER_CLUSTER.to_string(), }; Deployment::new(config) diff --git a/clickhouse-admin/tests/integration_test.rs b/clickhouse-admin/tests/integration_test.rs index a11a697e25..7c0d12b74b 100644 --- a/clickhouse-admin/tests/integration_test.rs +++ b/clickhouse-admin/tests/integration_test.rs @@ -55,7 +55,7 @@ async fn test_lgif_parsing() -> anyhow::Result<()> { ) .with_log(log); - let lgif = clickhouse_cli.lgif().await?; + let lgif = clickhouse_cli.lgif().await.unwrap(); // The first log index from a newly created cluster should always be 1 assert_eq!(lgif.first_log_idx, 1); diff --git a/dev-tools/clickhouse-cluster-dev/src/main.rs b/dev-tools/clickhouse-cluster-dev/src/main.rs index 080101e6fc..54714ac368 100644 --- a/dev-tools/clickhouse-cluster-dev/src/main.rs +++ b/dev-tools/clickhouse-cluster-dev/src/main.rs @@ -2,6 +2,12 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +//! Sets up a 3 keeper 2 replica ClickHouse cluster for clickhouse-admin +//! integration tests. +//! +//! NB: This should only be used for testing that doesn't write data to +//! ClickHouse. Otherwise, it may result in flaky tests. + use anyhow::{Context, Result}; use clickhouse_admin_test_utils::{ default_clickhouse_cluster_test_deployment,
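
Note: each of the four keeper tests above builds the same ClickhouseCli by
hand. Below is a minimal sketch of a shared helper for that boilerplate,
assuming the imports and the log()/get_keeper_server_port() helpers already
present in clickhouse-admin/tests/integration_test.rs; the helper name
keeper_cli is hypothetical and not part of this series:

    fn keeper_cli(keeper_id: KeeperId) -> ClickhouseCli {
        // Same clickhouse binary name and localhost address as the tests
        // above; the port is derived from DEFAULT_CLICKHOUSE_ADMIN_BASE_PORTS
        // via get_keeper_server_port().
        ClickhouseCli::new(
            Utf8PathBuf::from_str("clickhouse").unwrap(),
            SocketAddrV6::new(
                Ipv6Addr::LOCALHOST,
                get_keeper_server_port(keeper_id),
                0,
                0,
            ),
        )
        .with_log(log())
    }

A test would then begin with `let clickhouse_cli = keeper_cli(KeeperId(1));`
instead of repeating the address construction.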