From 6f6ec49098f90ec58f9116966e9579f2d4fa983e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Natalie=20Klestrup=20R=C3=B6ijezon?= Date: Tue, 14 Feb 2023 15:47:24 +0100 Subject: [PATCH 001/101] WIP: rekerberize see #154 --- rust/crd/src/lib.rs | 2 + rust/operator/src/container.rs | 67 ++++++++++++++-- rust/operator/src/hdfs_controller.rs | 110 ++++++++++++++++++++++++++- 3 files changed, 173 insertions(+), 6 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 7702a967..725fb9ac 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -523,6 +523,8 @@ impl HdfsCluster { let pnk = vec![ PropertyNameKind::File(HDFS_SITE_XML.to_string()), PropertyNameKind::File(CORE_SITE_XML.to_string()), + PropertyNameKind::File("ssl-server.xml".to_string()), + PropertyNameKind::File("ssl-client.xml".to_string()), PropertyNameKind::Env, ]; diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index fae8a90a..8f84ae6f 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -9,10 +9,13 @@ //! - Set resources //! - Add tcp probes and container ports (to the main containers) //! 
-use crate::product_logging::{ - FORMAT_NAMENODES_LOG4J_CONFIG_FILE, FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, HDFS_LOG4J_CONFIG_FILE, - MAX_LOG_FILES_SIZE_IN_MIB, STACKABLE_LOG_DIR, WAIT_FOR_NAMENODES_LOG4J_CONFIG_FILE, - ZKFC_LOG4J_CONFIG_FILE, +use crate::{ + hdfs_controller::KEYSTORE_DIR_NAME, + product_logging::{ + FORMAT_NAMENODES_LOG4J_CONFIG_FILE, FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, + HDFS_LOG4J_CONFIG_FILE, MAX_LOG_FILES_SIZE_IN_MIB, STACKABLE_LOG_DIR, + WAIT_FOR_NAMENODES_LOG4J_CONFIG_FILE, ZKFC_LOG4J_CONFIG_FILE, + }, }; use indoc::formatdoc; @@ -28,7 +31,10 @@ use stackable_hdfs_crd::{ DataNodeContainer, HdfsPodRef, HdfsRole, MergedConfig, NameNodeContainer, }; use stackable_operator::{ - builder::{ContainerBuilder, PodBuilder, VolumeBuilder, VolumeMountBuilder}, + builder::{ + ContainerBuilder, PodBuilder, SecretOperatorVolumeSourceBuilder, VolumeBuilder, + VolumeMountBuilder, + }, commons::product_image_selection::ResolvedProductImage, k8s_openapi::{ api::core::v1::{ @@ -268,6 +274,13 @@ impl ContainerConfig { cb.image_from_product_image(resolved_product_image) .command(self.command()) .args(self.args(merged_config, &[])) + .add_env_var( + "HADDOP_OPTS", + "-Djava.security.krb5.conf=/kerberos/krb5.conf", + ) + .add_env_var("HADOOP_JAAS_DEBUG", "true") + .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") + .add_env_var("KRB5_TRACE", "/dev/stdout") .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, resources.as_ref())) .add_volume_mounts(self.volume_mounts(merged_config)) .add_container_ports(self.container_ports()); @@ -585,6 +598,47 @@ impl ContainerConfig { .build(), ); + let mut krb_src = VolumeBuilder::new("kerberos") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new("kerberos") + .with_pod_scope() + .with_node_scope() + .build(), + ) + .build(); + krb_src + .ephemeral + .as_mut() + .unwrap() + .volume_claim_template + .get_or_insert(Default::default()) + .metadata + .get_or_insert(Default::default()) + .annotations + 
.get_or_insert(Default::default()) + .insert( + "secrets.stackable.tech/kerberos.service.names".to_string(), + "jn,nn,dn,HTTP".to_string(), + ); + volumes.push(krb_src); + + volumes.push( + VolumeBuilder::new("tls") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new("tls") + .with_pod_scope() + .with_node_scope() + .build(), + ) + .build(), + ); + + volumes.push( + VolumeBuilder::new("keystore") + .with_empty_dir(Option::::None, None) + .build(), + ); + Some(merged_config.hdfs_logging()) } ContainerConfig::Zkfc { .. } => merged_config.zkfc_logging(), @@ -621,6 +675,9 @@ impl ContainerConfig { self.volume_mount_dirs().log_mount(), ) .build(), + VolumeMountBuilder::new("kerberos", "/kerberos").build(), + VolumeMountBuilder::new("tls", "/stackable/tls").build(), + VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build(), ]; match self { diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index efaada59..881cb491 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -45,6 +45,8 @@ const RESOURCE_MANAGER_HDFS_CONTROLLER: &str = "hdfs-operator-hdfs-controller"; const HDFS_CONTROLLER: &str = "hdfs-controller"; const DOCKER_IMAGE_BASE_NAME: &str = "hadoop"; +pub(crate) const KEYSTORE_DIR_NAME: &str = "/stackable/keystore"; + #[derive(Snafu, Debug, EnumDiscriminants)] #[strum_discriminants(derive(IntoStaticStr))] pub enum Error { @@ -377,6 +379,8 @@ fn rolegroup_config_map( let mut hdfs_site_xml = String::new(); let mut core_site_xml = String::new(); + let mut ssl_server_xml = String::new(); + let mut ssl_client_xml = String::new(); for (property_name_kind, config) in rolegroup_config { match property_name_kind { @@ -406,6 +410,11 @@ fn rolegroup_config_map( .add("dfs.ha.nn.not-become-active-in-safemode", "true") .add("dfs.ha.automatic-failover.enabled", "true") .add("dfs.ha.namenode.id", "${env.POD_NAME}") + .add("dfs.block.access.token.enable", "true") + 
.add("dfs.data.transfer.protection", "authentication") + .add("dfs.http.policy", "HTTPS_ONLY") + .add("dfs.https.client.keystore.resource", "ssl-client.xml") + .add("dfs.https.server.keystore.resource", "ssl-server.xml") // the extend with config must come last in order to have overrides working!!! .extend(config) .build_as_xml(); @@ -414,10 +423,107 @@ fn rolegroup_config_map( core_site_xml = CoreSiteConfigBuilder::new(hdfs_name.to_string()) .fs_default_fs() .ha_zookeeper_quorum() + .extend( + &[ + ( + "hadoop.security.authentication".to_string(), + "kerberos".to_string(), + ), + ( + "hadoop.security.authentication".to_string(), + "kerberos".to_string(), + ), + ( + "hadoop.security.authorization".to_string(), + "true".to_string(), + ), + ( + "hadoop.registry.kerberos.realm".to_string(), + "CLUSTER.LOCAL".to_string(), + ), + ( + "dfs.web.authentication.kerberos.principal".to_string(), + "HTTP/_HOST@CLUSTER.LOCAL".to_string(), + ), + ( + "dfs.journalnode.kerberos.internal.spnego.principal".to_string(), + "HTTP/_HOST@CLUSTER.LOCAL".to_string(), + ), + ( + "dfs.journalnode.kerberos.principal".to_string(), + "jn/_HOST@CLUSTER.LOCAL".to_string(), + ), + ( + "dfs.namenode.kerberos.principal".to_string(), + "nn/_HOST@CLUSTER.LOCAL".to_string(), + ), + ( + "dfs.datanode.kerberos.principal".to_string(), + "dn/_HOST@CLUSTER.LOCAL".to_string(), + ), + ( + "dfs.web.authentication.keytab.file".to_string(), + "/kerberos/keytab".to_string(), + ), + ( + "dfs.journalnode.keytab.file".to_string(), + "/kerberos/keytab".to_string(), + ), + ( + "dfs.namenode.keytab.file".to_string(), + "/kerberos/keytab".to_string(), + ), + ( + "dfs.datanode.keytab.file".to_string(), + "/kerberos/keytab".to_string(), + ), + ] + .into(), + ) // the extend with config must come last in order to have overrides working!!! 
.extend(config) .build_as_xml(); } + PropertyNameKind::File(file_name) if file_name == "ssl-server.xml" => { + let mut config_opts = BTreeMap::new(); + config_opts.extend([ + ( + "ssl.server.keystore.location".to_string(), + Some(format!("{KEYSTORE_DIR_NAME}/keystore.p12")), + ), + ( + "ssl.server.keystore.password".to_string(), + Some("secret".to_string()), + ), + ( + "ssl.server.keystore.type".to_string(), + Some("pkcs12".to_string()), + ), + ]); + config_opts.extend(config.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + ssl_server_xml = + stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); + } + PropertyNameKind::File(file_name) if file_name == "ssl-client.xml" => { + let mut config_opts = BTreeMap::new(); + config_opts.extend([ + ( + "ssl.client.truststore.location".to_string(), + Some(format!("{KEYSTORE_DIR_NAME}/truststore.p12")), + ), + ( + "ssl.client.truststore.password".to_string(), + Some("secret".to_string()), + ), + ( + "ssl.client.truststore.type".to_string(), + Some("pkcs12".to_string()), + ), + ]); + config_opts.extend(config.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + ssl_client_xml = + stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); + } _ => {} } } @@ -443,7 +549,9 @@ fn rolegroup_config_map( .build(), ) .add_data(CORE_SITE_XML.to_string(), core_site_xml) - .add_data(HDFS_SITE_XML.to_string(), hdfs_site_xml); + .add_data(HDFS_SITE_XML.to_string(), hdfs_site_xml) + .add_data("ssl-server.xml", ssl_server_xml) + .add_data("ssl-client.xml", ssl_client_xml); extend_role_group_config_map( rolegroup_ref, From 1914ebcd98521ac6de9f73bd385a1327369f1b54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Natalie=20Klestrup=20R=C3=B6ijezon?= Date: Fri, 17 Feb 2023 13:03:23 +0100 Subject: [PATCH 002/101] Workingish but incredibly hacky Kerberized HDFS --- .../hdfs/examples/getting_started/hdfs.yaml | 36 ++++++++- rust/crd/src/constants.rs | 2 + rust/crd/src/lib.rs | 4 + rust/operator/src/config.rs 
| 16 ++-- rust/operator/src/container.rs | 76 +++++++++++++++---- rust/operator/src/hdfs_controller.rs | 16 +++- 6 files changed, 130 insertions(+), 20 deletions(-) diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index 4131b7f5..e30ed1b2 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -6,18 +6,52 @@ metadata: spec: image: productVersion: 3.3.4 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 + repo: docker.stackable.tech/natkr/krb5 zookeeperConfigMapName: simple-hdfs-znode dfsReplication: 3 nameNodes: roleGroups: default: replicas: 2 + config: + logging: + containers: + hdfs: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG + formatNameNodes: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG dataNodes: roleGroups: default: replicas: 1 + config: + logging: + containers: + hdfs: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG journalNodes: roleGroups: default: replicas: 1 + config: + logging: + containers: + hdfs: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG diff --git a/rust/crd/src/constants.rs b/rust/crd/src/constants.rs index 80c9f313..1f50d569 100644 --- a/rust/crd/src/constants.rs +++ b/rust/crd/src/constants.rs @@ -23,6 +23,7 @@ pub const SERVICE_PORT_NAME_METRICS: &str = "metrics"; pub const DEFAULT_NAME_NODE_METRICS_PORT: u16 = 8183; pub const DEFAULT_NAME_NODE_HTTP_PORT: u16 = 9870; +pub const DEFAULT_NAME_NODE_HTTPS_PORT: u16 = 9871; pub const DEFAULT_NAME_NODE_RPC_PORT: u16 = 8020; pub const DEFAULT_DATA_NODE_METRICS_PORT: u16 = 8082; @@ -40,6 +41,7 @@ pub const DFS_NAMENODE_NAME_DIR: &str = "dfs.namenode.name.dir"; pub const DFS_NAMENODE_SHARED_EDITS_DIR: &str = "dfs.namenode.shared.edits.dir"; pub const DFS_NAMENODE_RPC_ADDRESS: &str = "dfs.namenode.rpc-address"; pub const DFS_NAMENODE_HTTP_ADDRESS: &str = "dfs.namenode.http-address"; +pub const 
DFS_NAMENODE_HTTPS_ADDRESS: &str = "dfs.namenode.https-address"; pub const DFS_DATANODE_DATA_DIR: &str = "dfs.datanode.data.dir"; pub const DFS_JOURNALNODE_EDITS_DIR: &str = "dfs.journalnode.edits.dir"; pub const DFS_JOURNALNODE_RPC_ADDRESS: &str = "dfs.journalnode.rpc-address"; diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 725fb9ac..73af2f41 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -180,6 +180,10 @@ impl HdfsRole { String::from(SERVICE_PORT_NAME_HTTP), DEFAULT_NAME_NODE_HTTP_PORT, ), + ( + String::from(SERVICE_PORT_NAME_HTTPS), + DEFAULT_NAME_NODE_HTTPS_PORT, + ), ( String::from(SERVICE_PORT_NAME_RPC), DEFAULT_NAME_NODE_RPC_PORT, diff --git a/rust/operator/src/config.rs b/rust/operator/src/config.rs index f4760cbb..cc09fde9 100644 --- a/rust/operator/src/config.rs +++ b/rust/operator/src/config.rs @@ -1,9 +1,10 @@ use stackable_hdfs_crd::constants::{ - DEFAULT_JOURNAL_NODE_RPC_PORT, DEFAULT_NAME_NODE_HTTP_PORT, DEFAULT_NAME_NODE_RPC_PORT, - DFS_DATANODE_DATA_DIR, DFS_HA_NAMENODES, DFS_JOURNALNODE_EDITS_DIR, - DFS_JOURNALNODE_RPC_ADDRESS, DFS_NAMENODE_HTTP_ADDRESS, DFS_NAMENODE_NAME_DIR, - DFS_NAMENODE_RPC_ADDRESS, DFS_NAMENODE_SHARED_EDITS_DIR, DFS_NAME_SERVICES, DFS_REPLICATION, - FS_DEFAULT_FS, HA_ZOOKEEPER_QUORUM, JOURNALNODE_ROOT_DATA_DIR, NAMENODE_ROOT_DATA_DIR, + DEFAULT_JOURNAL_NODE_RPC_PORT, DEFAULT_NAME_NODE_HTTPS_PORT, DEFAULT_NAME_NODE_HTTP_PORT, + DEFAULT_NAME_NODE_RPC_PORT, DFS_DATANODE_DATA_DIR, DFS_HA_NAMENODES, DFS_JOURNALNODE_EDITS_DIR, + DFS_JOURNALNODE_RPC_ADDRESS, DFS_NAMENODE_HTTPS_ADDRESS, DFS_NAMENODE_HTTP_ADDRESS, + DFS_NAMENODE_NAME_DIR, DFS_NAMENODE_RPC_ADDRESS, DFS_NAMENODE_SHARED_EDITS_DIR, + DFS_NAME_SERVICES, DFS_REPLICATION, FS_DEFAULT_FS, HA_ZOOKEEPER_QUORUM, + JOURNALNODE_ROOT_DATA_DIR, NAMENODE_ROOT_DATA_DIR, }; use stackable_hdfs_crd::storage::{DataNodeStorageConfig, DataNodeStorageConfigInnerType}; use stackable_hdfs_crd::HdfsPodRef; @@ -150,6 +151,11 @@ impl HdfsSiteConfigBuilder { 
DFS_NAMENODE_HTTP_ADDRESS, DEFAULT_NAME_NODE_HTTP_PORT, ); + self.dfs_namenode_address_ha( + namenode_podrefs, + DFS_NAMENODE_HTTPS_ADDRESS, + DEFAULT_NAME_NODE_HTTPS_PORT, + ); self } diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 8f84ae6f..be60616f 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -275,12 +275,12 @@ impl ContainerConfig { .command(self.command()) .args(self.args(merged_config, &[])) .add_env_var( - "HADDOP_OPTS", + "HADOOP_OPTS", "-Djava.security.krb5.conf=/kerberos/krb5.conf", ) .add_env_var("HADOOP_JAAS_DEBUG", "true") .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") - .add_env_var("KRB5_TRACE", "/dev/stdout") + .add_env_var("KRB5_TRACE", "/dev/stderr") .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, resources.as_ref())) .add_volume_mounts(self.volume_mounts(merged_config)) .add_container_ports(self.container_ports()); @@ -313,6 +313,13 @@ impl ContainerConfig { .image_from_product_image(resolved_product_image) .command(self.command()) .args(self.args(merged_config, namenode_podrefs)) + .add_env_var( + "HADOOP_OPTS", + "-Djava.security.krb5.conf=/kerberos/krb5.conf", + ) + .add_env_var("HADOOP_JAAS_DEBUG", "true") + .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") + .add_env_var("KRB5_TRACE", "/dev/stderr") .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, None)) .add_volume_mounts(self.volume_mounts(merged_config)) .build()) @@ -368,6 +375,22 @@ impl ContainerConfig { self.create_config_directory_cmd(), self.copy_hdfs_and_core_site_xml_cmd(), ]; + args.push([ + "echo Storing password", + &format!("echo secret > {keystore_directory}/password", keystore_directory = KEYSTORE_DIR_NAME), + "echo Cleaning up truststore - just in case", + &format!("rm -f {keystore_directory}/truststore.p12", keystore_directory = KEYSTORE_DIR_NAME), + "echo Creating truststore", + &format!("keytool -importcert -file /stackable/tls/ca.crt -keystore 
{keystore_directory}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass secret", + keystore_directory = KEYSTORE_DIR_NAME), + "echo Creating certificate chain", + &format!("cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {keystore_directory}/chain.crt", keystore_directory = KEYSTORE_DIR_NAME), + "echo Creating keystore", + &format!("openssl pkcs12 -export -in {keystore_directory}/chain.crt -inkey /stackable/tls/tls.key -out {keystore_directory}/keystore.p12 --passout file:{keystore_directory}/password", + keystore_directory = KEYSTORE_DIR_NAME), + "echo Cleaning up password", + &format!("rm -f {keystore_directory}/password", keystore_directory = KEYSTORE_DIR_NAME), + ].join(" && ")); match self { ContainerConfig::Hdfs { role, .. } => { @@ -407,12 +430,17 @@ impl ContainerConfig { // $NAMENODE_DIR/current/VERSION. Then we dont do anything. // If there is no active namenode, the current pod is not formatted we format as // active namenode. Otherwise as standby node. - args.push(formatdoc!(r###" + args.push(formatdoc!( + r###" + # hdfs' admin tools don't support specifying a custom keytab + kinit nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL -kt /kerberos/keytab + cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Start formatting namenode $POD_NAME. Checking for active namenodes:" for id in {pod_names} do echo -n "Checking pod $id... 
" - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id 2>/dev/null) + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id | tail -n1) + echo "FOOBAR $SERVICE_STATE BARFOO" if [ "$SERVICE_STATE" == "active" ] then ACTIVE_NAMENODE=$id @@ -422,8 +450,21 @@ impl ContainerConfig { echo "" done + echo Storing password + echo secret > {KEYSTORE_DIR_NAME}/password + echo Cleaning up truststore - just in case + rm -f {KEYSTORE_DIR_NAME}/truststore.p12 + echo Creating truststore + keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass secret + echo Creating certificate chain + cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt + echo Creating keystore + openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout file:{KEYSTORE_DIR_NAME}/password + echo Cleaning up password + rm -f {KEYSTORE_DIR_NAME}/password + set -e - if [ ! -f "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" ] + if ! ls {NAMENODE_ROOT_DATA_DIR}/current/fsimage_* then if [ -z ${{ACTIVE_NAMENODE+x}} ] then @@ -434,8 +475,11 @@ impl ContainerConfig { {hadoop_home}/bin/hdfs namenode -bootstrapStandby -nonInteractive fi else + cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Pod $POD_NAME already formatted. Skipping..." - fi"###, + fi + + "###, hadoop_home = Self::HADOOP_HOME, pod_names = namenode_podrefs .iter() @@ -478,7 +522,7 @@ impl ContainerConfig { for id in {pod_names} do echo -n "Checking pod $id... 
" - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id 2>/dev/null) + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id 2>/dev/null | tail -n1) if [ "$SERVICE_STATE" = "active" ] || [ "$SERVICE_STATE" = "standby" ] then echo "$SERVICE_STATE" @@ -603,6 +647,7 @@ impl ContainerConfig { SecretOperatorVolumeSourceBuilder::new("kerberos") .with_pod_scope() .with_node_scope() + .with_service_scope("simple-hdfs-namenode-default") .build(), ) .build(); @@ -732,15 +777,20 @@ impl ContainerConfig { fn copy_hdfs_and_core_site_xml_cmd(&self) -> String { vec![ format!( - "cp {config_dir_mount}/{HDFS_SITE_XML} {config_dir_name}/{HDFS_SITE_XML}", - config_dir_mount = self.volume_mount_dirs().config_mount(), - config_dir_name = self.volume_mount_dirs().final_config() - ), - format!( - "cp {config_dir_mount}/{CORE_SITE_XML} {config_dir_name}/{CORE_SITE_XML}", + "cp {config_dir_mount}/* {config_dir_name}", config_dir_mount = self.volume_mount_dirs().config_mount(), config_dir_name = self.volume_mount_dirs().final_config() ), + // format!( + // "cp {config_dir_mount}/{HDFS_SITE_XML} {config_dir_name}/{HDFS_SITE_XML}", + // config_dir_mount = self.volume_mount_dirs().config_mount(), + // config_dir_name = self.volume_mount_dirs().final_config() + // ), + // format!( + // "cp {config_dir_mount}/{CORE_SITE_XML} {config_dir_name}/{CORE_SITE_XML}", + // config_dir_mount = self.volume_mount_dirs().config_mount(), + // config_dir_name = self.volume_mount_dirs().final_config() + // ), ] .join(" && ") } diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 881cb491..8038f04e 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -453,9 +453,19 @@ fn rolegroup_config_map( "dfs.journalnode.kerberos.principal".to_string(), "jn/_HOST@CLUSTER.LOCAL".to_string(), ), + ( + "dfs.journalnode.kerberos.principal.pattern".to_string(), + 
"jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@CLUSTER.LOCAL" + .to_string(), + ), ( "dfs.namenode.kerberos.principal".to_string(), - "nn/_HOST@CLUSTER.LOCAL".to_string(), + "nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL".to_string(), + ), + ( + "dfs.namenode.kerberos.principal.pattern".to_string(), + "nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL" + .to_string(), ), ( "dfs.datanode.kerberos.principal".to_string(), @@ -477,6 +487,10 @@ fn rolegroup_config_map( "dfs.datanode.keytab.file".to_string(), "/kerberos/keytab".to_string(), ), + ( + "hadoop.user.group.static.mapping.overrides".to_string(), + "dr.who=;nn=;".to_string(), + ), ] .into(), ) From bd090e8212284325ec58a46c1358d48329ffdc3c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 12:36:43 +0100 Subject: [PATCH 003/101] Use SecretOperatorVolumeSourceBuilder to build kerberos volume --- Cargo.lock | 4 +-- Cargo.toml | 4 +-- rust/operator/src/container.rs | 53 +++++++++++++++------------------- 3 files changed, 27 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9181b75e..70013454 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1859,7 +1859,7 @@ dependencies = [ [[package]] name = "stackable-operator" version = "0.37.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.37.0#5c552a45227abed969ebd2b8fed35fe877cc7788" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat/kerberos-secret-class-helpers#c04bca7baab57722a2b3bff25c2ad47bd5536867" dependencies = [ "chrono", "clap", @@ -1893,7 +1893,7 @@ dependencies = [ [[package]] name = "stackable-operator-derive" version = "0.37.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.37.0#5c552a45227abed969ebd2b8fed35fe877cc7788" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat/kerberos-secret-class-helpers#c04bca7baab57722a2b3bff25c2ad47bd5536867" dependencies = [ 
"darling", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 51bd6645..9a1b1108 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,6 @@ members = [ "rust/crd", "rust/operator", "rust/operator-binary" ] -# [patch."https://github.com/stackabletech/operator-rs.git"] +[patch."https://github.com/stackabletech/operator-rs.git"] # stackable-operator = { path = "/home/sbernauer/stackabletech/operator-rs" } -# stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "feat/affinities" } +stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "feat/kerberos-secret-class-helpers" } diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index c51fec80..46007dda 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -22,9 +22,9 @@ use indoc::formatdoc; use snafu::{ResultExt, Snafu}; use stackable_hdfs_crd::{ constants::{ - CORE_SITE_XML, DATANODE_ROOT_DATA_DIR_PREFIX, DEFAULT_DATA_NODE_METRICS_PORT, - DEFAULT_JOURNAL_NODE_METRICS_PORT, DEFAULT_NAME_NODE_METRICS_PORT, HDFS_SITE_XML, - LOG4J_PROPERTIES, NAMENODE_ROOT_DATA_DIR, SERVICE_PORT_NAME_IPC, SERVICE_PORT_NAME_RPC, + DATANODE_ROOT_DATA_DIR_PREFIX, DEFAULT_DATA_NODE_METRICS_PORT, + DEFAULT_JOURNAL_NODE_METRICS_PORT, DEFAULT_NAME_NODE_METRICS_PORT, LOG4J_PROPERTIES, + NAMENODE_ROOT_DATA_DIR, SERVICE_PORT_NAME_IPC, SERVICE_PORT_NAME_RPC, STACKABLE_ROOT_DATA_DIR, }, storage::DataNodeStorageConfig, @@ -381,12 +381,12 @@ impl ContainerConfig { "echo Cleaning up truststore - just in case", &format!("rm -f {keystore_directory}/truststore.p12", keystore_directory = KEYSTORE_DIR_NAME), "echo Creating truststore", - &format!("keytool -importcert -file /stackable/tls/ca.crt -keystore {keystore_directory}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass secret", + &format!("keytool -importcert -file /stackable/tls/ca.crt -keystore {keystore_directory}/truststore.p12 -storetype pkcs12 
-noprompt -alias ca_cert -storepass secret", keystore_directory = KEYSTORE_DIR_NAME), "echo Creating certificate chain", &format!("cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {keystore_directory}/chain.crt", keystore_directory = KEYSTORE_DIR_NAME), "echo Creating keystore", - &format!("openssl pkcs12 -export -in {keystore_directory}/chain.crt -inkey /stackable/tls/tls.key -out {keystore_directory}/keystore.p12 --passout file:{keystore_directory}/password", + &format!("openssl pkcs12 -export -in {keystore_directory}/chain.crt -inkey /stackable/tls/tls.key -out {keystore_directory}/keystore.p12 --passout file:{keystore_directory}/password", keystore_directory = KEYSTORE_DIR_NAME), "echo Cleaning up password", &format!("rm -f {keystore_directory}/password", keystore_directory = KEYSTORE_DIR_NAME), @@ -629,7 +629,7 @@ impl ContainerConfig { let mut volumes = vec![]; let container_log_config = match self { - ContainerConfig::Hdfs { .. } => { + ContainerConfig::Hdfs { role, .. } => { volumes.push( VolumeBuilder::new(ContainerConfig::STACKABLE_LOG_VOLUME_MOUNT_NAME) .empty_dir(EmptyDirVolumeSource { @@ -642,30 +642,23 @@ impl ContainerConfig { .build(), ); - let mut krb_src = VolumeBuilder::new("kerberos") - .ephemeral( - SecretOperatorVolumeSourceBuilder::new("kerberos") - .with_pod_scope() - .with_node_scope() - .with_service_scope("simple-hdfs-namenode-default") - .build(), - ) - .build(); - krb_src - .ephemeral - .as_mut() - .unwrap() - .volume_claim_template - .get_or_insert(Default::default()) - .metadata - .get_or_insert(Default::default()) - .annotations - .get_or_insert(Default::default()) - .insert( - "secrets.stackable.tech/kerberos.service.names".to_string(), - "jn,nn,dn,HTTP".to_string(), - ); - volumes.push(krb_src); + volumes.push( + VolumeBuilder::new("kerberos") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new("kerberos") + .with_pod_scope() + .with_node_scope() + // .with_service_scope("simple-hdfs-namenode-default") + 
.with_kerberos_service_name(match role { + HdfsRole::NameNode => "nn", + HdfsRole::DataNode => "dn", + HdfsRole::JournalNode => "jn", + }) + .with_kerberos_service_name("HTTP") + .build(), + ) + .build(), + ); volumes.push( VolumeBuilder::new("tls") From 941ff541a0ba7b2f730e41ed148170d955976f1c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 13:01:34 +0100 Subject: [PATCH 004/101] Add kdc and SecretClass to example --- .../hdfs/examples/getting_started/hdfs.yaml | 157 ++++++++++++++++++ 1 file changed, 157 insertions(+) diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index fc5d2713..db9ba1c7 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -7,6 +7,7 @@ spec: image: productVersion: 3.3.4 stackableVersion: 23.4.0-rc2 + repo: docker.stackable.tech/natkr/krb5 # Needed because of e.g. openssl is missing clusterConfig: zookeeperConfigMapName: simple-hdfs-znode dfsReplication: 1 @@ -55,3 +56,159 @@ spec: level: DEBUG console: level: DEBUG +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: kerberos +spec: + backend: + kerberosKeytab: + realmName: CLUSTER.LOCAL + kdc: krb5-kdc.default.svc.cluster.local + adminServer: krb5-kdc.default.svc.cluster.local + adminKeytabSecret: + namespace: default + name: secret-operator-keytab + adminPrincipal: stackable-secret-operator +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: krb5-kdc +spec: + selector: + matchLabels: + app: krb5-kdc + template: + metadata: + labels: + app: krb5-kdc + spec: + initContainers: + - name: init + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + args: + - sh + - -euo + - pipefail + - -c + - | + test -e /var/kerberos/krb5kdc/principal || kdb5_util create -s -P asdf + kadmin.local get_principal -terse root/admin || kadmin.local add_principal -pw asdf root/admin 
+ # stackable-secret-operator principal must match the keytab specified in the SecretClass + kadmin.local get_principal -terse stackable-secret-operator || kadmin.local add_principal -e aes256-cts-hmac-sha384-192:normal -pw asdf stackable-secret-operator + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + - mountPath: /var/kerberos/krb5kdc + name: data + containers: + - name: kdc + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + args: + - krb5kdc + - -n + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + - mountPath: /var/kerberos/krb5kdc + name: data + - name: kadmind + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + args: + - kadmind + - -nofork + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + - mountPath: /var/kerberos/krb5kdc + name: data + - name: client + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + tty: true + stdin: true + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + volumes: + - name: config + configMap: + name: krb5-kdc + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: krb5-kdc +spec: + selector: + app: krb5-kdc + ports: + - name: kadmin + port: 749 + - name: kdc + port: 88 + - name: kdc-udp + port: 88 + protocol: UDP +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: krb5-kdc +data: + krb5.conf: | + [logging] + default = STDERR + kdc = STDERR + admin_server = STDERR + # default = FILE:/var/log/krb5libs.log + # kdc = FILE:/var/log/krb5kdc.log + # admin_server = FILE:/vaggr/log/kadmind.log + [libdefaults] + 
dns_lookup_realm = false + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + default_realm = CLUSTER.LOCAL + spake_preauth_groups = edwards25519 + [realms] + CLUSTER.LOCAL = { + acl_file = /stackable/config/kadm5.acl + disable_encrypted_timestamp = false + } + [domain_realm] + .cluster.local = CLUSTER.LOCAL + cluster.local = CLUSTER.LOCAL + kadm5.acl: | + root/admin *e + stackable-secret-operator *e +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-operator-keytab +data: + keytab: BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB From eb6ad9eb04e56ac531965cc4d3754361ff1a252c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 13:08:15 +0100 Subject: [PATCH 005/101] Move ssl-server.xml and ssl-client.xml into constant --- rust/crd/src/constants.rs | 2 ++ rust/crd/src/lib.rs | 4 ++-- rust/operator/src/container.rs | 25 ++++++------------------- rust/operator/src/hdfs_controller.rs | 12 ++++++------ 4 files changed, 16 insertions(+), 27 deletions(-) diff --git a/rust/crd/src/constants.rs b/rust/crd/src/constants.rs index 1f50d569..1d624635 100644 --- a/rust/crd/src/constants.rs +++ b/rust/crd/src/constants.rs @@ -12,6 +12,8 @@ pub const LABEL_STS_POD_NAME: &str = "statefulset.kubernetes.io/pod-name"; pub const HDFS_SITE_XML: &str = "hdfs-site.xml"; pub const CORE_SITE_XML: &str = "core-site.xml"; +pub const SSL_SERVER_XML: &str = "ssl-server.xml"; +pub const SSL_CLIENT_XML: &str = "ssl-client.xml"; pub const LOG4J_PROPERTIES: &str = "log4j.properties"; pub const SERVICE_PORT_NAME_RPC: &str = "rpc"; diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index af06ef56..ae66e673 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -571,8 +571,8 @@ impl HdfsCluster { let pnk = vec![ PropertyNameKind::File(HDFS_SITE_XML.to_string()), PropertyNameKind::File(CORE_SITE_XML.to_string()), - 
PropertyNameKind::File("ssl-server.xml".to_string()), - PropertyNameKind::File("ssl-client.xml".to_string()), + PropertyNameKind::File(SSL_SERVER_XML.to_string()), + PropertyNameKind::File(SSL_CLIENT_XML.to_string()), PropertyNameKind::Env, ]; diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 46007dda..5400129e 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -766,26 +766,13 @@ impl ContainerConfig { ) } - /// Copy the `core-site.xml` and `hdfs-site.xml` to the respective container config dir. + /// Copy all the configuration files to the respective container config dir. fn copy_hdfs_and_core_site_xml_cmd(&self) -> String { - vec![ - format!( - "cp {config_dir_mount}/* {config_dir_name}", - config_dir_mount = self.volume_mount_dirs().config_mount(), - config_dir_name = self.volume_mount_dirs().final_config() - ), - // format!( - // "cp {config_dir_mount}/{HDFS_SITE_XML} {config_dir_name}/{HDFS_SITE_XML}", - // config_dir_mount = self.volume_mount_dirs().config_mount(), - // config_dir_name = self.volume_mount_dirs().final_config() - // ), - // format!( - // "cp {config_dir_mount}/{CORE_SITE_XML} {config_dir_name}/{CORE_SITE_XML}", - // config_dir_mount = self.volume_mount_dirs().config_mount(), - // config_dir_name = self.volume_mount_dirs().final_config() - // ), - ] - .join(" && ") + format!( + "cp {config_dir_mount}/* {config_dir_name}", + config_dir_mount = self.volume_mount_dirs().config_mount(), + config_dir_name = self.volume_mount_dirs().final_config() + ) } /// Copy the `log4j.properties` to the respective container config dir. 
diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index d467b96a..e33ec186 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -414,8 +414,8 @@ fn rolegroup_config_map( .add("dfs.block.access.token.enable", "true") .add("dfs.data.transfer.protection", "authentication") .add("dfs.http.policy", "HTTPS_ONLY") - .add("dfs.https.client.keystore.resource", "ssl-client.xml") - .add("dfs.https.server.keystore.resource", "ssl-server.xml") + .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) + .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML) // the extend with config must come last in order to have overrides working!!! .extend(config) .build_as_xml(); @@ -499,7 +499,7 @@ fn rolegroup_config_map( .extend(config) .build_as_xml(); } - PropertyNameKind::File(file_name) if file_name == "ssl-server.xml" => { + PropertyNameKind::File(file_name) if file_name == SSL_SERVER_XML => { let mut config_opts = BTreeMap::new(); config_opts.extend([ ( @@ -519,7 +519,7 @@ fn rolegroup_config_map( ssl_server_xml = stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); } - PropertyNameKind::File(file_name) if file_name == "ssl-client.xml" => { + PropertyNameKind::File(file_name) if file_name == SSL_CLIENT_XML => { let mut config_opts = BTreeMap::new(); config_opts.extend([ ( @@ -565,8 +565,8 @@ fn rolegroup_config_map( ) .add_data(CORE_SITE_XML.to_string(), core_site_xml) .add_data(HDFS_SITE_XML.to_string(), hdfs_site_xml) - .add_data("ssl-server.xml", ssl_server_xml) - .add_data("ssl-client.xml", ssl_client_xml); + .add_data(SSL_SERVER_XML, ssl_server_xml) + .add_data(SSL_CLIENT_XML, ssl_client_xml); extend_role_group_config_map( rolegroup_ref, From 33d6cb318117e0d9025d0c93b8154f7ca1a93d44 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 14:26:47 +0100 Subject: [PATCH 006/101] WIP --- deploy/helm/hdfs-operator/crds/crds.yaml | 13 ++ 
.../hdfs/examples/getting_started/hdfs.yaml | 4 + rust/crd/src/constants.rs | 1 + rust/crd/src/lib.rs | 169 +++++++++++------- rust/operator/src/container.rs | 14 +- rust/operator/src/hdfs_controller.rs | 3 +- 6 files changed, 137 insertions(+), 67 deletions(-) diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index da87d1f0..80967480 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -34,6 +34,19 @@ spec: minimum: 0.0 nullable: true type: integer + kerberos: + description: Configuration to set up a cluster secured using Kerberos. + nullable: true + properties: + kerberosSecretClass: + default: kerberos + description: Name of the SecretClass providing the keytab for the HDFS services. + type: string + tlsSecretClass: + default: tls + description: Name of the SecretClass providing the tls certificates for the WebUIs. + type: string + type: object vectorAggregatorConfigMapName: description: Name of the Vector aggregator discovery ConfigMap. It must contain the key `ADDRESS` with the address of the Vector aggregator. 
nullable: true diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index db9ba1c7..bd7448af 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -11,6 +11,10 @@ spec: clusterConfig: zookeeperConfigMapName: simple-hdfs-znode dfsReplication: 1 + # TODO discuss CRD structure and present in Arch meeting + kerberos: + tlsSecretClass: tls + kerberosSecretClass: kerberos nameNodes: roleGroups: default: diff --git a/rust/crd/src/constants.rs b/rust/crd/src/constants.rs index 1d624635..9d2422e0 100644 --- a/rust/crd/src/constants.rs +++ b/rust/crd/src/constants.rs @@ -30,6 +30,7 @@ pub const DEFAULT_NAME_NODE_RPC_PORT: u16 = 8020; pub const DEFAULT_DATA_NODE_METRICS_PORT: u16 = 8082; pub const DEFAULT_DATA_NODE_HTTP_PORT: u16 = 9864; +pub const DEFAULT_DATA_NODE_HTTPS_PORT: u16 = 9865; pub const DEFAULT_DATA_NODE_DATA_PORT: u16 = 9866; pub const DEFAULT_DATA_NODE_IPC_PORT: u16 = 9867; diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index ae66e673..26dfe808 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -86,6 +86,27 @@ pub struct HdfsClusterConfig { pub vector_aggregator_config_map_name: Option, /// Name of the ZooKeeper discovery config map. pub zookeeper_config_map_name: String, + /// Configuration to set up a cluster secured using Kerberos. + pub kerberos: Option, +} + +#[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct KerberosConfig { + /// Name of the SecretClass providing the keytab for the HDFS services. + #[serde(default = "default_kerberos_kerberos_secret_class")] + kerberos_secret_class: String, + /// Name of the SecretClass providing the tls certificates for the WebUIs. 
+ #[serde(default = "default_kerberos_tls_secret_class")] + tls_secret_class: String, +} + +fn default_kerberos_tls_secret_class() -> String { + "tls".to_string() +} + +fn default_kerberos_kerberos_secret_class() -> String { + "kerberos".to_string() } /// This is a shared trait for all role/role-group config structs to avoid duplication @@ -178,66 +199,6 @@ impl HdfsRole { } } - /// Returns required port name and port number tuples depending on the role. - pub fn ports(&self) -> Vec<(String, u16)> { - match self { - HdfsRole::NameNode => vec![ - ( - String::from(SERVICE_PORT_NAME_METRICS), - DEFAULT_NAME_NODE_METRICS_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_HTTP), - DEFAULT_NAME_NODE_HTTP_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_HTTPS), - DEFAULT_NAME_NODE_HTTPS_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_RPC), - DEFAULT_NAME_NODE_RPC_PORT, - ), - ], - HdfsRole::DataNode => vec![ - ( - String::from(SERVICE_PORT_NAME_METRICS), - DEFAULT_DATA_NODE_METRICS_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_DATA), - DEFAULT_DATA_NODE_DATA_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_HTTP), - DEFAULT_DATA_NODE_HTTP_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_IPC), - DEFAULT_DATA_NODE_IPC_PORT, - ), - ], - HdfsRole::JournalNode => vec![ - ( - String::from(SERVICE_PORT_NAME_METRICS), - DEFAULT_JOURNAL_NODE_METRICS_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_HTTP), - DEFAULT_JOURNAL_NODE_HTTP_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_HTTPS), - DEFAULT_JOURNAL_NODE_HTTPS_PORT, - ), - ( - String::from(SERVICE_PORT_NAME_RPC), - DEFAULT_JOURNAL_NODE_RPC_PORT, - ), - ], - } - } - /// Merge the [Name|Data|Journal]NodeConfigFragment defaults, role and role group settings. 
/// The priority is: default < role config < role_group config pub fn merged_config( @@ -496,7 +457,11 @@ impl HdfsCluster { namespace: ns.clone(), role_group_service_name: rolegroup_ref.object_name(), pod_name: format!("{}-{}", rolegroup_ref.object_name(), i), - ports: role.ports().iter().map(|(n, p)| (n.clone(), *p)).collect(), + ports: self + .ports(role) + .iter() + .map(|(n, p)| (n.clone(), *p)) + .collect(), }) }) .collect()) @@ -611,6 +576,88 @@ impl HdfsCluster { Ok(result) } + + fn has_security_enabled(&self) -> bool { + self.spec.cluster_config.kerberos.is_some() + } + + fn has_https_enabled(&self) -> bool { + // TODO Clarify if https can be used without kerberos + self.has_security_enabled() + } + + /// Returns required port name and port number tuples depending on the role. + pub fn ports(&self, role: &HdfsRole) -> Vec<(String, u16)> { + match role { + HdfsRole::NameNode => vec![ + ( + String::from(SERVICE_PORT_NAME_METRICS), + DEFAULT_NAME_NODE_METRICS_PORT, + ), + ( + String::from(SERVICE_PORT_NAME_RPC), + DEFAULT_NAME_NODE_RPC_PORT, + ), + if self.has_https_enabled() { + ( + String::from(SERVICE_PORT_NAME_HTTPS), + DEFAULT_NAME_NODE_HTTPS_PORT, + ) + } else { + ( + String::from(SERVICE_PORT_NAME_HTTP), + DEFAULT_NAME_NODE_HTTP_PORT, + ) + }, + ], + HdfsRole::DataNode => vec![ + ( + String::from(SERVICE_PORT_NAME_METRICS), + DEFAULT_DATA_NODE_METRICS_PORT, + ), + ( + String::from(SERVICE_PORT_NAME_DATA), + DEFAULT_DATA_NODE_DATA_PORT, + ), + ( + String::from(SERVICE_PORT_NAME_IPC), + DEFAULT_DATA_NODE_IPC_PORT, + ), + if self.has_https_enabled() { + ( + String::from(SERVICE_PORT_NAME_HTTPS), + DEFAULT_DATA_NODE_HTTPS_PORT, + ) + } else { + ( + String::from(SERVICE_PORT_NAME_HTTP), + DEFAULT_DATA_NODE_HTTP_PORT, + ) + }, + ], + HdfsRole::JournalNode => vec![ + ( + String::from(SERVICE_PORT_NAME_METRICS), + DEFAULT_JOURNAL_NODE_METRICS_PORT, + ), + ( + String::from(SERVICE_PORT_NAME_RPC), + DEFAULT_JOURNAL_NODE_RPC_PORT, + ), + if 
self.has_https_enabled() { + ( + String::from(SERVICE_PORT_NAME_HTTPS), + DEFAULT_JOURNAL_NODE_HTTPS_PORT, + ) + } else { + ( + String::from(SERVICE_PORT_NAME_HTTP), + DEFAULT_JOURNAL_NODE_HTTP_PORT, + ) + }, + ], + } + } } /// Reference to a single `Pod` that is a component of a [`HdfsCluster`] /// diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 5400129e..d8b37a82 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -28,7 +28,7 @@ use stackable_hdfs_crd::{ STACKABLE_ROOT_DATA_DIR, }, storage::DataNodeStorageConfig, - DataNodeContainer, HdfsPodRef, HdfsRole, MergedConfig, NameNodeContainer, + DataNodeContainer, HdfsCluster, HdfsPodRef, HdfsRole, MergedConfig, NameNodeContainer, }; use stackable_operator::{ builder::{ @@ -144,6 +144,7 @@ impl ContainerConfig { #[allow(clippy::too_many_arguments)] pub fn add_containers_and_volumes( pb: &mut PodBuilder, + hdfs: &HdfsCluster, role: &HdfsRole, resolved_product_image: &ResolvedProductImage, merged_config: &(dyn MergedConfig + Send + 'static), @@ -156,6 +157,7 @@ impl ContainerConfig { let main_container_config = Self::from(role.clone()); pb.add_volumes(main_container_config.volumes(merged_config, object_name)); pb.add_container(main_container_config.main_container( + hdfs, resolved_product_image, zk_config_map_name, env_overrides, @@ -179,6 +181,7 @@ impl ContainerConfig { let zkfc_container_config = Self::try_from(NameNodeContainer::Zkfc.to_string())?; pb.add_volumes(zkfc_container_config.volumes(merged_config, object_name)); pb.add_container(zkfc_container_config.main_container( + hdfs, resolved_product_image, zk_config_map_name, env_overrides, @@ -259,6 +262,7 @@ impl ContainerConfig { /// - Journalnode main process fn main_container( &self, + hdfs: &HdfsCluster, resolved_product_image: &ResolvedProductImage, zookeeper_config_map_name: &str, env_overrides: Option<&BTreeMap>, @@ -283,7 +287,7 @@ impl ContainerConfig { .add_env_var("KRB5_TRACE", 
"/dev/stderr") .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, resources.as_ref())) .add_volume_mounts(self.volume_mounts(merged_config)) - .add_container_ports(self.container_ports()); + .add_container_ports(self.container_ports(hdfs)); if let Some(resources) = resources { cb.resources(resources); @@ -840,10 +844,10 @@ impl ContainerConfig { } /// Container ports for the main containers namenode, datanode and journalnode. - fn container_ports(&self) -> Vec { + fn container_ports(&self, hdfs: &HdfsCluster) -> Vec { match self { - ContainerConfig::Hdfs { role, .. } => role - .ports() + ContainerConfig::Hdfs { role, .. } => hdfs + .ports(role) .into_iter() .map(|(name, value)| ContainerPort { name: Some(name), diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index e33ec186..49faf8d0 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -338,7 +338,7 @@ fn rolegroup_service( spec: Some(ServiceSpec { cluster_ip: Some("None".to_string()), ports: Some( - role.ports() + hdfs.ports(role) .into_iter() .map(|(name, value)| ServicePort { name: Some(name), @@ -620,6 +620,7 @@ fn rolegroup_statefulset( // Adds all containers and volumes to the pod builder ContainerConfig::add_containers_and_volumes( &mut pb, + hdfs, role, resolved_product_image, merged_config, From 8d63204a7b8cdae6104387febbc2faa2bbf61879 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 14:35:24 +0100 Subject: [PATCH 007/101] Only add volumes conditionally --- rust/crd/src/lib.rs | 4 +- rust/operator/src/container.rs | 97 +++++++++++++++++++--------------- 2 files changed, 56 insertions(+), 45 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 26dfe808..d5e56ff4 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -577,11 +577,11 @@ impl HdfsCluster { Ok(result) } - fn has_security_enabled(&self) -> bool { + pub fn has_security_enabled(&self) -> bool { 
self.spec.cluster_config.kerberos.is_some() } - fn has_https_enabled(&self) -> bool { + pub fn has_https_enabled(&self) -> bool { // TODO Clarify if https can be used without kerberos self.has_security_enabled() } diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index d8b37a82..9a2f7bfa 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -155,7 +155,7 @@ impl ContainerConfig { ) -> Result<(), Error> { // HDFS main container let main_container_config = Self::from(role.clone()); - pb.add_volumes(main_container_config.volumes(merged_config, object_name)); + pb.add_volumes(main_container_config.volumes(hdfs, merged_config, object_name)); pb.add_container(main_container_config.main_container( hdfs, resolved_product_image, @@ -179,7 +179,7 @@ impl ContainerConfig { HdfsRole::NameNode => { // Zookeeper fail over container let zkfc_container_config = Self::try_from(NameNodeContainer::Zkfc.to_string())?; - pb.add_volumes(zkfc_container_config.volumes(merged_config, object_name)); + pb.add_volumes(zkfc_container_config.volumes(hdfs, merged_config, object_name)); pb.add_container(zkfc_container_config.main_container( hdfs, resolved_product_image, @@ -191,9 +191,11 @@ impl ContainerConfig { // Format namenode init container let format_namenodes_container_config = Self::try_from(NameNodeContainer::FormatNameNodes.to_string())?; - pb.add_volumes( - format_namenodes_container_config.volumes(merged_config, object_name), - ); + pb.add_volumes(format_namenodes_container_config.volumes( + hdfs, + merged_config, + object_name, + )); pb.add_init_container(format_namenodes_container_config.init_container( resolved_product_image, zk_config_map_name, @@ -205,9 +207,11 @@ impl ContainerConfig { // Format ZooKeeper init container let format_zookeeper_container_config = Self::try_from(NameNodeContainer::FormatZooKeeper.to_string())?; - pb.add_volumes( - format_zookeeper_container_config.volumes(merged_config, object_name), - ); + 
pb.add_volumes(format_zookeeper_container_config.volumes( + hdfs, + merged_config, + object_name, + )); pb.add_init_container(format_zookeeper_container_config.init_container( resolved_product_image, zk_config_map_name, @@ -220,9 +224,11 @@ impl ContainerConfig { // Wait for namenode init container let wait_for_namenodes_container_config = Self::try_from(DataNodeContainer::WaitForNameNodes.to_string())?; - pb.add_volumes( - wait_for_namenodes_container_config.volumes(merged_config, object_name), - ); + pb.add_volumes(wait_for_namenodes_container_config.volumes( + hdfs, + merged_config, + object_name, + )); pb.add_init_container(wait_for_namenodes_container_config.init_container( resolved_product_image, zk_config_map_name, @@ -627,6 +633,7 @@ impl ContainerConfig { /// Return the container volumes. fn volumes( &self, + hdfs: &HdfsCluster, merged_config: &(dyn MergedConfig + Send + 'static), object_name: &str, ) -> Vec { @@ -646,40 +653,44 @@ impl ContainerConfig { .build(), ); - volumes.push( - VolumeBuilder::new("kerberos") - .ephemeral( - SecretOperatorVolumeSourceBuilder::new("kerberos") - .with_pod_scope() - .with_node_scope() - // .with_service_scope("simple-hdfs-namenode-default") - .with_kerberos_service_name(match role { - HdfsRole::NameNode => "nn", - HdfsRole::DataNode => "dn", - HdfsRole::JournalNode => "jn", - }) - .with_kerberos_service_name("HTTP") - .build(), - ) - .build(), - ); + if hdfs.has_security_enabled() { + volumes.push( + VolumeBuilder::new("kerberos") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new("kerberos") + .with_pod_scope() + .with_node_scope() + // .with_service_scope("simple-hdfs-namenode-default") + .with_kerberos_service_name(match role { + HdfsRole::NameNode => "nn", + HdfsRole::DataNode => "dn", + HdfsRole::JournalNode => "jn", + }) + .with_kerberos_service_name("HTTP") + .build(), + ) + .build(), + ); + } - volumes.push( - VolumeBuilder::new("tls") - .ephemeral( - SecretOperatorVolumeSourceBuilder::new("tls") - 
.with_pod_scope() - .with_node_scope() - .build(), - ) - .build(), - ); + if hdfs.has_https_enabled() { + volumes.push( + VolumeBuilder::new("tls") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new("tls") + .with_pod_scope() + .with_node_scope() + .build(), + ) + .build(), + ); - volumes.push( - VolumeBuilder::new("keystore") - .with_empty_dir(Option::::None, None) - .build(), - ); + volumes.push( + VolumeBuilder::new("keystore") + .with_empty_dir(Option::::None, None) + .build(), + ); + } Some(merged_config.hdfs_logging()) } From b9e7e726969c42e44ea20029a7db889491f9e366 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 15:56:56 +0100 Subject: [PATCH 008/101] Use actual SecretClasses instead of hardcoding --- rust/crd/src/lib.rs | 21 ++++++++++++++++++--- rust/operator/src/container.rs | 8 ++++---- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index d5e56ff4..ae6a4f3f 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -578,12 +578,27 @@ impl HdfsCluster { } pub fn has_security_enabled(&self) -> bool { - self.spec.cluster_config.kerberos.is_some() + self.kerberos_secret_class().is_some() + } + + pub fn kerberos_secret_class(&self) -> Option<&str> { + self.spec + .cluster_config + .kerberos + .as_ref() + .map(|k| k.kerberos_secret_class.as_str()) } pub fn has_https_enabled(&self) -> bool { - // TODO Clarify if https can be used without kerberos - self.has_security_enabled() + self.https_secret_class().is_some() + } + + pub fn https_secret_class(&self) -> Option<&str> { + self.spec + .cluster_config + .kerberos + .as_ref() + .map(|k| k.tls_secret_class.as_str()) } /// Returns required port name and port number tuples depending on the role. 
diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 9a2f7bfa..d231911e 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -653,11 +653,11 @@ impl ContainerConfig { .build(), ); - if hdfs.has_security_enabled() { + if let Some(kerberos_secret_class) = hdfs.kerberos_secret_class() { volumes.push( VolumeBuilder::new("kerberos") .ephemeral( - SecretOperatorVolumeSourceBuilder::new("kerberos") + SecretOperatorVolumeSourceBuilder::new(kerberos_secret_class) .with_pod_scope() .with_node_scope() // .with_service_scope("simple-hdfs-namenode-default") @@ -673,9 +673,9 @@ impl ContainerConfig { ); } - if hdfs.has_https_enabled() { + if let Some(https_secret_class) = hdfs.https_secret_class() { volumes.push( - VolumeBuilder::new("tls") + VolumeBuilder::new(https_secret_class) .ephemeral( SecretOperatorVolumeSourceBuilder::new("tls") .with_pod_scope() From 2aadadf68118c463a74c1305c2d146ac24107d67 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 15:58:52 +0100 Subject: [PATCH 009/101] Remove HADOOP_JAAS_DEBUG=true --- rust/operator/src/container.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index d231911e..23841fad 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -288,7 +288,6 @@ impl ContainerConfig { "HADOOP_OPTS", "-Djava.security.krb5.conf=/kerberos/krb5.conf", ) - .add_env_var("HADOOP_JAAS_DEBUG", "true") .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") .add_env_var("KRB5_TRACE", "/dev/stderr") .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, resources.as_ref())) @@ -327,7 +326,6 @@ impl ContainerConfig { "HADOOP_OPTS", "-Djava.security.krb5.conf=/kerberos/krb5.conf", ) - .add_env_var("HADOOP_JAAS_DEBUG", "true") .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") .add_env_var("KRB5_TRACE", "/dev/stderr") .add_env_vars(self.env(zookeeper_config_map_name, 
env_overrides, None)) From b77b4188c29436f2eed121fec4c7416a8ace079e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 16:06:41 +0100 Subject: [PATCH 010/101] Use add instead of extend --- rust/operator/src/config.rs | 15 +++-- rust/operator/src/hdfs_controller.rs | 86 +++++----------------------- 2 files changed, 25 insertions(+), 76 deletions(-) diff --git a/rust/operator/src/config.rs b/rust/operator/src/config.rs index cc09fde9..78b147a4 100644 --- a/rust/operator/src/config.rs +++ b/rust/operator/src/config.rs @@ -199,6 +199,16 @@ impl CoreSiteConfigBuilder { } } + pub fn add(&mut self, property: &str, value: &str) -> &mut Self { + self.config.insert(property.to_string(), value.to_string()); + self + } + + pub fn extend(&mut self, properties: &BTreeMap) -> &mut Self { + self.config.extend(properties.clone()); + self + } + pub fn fs_default_fs(&mut self) -> &mut Self { self.config.insert( FS_DEFAULT_FS.to_string(), @@ -215,11 +225,6 @@ impl CoreSiteConfigBuilder { self } - pub fn extend(&mut self, properties: &BTreeMap) -> &mut Self { - self.config.extend(properties.clone()); - self - } - pub fn build_as_xml(&self) -> String { let transformed_config = transform_for_product_config(&self.config); stackable_operator::product_config::writer::to_hadoop_xml(transformed_config.iter()) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 49faf8d0..9ac7a6f5 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -424,77 +424,21 @@ fn rolegroup_config_map( core_site_xml = CoreSiteConfigBuilder::new(hdfs_name.to_string()) .fs_default_fs() .ha_zookeeper_quorum() - .extend( - &[ - ( - "hadoop.security.authentication".to_string(), - "kerberos".to_string(), - ), - ( - "hadoop.security.authentication".to_string(), - "kerberos".to_string(), - ), - ( - "hadoop.security.authorization".to_string(), - "true".to_string(), - ), - ( - "hadoop.registry.kerberos.realm".to_string(), - 
"CLUSTER.LOCAL".to_string(), - ), - ( - "dfs.web.authentication.kerberos.principal".to_string(), - "HTTP/_HOST@CLUSTER.LOCAL".to_string(), - ), - ( - "dfs.journalnode.kerberos.internal.spnego.principal".to_string(), - "HTTP/_HOST@CLUSTER.LOCAL".to_string(), - ), - ( - "dfs.journalnode.kerberos.principal".to_string(), - "jn/_HOST@CLUSTER.LOCAL".to_string(), - ), - ( - "dfs.journalnode.kerberos.principal.pattern".to_string(), - "jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@CLUSTER.LOCAL" - .to_string(), - ), - ( - "dfs.namenode.kerberos.principal".to_string(), - "nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL".to_string(), - ), - ( - "dfs.namenode.kerberos.principal.pattern".to_string(), - "nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL" - .to_string(), - ), - ( - "dfs.datanode.kerberos.principal".to_string(), - "dn/_HOST@CLUSTER.LOCAL".to_string(), - ), - ( - "dfs.web.authentication.keytab.file".to_string(), - "/kerberos/keytab".to_string(), - ), - ( - "dfs.journalnode.keytab.file".to_string(), - "/kerberos/keytab".to_string(), - ), - ( - "dfs.namenode.keytab.file".to_string(), - "/kerberos/keytab".to_string(), - ), - ( - "dfs.datanode.keytab.file".to_string(), - "/kerberos/keytab".to_string(), - ), - ( - "hadoop.user.group.static.mapping.overrides".to_string(), - "dr.who=;nn=;".to_string(), - ), - ] - .into(), - ) + .add("hadoop.security.authentication", "kerberos") + .add("hadoop.security.authorization","true") + .add("hadoop.registry.kerberos.realm","CLUSTER.LOCAL") + .add("dfs.web.authentication.kerberos.principal","HTTP/_HOST@CLUSTER.LOCAL") + .add("dfs.journalnode.kerberos.internal.spnego.principal","HTTP/_HOST@CLUSTER.LOCAL") + .add("dfs.journalnode.kerberos.principal","jn/_HOST@CLUSTER.LOCAL") + .add("dfs.journalnode.kerberos.principal.pattern","jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@CLUSTER.LOCAL") + 
.add("dfs.namenode.kerberos.principal","nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL") + .add("dfs.namenode.kerberos.principal.pattern","nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL") + .add("dfs.datanode.kerberos.principal","dn/_HOST@CLUSTER.LOCAL") + .add("dfs.web.authentication.keytab.file","/kerberos/keytab") + .add("dfs.journalnode.keytab.file","/kerberos/keytab") + .add("dfs.namenode.keytab.file","/kerberos/keytab") + .add("dfs.datanode.keytab.file","/kerberos/keytab") + .add("hadoop.user.group.static.mapping.overrides","dr.who=;nn=;") // the extend with config must come last in order to have overrides working!!! .extend(config) .build_as_xml(); From 1453819917334ddefe78e8d9cbfa279a5528ed0b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 16:14:25 +0100 Subject: [PATCH 011/101] Only add volume mounts when needed --- rust/operator/src/container.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 23841fad..aac2d004 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -197,6 +197,7 @@ impl ContainerConfig { object_name, )); pb.add_init_container(format_namenodes_container_config.init_container( + hdfs, resolved_product_image, zk_config_map_name, env_overrides, @@ -213,6 +214,7 @@ impl ContainerConfig { object_name, )); pb.add_init_container(format_zookeeper_container_config.init_container( + hdfs, resolved_product_image, zk_config_map_name, env_overrides, @@ -230,6 +232,7 @@ impl ContainerConfig { object_name, )); pb.add_init_container(wait_for_namenodes_container_config.init_container( + hdfs, resolved_product_image, zk_config_map_name, env_overrides, @@ -291,7 +294,7 @@ impl ContainerConfig { .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") .add_env_var("KRB5_TRACE", "/dev/stderr") .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, 
resources.as_ref())) - .add_volume_mounts(self.volume_mounts(merged_config)) + .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .add_container_ports(self.container_ports(hdfs)); if let Some(resources) = resources { @@ -311,6 +314,7 @@ impl ContainerConfig { /// - Datanode (wait-for-namenodes) fn init_container( &self, + hdfs: &HdfsCluster, resolved_product_image: &ResolvedProductImage, zookeeper_config_map_name: &str, env_overrides: Option<&BTreeMap>, @@ -329,7 +333,7 @@ impl ContainerConfig { .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") .add_env_var("KRB5_TRACE", "/dev/stderr") .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, None)) - .add_volume_mounts(self.volume_mounts(merged_config)) + .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .build()) } @@ -711,6 +715,7 @@ impl ContainerConfig { /// Returns the container volume mounts. fn volume_mounts( &self, + hdfs: &HdfsCluster, merged_config: &(dyn MergedConfig + Send + 'static), ) -> Vec { let mut volume_mounts = vec![ @@ -726,10 +731,14 @@ impl ContainerConfig { self.volume_mount_dirs().log_mount(), ) .build(), - VolumeMountBuilder::new("kerberos", "/kerberos").build(), - VolumeMountBuilder::new("tls", "/stackable/tls").build(), - VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build(), ]; + if hdfs.kerberos_secret_class().is_some() { + volume_mounts.push(VolumeMountBuilder::new("kerberos", "/kerberos").build()); + } + if hdfs.https_secret_class().is_some() { + volume_mounts.push(VolumeMountBuilder::new("tls", "/stackable/tls").build()); + volume_mounts.push(VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build()); + } match self { ContainerConfig::FormatNameNodes { .. 
} => { From 35c6a9052e1c8a2ee0461739ae5ac72153b0ddac Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 16:28:29 +0100 Subject: [PATCH 012/101] Only write either http or https nn address into hdfs-site --- rust/operator/src/config.rs | 31 +++++++++++++++++----------- rust/operator/src/discovery.rs | 10 ++++++--- rust/operator/src/hdfs_controller.rs | 2 +- 3 files changed, 27 insertions(+), 16 deletions(-) diff --git a/rust/operator/src/config.rs b/rust/operator/src/config.rs index 78b147a4..eeee77f2 100644 --- a/rust/operator/src/config.rs +++ b/rust/operator/src/config.rs @@ -7,7 +7,7 @@ use stackable_hdfs_crd::constants::{ JOURNALNODE_ROOT_DATA_DIR, NAMENODE_ROOT_DATA_DIR, }; use stackable_hdfs_crd::storage::{DataNodeStorageConfig, DataNodeStorageConfigInnerType}; -use stackable_hdfs_crd::HdfsPodRef; +use stackable_hdfs_crd::{HdfsCluster, HdfsPodRef}; use std::collections::BTreeMap; #[derive(Clone)] @@ -145,17 +145,24 @@ impl HdfsSiteConfigBuilder { self } - pub fn dfs_namenode_http_address_ha(&mut self, namenode_podrefs: &[HdfsPodRef]) -> &mut Self { - self.dfs_namenode_address_ha( - namenode_podrefs, - DFS_NAMENODE_HTTP_ADDRESS, - DEFAULT_NAME_NODE_HTTP_PORT, - ); - self.dfs_namenode_address_ha( - namenode_podrefs, - DFS_NAMENODE_HTTPS_ADDRESS, - DEFAULT_NAME_NODE_HTTPS_PORT, - ); + pub fn dfs_namenode_http_address_ha( + &mut self, + hdfs: &HdfsCluster, + namenode_podrefs: &[HdfsPodRef], + ) -> &mut Self { + if hdfs.has_https_enabled() { + self.dfs_namenode_address_ha( + namenode_podrefs, + DFS_NAMENODE_HTTPS_ADDRESS, + DEFAULT_NAME_NODE_HTTPS_PORT, + ); + } else { + self.dfs_namenode_address_ha( + namenode_podrefs, + DFS_NAMENODE_HTTP_ADDRESS, + DEFAULT_NAME_NODE_HTTP_PORT, + ); + } self } diff --git a/rust/operator/src/discovery.rs b/rust/operator/src/discovery.rs index 13d973fc..870542c7 100644 --- a/rust/operator/src/discovery.rs +++ b/rust/operator/src/discovery.rs @@ -38,7 +38,7 @@ pub fn build_discovery_configmap( ) .add_data( 
HDFS_SITE_XML, - build_discovery_hdfs_site_xml(hdfs.name_any(), namenode_podrefs), + build_discovery_hdfs_site_xml(hdfs, hdfs.name_any(), namenode_podrefs), ) .add_data( CORE_SITE_XML, @@ -47,12 +47,16 @@ pub fn build_discovery_configmap( .build() } -fn build_discovery_hdfs_site_xml(logical_name: String, namenode_podrefs: &[HdfsPodRef]) -> String { +fn build_discovery_hdfs_site_xml( + hdfs: &HdfsCluster, + logical_name: String, + namenode_podrefs: &[HdfsPodRef], +) -> String { HdfsSiteConfigBuilder::new(logical_name) .dfs_name_services() .dfs_ha_namenodes(namenode_podrefs) .dfs_namenode_rpc_address_ha(namenode_podrefs) - .dfs_namenode_http_address_ha(namenode_podrefs) + .dfs_namenode_http_address_ha(hdfs, namenode_podrefs) .dfs_client_failover_proxy_provider() .build_as_xml() } diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 9ac7a6f5..7503897f 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -405,7 +405,7 @@ fn rolegroup_config_map( .dfs_namenode_shared_edits_dir(journalnode_podrefs) .dfs_namenode_name_dir_ha(namenode_podrefs) .dfs_namenode_rpc_address_ha(namenode_podrefs) - .dfs_namenode_http_address_ha(namenode_podrefs) + .dfs_namenode_http_address_ha(hdfs, namenode_podrefs) .dfs_client_failover_proxy_provider() .add("dfs.ha.fencing.methods", "shell(/bin/true)") .add("dfs.ha.nn.not-become-active-in-safemode", "true") From 3a3f744a59fd1f9689b3f72f95078dbbf14c3db2 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Mar 2023 18:03:55 +0100 Subject: [PATCH 013/101] Only mount tls certs to main containers and fix other stuff --- rust/operator/src/container.rs | 136 ++++++++++++++------------- rust/operator/src/hdfs_controller.rs | 12 +-- 2 files changed, 79 insertions(+), 69 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index aac2d004..a38bee01 100644 --- a/rust/operator/src/container.rs +++ 
b/rust/operator/src/container.rs @@ -287,13 +287,12 @@ impl ContainerConfig { cb.image_from_product_image(resolved_product_image) .command(self.command()) .args(self.args(merged_config, &[])) - .add_env_var( - "HADOOP_OPTS", - "-Djava.security.krb5.conf=/kerberos/krb5.conf", - ) - .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") - .add_env_var("KRB5_TRACE", "/dev/stderr") - .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, resources.as_ref())) + .add_env_vars(self.env( + hdfs, + zookeeper_config_map_name, + env_overrides, + resources.as_ref(), + )) .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .add_container_ports(self.container_ports(hdfs)); @@ -326,13 +325,7 @@ impl ContainerConfig { .image_from_product_image(resolved_product_image) .command(self.command()) .args(self.args(merged_config, namenode_podrefs)) - .add_env_var( - "HADOOP_OPTS", - "-Djava.security.krb5.conf=/kerberos/krb5.conf", - ) - .add_env_var("KRB5_CONFIG", "/kerberos/krb5.conf") - .add_env_var("KRB5_TRACE", "/dev/stderr") - .add_env_vars(self.env(zookeeper_config_map_name, env_overrides, None)) + .add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None)) .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .build()) } @@ -385,25 +378,8 @@ impl ContainerConfig { ) -> Vec { let mut args = vec![ self.create_config_directory_cmd(), - self.copy_hdfs_and_core_site_xml_cmd(), + self.copy_config_xml_cmd(), ]; - args.push([ - "echo Storing password", - &format!("echo secret > {keystore_directory}/password", keystore_directory = KEYSTORE_DIR_NAME), - "echo Cleaning up truststore - just in case", - &format!("rm -f {keystore_directory}/truststore.p12", keystore_directory = KEYSTORE_DIR_NAME), - "echo Creating truststore", - &format!("keytool -importcert -file /stackable/tls/ca.crt -keystore {keystore_directory}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass secret", - keystore_directory = KEYSTORE_DIR_NAME), - "echo Creating 
certificate chain", - &format!("cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {keystore_directory}/chain.crt", keystore_directory = KEYSTORE_DIR_NAME), - "echo Creating keystore", - &format!("openssl pkcs12 -export -in {keystore_directory}/chain.crt -inkey /stackable/tls/tls.key -out {keystore_directory}/keystore.p12 --passout file:{keystore_directory}/password", - keystore_directory = KEYSTORE_DIR_NAME), - "echo Cleaning up password", - &format!("rm -f {keystore_directory}/password", keystore_directory = KEYSTORE_DIR_NAME), - ].join(" && ")); - match self { ContainerConfig::Hdfs { role, .. } => { args.push(self.copy_log4j_properties_cmd( @@ -411,6 +387,18 @@ impl ContainerConfig { merged_config.hdfs_logging(), )); + // Only the main containers gets the tls certs mounted and builds the keystore + args.push([ + "echo Cleaning up truststore - just in case", + &format!("rm -f {KEYSTORE_DIR_NAME}/truststore.p12"), + "echo Creating truststore", + &format!("keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass changeit"), + "echo Creating certificate chain", + &format!("cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt"), + "echo Creating keystore", + &format!("openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"), + ].join(" && ")); + args.push(format!( "{hadoop_home}/bin/hdfs --debug {role}", hadoop_home = Self::HADOOP_HOME, @@ -445,7 +433,7 @@ impl ContainerConfig { args.push(formatdoc!( r###" # hdfs' admin tools don't support specifying a custom keytab - kinit nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL -kt /kerberos/keytab + kinit nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL -kt /stackable/kerberos/keytab cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Start formatting namenode 
$POD_NAME. Checking for active namenodes:" for id in {pod_names} @@ -462,19 +450,6 @@ impl ContainerConfig { echo "" done - echo Storing password - echo secret > {KEYSTORE_DIR_NAME}/password - echo Cleaning up truststore - just in case - rm -f {KEYSTORE_DIR_NAME}/truststore.p12 - echo Creating truststore - keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass secret - echo Creating certificate chain - cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt - echo Creating keystore - openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout file:{KEYSTORE_DIR_NAME}/password - echo Cleaning up password - rm -f {KEYSTORE_DIR_NAME}/password - set -e if ! ls {NAMENODE_ROOT_DATA_DIR}/current/fsimage_* then @@ -567,11 +542,12 @@ impl ContainerConfig { /// Returns the container env variables. fn env( &self, + hdfs: &HdfsCluster, zookeeper_config_map_name: &str, env_overrides: Option<&BTreeMap>, resources: Option<&ResourceRequirements>, ) -> Vec { - let mut env = Self::transform_env_overrides_to_env_vars(env_overrides); + let mut env = Vec::new(); env.extend(Self::shared_env_vars( self.volume_mount_dirs().final_config(), @@ -579,15 +555,31 @@ impl ContainerConfig { )); if let ContainerConfig::Hdfs { role, .. 
} = self { - if let Some(resources) = resources { - env.push(EnvVar { - name: role.hadoop_opts().to_string(), - value: self.build_hadoop_opts(resources).ok(), - ..EnvVar::default() - }); - } + env.push(EnvVar { + name: role.hadoop_opts().to_string(), + value: self.build_hadoop_opts(hdfs, resources).ok(), + ..EnvVar::default() + }); + } else { + // We need to push this for Kerberos to work as not only the main containers need Kerberos + env.push(EnvVar { + name: "HADOOP_OPTS".to_string(), + value: Some("-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string()), + ..EnvVar::default() + }); + } + + // Not only the main containers need Kerberos + if hdfs.has_security_enabled() { + env.push(EnvVar { + name: "KRB5_CONFIG".to_string(), + value: Some("/stackable/kerberos/krb5.conf".to_string()), + ..EnvVar::default() + }); } + // Overrides need to come last + env.extend(Self::transform_env_overrides_to_env_vars(env_overrides)); env } @@ -655,6 +647,8 @@ impl ContainerConfig { .build(), ); + // Note that we create the volume here, only for the main container. + // However, as other containers need this volume as well, it will be also mounted in other containers. if let Some(kerberos_secret_class) = hdfs.kerberos_secret_class() { volumes.push( VolumeBuilder::new("kerberos") @@ -732,12 +726,17 @@ impl ContainerConfig { ) .build(), ]; + + // Adding this for all containers, as not only the main container needs Kerberos if hdfs.kerberos_secret_class().is_some() { - volume_mounts.push(VolumeMountBuilder::new("kerberos", "/kerberos").build()); + volume_mounts.push(VolumeMountBuilder::new("kerberos", "/stackable/kerberos").build()); } - if hdfs.https_secret_class().is_some() { - volume_mounts.push(VolumeMountBuilder::new("tls", "/stackable/tls").build()); - volume_mounts.push(VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build()); + // Only the main container need the tls cert to create their keystore + if let ContainerConfig::Hdfs { .. 
} = self { + if hdfs.https_secret_class().is_some() { + volume_mounts.push(VolumeMountBuilder::new("tls", "/stackable/tls").build()); + volume_mounts.push(VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build()); + } } match self { @@ -789,9 +788,9 @@ impl ContainerConfig { } /// Copy all the configuration files to the respective container config dir. - fn copy_hdfs_and_core_site_xml_cmd(&self) -> String { + fn copy_config_xml_cmd(&self) -> String { format!( - "cp {config_dir_mount}/* {config_dir_name}", + "cp {config_dir_mount}/*.xml {config_dir_name}", config_dir_mount = self.volume_mount_dirs().config_mount(), config_dir_name = self.volume_mount_dirs().final_config() ) @@ -824,7 +823,11 @@ impl ContainerConfig { } /// Build HADOOP_{*node}_OPTS for each namenode, datanodes and journalnodes. - fn build_hadoop_opts(&self, resources: &ResourceRequirements) -> Result { + fn build_hadoop_opts( + &self, + hdfs: &HdfsCluster, + resources: Option<&ResourceRequirements>, + ) -> Result { match self { ContainerConfig::Hdfs { role, metrics_port, .. 
@@ -833,8 +836,15 @@ impl ContainerConfig { format!( "-javaagent:/stackable/jmx/jmx_prometheus_javaagent-0.16.1.jar={metrics_port}:/stackable/jmx/{role}.yaml", )]; - if let Some(Some(memory_limit)) = - resources.limits.as_ref().map(|limits| limits.get("memory")) + + if hdfs.has_security_enabled() { + jvm_args.push( + "-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string(), + ); + } + + if let Some(Some(Some(memory_limit))) = + resources.map(|r| r.limits.as_ref().map(|limits| limits.get("memory"))) { let memory_limit = MemoryQuantity::try_from(memory_limit).with_context(|_| { diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 7503897f..e9066919 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -434,10 +434,10 @@ fn rolegroup_config_map( .add("dfs.namenode.kerberos.principal","nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL") .add("dfs.namenode.kerberos.principal.pattern","nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL") .add("dfs.datanode.kerberos.principal","dn/_HOST@CLUSTER.LOCAL") - .add("dfs.web.authentication.keytab.file","/kerberos/keytab") - .add("dfs.journalnode.keytab.file","/kerberos/keytab") - .add("dfs.namenode.keytab.file","/kerberos/keytab") - .add("dfs.datanode.keytab.file","/kerberos/keytab") + .add("dfs.web.authentication.keytab.file","/stackable/kerberos/keytab") + .add("dfs.journalnode.keytab.file","/stackable/kerberos/keytab") + .add("dfs.namenode.keytab.file","/stackable/kerberos/keytab") + .add("dfs.datanode.keytab.file","/stackable/kerberos/keytab") .add("hadoop.user.group.static.mapping.overrides","dr.who=;nn=;") // the extend with config must come last in order to have overrides working!!! 
.extend(config) @@ -452,7 +452,7 @@ fn rolegroup_config_map( ), ( "ssl.server.keystore.password".to_string(), - Some("secret".to_string()), + Some("changeit".to_string()), ), ( "ssl.server.keystore.type".to_string(), @@ -472,7 +472,7 @@ fn rolegroup_config_map( ), ( "ssl.client.truststore.password".to_string(), - Some("secret".to_string()), + Some("changeit".to_string()), ), ( "ssl.client.truststore.type".to_string(), From 5044bafbd7aeec4663dab6ec6921a76c9759e74f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 16 Mar 2023 10:27:44 +0100 Subject: [PATCH 014/101] Use dedicated init container to create tls bundles --- rust/operator/src/container.rs | 136 ++++++++++++++++++++------------- 1 file changed, 84 insertions(+), 52 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index a38bee01..d7f490bb 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -174,6 +174,49 @@ impl ContainerConfig { )); } + if let Some(https_secret_class) = hdfs.https_secret_class() { + pb.add_volume( + VolumeBuilder::new(https_secret_class) + .ephemeral( + SecretOperatorVolumeSourceBuilder::new("tls") + .with_pod_scope() + .with_node_scope() + .build(), + ) + .build(), + ); + + pb.add_volume( + VolumeBuilder::new("keystore") + .with_empty_dir(Option::::None, None) + .build(), + ); + + let create_tls_cert_bundle_init_container = + ContainerBuilder::new("create-tls-cert-bundle") + .unwrap() + .image_from_product_image(resolved_product_image) + .command(vec!["/bin/bash".to_string(), "-c".to_string()]) + .args(vec![formatdoc!( + r###" + echo "Cleaning up truststore - just in case" + rm -f {KEYSTORE_DIR_NAME}/truststore.p12 + echo "Creating truststore" + keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass changeit + echo "Creating certificate chain" + cat /stackable/tls/ca.crt /stackable/tls/tls.crt > 
{KEYSTORE_DIR_NAME}/chain.crt + echo "Cleaning up keystore - just in case" + rm -f {KEYSTORE_DIR_NAME}/keystore.p12 + echo "Creating keystore" + openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"### + )]) + // Only this init container needs the actual cert (from tls volume) to create the truststore + keystore from + .add_volume_mount("tls", "/stackable/tls") + .add_volume_mount("keystore", KEYSTORE_DIR_NAME) + .build(); + pb.add_init_container(create_tls_cert_bundle_init_container); + } + // role specific pod settings configured here match role { HdfsRole::NameNode => { @@ -286,7 +329,7 @@ impl ContainerConfig { cb.image_from_product_image(resolved_product_image) .command(self.command()) - .args(self.args(merged_config, &[])) + .args(self.args(hdfs, merged_config, &[])) .add_env_vars(self.env( hdfs, zookeeper_config_map_name, @@ -324,7 +367,7 @@ impl ContainerConfig { .with_context(|_| InvalidContainerNameSnafu { name: self.name() })? .image_from_product_image(resolved_product_image) .command(self.command()) - .args(self.args(merged_config, namenode_podrefs)) + .args(self.args(hdfs, merged_config, namenode_podrefs)) .add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None)) .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .build()) @@ -365,7 +408,7 @@ impl ContainerConfig { ContainerConfig::FormatNameNodes { .. } | ContainerConfig::FormatZooKeeper { .. } | ContainerConfig::WaitForNameNodes { .. } => { - vec!["bash".to_string(), "-c".to_string()] + vec!["/bin/bash".to_string(), "-c".to_string()] } } } @@ -373,6 +416,7 @@ impl ContainerConfig { /// Returns the container command arguments. 
fn args( &self, + hdfs: &HdfsCluster, merged_config: &(dyn MergedConfig + Send + 'static), namenode_podrefs: &[HdfsPodRef], ) -> Vec { @@ -380,6 +424,11 @@ impl ContainerConfig { self.create_config_directory_cmd(), self.copy_config_xml_cmd(), ]; + // We can't influence the order of the init containers. + // Some init containers - such as format-namenodes - need the tls certs, so let's wait for them to be properly set up + if hdfs.has_https_enabled() { + args.push(self.wait_for_trust_and_keystore_command()); + } match self { ContainerConfig::Hdfs { role, .. } => { args.push(self.copy_log4j_properties_cmd( @@ -387,18 +436,6 @@ impl ContainerConfig { merged_config.hdfs_logging(), )); - // Only the main containers gets the tls certs mounted and builds the keystore - args.push([ - "echo Cleaning up truststore - just in case", - &format!("rm -f {KEYSTORE_DIR_NAME}/truststore.p12"), - "echo Creating truststore", - &format!("keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass changeit"), - "echo Creating certificate chain", - &format!("cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt"), - "echo Creating keystore", - &format!("openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"), - ].join(" && ")); - args.push(format!( "{hadoop_home}/bin/hdfs --debug {role}", hadoop_home = Self::HADOOP_HOME, @@ -427,7 +464,7 @@ impl ContainerConfig { // for e.g. scaling. It may fail if the active namenode is restarted and the standby // namenode takes over. // This is why in the second part we check if the node is formatted already via - // $NAMENODE_DIR/current/VERSION. Then we dont do anything. + // $NAMENODE_DIR/current/VERSION. Then we don't do anything. // If there is no active namenode, the current pod is not formatted we format as // active namenode. 
Otherwise as standby node. args.push(formatdoc!( @@ -464,9 +501,7 @@ else cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Pod $POD_NAME already formatted. Skipping..." - fi - - "###, + fi"###, hadoop_home = Self::HADOOP_HOME, pod_names = namenode_podrefs .iter() @@ -502,6 +537,7 @@ } args.push(formatdoc!(r###" echo "Waiting for namenodes to get ready:" + kinit dn/simple-hdfs-datanode-default.default.svc.cluster.local@CLUSTER.LOCAL -kt /stackable/kerberos/keytab n=0 while [ ${{n}} -lt 12 ]; do @@ -539,6 +575,12 @@ vec![args.join(" && ")] } + fn wait_for_trust_and_keystore_command(&self) -> String { + format!( + "until [ -f {KEYSTORE_DIR_NAME}/truststore.p12 ]; do echo 'Waiting for truststore to be created' && sleep 1; done && until [ -f {KEYSTORE_DIR_NAME}/keystore.p12 ]; do echo 'Waiting for keystore to be created' && sleep 1; done" + ) + } + /// Returns the container env variables. fn env( &self, @@ -554,20 +596,32 @@ zookeeper_config_map_name, )); + // For the main container we use specialized env variables for every role + // (think of like HDFS_NAMENODE_OPTS or HDFS_DATANODE_OPTS) + // We do so, so that users shelling into the hdfs Pods will not have problems + // because they will read out the HADOOP_OPTS env var as well for the cli clients + // (but *not* the HDFS_NAMENODE_OPTS env var)! + // The hadoop opts contain a Prometheus metric emitter, which binds itself to a static port. + // When the user tries to start a cli tool the port is already taken by the hdfs services, + // so we don't want to stuff all the config into HADOOP_OPTS, but rather into the specialized env variables + // See https://github.com/stackabletech/hdfs-operator/issues/138 for details if let ContainerConfig::Hdfs { role, .. 
} = self { env.push(EnvVar { name: role.hadoop_opts().to_string(), value: self.build_hadoop_opts(hdfs, resources).ok(), ..EnvVar::default() }); - } else { - // We need to push this for Kerberos to work as not only the main containers need Kerberos - env.push(EnvVar { - name: "HADOOP_OPTS".to_string(), - value: Some("-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string()), - ..EnvVar::default() - }); } + // Additionally, any other init or sidecar container must have access to the following settings. + // As the Prometheus metric emitter is not part of this config it's safe to use for hdfs cli tools as well. + // This will not only enable the init containers to work, but also the user to run e.g. + // `bin/hdfs dfs -ls /` without getting `Caused by: java.lang.IllegalArgumentException: KrbException: Cannot locate default realm` + // because the `-Djava.security.krb5.conf` setting is missing + env.push(EnvVar { + name: "HADOOP_OPTS".to_string(), + value: Some("-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string()), + ..EnvVar::default() + }); // Not only the main containers need Kerberos if hdfs.has_security_enabled() { @@ -669,25 +723,6 @@ impl ContainerConfig { ); } - if let Some(https_secret_class) = hdfs.https_secret_class() { - volumes.push( - VolumeBuilder::new(https_secret_class) - .ephemeral( - SecretOperatorVolumeSourceBuilder::new("tls") - .with_pod_scope() - .with_node_scope() - .build(), - ) - .build(), - ); - - volumes.push( - VolumeBuilder::new("keystore") - .with_empty_dir(Option::::None, None) - .build(), - ); - } - Some(merged_config.hdfs_logging()) } ContainerConfig::Zkfc { .. 
} => merged_config.zkfc_logging(), @@ -727,16 +762,13 @@ impl ContainerConfig { .build(), ]; - // Adding this for all containers, as not only the main container needs Kerberos + // Adding this for all containers, as not only the main container needs Kerberos or TLS if hdfs.kerberos_secret_class().is_some() { volume_mounts.push(VolumeMountBuilder::new("kerberos", "/stackable/kerberos").build()); } - // Only the main container need the tls cert to create their keystore - if let ContainerConfig::Hdfs { .. } = self { - if hdfs.https_secret_class().is_some() { - volume_mounts.push(VolumeMountBuilder::new("tls", "/stackable/tls").build()); - volume_mounts.push(VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build()); - } + if hdfs.https_secret_class().is_some() { + // This volume will be propagated by the CreateTlsCertBundle container + volume_mounts.push(VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build()); } match self { From 668dec4a92fd8afcb118fcc8b1f71f7033abe4dd Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 16 Mar 2023 14:24:05 +0100 Subject: [PATCH 015/101] Move kerberos service name into separate fn --- rust/crd/src/lib.rs | 8 ++++++++ rust/operator/src/container.rs | 6 +----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index ae6a4f3f..802bc467 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -337,6 +337,14 @@ impl HdfsRole { } } + pub fn kerberos_service_name(&self) -> &'static str { + match self { + HdfsRole::NameNode => "nn", + HdfsRole::DataNode => "dn", + HdfsRole::JournalNode => "jn", + } + } + /// Return replicas for a certain rolegroup. 
pub fn role_group_replicas(&self, hdfs: &HdfsCluster, role_group: &str) -> i32 { match self { diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index d7f490bb..c0a3106c 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -711,11 +711,7 @@ impl ContainerConfig { .with_pod_scope() .with_node_scope() // .with_service_scope("simple-hdfs-namenode-default") - .with_kerberos_service_name(match role { - HdfsRole::NameNode => "nn", - HdfsRole::DataNode => "dn", - HdfsRole::JournalNode => "jn", - }) + .with_kerberos_service_name(role.kerberos_service_name()) .with_kerberos_service_name("HTTP") .build(), ) From 29fa1682f2a4b8bee28cf556602ede51476ca606 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 16 Mar 2023 15:36:53 +0100 Subject: [PATCH 016/101] Dynamically determine principal name --- rust/operator/src/container.rs | 48 +++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index c0a3106c..f6d946e7 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -94,6 +94,8 @@ pub enum ContainerConfig { volume_mounts: ContainerVolumeDirs, }, FormatNameNodes { + /// HDFS role (name-, data-, journal-node) which will be the container_name. + role: HdfsRole, /// The provided custom container name. container_name: String, /// Volume mounts for config and logging. @@ -106,6 +108,8 @@ pub enum ContainerConfig { volume_mounts: ContainerVolumeDirs, }, WaitForNameNodes { + /// HDFS role (name-, data-, journal-node) which will be the container_name. + role: HdfsRole, /// The provided custom container name. container_name: String, /// Volume mounts for config and logging. 
@@ -162,6 +166,7 @@ impl ContainerConfig { zk_config_map_name, env_overrides, merged_config, + object_name, )?); // Vector side container @@ -229,6 +234,7 @@ impl ContainerConfig { zk_config_map_name, env_overrides, merged_config, + object_name, )?); // Format namenode init container @@ -246,6 +252,7 @@ impl ContainerConfig { env_overrides, namenode_podrefs, merged_config, + object_name, )?); // Format ZooKeeper init container @@ -263,6 +270,7 @@ impl ContainerConfig { env_overrides, namenode_podrefs, merged_config, + object_name, )?); } HdfsRole::DataNode => { @@ -281,6 +289,7 @@ impl ContainerConfig { env_overrides, namenode_podrefs, merged_config, + object_name, )?); } HdfsRole::JournalNode => {} @@ -319,6 +328,7 @@ impl ContainerConfig { zookeeper_config_map_name: &str, env_overrides: Option<&BTreeMap>, merged_config: &(dyn MergedConfig + Send + 'static), + object_name: &str, ) -> Result { let mut cb = ContainerBuilder::new(self.name()).with_context(|_| InvalidContainerNameSnafu { @@ -329,7 +339,7 @@ impl ContainerConfig { cb.image_from_product_image(resolved_product_image) .command(self.command()) - .args(self.args(hdfs, merged_config, &[])) + .args(self.args(hdfs, merged_config, &[], object_name)) .add_env_vars(self.env( hdfs, zookeeper_config_map_name, @@ -354,6 +364,7 @@ impl ContainerConfig { /// Creates respective init containers for: /// - Namenode (format-namenodes, format-zookeeper) /// - Datanode (wait-for-namenodes) + #[allow(clippy::too_many_arguments)] fn init_container( &self, hdfs: &HdfsCluster, @@ -362,12 +373,13 @@ impl ContainerConfig { env_overrides: Option<&BTreeMap>, namenode_podrefs: &[HdfsPodRef], merged_config: &(dyn MergedConfig + Send + 'static), + object_name: &str, ) -> Result { Ok(ContainerBuilder::new(self.name()) .with_context(|_| InvalidContainerNameSnafu { name: self.name() })? 
.image_from_product_image(resolved_product_image) .command(self.command()) - .args(self.args(hdfs, merged_config, namenode_podrefs)) + .args(self.args(hdfs, merged_config, namenode_podrefs, object_name)) .add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None)) .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .build()) @@ -419,6 +431,7 @@ impl ContainerConfig { hdfs: &HdfsCluster, merged_config: &(dyn MergedConfig + Send + 'static), namenode_podrefs: &[HdfsPodRef], + object_name: &str, ) -> Vec { let mut args = vec![ self.create_config_directory_cmd(), @@ -427,7 +440,7 @@ impl ContainerConfig { // We can't influence the order of the init containers. // Some init containers - such as format-namenodes - need the tls certs, so let's wait for them to be properly set up if hdfs.has_https_enabled() { - args.push(self.wait_for_trust_and_keystore_command()); + args.push(Self::wait_for_trust_and_keystore_command()); } match self { ContainerConfig::Hdfs { role, .. } => { @@ -453,7 +466,7 @@ impl ContainerConfig { hadoop_home = Self::HADOOP_HOME )); } - ContainerConfig::FormatNameNodes { .. } => { + ContainerConfig::FormatNameNodes { role, .. } => { if let Some(container_config) = merged_config.format_namenodes_logging() { args.push(self.copy_log4j_properties_cmd( FORMAT_NAMENODES_LOG4J_CONFIG_FILE, @@ -467,10 +480,11 @@ impl ContainerConfig { // $NAMENODE_DIR/current/VERSION. Then we don't do anything. // If there is no active namenode, the current pod is not formatted we format as // active namenode. Otherwise as standby node. + if hdfs.has_security_enabled() { + args.push(Self::get_kerberos_ticket(hdfs, role, object_name)); + } args.push(formatdoc!( r###" - # hdfs' admin tools don't support specifying a custom keytab - kinit nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL -kt /stackable/kerberos/keytab cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Start formatting namenode $POD_NAME. 
Checking for active namenodes:" for id in {pod_names} @@ -528,16 +542,18 @@ impl ContainerConfig { hadoop_home = Self::HADOOP_HOME )); } - ContainerConfig::WaitForNameNodes { .. } => { + ContainerConfig::WaitForNameNodes { role, .. } => { if let Some(container_config) = merged_config.wait_for_namenodes() { args.push(self.copy_log4j_properties_cmd( WAIT_FOR_NAMENODES_LOG4J_CONFIG_FILE, container_config, )); } + if hdfs.has_security_enabled() { + args.push(Self::get_kerberos_ticket(hdfs, role, object_name)); + } args.push(formatdoc!(r###" echo "Waiting for namenodes to get ready:" - kinit dn/simple-hdfs-datanode-default.default.svc.cluster.local@CLUSTER.LOCAL -kt /stackable/kerberos/keytab n=0 while [ ${{n}} -lt 12 ]; do @@ -575,12 +591,24 @@ impl ContainerConfig { vec![args.join(" && ")] } - fn wait_for_trust_and_keystore_command(&self) -> String { + /// Wait until the init container has created global trust and keystore shared between all containers + fn wait_for_trust_and_keystore_command() -> String { format!( "until [ -f {KEYSTORE_DIR_NAME}/truststore.p12 ]; do echo 'Waiting for truststore to be created' && sleep 1; done && until [ -f {KEYSTORE_DIR_NAME}/keystore.p12 ]; do echo 'Waiting for keystore to be created' && sleep 1; done" ) } + /// `kinit` a ticket using the principal created for the specified hdfs role + fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole, object_name: &str) -> String { + // Something like `nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL` + let principal = format!( + "{service_name}/{object_name}.{namespace}.svc.cluster.local@CLUSTER.LOCAL", + service_name = role.kerberos_service_name(), + namespace = hdfs.namespace().expect("HdfsCluster must be set"), + ); + format!("kinit {principal} -kt /stackable/kerberos/keytab") + } + /// Returns the container env variables. 
fn env( &self, @@ -1064,6 +1092,7 @@ impl TryFrom for ContainerConfig { // namenode init containers name if name == NameNodeContainer::FormatNameNodes.to_string() => { Ok(Self::FormatNameNodes { + role: HdfsRole::NameNode, volume_mounts: ContainerVolumeDirs::try_from(name.as_str())?, container_name: name, }) @@ -1077,6 +1106,7 @@ impl TryFrom for ContainerConfig { // datanode init containers name if name == DataNodeContainer::WaitForNameNodes.to_string() => { Ok(Self::WaitForNameNodes { + role: HdfsRole::DataNode, volume_mounts: ContainerVolumeDirs::try_from(name.as_str())?, container_name: name, }) From 21b9233c0ddb2329269c36c2eeeb8e85d86891ec Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 16 Mar 2023 16:04:11 +0100 Subject: [PATCH 017/101] Read KERBEROS_REALM from krb5.conf rather than hardcoding --- rust/operator/src/container.rs | 12 +++++++++++- rust/operator/src/hdfs_controller.rs | 16 ++++++++-------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index f6d946e7..08f5bbbd 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -442,6 +442,9 @@ impl ContainerConfig { if hdfs.has_https_enabled() { args.push(Self::wait_for_trust_and_keystore_command()); } + if hdfs.has_security_enabled() { + args.push(Self::export_kerberos_real_env_var_command()); + } match self { ContainerConfig::Hdfs { role, .. 
} => { args.push(self.copy_log4j_properties_cmd( @@ -599,16 +602,23 @@ } /// `kinit` a ticket using the principal created for the specified hdfs role + /// Needs the KERBEROS_REALM env var to be present, as `Self::export_kerberos_real_env_var_command` does fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole, object_name: &str) -> String { // Something like `nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL` let principal = format!( - "{service_name}/{object_name}.{namespace}.svc.cluster.local@CLUSTER.LOCAL", + "{service_name}/{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", service_name = role.kerberos_service_name(), namespace = hdfs.namespace().expect("HdfsCluster must be set"), ); format!("kinit {principal} -kt /stackable/kerberos/keytab") } + // Command to export `KERBEROS_REALM` env var to default realm from krb5.conf, e.g. `CLUSTER.LOCAL` + fn export_kerberos_real_env_var_command() -> String { + "export KERBEROS_REALM=$(grep -oP 'default_realm = \\K.*' /stackable/kerberos/krb5.conf)" + .to_string() + } + /// Returns the container env variables. 
fn env( &self, diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index e9066919..5d885dfa 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -426,14 +426,14 @@ fn rolegroup_config_map( .ha_zookeeper_quorum() .add("hadoop.security.authentication", "kerberos") .add("hadoop.security.authorization","true") - .add("hadoop.registry.kerberos.realm","CLUSTER.LOCAL") - .add("dfs.web.authentication.kerberos.principal","HTTP/_HOST@CLUSTER.LOCAL") - .add("dfs.journalnode.kerberos.internal.spnego.principal","HTTP/_HOST@CLUSTER.LOCAL") - .add("dfs.journalnode.kerberos.principal","jn/_HOST@CLUSTER.LOCAL") - .add("dfs.journalnode.kerberos.principal.pattern","jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@CLUSTER.LOCAL") - .add("dfs.namenode.kerberos.principal","nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL") - .add("dfs.namenode.kerberos.principal.pattern","nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL") - .add("dfs.datanode.kerberos.principal","dn/_HOST@CLUSTER.LOCAL") + .add("hadoop.registry.kerberos.realm","${env.KERBEROS_REALM}") + .add("dfs.web.authentication.kerberos.principal","HTTP/_HOST@${env.KERBEROS_REALM}") + .add("dfs.journalnode.kerberos.internal.spnego.principal","HTTP/_HOST@{env.KERBEROS_REALM}") + .add("dfs.journalnode.kerberos.principal","jn/_HOST@${env.KERBEROS_REALM}") + .add("dfs.journalnode.kerberos.principal.pattern","jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") + .add("dfs.namenode.kerberos.principal","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") + .add("dfs.namenode.kerberos.principal.pattern","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") + .add("dfs.datanode.kerberos.principal","dn/_HOST@${env.KERBEROS_REALM}") .add("dfs.web.authentication.keytab.file","/stackable/kerberos/keytab") 
.add("dfs.journalnode.keytab.file","/stackable/kerberos/keytab") .add("dfs.namenode.keytab.file","/stackable/kerberos/keytab") From 001394d740886baa72376476df7e692baa31e174 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 09:10:41 +0100 Subject: [PATCH 018/101] WIP checkpoint, not working. Tries to use fqdn, nn fails to connect to jn because of DNS problems --- .../hdfs/examples/getting_started/hdfs.yaml | 8 +- rust/operator/src/container.rs | 9 +- rust/operator/src/hdfs_controller.rs | 170 ++++++++++++++---- 3 files changed, 148 insertions(+), 39 deletions(-) diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index bd7448af..002fdfb6 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -2,7 +2,7 @@ apiVersion: hdfs.stackable.tech/v1alpha1 kind: HdfsCluster metadata: - name: simple-hdfs + name: hdfs-test spec: image: productVersion: 3.3.4 @@ -69,10 +69,10 @@ spec: backend: kerberosKeytab: realmName: CLUSTER.LOCAL - kdc: krb5-kdc.default.svc.cluster.local - adminServer: krb5-kdc.default.svc.cluster.local + kdc: krb5-kdc.test.svc.cluster.local + adminServer: krb5-kdc.test.svc.cluster.local adminKeytabSecret: - namespace: default + namespace: test name: secret-operator-keytab adminPrincipal: stackable-secret-operator --- diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 08f5bbbd..b41e941c 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -493,6 +493,8 @@ impl ContainerConfig { for id in {pod_names} do echo -n "Checking pod $id... 
" + # TODO remove the following line again, only for debugging purpose + {hadoop_home}/bin/hdfs haadmin -getServiceState $id SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id | tail -n1) echo "FOOBAR $SERVICE_STATE BARFOO" if [ "$SERVICE_STATE" == "active" ] @@ -603,14 +605,15 @@ impl ContainerConfig { /// `kinit` a ticket using the principal created for the specified hdfs role /// Needs the KERBEROS_REALM env var to be present, as `Self::export_kerberos_real_env_var_command` does + /// Also needs the POD_NAME env var to be present, which is set in the Pod spec fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole, object_name: &str) -> String { - // Something like `nn/simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL` + // Something like `nn/simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL` let principal = format!( - "{service_name}/{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", + "{service_name}/${{POD_NAME}}.{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", service_name = role.kerberos_service_name(), namespace = hdfs.namespace().expect("HdfsCluster must be set"), ); - format!("kinit {principal} -kt /stackable/kerberos/keytab") + format!("echo \"Getting ticket for {principal}\" from /stackable/kerberos/keytab && kinit {principal} -kt /stackable/kerberos/keytab") } // Command to export `KERBEROS_REALM` env var to default real from krb5.conf, e.g. 
`CLUSTER.LOCAL` diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 5d885dfa..93d1f64a 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -90,6 +90,8 @@ pub enum Error { }, #[snafu(display("Object has no name"))] ObjectHasNoName { obj_ref: ObjectRef }, + #[snafu(display("Object has no namespace"))] + ObjectHasNoNamespace { obj_ref: ObjectRef }, #[snafu(display("Cannot build config map for role [{role}] and role group [{role_group}]"))] BuildRoleGroupConfigMap { source: stackable_operator::error::Error, @@ -262,6 +264,7 @@ pub async fn reconcile_hdfs(hdfs: Arc, ctx: Arc) -> HdfsOperat rolegroup_service(&hdfs, &role, &rolegroup_ref, &resolved_product_image)?; let rg_configmap = rolegroup_config_map( &hdfs, + &role, &rolegroup_ref, rolegroup_config, &namenode_podrefs, @@ -359,6 +362,7 @@ fn rolegroup_service( #[allow(clippy::too_many_arguments)] fn rolegroup_config_map( hdfs: &HdfsCluster, + role: &HdfsRole, rolegroup_ref: &RoleGroupRef, rolegroup_config: &HashMap>, namenode_podrefs: &[HdfsPodRef], @@ -368,7 +372,6 @@ fn rolegroup_config_map( vector_aggregator_address: Option<&str>, ) -> HdfsOperatorResult { tracing::info!("Setting up ConfigMap for {:?}", rolegroup_ref); - let hdfs_name = hdfs .metadata .name @@ -376,6 +379,12 @@ fn rolegroup_config_map( .with_context(|| ObjectHasNoNameSnafu { obj_ref: ObjectRef::from_obj(hdfs), })?; + let hdfs_namespace = hdfs + .namespace() + .with_context(|| ObjectHasNoNamespaceSnafu { + obj_ref: ObjectRef::from_obj(hdfs), + })?; + // let object_name = rolegroup_ref.object_name(); let mut hdfs_site_xml = String::new(); let mut core_site_xml = String::new(); @@ -385,10 +394,11 @@ fn rolegroup_config_map( for (property_name_kind, config) in rolegroup_config { match property_name_kind { PropertyNameKind::File(file_name) if file_name == HDFS_SITE_XML => { - hdfs_site_xml = HdfsSiteConfigBuilder::new(hdfs_name.to_string()) - // IMPORTANT: these 
folders must be under the volume mount point, otherwise they will not - // be formatted by the namenode, or used by the other services. - // See also: https://github.com/apache-spark-on-k8s/kubernetes-HDFS/commit/aef9586ecc8551ca0f0a468c3b917d8c38f494a0 + let mut hdfs_site_xml_builder = HdfsSiteConfigBuilder::new(hdfs_name.to_string()); + // IMPORTANT: these folders must be under the volume mount point, otherwise they will not + // be formatted by the namenode, or used by the other services. + // See also: https://github.com/apache-spark-on-k8s/kubernetes-HDFS/commit/aef9586ecc8551ca0f0a468c3b917d8c38f494a0 + hdfs_site_xml_builder .dfs_namenode_name_dir() .dfs_datanode_data_dir(merged_config.data_node_resources().map(|r| r.storage)) .dfs_journalnode_edits_dir() @@ -410,38 +420,134 @@ fn rolegroup_config_map( .add("dfs.ha.fencing.methods", "shell(/bin/true)") .add("dfs.ha.nn.not-become-active-in-safemode", "true") .add("dfs.ha.automatic-failover.enabled", "true") - .add("dfs.ha.namenode.id", "${env.POD_NAME}") - .add("dfs.block.access.token.enable", "true") - .add("dfs.data.transfer.protection", "authentication") - .add("dfs.http.policy", "HTTPS_ONLY") - .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) - .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML) + .add("dfs.ha.namenode.id", "${env.POD_NAME}"); + + if hdfs.has_security_enabled() { + hdfs_site_xml_builder + .add("dfs.block.access.token.enable", "true") + .add("dfs.data.transfer.protection", "authentication") + .add("dfs.http.policy", "HTTPS_ONLY") + .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) + .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); + } + + hdfs_site_xml = hdfs_site_xml_builder // the extend with config must come last in order to have overrides working!!! 
.extend(config) .build_as_xml(); } PropertyNameKind::File(file_name) if file_name == CORE_SITE_XML => { - core_site_xml = CoreSiteConfigBuilder::new(hdfs_name.to_string()) - .fs_default_fs() - .ha_zookeeper_quorum() - .add("hadoop.security.authentication", "kerberos") - .add("hadoop.security.authorization","true") - .add("hadoop.registry.kerberos.realm","${env.KERBEROS_REALM}") - .add("dfs.web.authentication.kerberos.principal","HTTP/_HOST@${env.KERBEROS_REALM}") - .add("dfs.journalnode.kerberos.internal.spnego.principal","HTTP/_HOST@{env.KERBEROS_REALM}") - .add("dfs.journalnode.kerberos.principal","jn/_HOST@${env.KERBEROS_REALM}") - .add("dfs.journalnode.kerberos.principal.pattern","jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") - .add("dfs.namenode.kerberos.principal","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") - .add("dfs.namenode.kerberos.principal.pattern","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") - .add("dfs.datanode.kerberos.principal","dn/_HOST@${env.KERBEROS_REALM}") - .add("dfs.web.authentication.keytab.file","/stackable/kerberos/keytab") - .add("dfs.journalnode.keytab.file","/stackable/kerberos/keytab") - .add("dfs.namenode.keytab.file","/stackable/kerberos/keytab") - .add("dfs.datanode.keytab.file","/stackable/kerberos/keytab") - .add("hadoop.user.group.static.mapping.overrides","dr.who=;nn=;") - // the extend with config must come last in order to have overrides working!!! 
- .extend(config) - .build_as_xml(); + let mut core_site_xml_builder = CoreSiteConfigBuilder::new(hdfs_name.to_string()); + + core_site_xml_builder.fs_default_fs().ha_zookeeper_quorum(); + + if hdfs.has_security_enabled() { + // .add("hadoop.security.authentication", "kerberos") + // .add("hadoop.security.authorization","true") + // .add("hadoop.registry.kerberos.realm","${env.KERBEROS_REALM}") + // .add("dfs.web.authentication.kerberos.principal","HTTP/_HOST@${env.KERBEROS_REALM}") + // .add("dfs.journalnode.kerberos.internal.spnego.principal","HTTP/_HOST@{env.KERBEROS_REALM}") + // .add("dfs.journalnode.kerberos.principal","jn/_HOST@${env.KERBEROS_REALM}") + // .add("dfs.journalnode.kerberos.principal.pattern","jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") + // .add("dfs.namenode.kerberos.principal","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") + // .add("dfs.namenode.kerberos.principal.pattern","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") + // .add("dfs.datanode.kerberos.principal","dn/_HOST@${env.KERBEROS_REALM}") + // .add("dfs.web.authentication.keytab.file","/stackable/kerberos/keytab") + // .add("dfs.journalnode.keytab.file","/stackable/kerberos/keytab") + // .add("dfs.namenode.keytab.file","/stackable/kerberos/keytab") + // .add("dfs.datanode.keytab.file","/stackable/kerberos/keytab") + // .add("hadoop.user.group.static.mapping.overrides","dr.who=;nn=;") + + core_site_xml_builder + .add("hadoop.security.authentication", "kerberos") + .add("hadoop.security.authorization", "true") + .add("hadoop.user.group.static.mapping.overrides", "dr.who=;nn=;") + .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") + .add( + "dfs.web.authentication.kerberos.principal", + "HTTP/_HOST@${env.KERBEROS_REALM}", + ) + .add( + "dfs.web.authentication.keytab.file", + "/stackable/kerberos/keytab", + ) + // 10.244.0.30:8485: DestHost:destPort 
hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local:8485 , LocalHost:localPort hdfs-test-namenode-default-0/10.244.0.33:0. Failed on local exception: java.io.IOException: Couldn't set up IO streams: java.lang.IllegalArgumentException: Server has invalid Kerberos principal: jn/hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local@CLUSTER.LOCAL, doesn't match the pattern: jn/*@${{env.KERBEROS_REALM}} + // format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str() + .add( + "dfs.journalnode.kerberos.principal.pattern", + format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ) + .add( + "dfs.journalnode.kerberos.principal", + "jn/_HOST@${env.KERBEROS_REALM}", + ) + .add( + "dfs.journalnode.kerberos.internal.spnego.principal", + "HTTP/_HOST@{env.KERBEROS_REALM}", + ) + .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") + .add( + "dfs.namenode.kerberos.principal", + "nn/_HOST@${env.KERBEROS_REALM}", + ) + .add( + "dfs.namenode.kerberos.principal.pattern", + format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ) + .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab") + .add( + "dfs.datanode.kerberos.principal", + "dn/_HOST@${env.KERBEROS_REALM}", + ) + .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); + + // match role { + // HdfsRole::NameNode => { + // core_site_xml_builder + // // .add("dfs.namenode.kerberos.principal", format!("nn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str()) + // // .add("dfs.namenode.kerberos.principal.pattern", format!("nn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str()) + // .add("dfs.namenode.kerberos.principal", 
"nn/_HOST@${env.KERBEROS_REALM}") + // .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab") + // .add("dfs.journalnode.kerberos.principal.pattern", format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str()); + // } + // HdfsRole::DataNode => { + // core_site_xml_builder + // .add( + // "dfs.datanode.kerberos.principal", + // "dn/_HOST@${env.KERBEROS_REALM}", + // ) + // .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab") + // .add( + // "dfs.namenode.kerberos.principal", + // "nn/_HOST@${env.KERBEROS_REALM}", + // ) + // .add( + // "dfs.namenode.kerberos.principal.pattern", + // format!("nn/*@${{env.KERBEROS_REALM}}").as_str(), + // ); + // } + // HdfsRole::JournalNode => { + // core_site_xml_builder + // .add( + // "dfs.journalnode.kerberos.principal", + // "jn/_HOST@${env.KERBEROS_REALM}", + // ) + // .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") + // .add( + // "dfs.journalnode.kerberos.internal.spnego.principal", + // "HTTP/_HOST@{env.KERBEROS_REALM}", + // ); + // } + // }; + } + + // export KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /stackable/kerberos/krb5.conf) + // kinit dn/hdfs-test-datanode-default.soenke.svc.cluster.local@${KERBEROS_REALM} -kt /stackable/kerberos/keytab + // kinit dn/hdfs-test-datanode-default-0.hdfs-test-datanode-default.soenke.svc.cluster.local@${KERBEROS_REALM} -kt /stackable/kerberos/keytab + // /stackable/hadoop/bin/hdfs haadmin -getServiceState hdfs-test-namenode-default-0 + + // the extend with config must come last in order to have overrides working!!! 
+ core_site_xml = core_site_xml_builder.extend(config).build_as_xml(); } PropertyNameKind::File(file_name) if file_name == SSL_SERVER_XML => { let mut config_opts = BTreeMap::new(); From aa02167e4a6ad1b7df18f8e6d944e963ddcbe52a Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 10:02:42 +0100 Subject: [PATCH 019/101] Switch back to using service names as principals. Works again; only the datanode wait-for-namenodes init container fails because it's stupid and requires the dfs.namenode.kerberos.principal setting (and ignores the dfs.namenode.kerberos.principal.pattern) (hdfs will start anyway) --- rust/operator/src/container.rs | 8 ++-- rust/operator/src/hdfs_controller.rs | 64 +++++++++++++++++----------- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index b41e941c..dea94e29 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -493,8 +493,6 @@ impl ContainerConfig { for id in {pod_names} do echo -n "Checking pod $id... " - # TODO remove the following line again, only for debugging purpose - {hadoop_home}/bin/hdfs haadmin -getServiceState $id SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id | tail -n1) echo "FOOBAR $SERVICE_STATE BARFOO" if [ "$SERVICE_STATE" == "active" ] @@ -566,6 +564,8 @@ impl ContainerConfig { for id in {pod_names} do echo -n "Checking pod $id... 
" + # TODO remove the following line again, only for debugging purpose + {hadoop_home}/bin/hdfs haadmin -getServiceState $id SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id 2>/dev/null | tail -n1) if [ "$SERVICE_STATE" = "active" ] || [ "$SERVICE_STATE" = "standby" ] then @@ -605,11 +605,9 @@ impl ContainerConfig { /// `kinit` a ticket using the principal created for the specified hdfs role /// Needs the KERBEROS_REALM env var to be present, as `Self::export_kerberos_real_env_var_command` does - /// Also needs the POD_NAME env var to be present, which is set in the Pod spec fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole, object_name: &str) -> String { - // Something like `nn/simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local@CLUSTER.LOCAL` let principal = format!( - "{service_name}/${{POD_NAME}}.{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", + "{service_name}/{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", service_name = role.kerberos_service_name(), namespace = hdfs.namespace().expect("HdfsCluster must be set"), ); diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 93d1f64a..2916021f 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -384,7 +384,7 @@ fn rolegroup_config_map( .with_context(|| ObjectHasNoNamespaceSnafu { obj_ref: ObjectRef::from_obj(hdfs), })?; - // let object_name = rolegroup_ref.object_name(); + let object_name = rolegroup_ref.object_name(); let mut hdfs_site_xml = String::new(); let mut core_site_xml = String::new(); @@ -465,41 +465,53 @@ fn rolegroup_config_map( .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") .add( "dfs.web.authentication.kerberos.principal", - "HTTP/_HOST@${env.KERBEROS_REALM}", + format!("HTTP/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add( "dfs.web.authentication.keytab.file", 
"/stackable/kerberos/keytab", ) - // 10.244.0.30:8485: DestHost:destPort hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local:8485 , LocalHost:localPort hdfs-test-namenode-default-0/10.244.0.33:0. Failed on local exception: java.io.IOException: Couldn't set up IO streams: java.lang.IllegalArgumentException: Server has invalid Kerberos principal: jn/hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local@CLUSTER.LOCAL, doesn't match the pattern: jn/*@${{env.KERBEROS_REALM}} - // format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str() .add( "dfs.journalnode.kerberos.principal.pattern", - format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), - ) - .add( - "dfs.journalnode.kerberos.principal", - "jn/_HOST@${env.KERBEROS_REALM}", - ) - .add( - "dfs.journalnode.kerberos.internal.spnego.principal", - "HTTP/_HOST@{env.KERBEROS_REALM}", - ) - .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") - .add( - "dfs.namenode.kerberos.principal", - "nn/_HOST@${env.KERBEROS_REALM}", + // We have to use the asterisk (*), because we don't now the possible rolegroups of the journalnodes here. 
+ // It is *not* used as placeholder for the node replica number, but rather for the rolegroup + format!("jn/{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add( "dfs.namenode.kerberos.principal.pattern", - format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), - ) - .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab") - .add( - "dfs.datanode.kerberos.principal", - "dn/_HOST@${env.KERBEROS_REALM}", - ) - .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); + format!("nn/{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ); + + match role { + HdfsRole::NameNode => { + core_site_xml_builder + .add( + "dfs.namenode.kerberos.principal", + format!("nn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ) + .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab"); + } + HdfsRole::DataNode => { + core_site_xml_builder + .add( + "dfs.datanode.kerberos.principal", + format!("dn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ) + .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); + } + HdfsRole::JournalNode => { + core_site_xml_builder + .add( + "dfs.journalnode.kerberos.principal", + format!("jn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ) + .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") + .add( + "dfs.journalnode.kerberos.internal.spnego.principal", + format!("HTTP/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ); + } + } // match role { // HdfsRole::NameNode => { From 0edee669564b2e2d4ea94f612870376d4fb60a04 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 10:25:13 +0100 Subject: [PATCH 020/101] Only use hdfs_name within principal. It works! 
--- rust/operator/src/container.rs | 7 +++--- rust/operator/src/hdfs_controller.rs | 32 +++++++++++++++++++++------- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index dea94e29..41f182da 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -607,8 +607,9 @@ impl ContainerConfig { /// Needs the KERBEROS_REALM env var to be present, as `Self::export_kerberos_real_env_var_command` does fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole, object_name: &str) -> String { let principal = format!( - "{service_name}/{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", + "{service_name}/{hdfs_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", service_name = role.kerberos_service_name(), + hdfs_name = hdfs.name_unchecked(), namespace = hdfs.namespace().expect("HdfsCluster must be set"), ); format!("echo \"Getting ticket for {principal}\" from /stackable/kerberos/keytab && kinit {principal} -kt /stackable/kerberos/keytab") @@ -747,9 +748,7 @@ impl ContainerConfig { VolumeBuilder::new("kerberos") .ephemeral( SecretOperatorVolumeSourceBuilder::new(kerberos_secret_class) - .with_pod_scope() - .with_node_scope() - // .with_service_scope("simple-hdfs-namenode-default") + .with_service_scope(hdfs.name_unchecked()) .with_kerberos_service_name(role.kerberos_service_name()) .with_kerberos_service_name("HTTP") .build(), diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 2916021f..8ac95f14 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -465,7 +465,7 @@ fn rolegroup_config_map( .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") .add( "dfs.web.authentication.kerberos.principal", - format!("HTTP/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + 
format!("HTTP/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add( "dfs.web.authentication.keytab.file", @@ -475,11 +475,11 @@ fn rolegroup_config_map( "dfs.journalnode.kerberos.principal.pattern", // We have to use the asterisk (*), because we don't now the possible rolegroups of the journalnodes here. // It is *not* used as placeholder for the node replica number, but rather for the rolegroup - format!("jn/{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("jn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add( "dfs.namenode.kerberos.principal.pattern", - format!("nn/{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ); match role { @@ -487,7 +487,7 @@ fn rolegroup_config_map( core_site_xml_builder .add( "dfs.namenode.kerberos.principal", - format!("nn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab"); } @@ -495,20 +495,36 @@ fn rolegroup_config_map( core_site_xml_builder .add( "dfs.datanode.kerberos.principal", - format!("dn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("dn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) - .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); + .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab") + // Besides having goddamn `dfs.namenode.kerberos.principal.pattern` setting, + // the `wait-for-namenodes` init container will fail with the following message when + // `dfs.namenode.kerberos.principal` is not set: + // + // Couldn't set up IO streams: 
java.lang.IllegalArgumentException: Failed to specify server's Kerberos principal name + // + // So we have to tell it the principal of the namenode, no pattern will be used here :() + // As we can have multiple rolegroups of namenodes we can *not* use the object_name (such as simple-hdfs-datanode-default) + // as this would only work for a specific - single - rolegroup (default in this case) + // + // *This* is actually the reason we did go with simply `nn/{hdfs_name}.{hdfs_namespace}` + // and not e.g. `nn/{object_name}.{hdfs_namespace}` + .add( + "dfs.namenode.kerberos.principal", + format!("nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ); } HdfsRole::JournalNode => { core_site_xml_builder .add( "dfs.journalnode.kerberos.principal", - format!("jn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("jn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") .add( "dfs.journalnode.kerberos.internal.spnego.principal", - format!("HTTP/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("HTTP/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ); } } From b7504173b183488ecfd4f79fc23cf43b398cc7ad Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 11:16:54 +0100 Subject: [PATCH 021/101] Test with more replicas --- docs/modules/hdfs/examples/getting_started/hdfs.yaml | 4 ++-- docs/modules/hdfs/examples/getting_started/zk.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index 002fdfb6..e3915cd4 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -37,7 +37,7 @@ spec: dataNodes: roleGroups: 
default: - replicas: 1 + replicas: 2 config: logging: containers: @@ -50,7 +50,7 @@ spec: journalNodes: roleGroups: default: - replicas: 1 + replicas: 3 config: logging: containers: diff --git a/docs/modules/hdfs/examples/getting_started/zk.yaml b/docs/modules/hdfs/examples/getting_started/zk.yaml index 71d2eab2..be3d5390 100644 --- a/docs/modules/hdfs/examples/getting_started/zk.yaml +++ b/docs/modules/hdfs/examples/getting_started/zk.yaml @@ -10,4 +10,4 @@ spec: servers: roleGroups: default: - replicas: 1 + replicas: 3 From 320ec001d8b9ae80ae27f3ea23744d729056d1da Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 12:12:25 +0100 Subject: [PATCH 022/101] Switch back to use fqdn principals --- rust/operator/src/container.rs | 16 +++--- rust/operator/src/hdfs_controller.rs | 79 ++++------------------------ 2 files changed, 18 insertions(+), 77 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 41f182da..2c8f4d93 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -564,9 +564,10 @@ impl ContainerConfig { for id in {pod_names} do echo -n "Checking pod $id... 
" - # TODO remove the following line again, only for debugging purpose - {hadoop_home}/bin/hdfs haadmin -getServiceState $id - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id 2>/dev/null | tail -n1) + # We need to calculate the exact principal and pass it in her (for security reasons) + # Otherwise the command will fail with Couldn't set up IO streams: java.lang.IllegalArgumentException: Failed to specify server's Kerberos principal name + PRINCIPAL=$(echo "nn/${{id}}.$(echo $id | grep -o '.*[^-0-9]').test.svc.cluster.local@${{KERBEROS_REALM}}") + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $id 2>/dev/null | tail -n1) if [ "$SERVICE_STATE" = "active" ] || [ "$SERVICE_STATE" = "standby" ] then echo "$SERVICE_STATE" @@ -605,14 +606,14 @@ impl ContainerConfig { /// `kinit` a ticket using the principal created for the specified hdfs role /// Needs the KERBEROS_REALM env var to be present, as `Self::export_kerberos_real_env_var_command` does + /// Needs the POD_NAME env var to be present, which will be provided by the PodSpec fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole, object_name: &str) -> String { let principal = format!( - "{service_name}/{hdfs_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", + "{service_name}/${{POD_NAME}}.{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", service_name = role.kerberos_service_name(), - hdfs_name = hdfs.name_unchecked(), namespace = hdfs.namespace().expect("HdfsCluster must be set"), ); - format!("echo \"Getting ticket for {principal}\" from /stackable/kerberos/keytab && kinit {principal} -kt /stackable/kerberos/keytab") + format!("echo \"Getting ticket for {principal}\" from /stackable/kerberos/keytab && kinit \"{principal}\" -kt /stackable/kerberos/keytab") } // Command to export `KERBEROS_REALM` env var to default real from krb5.conf, e.g. 
`CLUSTER.LOCAL` @@ -748,7 +749,8 @@ impl ContainerConfig { VolumeBuilder::new("kerberos") .ephemeral( SecretOperatorVolumeSourceBuilder::new(kerberos_secret_class) - .with_service_scope(hdfs.name_unchecked()) + .with_pod_scope() + .with_node_scope() .with_kerberos_service_name(role.kerberos_service_name()) .with_kerberos_service_name("HTTP") .build(), diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 8ac95f14..57dd1f72 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -465,7 +465,7 @@ fn rolegroup_config_map( .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") .add( "dfs.web.authentication.kerberos.principal", - format!("HTTP/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + "HTTP/_HOST@${env.KERBEROS_REALM}", ) .add( "dfs.web.authentication.keytab.file", @@ -473,13 +473,12 @@ fn rolegroup_config_map( ) .add( "dfs.journalnode.kerberos.principal.pattern", - // We have to use the asterisk (*), because we don't now the possible rolegroups of the journalnodes here. 
- // It is *not* used as placeholder for the node replica number, but rather for the rolegroup - format!("jn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + // jn/hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local@CLUSTER.LOCAL + format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add( "dfs.namenode.kerberos.principal.pattern", - format!("nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ); match role { @@ -487,7 +486,7 @@ fn rolegroup_config_map( core_site_xml_builder .add( "dfs.namenode.kerberos.principal", - format!("nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + "nn/_HOST@${env.KERBEROS_REALM}", ) .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab"); } @@ -495,85 +494,25 @@ fn rolegroup_config_map( core_site_xml_builder .add( "dfs.datanode.kerberos.principal", - format!("dn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + "dn/_HOST@${env.KERBEROS_REALM}", ) - .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab") - // Besides having goddamn `dfs.namenode.kerberos.principal.pattern` setting, - // the `wait-for-namenodes` init container will fail with the following message when - // `dfs.namenode.kerberos.principal` is not set: - // - // Couldn't set up IO streams: java.lang.IllegalArgumentException: Failed to specify server's Kerberos principal name - // - // So we have to tell it the principal of the namenode, no pattern will be used here :() - // As we can have multiple rolegroups of namenodes we can *not* use the object_name (such as simple-hdfs-datanode-default) - // as this would only work for a specific - single - rolegroup (default in this case) - // - // *This* is actually the 
reason we did go with simply `nn/{hdfs_name}.{hdfs_namespace}` - // and not e.g. `nn/{object_name}.{hdfs_namespace}` - .add( - "dfs.namenode.kerberos.principal", - format!("nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), - ); + .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); } HdfsRole::JournalNode => { core_site_xml_builder .add( "dfs.journalnode.kerberos.principal", - format!("jn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + "jn/_HOST@${env.KERBEROS_REALM}", ) .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") .add( "dfs.journalnode.kerberos.internal.spnego.principal", - format!("HTTP/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + "HTTP/_HOST@${env.KERBEROS_REALM}", ); } } - - // match role { - // HdfsRole::NameNode => { - // core_site_xml_builder - // // .add("dfs.namenode.kerberos.principal", format!("nn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str()) - // // .add("dfs.namenode.kerberos.principal.pattern", format!("nn/{object_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str()) - // .add("dfs.namenode.kerberos.principal", "nn/_HOST@${env.KERBEROS_REALM}") - // .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab") - // .add("dfs.journalnode.kerberos.principal.pattern", format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str()); - // } - // HdfsRole::DataNode => { - // core_site_xml_builder - // .add( - // "dfs.datanode.kerberos.principal", - // "dn/_HOST@${env.KERBEROS_REALM}", - // ) - // .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab") - // .add( - // "dfs.namenode.kerberos.principal", - // "nn/_HOST@${env.KERBEROS_REALM}", - // ) - // .add( - // "dfs.namenode.kerberos.principal.pattern", - // format!("nn/*@${{env.KERBEROS_REALM}}").as_str(), - // ); - // } - 
// HdfsRole::JournalNode => { - // core_site_xml_builder - // .add( - // "dfs.journalnode.kerberos.principal", - // "jn/_HOST@${env.KERBEROS_REALM}", - // ) - // .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") - // .add( - // "dfs.journalnode.kerberos.internal.spnego.principal", - // "HTTP/_HOST@{env.KERBEROS_REALM}", - // ); - // } - // }; } - // export KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /stackable/kerberos/krb5.conf) - // kinit dn/hdfs-test-datanode-default.soenke.svc.cluster.local@${KERBEROS_REALM} -kt /stackable/kerberos/keytab - // kinit dn/hdfs-test-datanode-default-0.hdfs-test-datanode-default.soenke.svc.cluster.local@${KERBEROS_REALM} -kt /stackable/kerberos/keytab - // /stackable/hadoop/bin/hdfs haadmin -getServiceState hdfs-test-namenode-default-0 - // the extend with config must come last in order to have overrides working!!! core_site_xml = core_site_xml_builder.extend(config).build_as_xml(); } From 129dc95637efe326bea59bf55d455bc1e228bd9c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 12:59:07 +0100 Subject: [PATCH 023/101] fix warnings --- rust/operator/src/hdfs_controller.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 57dd1f72..6c1c1637 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -379,12 +379,6 @@ fn rolegroup_config_map( .with_context(|| ObjectHasNoNameSnafu { obj_ref: ObjectRef::from_obj(hdfs), })?; - let hdfs_namespace = hdfs - .namespace() - .with_context(|| ObjectHasNoNamespaceSnafu { - obj_ref: ObjectRef::from_obj(hdfs), - })?; - let object_name = rolegroup_ref.object_name(); let mut hdfs_site_xml = String::new(); let mut core_site_xml = String::new(); From 8cf7a3d49ed0cdc5f31695d319af2658a215304c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 13:26:07 +0100 Subject: [PATCH 024/101] Only specify nn principal in 
wait-for-namenodes when kerberos is enabled --- rust/operator/src/container.rs | 62 ++++++++++++++++++---------- rust/operator/src/hdfs_controller.rs | 4 +- 2 files changed, 44 insertions(+), 22 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 2c8f4d93..135c2ee7 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -19,7 +19,7 @@ use crate::{ }; use indoc::formatdoc; -use snafu::{ResultExt, Snafu}; +use snafu::{OptionExt, ResultExt, Snafu}; use stackable_hdfs_crd::{ constants::{ DATANODE_ROOT_DATA_DIR_PREFIX, DEFAULT_DATA_NODE_METRICS_PORT, @@ -57,6 +57,8 @@ use strum::{Display, EnumDiscriminants, IntoStaticStr}; #[derive(Snafu, Debug, EnumDiscriminants)] #[strum_discriminants(derive(IntoStaticStr))] pub enum Error { + #[snafu(display("object has no namespace"))] + ObjectHasNoNamespace, #[snafu(display("Invalid java heap config for [{role}]"))] InvalidJavaHeapConfig { source: stackable_operator::error::Error, @@ -339,7 +341,7 @@ impl ContainerConfig { cb.image_from_product_image(resolved_product_image) .command(self.command()) - .args(self.args(hdfs, merged_config, &[], object_name)) + .args(self.args(hdfs, merged_config, &[], object_name)?) .add_env_vars(self.env( hdfs, zookeeper_config_map_name, @@ -379,7 +381,7 @@ impl ContainerConfig { .with_context(|_| InvalidContainerNameSnafu { name: self.name() })? .image_from_product_image(resolved_product_image) .command(self.command()) - .args(self.args(hdfs, merged_config, namenode_podrefs, object_name)) + .args(self.args(hdfs, merged_config, namenode_podrefs, object_name)?) 
.add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None)) .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .build()) @@ -432,7 +434,7 @@ impl ContainerConfig { merged_config: &(dyn MergedConfig + Send + 'static), namenode_podrefs: &[HdfsPodRef], object_name: &str, - ) -> Vec { + ) -> Result, Error> { let mut args = vec![ self.create_config_directory_cmd(), self.copy_config_xml_cmd(), @@ -484,7 +486,7 @@ impl ContainerConfig { // If there is no active namenode, the current pod is not formatted we format as // active namenode. Otherwise as standby node. if hdfs.has_security_enabled() { - args.push(Self::get_kerberos_ticket(hdfs, role, object_name)); + args.push(Self::get_kerberos_ticket(hdfs, role, object_name)?); } args.push(formatdoc!( r###" @@ -553,9 +555,27 @@ impl ContainerConfig { )); } if hdfs.has_security_enabled() { - args.push(Self::get_kerberos_ticket(hdfs, role, object_name)); + args.push(Self::get_kerberos_ticket(hdfs, role, object_name)?); } - args.push(formatdoc!(r###" + let get_service_state_command = if hdfs.has_security_enabled() { + // We need to calculate the exact principal and pass it in her (for security reasons) + // Otherwise the command will fail with `Couldn't set up IO streams: java.lang.IllegalArgumentException: Failed to specify server's Kerberos principal name` + formatdoc!( + r###" + PRINCIPAL=$(echo "nn/${{id}}.$(echo $id | grep -o '.*[^-0-9]').{namespace}.svc.cluster.local@${{KERBEROS_REALM}}") + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $id 2>/dev/null | tail -n1)"###, + hadoop_home = Self::HADOOP_HOME, + namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, + ) + } else { + formatdoc!( + r###" + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id 2>/dev/null | tail -n1)"###, + hadoop_home = Self::HADOOP_HOME + ) + }; + args.push(formatdoc!( + r###" echo "Waiting for namenodes to get ready:" n=0 while [ 
${{n}} -lt 12 ]; @@ -564,10 +584,7 @@ impl ContainerConfig { for id in {pod_names} do echo -n "Checking pod $id... " - # We need to calculate the exact principal and pass it in her (for security reasons) - # Otherwise the command will fail with Couldn't set up IO streams: java.lang.IllegalArgumentException: Failed to specify server's Kerberos principal name - PRINCIPAL=$(echo "nn/${{id}}.$(echo $id | grep -o '.*[^-0-9]').test.svc.cluster.local@${{KERBEROS_REALM}}") - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $id 2>/dev/null | tail -n1) + {get_service_state_command} if [ "$SERVICE_STATE" = "active" ] || [ "$SERVICE_STATE" = "standby" ] then echo "$SERVICE_STATE" @@ -585,16 +602,15 @@ impl ContainerConfig { n=$(( n + 1)) sleep 5 done"###, - hadoop_home = Self::HADOOP_HOME, - pod_names = namenode_podrefs - .iter() - .map(|pod_ref| pod_ref.pod_name.as_ref()) - .collect::>() - .join(" ") + pod_names = namenode_podrefs + .iter() + .map(|pod_ref| pod_ref.pod_name.as_ref()) + .collect::>() + .join(" ") )); } } - vec![args.join(" && ")] + Ok(vec![args.join(" && ")]) } /// Wait until the init container has created global trust and keystore shared between all containers @@ -607,13 +623,17 @@ impl ContainerConfig { /// `kinit` a ticket using the principal created for the specified hdfs role /// Needs the KERBEROS_REALM env var to be present, as `Self::export_kerberos_real_env_var_command` does /// Needs the POD_NAME env var to be present, which will be provided by the PodSpec - fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole, object_name: &str) -> String { + fn get_kerberos_ticket( + hdfs: &HdfsCluster, + role: &HdfsRole, + object_name: &str, + ) -> Result { let principal = format!( "{service_name}/${{POD_NAME}}.{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", service_name = role.kerberos_service_name(), - namespace = hdfs.namespace().expect("HdfsCluster must be set"), + namespace = 
hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, ); - format!("echo \"Getting ticket for {principal}\" from /stackable/kerberos/keytab && kinit \"{principal}\" -kt /stackable/kerberos/keytab") + Ok(format!("echo \"Getting ticket for {principal}\" from /stackable/kerberos/keytab && kinit \"{principal}\" -kt /stackable/kerberos/keytab")) } // Command to export `KERBEROS_REALM` env var to default real from krb5.conf, e.g. `CLUSTER.LOCAL` diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 6c1c1637..3d71eb8d 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -455,7 +455,9 @@ fn rolegroup_config_map( core_site_xml_builder .add("hadoop.security.authentication", "kerberos") .add("hadoop.security.authorization", "true") - .add("hadoop.user.group.static.mapping.overrides", "dr.who=;nn=;") + // Otherwise we fail with `java.io.IOException: No groups found for user nn` + // Default value is `dr.who=`, so we include that here + .add("hadoop.user.group.static.mapping.overrides", "dr.who=;nn=;nm=;jn=;") .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") .add( "dfs.web.authentication.kerberos.principal", From 119f29a4e8da453b0fb7634515a7e04b8be06566 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 13:56:34 +0100 Subject: [PATCH 025/101] Use operator-rs main branch --- Cargo.lock | 294 +++++++++++++++++++++++++---------------------------- Cargo.toml | 2 +- 2 files changed, 141 insertions(+), 155 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70013454..79013d36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -40,9 +40,9 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = 
"b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ "proc-macro2", "quote", @@ -157,9 +157,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "num-integer", @@ -170,9 +170,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.1.6" +version = "4.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0b0588d44d4d63a87dbd75c136c166bbfd9a86a31cb89e09906521c7d3f5e3" +checksum = "9a9d6ada83c1edcce028902ea27dd929069c70df4c7600b131b4d9a1ad2879cc" dependencies = [ "bitflags", "clap_derive", @@ -185,9 +185,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.1.0" +version = "4.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8" +checksum = "fddf67631444a3a3e3e5ac51c36a5e01335302de677bd78759eaa90ab1f46644" dependencies = [ "heck", "proc-macro-error", @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade" +checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646" dependencies = [ "os_str_bytes", ] @@ -243,9 +243,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = 
"cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if", "crossbeam-utils", @@ -253,18 +253,18 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] [[package]] name = "cxx" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", @@ -289,15 +289,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", @@ -306,9 +306,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.3" +version = "0.14.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ "darling_core", "darling_macro", @@ -316,9 +316,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", @@ -330,9 +330,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", @@ -392,9 +392,9 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "dyn-clone" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60" +checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" [[package]] name = "either" @@ -529,9 +529,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -544,9 +544,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -554,15 +554,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ "futures-core", "futures-task", @@ -571,15 +571,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ "proc-macro2", "quote", @@ -588,15 +588,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -606,9 +606,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures-channel", "futures-core", @@ -675,9 +675,9 @@ checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -715,9 +715,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -818,9 +818,9 @@ dependencies = [ [[package]] name = "indoc" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2b9d82064e8a0226fddb3547f37f28eaa46d0fc210e275d835f08cf3b76a7" +checksum = "9f2cb48b81b1dc9f39676bf99f5499babfec7cd8fe14307f7b3d747208fb5690" [[package]] name = "instant" @@ -839,31 +839,32 @@ checksum = 
"8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "76e86b86ae312accbf05ade23ce76b625e0e47a255712b7414037385a1c05380" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] name = "is-terminal" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef" +checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", "rustix", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "java-properties" @@ -878,9 +879,9 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -1043,9 +1044,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libgit2-sys" @@ -1144,7 +1145,7 @@ dependencies = [ 
"libc", "log", "wasi", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] @@ -1194,9 +1195,9 @@ checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "fd2523381e46256e40930512c7fd25562b9eae4812cb52078f155e87217c9d1e" dependencies = [ "bitflags", "cfg-if", @@ -1220,9 +1221,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "176be2629957c157240f68f61f2d0053ad3a4ecfdd9ebf1e6521d18d9635cf67" dependencies = [ "autocfg", "cc", @@ -1355,7 +1356,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] @@ -1443,9 +1444,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -1468,9 +1469,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -1588,35 +1589,35 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" +version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" 
+checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "schemars" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a5fb6c61f29e723026dc8e923d94c694313212abbecbbe5f55a7748eec5b307" +checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f" dependencies = [ "dyn-clone", "schemars_derive", @@ -1626,9 +1627,9 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f188d036977451159430f3b8dc82ec76364a42b7e289c2b18a9a18f4470058e9" +checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c" dependencies = [ "proc-macro2", "quote", @@ -1644,9 +1645,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "secrecy" @@ -1660,18 +1661,18 @@ dependencies = [ [[package]] name = "semver" 
-version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] @@ -1688,9 +1689,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ "proc-macro2", "quote", @@ -1710,9 +1711,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "indexmap", "itoa", @@ -1734,9 +1735,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.17" +version = "0.9.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb06d4b6cdaef0e0c51fa881acb721bed3c924cfaa71d9c94a3b771dfdf6567" +checksum = "f82e6c8c047aa50a7328632d067bcae6ef38772a79e28daf32f735e0e4f3dd10" dependencies = [ "indexmap", "itoa", @@ -1765,9 +1766,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = 
"6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -1802,9 +1803,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -1859,7 +1860,7 @@ dependencies = [ [[package]] name = "stackable-operator" version = "0.37.0" -source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat/kerberos-secret-class-helpers#c04bca7baab57722a2b3bff25c2ad47bd5536867" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=main#0045c90a4ac56c1570e8cad27fb02919adf773a1" dependencies = [ "chrono", "clap", @@ -1879,7 +1880,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "serde_yaml 0.9.17", + "serde_yaml 0.9.19", "snafu", "stackable-operator-derive", "strum", @@ -1893,7 +1894,7 @@ dependencies = [ [[package]] name = "stackable-operator-derive" version = "0.37.0" -source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat/kerberos-secret-class-helpers#c04bca7baab57722a2b3bff25c2ad47bd5536867" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=main#0045c90a4ac56c1570e8cad27fb02919adf773a1" dependencies = [ "darling", "proc-macro2", @@ -1931,9 +1932,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -1951,18 +1952,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -2018,9 +2019,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg", "libc", @@ -2030,7 +2031,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -2068,9 +2069,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite", @@ -2256,15 +2257,15 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -2289,9 +2290,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "unsafe-libyaml" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2" +checksum = "ad2024452afd3874bf539695e04af6732ba06517424dbf958fdb16a01f3bef6c" [[package]] name = "url" @@ -2423,21 +2424,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -2449,9 +2435,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -2464,45 +2450,45 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" 
+checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "xml-rs" diff --git a/Cargo.toml b/Cargo.toml index 9a1b1108..6bc1974b 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -6,4 +6,4 @@ members = [ [patch."https://github.com/stackabletech/operator-rs.git"] # stackable-operator = { path = "/home/sbernauer/stackabletech/operator-rs" } -stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "feat/kerberos-secret-class-helpers" } +stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" } From e029dfc95cfab99adce4e2670c1c4e35edb912f3 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 15:23:23 +0100 Subject: [PATCH 026/101] Use new image --- docs/modules/hdfs/examples/getting_started/hdfs.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index e3915cd4..a456ff6b 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -6,8 +6,7 @@ metadata: spec: image: productVersion: 3.3.4 - stackableVersion: 23.4.0-rc2 - repo: docker.stackable.tech/natkr/krb5 # Needed because of e.g. openssl is missing + stackableVersion: 23.4.0-rc3 clusterConfig: zookeeperConfigMapName: simple-hdfs-znode dfsReplication: 1 From 7ca3b0393629ed4d1351595cec6a33d20207d211 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6nke=20Liebau?= Date: Fri, 17 Mar 2023 16:14:41 +0100 Subject: [PATCH 027/101] Changed namespace of keytab secret to default so the example is directly deployable and works. 
--- docs/modules/hdfs/examples/getting_started/hdfs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index a456ff6b..17e3c670 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -71,7 +71,7 @@ spec: kdc: krb5-kdc.test.svc.cluster.local adminServer: krb5-kdc.test.svc.cluster.local adminKeytabSecret: - namespace: test + namespace: default name: secret-operator-keytab adminPrincipal: stackable-secret-operator --- From fb1a5fbe25a3ff2bf17289c8fcfeae4c886293e9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 17:07:53 +0100 Subject: [PATCH 028/101] Specify concrete principal when connecting to NN from init container --- rust/operator/src/container.rs | 52 +++++++++++++++------------- rust/operator/src/hdfs_controller.rs | 27 +++++++++++++-- 2 files changed, 52 insertions(+), 27 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 135c2ee7..de1354f0 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -492,14 +492,13 @@ impl ContainerConfig { r###" cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Start formatting namenode $POD_NAME. Checking for active namenodes:" - for id in {pod_names} + for namenode_id in {pod_names} do - echo -n "Checking pod $id... " - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id | tail -n1) - echo "FOOBAR $SERVICE_STATE BARFOO" + echo -n "Checking pod $namenode_id... " + {get_service_state_command} if [ "$SERVICE_STATE" == "active" ] then - ACTIVE_NAMENODE=$id + ACTIVE_NAMENODE=$namenode_id echo "active" break fi @@ -521,6 +520,7 @@ impl ContainerConfig { cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Pod $POD_NAME already formatted. Skipping..." 
fi"###, + get_service_state_command = Self::get_service_state_command(hdfs)?, hadoop_home = Self::HADOOP_HOME, pod_names = namenode_podrefs .iter() @@ -557,23 +557,6 @@ impl ContainerConfig { if hdfs.has_security_enabled() { args.push(Self::get_kerberos_ticket(hdfs, role, object_name)?); } - let get_service_state_command = if hdfs.has_security_enabled() { - // We need to calculate the exact principal and pass it in her (for security reasons) - // Otherwise the command will fail with `Couldn't set up IO streams: java.lang.IllegalArgumentException: Failed to specify server's Kerberos principal name` - formatdoc!( - r###" - PRINCIPAL=$(echo "nn/${{id}}.$(echo $id | grep -o '.*[^-0-9]').{namespace}.svc.cluster.local@${{KERBEROS_REALM}}") - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $id 2>/dev/null | tail -n1)"###, - hadoop_home = Self::HADOOP_HOME, - namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, - ) - } else { - formatdoc!( - r###" - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id 2>/dev/null | tail -n1)"###, - hadoop_home = Self::HADOOP_HOME - ) - }; args.push(formatdoc!( r###" echo "Waiting for namenodes to get ready:" @@ -581,10 +564,10 @@ impl ContainerConfig { while [ ${{n}} -lt 12 ]; do ALL_NODES_READY=true - for id in {pod_names} + for namenode_id in {pod_names} do - echo -n "Checking pod $id... " - {get_service_state_command} + echo -n "Checking pod $namenode_id... 
" + {get_service_state_command} if [ "$SERVICE_STATE" = "active" ] || [ "$SERVICE_STATE" = "standby" ] then echo "$SERVICE_STATE" @@ -602,6 +585,7 @@ impl ContainerConfig { n=$(( n + 1)) sleep 5 done"###, + get_service_state_command = Self::get_service_state_command(hdfs)?, pod_names = namenode_podrefs .iter() .map(|pod_ref| pod_ref.pod_name.as_ref()) @@ -642,6 +626,24 @@ impl ContainerConfig { .to_string() } + fn get_service_state_command(hdfs: &HdfsCluster) -> Result { + Ok(if hdfs.has_security_enabled() { + formatdoc!( + r###" + PRINCIPAL=$(echo "nn/${{namenode_id}}.$(echo $namenode_id | grep -o '.*[^-0-9]').{namespace}.svc.cluster.local@${{KERBEROS_REALM}}") + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $id | tail -n1)"###, + hadoop_home = Self::HADOOP_HOME, + namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, + ) + } else { + formatdoc!( + r###" + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id | tail -n1)"###, + hadoop_home = Self::HADOOP_HOME + ) + }) + } + /// Returns the container env variables. 
fn env( &self, diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 3d71eb8d..35a4654d 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -379,6 +379,11 @@ fn rolegroup_config_map( .with_context(|| ObjectHasNoNameSnafu { obj_ref: ObjectRef::from_obj(hdfs), })?; + let hdfs_namespace = hdfs + .namespace() + .with_context(|| ObjectHasNoNamespaceSnafu { + obj_ref: ObjectRef::from_obj(hdfs), + })?; let mut hdfs_site_xml = String::new(); let mut core_site_xml = String::new(); @@ -470,11 +475,11 @@ fn rolegroup_config_map( .add( "dfs.journalnode.kerberos.principal.pattern", // jn/hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local@CLUSTER.LOCAL - format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ) .add( "dfs.namenode.kerberos.principal.pattern", - format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), ); match role { @@ -512,6 +517,24 @@ fn rolegroup_config_map( // the extend with config must come last in order to have overrides working!!! 
core_site_xml = core_site_xml_builder.extend(config).build_as_xml(); } + // PropertyNameKind::File(file_name) if file_name == HADOOP_POLICY_XML => { + // let mut config_opts = BTreeMap::new(); + // // When a NN connects to a JN, due to some reverse-dns roulette we have a (pretty low) chance of running into the follow error + // // (found in the logs of hdfs-journalnode-default-0 container journalnode): + // // + // // WARN authorize.ServiceAuthorizationManager (ServiceAuthorizationManager.java:authorize(122)) - Authorization failed for jn/hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-expert-killdeer.svc.cluster.local@CLUSTER.LOCAL (auth:KERBEROS) for protocol=interface org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocol: this service is only accessible by jn/10-244-0-178.hdfs-journalnode-default-2.kuttl-test-expert-killdeer.svc.cluster.local@CLUSTER.LOCAL + // // Note: 10.244.0.178 belongs to hdfs-journalnode-default-2 in this case + // // So everything is right, but the JN does seem to make a reverse lookup and gets multiple dns names and get's misguided here + // // + // // An similar error that ocurred as well is + // // + // // User nn/hdfs-test-namenode-default-0.hdfs-test-namenode-default.test.svc.cluster.local@CLUSTER.LOCAL (auth:KERBEROS) is not authorized for protocol interface org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol: this service is only accessible by nn/10-244-0-65.hdfs-test-namenode-default-0.test.svc.cluster.local@CLUSTER.LOCAL + // config_opts + // .extend([("security.qjournal.service.protocol.acl".to_string(), Some())]); + // config_opts.extend(config.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + // ssl_server_xml = + // stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); + // } PropertyNameKind::File(file_name) if file_name == SSL_SERVER_XML => { let mut config_opts = BTreeMap::new(); config_opts.extend([ From acd49d350bd3a8eb834393fd0b8f5bbe26ba86ca Mon Sep 17 00:00:00 
2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 17:11:06 +0100 Subject: [PATCH 029/101] Add first basic test --- .../kuttl/kerberos/00-assert.yaml.j2 | 10 ++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 ++ tests/templates/kuttl/kerberos/01-assert.yaml | 12 ++ .../kerberos/01-install-krb5-kdc.yaml.j2 | 145 ++++++++++++++++++ .../02-create-kerberos-secretclass.yaml.j2 | 22 +++ tests/templates/kuttl/kerberos/10-assert.yaml | 12 ++ .../kuttl/kerberos/10-install-zk.yaml.j2 | 27 ++++ .../kuttl/kerberos/11-assert.yaml.j2 | 28 ++++ .../kuttl/kerberos/11-install-hdfs.yaml.j2 | 64 ++++++++ tests/templates/kuttl/smoke/01-assert.yaml | 2 - .../kuttl/smoke/02-install-hdfs.yaml.j2 | 2 +- tests/test-definition.yaml | 26 +++- 12 files changed, 352 insertions(+), 7 deletions(-) create mode 100644 tests/templates/kuttl/kerberos/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/kerberos/00-install-vector-aggregator-discovery-configmap.yaml.j2 create mode 100644 tests/templates/kuttl/kerberos/01-assert.yaml create mode 100644 tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 create mode 100644 tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 create mode 100644 tests/templates/kuttl/kerberos/10-assert.yaml create mode 100644 tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 create mode 100644 tests/templates/kuttl/kerberos/11-assert.yaml.j2 create mode 100644 tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 diff --git a/tests/templates/kuttl/kerberos/00-assert.yaml.j2 b/tests/templates/kuttl/kerberos/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/kerberos/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git 
a/tests/templates/kuttl/kerberos/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/kerberos/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/kerberos/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/kerberos/01-assert.yaml b/tests/templates/kuttl/kerberos/01-assert.yaml new file mode 100644 index 00000000..fed2027c --- /dev/null +++ b/tests/templates/kuttl/kerberos/01-assert.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: krb5-kdc +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 new file mode 100644 index 00000000..78288f74 --- /dev/null +++ b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 @@ -0,0 +1,145 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: krb5-kdc +spec: + selector: + matchLabels: + app: krb5-kdc + template: + metadata: + labels: + app: krb5-kdc + spec: + initContainers: + - name: init + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + args: + - sh + - -euo + - pipefail + - -c + - | + test -e /var/kerberos/krb5kdc/principal || kdb5_util create -s -P asdf + kadmin.local get_principal -terse root/admin || kadmin.local add_principal -pw asdf root/admin + # stackable-secret-operator principal must match the keytab specified in the SecretClass + kadmin.local get_principal -terse stackable-secret-operator || kadmin.local add_principal -e aes256-cts-hmac-sha384-192:normal -pw asdf 
stackable-secret-operator + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + - mountPath: /var/kerberos/krb5kdc + name: data + containers: + - name: kdc + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + args: + - krb5kdc + - -n + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + - mountPath: /var/kerberos/krb5kdc + name: data + - name: kadmind + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + args: + - kadmind + - -nofork + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + - mountPath: /var/kerberos/krb5kdc + name: data + - name: client + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + tty: true + stdin: true + env: + - name: KRB5_CONFIG + value: /stackable/config/krb5.conf + volumeMounts: + - mountPath: /stackable/config + name: config + volumes: + - name: config + configMap: + name: krb5-kdc + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: krb5-kdc +spec: + selector: + app: krb5-kdc + ports: + - name: kadmin + port: 749 + - name: kdc + port: 88 + - name: kdc-udp + port: 88 + protocol: UDP +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: krb5-kdc +data: + krb5.conf: | + [logging] + default = STDERR + kdc = STDERR + admin_server = STDERR + # default = FILE:/var/log/krb5libs.log + # kdc = FILE:/var/log/krb5kdc.log + # admin_server = FILE:/vaggr/log/kadmind.log + [libdefaults] + dns_lookup_realm = false + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + default_realm = {{ test_scenario['values']['kerberos-realm'] }} + spake_preauth_groups = edwards25519 + [realms] + {{ 
test_scenario['values']['kerberos-realm'] }} = { + acl_file = /stackable/config/kadm5.acl + disable_encrypted_timestamp = false + } + [domain_realm] + .cluster.local = {{ test_scenario['values']['kerberos-realm'] }} + cluster.local = {{ test_scenario['values']['kerberos-realm'] }} + kadm5.acl: | + root/admin *e + stackable-secret-operator *e +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-operator-keytab +data: +{% if test_scenario['values']['kerberos-realm'] == 'CLUSTER.LOCAL' %} + keytab: BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB +{% else %} +# echo "BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB" | base64 -d | sed 's/CLUSTER.LOCAL/PROD.MYCORP/' | base64 -w 0 + keytab: BQIAAABdAAEADVBST0QuTVlDT1JQABlzdGFja2FibGUtc2VjcmV0LW9wZXJhdG9yAAAAAWQGFiIBABQAIJvDAmfAf/1xdbR/dhnIg+v7rFlDQJgU2Z2UMS7mtk5IAAAAAQ== +{% endif %} diff --git a/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 new file mode 100644 index 00000000..681939ca --- /dev/null +++ b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + kubectl apply -n $NAMESPACE -f - < 0 }} + resources: + cpu: + min: '300m' + max: '600m' + memory: + limit: '512Mi' + roleGroups: + default: + replicas: 1 diff --git a/tests/templates/kuttl/kerberos/11-assert.yaml.j2 b/tests/templates/kuttl/kerberos/11-assert.yaml.j2 new file mode 100644 index 00000000..47260144 --- /dev/null +++ b/tests/templates/kuttl/kerberos/11-assert.yaml.j2 @@ -0,0 +1,28 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: hdfs-namenode-default +status: + readyReplicas: 2 + replicas: 2 
+--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: hdfs-journalnode-default +status: + readyReplicas: 3 + replicas: 3 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: hdfs-datanode-default +status: + readyReplicas: 2 + replicas: 2 diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 new file mode 100644 index 00000000..daccaaf2 --- /dev/null +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -0,0 +1,64 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + kubectl apply -n $NAMESPACE -f - < 0 }} + resources: + cpu: + max: '1' + min: '300m' + memory: + limit: '512Mi' + roleGroups: + default: + replicas: 2 + dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + resources: + cpu: + max: '1' + min: '300m' + memory: + limit: '512Mi' + roleGroups: + default: + replicas: 2 + journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + resources: + cpu: + max: '1' + min: '300m' + memory: + limit: '512Mi' + roleGroups: + default: + replicas: 3 + EOF diff --git a/tests/templates/kuttl/smoke/01-assert.yaml b/tests/templates/kuttl/smoke/01-assert.yaml index 13108aeb..2f8f7173 100644 --- a/tests/templates/kuttl/smoke/01-assert.yaml +++ b/tests/templates/kuttl/smoke/01-assert.yaml @@ -1,8 +1,6 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -metadata: - name: install-zk timeout: 300 --- apiVersion: apps/v1 diff --git a/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 b/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 index 4a494f15..5bd71214 100644 --- a/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 @@ -14,8 +14,8 @@ spec: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ 
test_scenario['values']['hadoop'].split('-stackable')[1] }}" clusterConfig: - dfsReplication: 1 zookeeperConfigMapName: hdfs-zk + dfsReplication: 1 {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index c1ae83e5..aba880c0 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -2,12 +2,12 @@ dimensions: - name: hadoop values: - - 3.2.2-stackable23.4.0-rc2 - - 3.3.3-stackable23.4.0-rc2 - - 3.3.4-stackable23.4.0-rc2 + - 3.2.2-stackable23.4.0-rc3 + - 3.3.3-stackable23.4.0-rc3 + - 3.3.4-stackable23.4.0-rc3 - name: hadoop-latest values: - - 3.3.4-stackable23.4.0-rc2 + - 3.3.4-stackable23.4.0-rc3 - name: zookeeper values: - 3.8.0-stackable23.4.0-rc2 @@ -22,6 +22,18 @@ dimensions: values: - "default" - "2hdd-1ssd" + - name: kerberos-realm + values: + - "CLUSTER.LOCAL" + # - "PROD.MYCORP" # => Needs some work in the kerberos keytab for secret-operator + - name: test-number + values: + - "1" + - "2" + - "3" + - "4" + - "5" + - "6" tests: - name: smoke dimensions: @@ -29,6 +41,12 @@ tests: - zookeeper - number-of-datanodes - datanode-pvcs + - name: kerberos + dimensions: + - hadoop + - zookeeper-latest + - kerberos-realm + - test-number - name: orphaned-resources dimensions: - hadoop-latest From bfa6699eeb3c97348dea9a31ba4e90ab44a73000 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 17:17:34 +0100 Subject: [PATCH 030/101] fix typo --- rust/operator/src/container.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index de1354f0..0e5d7e3f 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -631,14 +631,14 @@ impl ContainerConfig { formatdoc!( r###" PRINCIPAL=$(echo "nn/${{namenode_id}}.$(echo $namenode_id | grep -o '.*[^-0-9]').{namespace}.svc.cluster.local@${{KERBEROS_REALM}}") - 
SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $id | tail -n1)"###, + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $namenode_id | tail -n1)"###, hadoop_home = Self::HADOOP_HOME, namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, ) } else { formatdoc!( r###" - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $id | tail -n1)"###, + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $namenode_id | tail -n1)"###, hadoop_home = Self::HADOOP_HOME ) }) From c58b877b9e328c7ef3dfb52c454a4e6477f97285 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Mar 2023 17:24:50 +0100 Subject: [PATCH 031/101] Increase test number --- tests/test-definition.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index aba880c0..d9f7c657 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -34,6 +34,10 @@ dimensions: - "4" - "5" - "6" + - "7" + - "8" + - "9" + - "10" tests: - name: smoke dimensions: From d3c7a15e92b86d3741e8229b89bec4312a7558f8 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Mar 2023 09:57:58 +0100 Subject: [PATCH 032/101] Add hadoop.security.authentication=kerberos to discovery CM --- Cargo.lock | 8 ++++---- rust/crd/Cargo.toml | 2 +- rust/crd/src/constants.rs | 1 + rust/crd/src/lib.rs | 2 +- rust/operator-binary/Cargo.toml | 4 ++-- rust/operator/Cargo.toml | 2 +- rust/operator/src/config.rs | 14 ++++++++++++-- rust/operator/src/container.rs | 12 ++++++------ rust/operator/src/discovery.rs | 5 +++-- rust/operator/src/hdfs_controller.rs | 4 ++-- 10 files changed, 33 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 79013d36..f3cdce64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1859,8 +1859,8 @@ dependencies = [ [[package]] name = "stackable-operator" -version = "0.37.0" 
-source = "git+https://github.com/stackabletech//operator-rs.git?branch=main#0045c90a4ac56c1570e8cad27fb02919adf773a1" +version = "0.38.0" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=main#bc443fb82390de45ecbf36b927fe7e92988ee7b2" dependencies = [ "chrono", "clap", @@ -1893,8 +1893,8 @@ dependencies = [ [[package]] name = "stackable-operator-derive" -version = "0.37.0" -source = "git+https://github.com/stackabletech//operator-rs.git?branch=main#0045c90a4ac56c1570e8cad27fb02919adf773a1" +version = "0.38.0" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=main#bc443fb82390de45ecbf36b927fe7e92988ee7b2" dependencies = [ "darling", "proc-macro2", diff --git a/rust/crd/Cargo.toml b/rust/crd/Cargo.toml index 18b6950d..c092f286 100644 --- a/rust/crd/Cargo.toml +++ b/rust/crd/Cargo.toml @@ -9,7 +9,7 @@ version = "0.0.0-dev" publish = false [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.37.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.38.0" } semver = "1.0" serde = { version = "1.0", features = ["derive"] } diff --git a/rust/crd/src/constants.rs b/rust/crd/src/constants.rs index 9d2422e0..33625e87 100644 --- a/rust/crd/src/constants.rs +++ b/rust/crd/src/constants.rs @@ -55,6 +55,7 @@ pub const DFS_HA_NAMENODES: &str = "dfs.ha.namenodes"; // core-site.xml pub const FS_DEFAULT_FS: &str = "fs.defaultFS"; pub const HA_ZOOKEEPER_QUORUM: &str = "ha.zookeeper.quorum"; +pub const HADOOP_SECURITY_AUTHENTICATION: &str = "hadoop.security.authentication"; pub const STACKABLE_ROOT_DATA_DIR: &str = "/stackable/data"; pub const NAMENODE_ROOT_DATA_DIR: &str = "/stackable/data/namenode"; diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 802bc467..44f07f1b 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -585,7 +585,7 @@ impl HdfsCluster { Ok(result) } - pub fn has_security_enabled(&self) -> bool { + pub fn 
has_kerberos_enabled(&self) -> bool { self.kerberos_secret_class().is_some() } diff --git a/rust/operator-binary/Cargo.toml b/rust/operator-binary/Cargo.toml index b14037dd..f9d8874d 100644 --- a/rust/operator-binary/Cargo.toml +++ b/rust/operator-binary/Cargo.toml @@ -10,7 +10,7 @@ build = "build.rs" publish = false [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.37.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.38.0" } stackable-hdfs-crd = { path = "../crd" } stackable-hdfs-operator = { path = "../operator" } anyhow = "1.0" @@ -20,7 +20,7 @@ tracing = "0.1" [build-dependencies] built = { version = "0.5", features = ["chrono", "git2"] } -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.37.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.38.0" } stackable-hdfs-crd = { path = "../crd" } [[bin]] diff --git a/rust/operator/Cargo.toml b/rust/operator/Cargo.toml index 56c56df0..ccde0e9a 100644 --- a/rust/operator/Cargo.toml +++ b/rust/operator/Cargo.toml @@ -9,7 +9,7 @@ version = "0.0.0-dev" publish = false [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.37.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.38.0" } stackable-hdfs-crd = { path = "../crd" } futures = "0.3" diff --git a/rust/operator/src/config.rs b/rust/operator/src/config.rs index eeee77f2..158e7c3d 100644 --- a/rust/operator/src/config.rs +++ b/rust/operator/src/config.rs @@ -3,8 +3,8 @@ use stackable_hdfs_crd::constants::{ DEFAULT_NAME_NODE_RPC_PORT, DFS_DATANODE_DATA_DIR, DFS_HA_NAMENODES, DFS_JOURNALNODE_EDITS_DIR, DFS_JOURNALNODE_RPC_ADDRESS, DFS_NAMENODE_HTTPS_ADDRESS, DFS_NAMENODE_HTTP_ADDRESS, DFS_NAMENODE_NAME_DIR, DFS_NAMENODE_RPC_ADDRESS, DFS_NAMENODE_SHARED_EDITS_DIR, - DFS_NAME_SERVICES, 
DFS_REPLICATION, FS_DEFAULT_FS, HA_ZOOKEEPER_QUORUM, - JOURNALNODE_ROOT_DATA_DIR, NAMENODE_ROOT_DATA_DIR, + DFS_NAME_SERVICES, DFS_REPLICATION, FS_DEFAULT_FS, HADOOP_SECURITY_AUTHENTICATION, + HA_ZOOKEEPER_QUORUM, JOURNALNODE_ROOT_DATA_DIR, NAMENODE_ROOT_DATA_DIR, }; use stackable_hdfs_crd::storage::{DataNodeStorageConfig, DataNodeStorageConfigInnerType}; use stackable_hdfs_crd::{HdfsCluster, HdfsPodRef}; @@ -232,6 +232,16 @@ impl CoreSiteConfigBuilder { self } + pub fn hadoop_security_authentication(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if hdfs.has_kerberos_enabled() { + self.config.insert( + HADOOP_SECURITY_AUTHENTICATION.to_string(), + "kerberos".to_string(), + ); + } + self + } + pub fn build_as_xml(&self) -> String { let transformed_config = transform_for_product_config(&self.config); stackable_operator::product_config::writer::to_hadoop_xml(transformed_config.iter()) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 0e5d7e3f..b6d9b327 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -444,7 +444,7 @@ impl ContainerConfig { if hdfs.has_https_enabled() { args.push(Self::wait_for_trust_and_keystore_command()); } - if hdfs.has_security_enabled() { + if hdfs.has_kerberos_enabled() { args.push(Self::export_kerberos_real_env_var_command()); } match self { @@ -485,7 +485,7 @@ impl ContainerConfig { // $NAMENODE_DIR/current/VERSION. Then we don't do anything. // If there is no active namenode, the current pod is not formatted we format as // active namenode. Otherwise as standby node. 
- if hdfs.has_security_enabled() { + if hdfs.has_kerberos_enabled() { args.push(Self::get_kerberos_ticket(hdfs, role, object_name)?); } args.push(formatdoc!( @@ -554,7 +554,7 @@ impl ContainerConfig { container_config, )); } - if hdfs.has_security_enabled() { + if hdfs.has_kerberos_enabled() { args.push(Self::get_kerberos_ticket(hdfs, role, object_name)?); } args.push(formatdoc!( @@ -627,7 +627,7 @@ impl ContainerConfig { } fn get_service_state_command(hdfs: &HdfsCluster) -> Result { - Ok(if hdfs.has_security_enabled() { + Ok(if hdfs.has_kerberos_enabled() { formatdoc!( r###" PRINCIPAL=$(echo "nn/${{namenode_id}}.$(echo $namenode_id | grep -o '.*[^-0-9]').{namespace}.svc.cluster.local@${{KERBEROS_REALM}}") @@ -687,7 +687,7 @@ impl ContainerConfig { }); // Not only the main containers need Kerberos - if hdfs.has_security_enabled() { + if hdfs.has_kerberos_enabled() { env.push(EnvVar { name: "KRB5_CONFIG".to_string(), value: Some("/stackable/kerberos/krb5.conf".to_string()), @@ -927,7 +927,7 @@ impl ContainerConfig { "-javaagent:/stackable/jmx/jmx_prometheus_javaagent-0.16.1.jar={metrics_port}:/stackable/jmx/{role}.yaml", )]; - if hdfs.has_security_enabled() { + if hdfs.has_kerberos_enabled() { jvm_args.push( "-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string(), ); diff --git a/rust/operator/src/discovery.rs b/rust/operator/src/discovery.rs index 870542c7..872dc240 100644 --- a/rust/operator/src/discovery.rs +++ b/rust/operator/src/discovery.rs @@ -42,7 +42,7 @@ pub fn build_discovery_configmap( ) .add_data( CORE_SITE_XML, - build_discovery_core_site_xml(hdfs.name_any()), + build_discovery_core_site_xml(hdfs, hdfs.name_any()), ) .build() } @@ -61,8 +61,9 @@ fn build_discovery_hdfs_site_xml( .build_as_xml() } -fn build_discovery_core_site_xml(logical_name: String) -> String { +fn build_discovery_core_site_xml(hdfs: &HdfsCluster, logical_name: String) -> String { CoreSiteConfigBuilder::new(logical_name) .fs_default_fs() + 
.hadoop_security_authentication(hdfs) .build_as_xml() } diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 35a4654d..e93678e8 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -421,7 +421,7 @@ fn rolegroup_config_map( .add("dfs.ha.automatic-failover.enabled", "true") .add("dfs.ha.namenode.id", "${env.POD_NAME}"); - if hdfs.has_security_enabled() { + if hdfs.has_kerberos_enabled() { hdfs_site_xml_builder .add("dfs.block.access.token.enable", "true") .add("dfs.data.transfer.protection", "authentication") @@ -440,7 +440,7 @@ fn rolegroup_config_map( core_site_xml_builder.fs_default_fs().ha_zookeeper_quorum(); - if hdfs.has_security_enabled() { + if hdfs.has_kerberos_enabled() { // .add("hadoop.security.authentication", "kerberos") // .add("hadoop.security.authorization","true") // .add("hadoop.registry.kerberos.realm","${env.KERBEROS_REALM}") From 38f406bc4a66acc5af9eba3981b799cd57110067 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Mar 2023 12:25:42 +0100 Subject: [PATCH 033/101] Add a teststep to access hdfs. 
Get it running by adding stuff to discovery CM --- rust/operator/src/config.rs | 12 ++- rust/operator/src/discovery.rs | 3 +- .../{11-assert.yaml.j2 => 11-assert.yaml} | 0 .../kuttl/kerberos/11-install-hdfs.yaml.j2 | 5 ++ .../kuttl/kerberos/20-access-hdfs.yaml | 73 +++++++++++++++++++ tests/templates/kuttl/kerberos/20-assert.yaml | 6 ++ 6 files changed, 97 insertions(+), 2 deletions(-) rename tests/templates/kuttl/kerberos/{11-assert.yaml.j2 => 11-assert.yaml} (100%) create mode 100644 tests/templates/kuttl/kerberos/20-access-hdfs.yaml create mode 100644 tests/templates/kuttl/kerberos/20-assert.yaml diff --git a/rust/operator/src/config.rs b/rust/operator/src/config.rs index 158e7c3d..f656dfd3 100644 --- a/rust/operator/src/config.rs +++ b/rust/operator/src/config.rs @@ -185,6 +185,16 @@ impl HdfsSiteConfigBuilder { self } + pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if hdfs.has_kerberos_enabled() { + self.config.insert( + "dfs.data.transfer.protection".to_string(), + "authentication".to_string(), + ); + } + self + } + pub fn build_as_xml(&self) -> String { let transformed_config = transform_for_product_config(&self.config); @@ -232,7 +242,7 @@ impl CoreSiteConfigBuilder { self } - pub fn hadoop_security_authentication(&mut self, hdfs: &HdfsCluster) -> &mut Self { + pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { if hdfs.has_kerberos_enabled() { self.config.insert( HADOOP_SECURITY_AUTHENTICATION.to_string(), diff --git a/rust/operator/src/discovery.rs b/rust/operator/src/discovery.rs index 872dc240..964e1b02 100644 --- a/rust/operator/src/discovery.rs +++ b/rust/operator/src/discovery.rs @@ -58,12 +58,13 @@ fn build_discovery_hdfs_site_xml( .dfs_namenode_rpc_address_ha(namenode_podrefs) .dfs_namenode_http_address_ha(hdfs, namenode_podrefs) .dfs_client_failover_proxy_provider() + .kerberos_discovery_config(hdfs) .build_as_xml() } fn build_discovery_core_site_xml(hdfs: &HdfsCluster, logical_name: 
String) -> String { CoreSiteConfigBuilder::new(logical_name) .fs_default_fs() - .hadoop_security_authentication(hdfs) + .kerberos_discovery_config(hdfs) .build_as_xml() } diff --git a/tests/templates/kuttl/kerberos/11-assert.yaml.j2 b/tests/templates/kuttl/kerberos/11-assert.yaml similarity index 100% rename from tests/templates/kuttl/kerberos/11-assert.yaml.j2 rename to tests/templates/kuttl/kerberos/11-assert.yaml diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index daccaaf2..ece8b065 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -32,6 +32,9 @@ commands: min: '300m' memory: limit: '512Mi' + configOverrides: &configOverrides + core-site.xml: + hadoop.user.group.static.mapping.overrides: "dr.who=;nn=;nm=;jn=;testuser=supergroup;" roleGroups: default: replicas: 2 @@ -45,6 +48,7 @@ commands: min: '300m' memory: limit: '512Mi' + configOverrides: *configOverrides roleGroups: default: replicas: 2 @@ -58,6 +62,7 @@ commands: min: '300m' memory: limit: '512Mi' + configOverrides: *configOverrides roleGroups: default: replicas: 3 diff --git a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml new file mode 100644 index 00000000..1756cf8b --- /dev/null +++ b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + kubectl apply -n $NAMESPACE -f - < Date: Mon, 20 Mar 2023 13:27:41 +0100 Subject: [PATCH 034/101] linter --- tests/templates/kuttl/kerberos/20-assert.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/templates/kuttl/kerberos/20-assert.yaml b/tests/templates/kuttl/kerberos/20-assert.yaml index 2ad9c516..c9df5ef6 100644 --- a/tests/templates/kuttl/kerberos/20-assert.yaml +++ b/tests/templates/kuttl/kerberos/20-assert.yaml @@ -1,3 +1,4 @@ +--- apiVersion: 
batch/v1 kind: Job metadata: From a89ecd8b9739a53f58a58709bd982164aa9b7e30 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Mar 2023 13:45:54 +0100 Subject: [PATCH 035/101] Set hdfs log level to DEBUG in test --- .../kuttl/kerberos/11-install-hdfs.yaml.j2 | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index ece8b065..1463ccd6 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -26,6 +26,19 @@ commands: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + containers: + hdfs: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG + formatNameNodes: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG resources: cpu: max: '1' @@ -42,6 +55,13 @@ commands: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + containers: + hdfs: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG resources: cpu: max: '1' @@ -56,6 +76,13 @@ commands: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + containers: + hdfs: + loggers: + ROOT: + level: DEBUG + console: + level: DEBUG resources: cpu: max: '1' From 6cb6bd672d0366ea32b3b509bac191b1f1356e2b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Mar 2023 21:05:34 +0100 Subject: [PATCH 036/101] Increase log level and double number of test runs --- tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 | 6 ++++++ tests/test-definition.yaml | 10 ++++++++++ 2 files changed, 16 insertions(+) diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index 1463ccd6..52704e75 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 
@@ -33,6 +33,8 @@ commands: level: DEBUG console: level: DEBUG + file: + level: DEBUG formatNameNodes: loggers: ROOT: @@ -62,6 +64,8 @@ commands: level: DEBUG console: level: DEBUG + file: + level: DEBUG resources: cpu: max: '1' @@ -83,6 +87,8 @@ commands: level: DEBUG console: level: DEBUG + file: + level: DEBUG resources: cpu: max: '1' diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index d9f7c657..9910e005 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -38,6 +38,16 @@ dimensions: - "8" - "9" - "10" + - "11" + - "12" + - "13" + - "14" + - "15" + - "16" + - "17" + - "18" + - "19" + - "20" tests: - name: smoke dimensions: From 5ca4c2304cb1f7df219d787a2c323de6060621cd Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 21 Mar 2023 08:48:57 +0100 Subject: [PATCH 037/101] Disable logging again --- .../kuttl/kerberos/11-install-hdfs.yaml.j2 | 33 ------------------- 1 file changed, 33 deletions(-) diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index 52704e75..ece8b065 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -26,21 +26,6 @@ commands: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - containers: - hdfs: - loggers: - ROOT: - level: DEBUG - console: - level: DEBUG - file: - level: DEBUG - formatNameNodes: - loggers: - ROOT: - level: DEBUG - console: - level: DEBUG resources: cpu: max: '1' @@ -57,15 +42,6 @@ commands: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - containers: - hdfs: - loggers: - ROOT: - level: DEBUG - console: - level: DEBUG - file: - level: DEBUG resources: cpu: max: '1' @@ -80,15 +56,6 @@ commands: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - containers: - hdfs: - loggers: - ROOT: - level: DEBUG - 
console: - level: DEBUG - file: - level: DEBUG resources: cpu: max: '1' From 8b5503ca5847f03a4718e6663e038cfb5393d6f9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 21 Mar 2023 09:49:38 +0100 Subject: [PATCH 038/101] Test PROD.MYCORP realm --- .../kuttl/kerberos/01-install-krb5-kdc.yaml.j2 | 12 +++++++++--- .../{20-access-hdfs.yaml => 20-access-hdfs.yaml.j2} | 2 +- tests/test-definition.yaml | 12 +----------- 3 files changed, 11 insertions(+), 15 deletions(-) rename tests/templates/kuttl/kerberos/{20-access-hdfs.yaml => 20-access-hdfs.yaml.j2} (96%) diff --git a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 index 78288f74..ed25a815 100644 --- a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 +++ b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 @@ -137,9 +137,15 @@ kind: Secret metadata: name: secret-operator-keytab data: + # To create keytab + # # when promted enter password asdf + # cat | ktutil << 'EOF' + # list + # add_entry -password -p stackable-secret-operator@CLUSTER.LOCAL -k 1 -e aes256-cts-hmac-sha384-192 + # wkt /tmp/keytab + # EOF {% if test_scenario['values']['kerberos-realm'] == 'CLUSTER.LOCAL' %} keytab: BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB -{% else %} -# echo "BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB" | base64 -d | sed 's/CLUSTER.LOCAL/PROD.MYCORP/' | base64 -w 0 - keytab: BQIAAABdAAEADVBST0QuTVlDT1JQABlzdGFja2FibGUtc2VjcmV0LW9wZXJhdG9yAAAAAWQGFiIBABQAIJvDAmfAf/1xdbR/dhnIg+v7rFlDQJgU2Z2UMS7mtk5IAAAAAQ== +{% elif test_scenario['values']['kerberos-realm'] == 'PROD.MYCORP' %} + keytab: BQIAAABbAAEAC1BST0QuTVlDT1JQABlzdGFja2FibGUtc2VjcmV0LW9wZXJhdG9yAAAAAWQZa0EBABQAIC/EnFNejq/K5lX6tX+B3/tkI13TCzkPB7d2ggCIEzE8AAAAAQ== {% endif %} diff --git 
a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 similarity index 96% rename from tests/templates/kuttl/kerberos/20-access-hdfs.yaml rename to tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 index 1756cf8b..3c6fe9f6 100644 --- a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml +++ b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 @@ -28,7 +28,7 @@ commands: - | set -ex klist -k /stackable/kerberos/keytab - kinit -kt /stackable/kerberos/keytab testuser/access-hdfs.$NAMESPACE.svc.cluster.local@CLUSTER.LOCAL + kinit -kt /stackable/kerberos/keytab testuser/access-hdfs.$NAMESPACE.svc.cluster.local@{{ test_scenario['values']['kerberos-realm'] }} klist bin/hdfs dfs -ls / diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 9910e005..72724e4d 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -25,7 +25,7 @@ dimensions: - name: kerberos-realm values: - "CLUSTER.LOCAL" - # - "PROD.MYCORP" # => Needs some work in the kerberos keytab for secret-operator + - "PROD.MYCORP" - name: test-number values: - "1" @@ -38,16 +38,6 @@ dimensions: - "8" - "9" - "10" - - "11" - - "12" - - "13" - - "14" - - "15" - - "16" - - "17" - - "18" - - "19" - - "20" tests: - name: smoke dimensions: From d0cb73c72cf84d5e33e49f8a6a116cc7a7216bc1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Mar 2023 08:47:27 +0100 Subject: [PATCH 039/101] Fix misc stuff --- rust/crd/src/lib.rs | 2 +- rust/operator/src/container.rs | 21 ++++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 44f07f1b..03e2b3bf 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -329,7 +329,7 @@ impl HdfsRole { } /// Name of the Hadoop process HADOOP_OPTS. 
- pub fn hadoop_opts(&self) -> &'static str { + pub fn hadoop_opts_env_var_for_role(&self) -> &'static str { match self { HdfsRole::NameNode => "HDFS_NAMENODE_OPTS", HdfsRole::DataNode => "HDFS_DATANODE_OPTS", diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index b6d9b327..dbf70e49 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -183,9 +183,9 @@ impl ContainerConfig { if let Some(https_secret_class) = hdfs.https_secret_class() { pb.add_volume( - VolumeBuilder::new(https_secret_class) + VolumeBuilder::new("tls") .ephemeral( - SecretOperatorVolumeSourceBuilder::new("tls") + SecretOperatorVolumeSourceBuilder::new(https_secret_class) .with_pod_scope() .with_node_scope() .build(), @@ -670,7 +670,7 @@ impl ContainerConfig { // See https://github.com/stackabletech/hdfs-operator/issues/138 for details if let ContainerConfig::Hdfs { role, .. } = self { env.push(EnvVar { - name: role.hadoop_opts().to_string(), + name: role.hadoop_opts_env_var_for_role().to_string(), value: self.build_hadoop_opts(hdfs, resources).ok(), ..EnvVar::default() }); @@ -680,14 +680,13 @@ impl ContainerConfig { // This will not only enable the init containers to work, but also the user to run e.g. 
// `bin/hdfs dfs -ls /` without getting `Caused by: java.lang.IllegalArgumentException: KrbException: Cannot locate default realm` // because the `-Djava.security.krb5.conf` setting is missing - env.push(EnvVar { - name: "HADOOP_OPTS".to_string(), - value: Some("-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string()), - ..EnvVar::default() - }); - - // Not only the main containers need Kerberos if hdfs.has_kerberos_enabled() { + env.push(EnvVar { + name: "HADOOP_OPTS".to_string(), + value: Some("-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string()), + ..EnvVar::default() + }); + env.push(EnvVar { name: "KRB5_CONFIG".to_string(), value: Some("/stackable/kerberos/krb5.conf".to_string()), @@ -825,7 +824,7 @@ impl ContainerConfig { volume_mounts.push(VolumeMountBuilder::new("kerberos", "/stackable/kerberos").build()); } if hdfs.https_secret_class().is_some() { - // This volume will be propagated by the CreateTlsCertBundle container + // This volume will be propagated by the create-tls-cert-bundle container volume_mounts.push(VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build()); } From 864b4b59c56583b7a20658a061f602a992225903 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Mar 2023 09:21:32 +0100 Subject: [PATCH 040/101] Refactor stuff into kerberos.rs --- rust/operator/src/config.rs | 24 +------ rust/operator/src/hdfs_controller.rs | 104 +++------------------------ rust/operator/src/kerberos.rs | 99 +++++++++++++++++++++++++ rust/operator/src/lib.rs | 1 + 4 files changed, 112 insertions(+), 116 deletions(-) create mode 100644 rust/operator/src/kerberos.rs diff --git a/rust/operator/src/config.rs b/rust/operator/src/config.rs index f656dfd3..eeee77f2 100644 --- a/rust/operator/src/config.rs +++ b/rust/operator/src/config.rs @@ -3,8 +3,8 @@ use stackable_hdfs_crd::constants::{ DEFAULT_NAME_NODE_RPC_PORT, DFS_DATANODE_DATA_DIR, DFS_HA_NAMENODES, DFS_JOURNALNODE_EDITS_DIR, DFS_JOURNALNODE_RPC_ADDRESS, 
DFS_NAMENODE_HTTPS_ADDRESS, DFS_NAMENODE_HTTP_ADDRESS, DFS_NAMENODE_NAME_DIR, DFS_NAMENODE_RPC_ADDRESS, DFS_NAMENODE_SHARED_EDITS_DIR, - DFS_NAME_SERVICES, DFS_REPLICATION, FS_DEFAULT_FS, HADOOP_SECURITY_AUTHENTICATION, - HA_ZOOKEEPER_QUORUM, JOURNALNODE_ROOT_DATA_DIR, NAMENODE_ROOT_DATA_DIR, + DFS_NAME_SERVICES, DFS_REPLICATION, FS_DEFAULT_FS, HA_ZOOKEEPER_QUORUM, + JOURNALNODE_ROOT_DATA_DIR, NAMENODE_ROOT_DATA_DIR, }; use stackable_hdfs_crd::storage::{DataNodeStorageConfig, DataNodeStorageConfigInnerType}; use stackable_hdfs_crd::{HdfsCluster, HdfsPodRef}; @@ -185,16 +185,6 @@ impl HdfsSiteConfigBuilder { self } - pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if hdfs.has_kerberos_enabled() { - self.config.insert( - "dfs.data.transfer.protection".to_string(), - "authentication".to_string(), - ); - } - self - } - pub fn build_as_xml(&self) -> String { let transformed_config = transform_for_product_config(&self.config); @@ -242,16 +232,6 @@ impl CoreSiteConfigBuilder { self } - pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if hdfs.has_kerberos_enabled() { - self.config.insert( - HADOOP_SECURITY_AUTHENTICATION.to_string(), - "kerberos".to_string(), - ); - } - self - } - pub fn build_as_xml(&self) -> String { let transformed_config = transform_for_product_config(&self.config); stackable_operator::product_config::writer::to_hadoop_xml(transformed_config.iter()) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index e93678e8..446da764 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -393,11 +393,10 @@ fn rolegroup_config_map( for (property_name_kind, config) in rolegroup_config { match property_name_kind { PropertyNameKind::File(file_name) if file_name == HDFS_SITE_XML => { - let mut hdfs_site_xml_builder = HdfsSiteConfigBuilder::new(hdfs_name.to_string()); // IMPORTANT: these folders must be under the volume mount 
point, otherwise they will not // be formatted by the namenode, or used by the other services. // See also: https://github.com/apache-spark-on-k8s/kubernetes-HDFS/commit/aef9586ecc8551ca0f0a468c3b917d8c38f494a0 - hdfs_site_xml_builder + hdfs_site_xml = HdfsSiteConfigBuilder::new(hdfs_name.to_string()) .dfs_namenode_name_dir() .dfs_datanode_data_dir(merged_config.data_node_resources().map(|r| r.storage)) .dfs_journalnode_edits_dir() @@ -416,106 +415,23 @@ fn rolegroup_config_map( .dfs_namenode_rpc_address_ha(namenode_podrefs) .dfs_namenode_http_address_ha(hdfs, namenode_podrefs) .dfs_client_failover_proxy_provider() + .kerberos_config(hdfs) .add("dfs.ha.fencing.methods", "shell(/bin/true)") .add("dfs.ha.nn.not-become-active-in-safemode", "true") .add("dfs.ha.automatic-failover.enabled", "true") - .add("dfs.ha.namenode.id", "${env.POD_NAME}"); - - if hdfs.has_kerberos_enabled() { - hdfs_site_xml_builder - .add("dfs.block.access.token.enable", "true") - .add("dfs.data.transfer.protection", "authentication") - .add("dfs.http.policy", "HTTPS_ONLY") - .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) - .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); - } - - hdfs_site_xml = hdfs_site_xml_builder + .add("dfs.ha.namenode.id", "${env.POD_NAME}") // the extend with config must come last in order to have overrides working!!! 
.extend(config) .build_as_xml(); } PropertyNameKind::File(file_name) if file_name == CORE_SITE_XML => { - let mut core_site_xml_builder = CoreSiteConfigBuilder::new(hdfs_name.to_string()); - - core_site_xml_builder.fs_default_fs().ha_zookeeper_quorum(); - - if hdfs.has_kerberos_enabled() { - // .add("hadoop.security.authentication", "kerberos") - // .add("hadoop.security.authorization","true") - // .add("hadoop.registry.kerberos.realm","${env.KERBEROS_REALM}") - // .add("dfs.web.authentication.kerberos.principal","HTTP/_HOST@${env.KERBEROS_REALM}") - // .add("dfs.journalnode.kerberos.internal.spnego.principal","HTTP/_HOST@{env.KERBEROS_REALM}") - // .add("dfs.journalnode.kerberos.principal","jn/_HOST@${env.KERBEROS_REALM}") - // .add("dfs.journalnode.kerberos.principal.pattern","jn/*.simple-hdfs-journalnode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") - // .add("dfs.namenode.kerberos.principal","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") - // .add("dfs.namenode.kerberos.principal.pattern","nn/simple-hdfs-namenode-default.default.svc.cluster.local@${env.KERBEROS_REALM}") - // .add("dfs.datanode.kerberos.principal","dn/_HOST@${env.KERBEROS_REALM}") - // .add("dfs.web.authentication.keytab.file","/stackable/kerberos/keytab") - // .add("dfs.journalnode.keytab.file","/stackable/kerberos/keytab") - // .add("dfs.namenode.keytab.file","/stackable/kerberos/keytab") - // .add("dfs.datanode.keytab.file","/stackable/kerberos/keytab") - // .add("hadoop.user.group.static.mapping.overrides","dr.who=;nn=;") - - core_site_xml_builder - .add("hadoop.security.authentication", "kerberos") - .add("hadoop.security.authorization", "true") - // Otherwise we fail with `java.io.IOException: No groups found for user nn` - // Default value is `dr.who=`, so we include that here - .add("hadoop.user.group.static.mapping.overrides", "dr.who=;nn=;nm=;jn=;") - .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") - .add( - 
"dfs.web.authentication.kerberos.principal", - "HTTP/_HOST@${env.KERBEROS_REALM}", - ) - .add( - "dfs.web.authentication.keytab.file", - "/stackable/kerberos/keytab", - ) - .add( - "dfs.journalnode.kerberos.principal.pattern", - // jn/hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local@CLUSTER.LOCAL - format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), - ) - .add( - "dfs.namenode.kerberos.principal.pattern", - format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), - ); - - match role { - HdfsRole::NameNode => { - core_site_xml_builder - .add( - "dfs.namenode.kerberos.principal", - "nn/_HOST@${env.KERBEROS_REALM}", - ) - .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab"); - } - HdfsRole::DataNode => { - core_site_xml_builder - .add( - "dfs.datanode.kerberos.principal", - "dn/_HOST@${env.KERBEROS_REALM}", - ) - .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); - } - HdfsRole::JournalNode => { - core_site_xml_builder - .add( - "dfs.journalnode.kerberos.principal", - "jn/_HOST@${env.KERBEROS_REALM}", - ) - .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") - .add( - "dfs.journalnode.kerberos.internal.spnego.principal", - "HTTP/_HOST@${env.KERBEROS_REALM}", - ); - } - } - } - - // the extend with config must come last in order to have overrides working!!! - core_site_xml = core_site_xml_builder.extend(config).build_as_xml(); + core_site_xml = CoreSiteConfigBuilder::new(hdfs_name.to_string()) + .fs_default_fs() + .ha_zookeeper_quorum() + .kerberos_config(hdfs, role, hdfs_name, &hdfs_namespace) + // the extend with config must come last in order to have overrides working!!! 
+ .extend(config) + .build_as_xml(); } // PropertyNameKind::File(file_name) if file_name == HADOOP_POLICY_XML => { // let mut config_opts = BTreeMap::new(); diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs new file mode 100644 index 00000000..ea262706 --- /dev/null +++ b/rust/operator/src/kerberos.rs @@ -0,0 +1,99 @@ +use stackable_hdfs_crd::{ + constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, + HdfsCluster, HdfsRole, +}; + +use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; + +impl HdfsSiteConfigBuilder { + pub fn kerberos_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if hdfs.has_kerberos_enabled() { + self.add("dfs.block.access.token.enable", "true") + .add("dfs.data.transfer.protection", "authentication") + .add("dfs.http.policy", "HTTPS_ONLY") + .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) + .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); + } + self + } + + pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if hdfs.has_kerberos_enabled() { + self.add("dfs.data.transfer.protection", "authentication"); + } + self + } +} + +impl CoreSiteConfigBuilder { + pub fn kerberos_config( + &mut self, + hdfs: &HdfsCluster, + role: &HdfsRole, + hdfs_name: &str, + hdfs_namespace: &str, + ) -> &mut Self { + if hdfs.has_kerberos_enabled() { + self + .add("hadoop.security.authentication", "kerberos") + .add("hadoop.security.authorization", "true") + // Otherwise we fail with `java.io.IOException: No groups found for user nn` + // Default value is `dr.who=`, so we include that here + .add("hadoop.user.group.static.mapping.overrides", "dr.who=;nn=;nm=;jn=;") + .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") + .add( + "dfs.web.authentication.kerberos.principal", + "HTTP/_HOST@${env.KERBEROS_REALM}", + ) + .add( + "dfs.web.authentication.keytab.file", + "/stackable/kerberos/keytab", + ) + .add( + 
"dfs.journalnode.kerberos.principal.pattern", + // E.g. jn/hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local@CLUSTER.LOCAL + format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ) + .add( + "dfs.namenode.kerberos.principal.pattern", + format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + ); + + match role { + HdfsRole::NameNode => { + self.add( + "dfs.namenode.kerberos.principal", + "nn/_HOST@${env.KERBEROS_REALM}", + ) + .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab"); + } + HdfsRole::DataNode => { + self.add( + "dfs.datanode.kerberos.principal", + "dn/_HOST@${env.KERBEROS_REALM}", + ) + .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); + } + HdfsRole::JournalNode => { + self.add( + "dfs.journalnode.kerberos.principal", + "jn/_HOST@${env.KERBEROS_REALM}", + ) + .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") + .add( + "dfs.journalnode.kerberos.internal.spnego.principal", + "HTTP/_HOST@${env.KERBEROS_REALM}", + ); + } + } + } + self + } + + pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if hdfs.has_kerberos_enabled() { + self.add(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + } + self + } +} diff --git a/rust/operator/src/lib.rs b/rust/operator/src/lib.rs index 88c5ed09..ce321082 100644 --- a/rust/operator/src/lib.rs +++ b/rust/operator/src/lib.rs @@ -3,6 +3,7 @@ mod container; mod discovery; mod event; mod hdfs_controller; +mod kerberos; mod pod_svc_controller; mod product_logging; mod rbac; From 0d81588a94d41932f5c84593d60c5718f7498f7c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Mar 2023 09:28:07 +0100 Subject: [PATCH 041/101] Create create_tls_cert_bundle_init_container_and_volumes fn --- rust/operator/src/container.rs | 44 +++----------------------- 
rust/operator/src/kerberos.rs | 57 +++++++++++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 40 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index dbf70e49..b71165d0 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -11,6 +11,7 @@ //! use crate::{ hdfs_controller::KEYSTORE_DIR_NAME, + kerberos::create_tls_cert_bundle_init_container_and_volumes, product_logging::{ FORMAT_NAMENODES_LOG4J_CONFIG_FILE, FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, HDFS_LOG4J_CONFIG_FILE, MAX_LOG_FILES_SIZE_IN_MIB, STACKABLE_LOG_DIR, @@ -182,46 +183,11 @@ impl ContainerConfig { } if let Some(https_secret_class) = hdfs.https_secret_class() { - pb.add_volume( - VolumeBuilder::new("tls") - .ephemeral( - SecretOperatorVolumeSourceBuilder::new(https_secret_class) - .with_pod_scope() - .with_node_scope() - .build(), - ) - .build(), - ); - - pb.add_volume( - VolumeBuilder::new("keystore") - .with_empty_dir(Option::::None, None) - .build(), + create_tls_cert_bundle_init_container_and_volumes( + pb, + https_secret_class, + resolved_product_image, ); - - let create_tls_cert_bundle_init_container = - ContainerBuilder::new("create-tls-cert-bundle") - .unwrap() - .image_from_product_image(resolved_product_image) - .command(vec!["/bin/bash".to_string(), "-c".to_string()]) - .args(vec![formatdoc!( - r###" - echo "Cleaning up truststore - just in case" - rm -f {KEYSTORE_DIR_NAME}/truststore.p12 - echo "Creating truststore" - keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass changeit - echo "Creating certificate chain" - cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt - echo "Cleaning up keystore - just in case" - rm -f {KEYSTORE_DIR_NAME}/keystore.p12 - echo "Creating keystore" - openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out 
{KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"### - )]) - // Only this init container needs the actual cert (from tls volume) to create the truststore + keystore from - .add_volume_mount("tls", "/stackable/tls") - .add_volume_mount("keystore", KEYSTORE_DIR_NAME) - .build(); - pb.add_init_container(create_tls_cert_bundle_init_container); } // role specific pod settings configured here diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index ea262706..2a2bec7b 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,9 +1,17 @@ +use indoc::formatdoc; use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, HdfsCluster, HdfsRole, }; +use stackable_operator::{ + builder::{ContainerBuilder, PodBuilder, SecretOperatorVolumeSourceBuilder, VolumeBuilder}, + commons::product_image_selection::ResolvedProductImage, +}; -use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; +use crate::{ + config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}, + hdfs_controller::KEYSTORE_DIR_NAME, +}; impl HdfsSiteConfigBuilder { pub fn kerberos_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { @@ -97,3 +105,50 @@ impl CoreSiteConfigBuilder { self } } + +pub fn create_tls_cert_bundle_init_container_and_volumes( + pb: &mut PodBuilder, + https_secret_class: &str, + resolved_product_image: &ResolvedProductImage, +) { + pb.add_volume( + VolumeBuilder::new("tls") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new(https_secret_class) + .with_pod_scope() + .with_node_scope() + .build(), + ) + .build(), + ); + + pb.add_volume( + VolumeBuilder::new("keystore") + .with_empty_dir(Option::::None, None) + .build(), + ); + + let create_tls_cert_bundle_init_container = + ContainerBuilder::new("create-tls-cert-bundle") + .unwrap() + .image_from_product_image(resolved_product_image) + .command(vec!["/bin/bash".to_string(), "-c".to_string()]) + .args(vec![formatdoc!( + r###" + echo 
"Cleaning up truststore - just in case" + rm -f {KEYSTORE_DIR_NAME}/truststore.p12 + echo "Creating truststore" + keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass changeit + echo "Creating certificate chain" + cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt + echo "Cleaning up keystore - just in case" + rm -f {KEYSTORE_DIR_NAME}/keystore.p12 + echo "Creating keystore" + openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"### + )]) + // Only this init container needs the actual cert (from tls volume) to create the truststore + keystore from + .add_volume_mount("tls", "/stackable/tls") + .add_volume_mount("keystore", KEYSTORE_DIR_NAME) + .build(); + pb.add_init_container(create_tls_cert_bundle_init_container); +} From 54f14bcbd9465a1d4abc483cfc1a9b26b9856759 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Mar 2023 09:30:26 +0100 Subject: [PATCH 042/101] Revert "Create create_tls_cert_bundle_init_container_and_volumes fn" This reverts commit 0d81588a94d41932f5c84593d60c5718f7498f7c. --- rust/operator/src/container.rs | 44 +++++++++++++++++++++++--- rust/operator/src/kerberos.rs | 57 +--------------------------------- 2 files changed, 40 insertions(+), 61 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index b71165d0..dbf70e49 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -11,7 +11,6 @@ //! 
use crate::{ hdfs_controller::KEYSTORE_DIR_NAME, - kerberos::create_tls_cert_bundle_init_container_and_volumes, product_logging::{ FORMAT_NAMENODES_LOG4J_CONFIG_FILE, FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, HDFS_LOG4J_CONFIG_FILE, MAX_LOG_FILES_SIZE_IN_MIB, STACKABLE_LOG_DIR, @@ -183,11 +182,46 @@ impl ContainerConfig { } if let Some(https_secret_class) = hdfs.https_secret_class() { - create_tls_cert_bundle_init_container_and_volumes( - pb, - https_secret_class, - resolved_product_image, + pb.add_volume( + VolumeBuilder::new("tls") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new(https_secret_class) + .with_pod_scope() + .with_node_scope() + .build(), + ) + .build(), ); + + pb.add_volume( + VolumeBuilder::new("keystore") + .with_empty_dir(Option::::None, None) + .build(), + ); + + let create_tls_cert_bundle_init_container = + ContainerBuilder::new("create-tls-cert-bundle") + .unwrap() + .image_from_product_image(resolved_product_image) + .command(vec!["/bin/bash".to_string(), "-c".to_string()]) + .args(vec![formatdoc!( + r###" + echo "Cleaning up truststore - just in case" + rm -f {KEYSTORE_DIR_NAME}/truststore.p12 + echo "Creating truststore" + keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass changeit + echo "Creating certificate chain" + cat /stackable/tls/ca.crt /stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt + echo "Cleaning up keystore - just in case" + rm -f {KEYSTORE_DIR_NAME}/keystore.p12 + echo "Creating keystore" + openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"### + )]) + // Only this init container needs the actual cert (from tls volume) to create the truststore + keystore from + .add_volume_mount("tls", "/stackable/tls") + .add_volume_mount("keystore", KEYSTORE_DIR_NAME) + .build(); + pb.add_init_container(create_tls_cert_bundle_init_container); } 
// role specific pod settings configured here diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index 2a2bec7b..ea262706 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,17 +1,9 @@ -use indoc::formatdoc; use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, HdfsCluster, HdfsRole, }; -use stackable_operator::{ - builder::{ContainerBuilder, PodBuilder, SecretOperatorVolumeSourceBuilder, VolumeBuilder}, - commons::product_image_selection::ResolvedProductImage, -}; -use crate::{ - config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}, - hdfs_controller::KEYSTORE_DIR_NAME, -}; +use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; impl HdfsSiteConfigBuilder { pub fn kerberos_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { @@ -105,50 +97,3 @@ impl CoreSiteConfigBuilder { self } } - -pub fn create_tls_cert_bundle_init_container_and_volumes( - pb: &mut PodBuilder, - https_secret_class: &str, - resolved_product_image: &ResolvedProductImage, -) { - pb.add_volume( - VolumeBuilder::new("tls") - .ephemeral( - SecretOperatorVolumeSourceBuilder::new(https_secret_class) - .with_pod_scope() - .with_node_scope() - .build(), - ) - .build(), - ); - - pb.add_volume( - VolumeBuilder::new("keystore") - .with_empty_dir(Option::::None, None) - .build(), - ); - - let create_tls_cert_bundle_init_container = - ContainerBuilder::new("create-tls-cert-bundle") - .unwrap() - .image_from_product_image(resolved_product_image) - .command(vec!["/bin/bash".to_string(), "-c".to_string()]) - .args(vec![formatdoc!( - r###" - echo "Cleaning up truststore - just in case" - rm -f {KEYSTORE_DIR_NAME}/truststore.p12 - echo "Creating truststore" - keytool -importcert -file /stackable/tls/ca.crt -keystore {KEYSTORE_DIR_NAME}/truststore.p12 -storetype pkcs12 -noprompt -alias ca_cert -storepass changeit - echo "Creating certificate chain" - cat /stackable/tls/ca.crt 
/stackable/tls/tls.crt > {KEYSTORE_DIR_NAME}/chain.crt - echo "Cleaning up keystore - just in case" - rm -f {KEYSTORE_DIR_NAME}/keystore.p12 - echo "Creating keystore" - openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"### - )]) - // Only this init container needs the actual cert (from tls volume) to create the truststore + keystore from - .add_volume_mount("tls", "/stackable/tls") - .add_volume_mount("keystore", KEYSTORE_DIR_NAME) - .build(); - pb.add_init_container(create_tls_cert_bundle_init_container); -} From 8d43afaa92e49b9c17b76189b290751b2ac7e1f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Natalie=20Klestrup=20R=C3=B6ijezon?= Date: Wed, 22 Mar 2023 10:38:16 +0100 Subject: [PATCH 043/101] Add AD tests --- .../{01-assert.yaml => 01-assert.yaml.j2} | 2 + .../kerberos/01-install-krb5-kdc.yaml.j2 | 18 +------ .../02-create-kerberos-secretclass.yaml.j2 | 53 ++++++++++++++++++- tests/test-definition.yaml | 6 +++ 4 files changed, 61 insertions(+), 18 deletions(-) rename tests/templates/kuttl/kerberos/{01-assert.yaml => 01-assert.yaml.j2} (70%) diff --git a/tests/templates/kuttl/kerberos/01-assert.yaml b/tests/templates/kuttl/kerberos/01-assert.yaml.j2 similarity index 70% rename from tests/templates/kuttl/kerberos/01-assert.yaml rename to tests/templates/kuttl/kerberos/01-assert.yaml.j2 index fed2027c..d34c1c63 100644 --- a/tests/templates/kuttl/kerberos/01-assert.yaml +++ b/tests/templates/kuttl/kerberos/01-assert.yaml.j2 @@ -2,6 +2,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 300 +{% if test_scenario['values']['kerberos-backend'] == 'mit' %} --- apiVersion: apps/v1 kind: StatefulSet @@ -10,3 +11,4 @@ metadata: status: readyReplicas: 1 replicas: 1 +{% endif %} diff --git a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 index ed25a815..c688f2e2 100644 --- 
a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 +++ b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 @@ -1,3 +1,4 @@ +{% if test_scenario['values']['kerberos-backend'] == 'mit' %} apiVersion: apps/v1 kind: StatefulSet metadata: @@ -131,21 +132,4 @@ data: kadm5.acl: | root/admin *e stackable-secret-operator *e ---- -apiVersion: v1 -kind: Secret -metadata: - name: secret-operator-keytab -data: - # To create keytab - # # when promted enter password asdf - # cat | ktutil << 'EOF' - # list - # add_entry -password -p stackable-secret-operator@CLUSTER.LOCAL -k 1 -e aes256-cts-hmac-sha384-192 - # wkt /tmp/keytab - # EOF -{% if test_scenario['values']['kerberos-realm'] == 'CLUSTER.LOCAL' %} - keytab: BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB -{% elif test_scenario['values']['kerberos-realm'] == 'PROD.MYCORP' %} - keytab: BQIAAABbAAEAC1BST0QuTVlDT1JQABlzdGFja2FibGUtc2VjcmV0LW9wZXJhdG9yAAAAAWQZa0EBABQAIC/EnFNejq/K5lX6tX+B3/tkI13TCzkPB7d2ggCIEzE8AAAAAQ== {% endif %} diff --git a/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 index 681939ca..2f9271b0 100644 --- a/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 +++ b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 @@ -12,11 +12,62 @@ commands: spec: backend: kerberosKeytab: +{% if test_scenario['values']['kerberos-backend'] == 'mit' %} realmName: {{ test_scenario['values']['kerberos-realm'] }} kdc: krb5-kdc.$NAMESPACE.svc.cluster.local - adminServer: krb5-kdc.$NAMESPACE.svc.cluster.local +{% elif test_scenario['values']['kerberos-backend'] == 'activeDirectory' %} + realmName: SBLE.TEST + # Must be the FQDN of the AD domain controller + kdc: sble-adds1.sble.test +{% endif %} + admin: + {{ test_scenario['values']['kerberos-backend'] }}: +{% if 
test_scenario['values']['kerberos-backend'] == 'mit' %} + adminServer: krb5-kdc.$NAMESPACE.svc.cluster.local +{% elif test_scenario['values']['kerberos-backend'] == 'activeDirectory' %} + # Must be the FQDN of the AD domain controller + ldapServer: sble-adds1.sble.test + passwordCacheSecret: + name: secret-operator-ad-passwords + namespace: $NAMESPACE + # Container must be created manually + # Users must be deleted by user between each test run + userDistinguishedName: CN=Stackable,CN=Users,DC=sble,DC=test + schemaDistinguishedName: CN=Schema,CN=Configuration,DC=sble,DC=test +{% endif %} adminKeytabSecret: namespace: $NAMESPACE name: secret-operator-keytab adminPrincipal: stackable-secret-operator EOF +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-operator-keytab +data: +{% if test_scenario['values']['kerberos-backend'] == 'mit' %} + # To create keytab + # # when promted enter password asdf + # cat | ktutil << 'EOF' + # list + # add_entry -password -p stackable-secret-operator@CLUSTER.LOCAL -k 1 -e aes256-cts-hmac-sha384-192 + # wkt /tmp/keytab + # EOF +{% if test_scenario['values']['kerberos-realm'] == 'CLUSTER.LOCAL' %} + keytab: BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB +{% elif test_scenario['values']['kerberos-realm'] == 'PROD.MYCORP' %} + keytab: BQIAAABbAAEAC1BST0QuTVlDT1JQABlzdGFja2FibGUtc2VjcmV0LW9wZXJhdG9yAAAAAWQZa0EBABQAIC/EnFNejq/K5lX6tX+B3/tkI13TCzkPB7d2ggCIEzE8AAAAAQ== +{% endif %} +{% elif test_scenario['values']['kerberos-backend'] == 'activeDirectory' %} + # To create keytab + # ktpass /princ foobar@SBLE.TEST /mapuser foobar@SBLE.TEST /ptype KRB5_NT_PRINCIPAL /crypto AES256-SHA1 /out foo.kt +rndPass + keytab: BQIAAABVAAEACVNCTEUuVEVTVAAZc3RhY2thYmxlLXNlY3JldC1vcGVyYXRvcgAAAAEAAAAAAwASACCm3DV5BWzI2rlyAXRFtLbSQtcTtlMuKKtm5zFOMingkQ== +{% endif %} +{% if test_scenario['values']['kerberos-backend'] == 'activeDirectory' %} +--- +apiVersion: v1 +kind: 
Secret +metadata: + name: secret-operator-ad-passwords +{% endif %} diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 72724e4d..4ab1b8d6 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -26,6 +26,11 @@ dimensions: values: - "CLUSTER.LOCAL" - "PROD.MYCORP" + - name: kerberos-backend + values: + - mit + # Requires manual setup, see create-kerberos-secretclass.yaml + - activeDirectory - name: test-number values: - "1" @@ -50,6 +55,7 @@ tests: - hadoop - zookeeper-latest - kerberos-realm + - kerberos-backend - test-number - name: orphaned-resources dimensions: From 0a8bc3ca8ce21ea26823925a64528f62e2e5fbde Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Mar 2023 12:01:42 +0100 Subject: [PATCH 044/101] Disable activeDirectory tests --- tests/test-definition.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 4ab1b8d6..2713ad52 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -30,7 +30,7 @@ dimensions: values: - mit # Requires manual setup, see create-kerberos-secretclass.yaml - - activeDirectory + # - activeDirectory - name: test-number values: - "1" From e9ae3e9c4c47b420a835dc84cea5569bfce3107f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Mar 2023 12:03:18 +0100 Subject: [PATCH 045/101] Improve comment --- .../kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 | 3 +-- tests/test-definition.yaml | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 index 2f9271b0..d6b86699 100644 --- a/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 +++ b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 @@ -47,8 +47,7 @@ metadata: name: secret-operator-keytab data: {% if 
test_scenario['values']['kerberos-backend'] == 'mit' %} - # To create keytab - # # when promted enter password asdf + # To create keytab. When promted enter password asdf # cat | ktutil << 'EOF' # list # add_entry -password -p stackable-secret-operator@CLUSTER.LOCAL -k 1 -e aes256-cts-hmac-sha384-192 diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 2713ad52..8933784a 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -30,6 +30,7 @@ dimensions: values: - mit # Requires manual setup, see create-kerberos-secretclass.yaml + # This will *not* respect the kerberos-realm test attribute, but instead use a hard-coded realm # - activeDirectory - name: test-number values: From ff60fae01ec8839fc0ff6e2ecce1f68ca78d6a28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Natalie=20Klestrup=20R=C3=B6ijezon?= Date: Wed, 22 Mar 2023 11:07:02 +0100 Subject: [PATCH 046/101] Don't hard-code realm when ktiniting --- tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 index 3c6fe9f6..b4b94dda 100644 --- a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 @@ -28,7 +28,7 @@ commands: - | set -ex klist -k /stackable/kerberos/keytab - kinit -kt /stackable/kerberos/keytab testuser/access-hdfs.$NAMESPACE.svc.cluster.local@{{ test_scenario['values']['kerberos-realm'] }} + kinit -kt /stackable/kerberos/keytab testuser/access-hdfs.$NAMESPACE.svc.cluster.local klist bin/hdfs dfs -ls / From 32360285aa01022827c67d8bcb4aa84160a6da4e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 27 Mar 2023 16:37:23 +0200 Subject: [PATCH 047/101] Write hadoop.kerberos.keytab.login.autorenewal.enabled to config and discovery CM --- rust/operator/src/kerberos.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff 
--git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index ea262706..2a43a53f 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -11,6 +11,7 @@ impl HdfsSiteConfigBuilder { self.add("dfs.block.access.token.enable", "true") .add("dfs.data.transfer.protection", "authentication") .add("dfs.http.policy", "HTTPS_ONLY") + .add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true") .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); } @@ -19,7 +20,10 @@ impl HdfsSiteConfigBuilder { pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { if hdfs.has_kerberos_enabled() { - self.add("dfs.data.transfer.protection", "authentication"); + self.add("dfs.data.transfer.protection", "authentication") + // We want e.g. hbase to automatically renew the Kerberos tickets. + // This shouldn't harm any other consumer. + .add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true"); } self } From 97f5daed231b86e5754b13a32eb551c1d2723446 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 28 Mar 2023 12:44:30 +0200 Subject: [PATCH 048/101] Remove uneeded volume in tests --- tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 index b4b94dda..4935280c 100644 --- a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 @@ -39,17 +39,12 @@ commands: volumeMounts: - name: hdfs-config mountPath: /stackable/conf/hdfs - - name: hdfs-nn-config - mountPath: /stackable/conf/hdfs-nn - name: kerberos mountPath: /stackable/kerberos volumes: - name: hdfs-config configMap: name: hdfs - - name: hdfs-nn-config - configMap: - name: hdfs-datanode-default - name: kerberos ephemeral: volumeClaimTemplate: From 
f9eff39ca7beca307da456bd5f2df54b63cc2f11 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 28 Mar 2023 13:08:16 +0200 Subject: [PATCH 049/101] Disable node principals by default --- deploy/helm/hdfs-operator/crds/crds.yaml | 4 ++++ rust/crd/src/lib.rs | 14 ++++++++++++++ rust/operator/src/container.rs | 19 +++++++++++-------- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index 80967480..507eb0ab 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -42,6 +42,10 @@ spec: default: kerberos description: Name of the SecretClass providing the keytab for the HDFS services. type: string + requestNodePrincipals: + default: false + description: Wether a principal including the Kubernetes node name should be requested. The principal could e.g. be `HTTP/my-k8s-worker-0.mycorp.lan`. This feature is disabled by default, as the resulting principals can already by existent e.g. in Active Directory which can cause problems. + type: boolean tlsSecretClass: default: tls description: Name of the SecretClass providing the tls certificates for the WebUIs. diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 03e2b3bf..e2bb8fa5 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -99,6 +99,12 @@ pub struct KerberosConfig { /// Name of the SecretClass providing the tls certificates for the WebUIs. #[serde(default = "default_kerberos_tls_secret_class")] tls_secret_class: String, + /// Wether a principal including the Kubernetes node name should be requested. + /// The principal could e.g. be `HTTP/my-k8s-worker-0.mycorp.lan`. + /// This feature is disabled by default, as the resulting principals can already by existent + /// e.g. in Active Directory which can cause problems. 
+ #[serde(default)] + request_node_principals: bool, } fn default_kerberos_tls_secret_class() -> String { @@ -589,6 +595,14 @@ impl HdfsCluster { self.kerberos_secret_class().is_some() } + pub fn kerberos_request_node_principals(&self) -> Option { + self.spec + .cluster_config + .kerberos + .as_ref() + .map(|k| k.request_node_principals) + } + pub fn kerberos_secret_class(&self) -> Option<&str> { self.spec .cluster_config diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index dbf70e49..5f8d0eae 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -766,16 +766,19 @@ impl ContainerConfig { // Note that we create the volume here, only for the main container. // However, as other containers need this volume as well, it will be also mounted in other containers. if let Some(kerberos_secret_class) = hdfs.kerberos_secret_class() { + let mut kerberos_secret_operator_volume_builder = + SecretOperatorVolumeSourceBuilder::new(kerberos_secret_class); + kerberos_secret_operator_volume_builder + .with_pod_scope() + .with_kerberos_service_name(role.kerberos_service_name()) + .with_kerberos_service_name("HTTP"); + if let Some(true) = hdfs.kerberos_request_node_principals() { + kerberos_secret_operator_volume_builder.with_node_scope(); + } + volumes.push( VolumeBuilder::new("kerberos") - .ephemeral( - SecretOperatorVolumeSourceBuilder::new(kerberos_secret_class) - .with_pod_scope() - .with_node_scope() - .with_kerberos_service_name(role.kerberos_service_name()) - .with_kerberos_service_name("HTTP") - .build(), - ) + .ephemeral(kerberos_secret_operator_volume_builder.build()) .build(), ); } From f13a0bb2509bfadd9e9e27d2b0bc89e4e3b295e7 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 30 Mar 2023 08:37:09 +0200 Subject: [PATCH 050/101] Add wire encryption setting --- deploy/helm/hdfs-operator/crds/crds.yaml | 17 ++++++ rust/crd/src/lib.rs | 57 +++++++++++++------ rust/operator/src/container.rs | 10 ++-- 
rust/operator/src/kerberos.rs | 56 ++++++++++++++---- .../kuttl/kerberos/11-install-hdfs.yaml.j2 | 1 + tests/test-definition.yaml | 17 ++---- 6 files changed, 114 insertions(+), 44 deletions(-) diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index 507eb0ab..dbf47324 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -50,6 +50,23 @@ spec: default: tls description: Name of the SecretClass providing the tls certificates for the WebUIs. type: string + wireEncryption: + default: Privacy + description: |- + Configures how communication between hdfs nodes as well as between hdfs clients and cluster are secured. Possible values are: + + Authentication: Establishes mutual authentication between the client and the server. Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. + + Integrity: In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. + + Privacy: In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. + + Defaults to privacy for best security + enum: + - Authentication + - Integrity + - Privacy + type: string type: object vectorAggregatorConfigMapName: description: Name of the Vector aggregator discovery ConfigMap. It must contain the key `ADDRESS` with the address of the Vector aggregator. 
diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index e2bb8fa5..16b77e23 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -95,16 +95,34 @@ pub struct HdfsClusterConfig { pub struct KerberosConfig { /// Name of the SecretClass providing the keytab for the HDFS services. #[serde(default = "default_kerberos_kerberos_secret_class")] - kerberos_secret_class: String, + pub kerberos_secret_class: String, /// Name of the SecretClass providing the tls certificates for the WebUIs. #[serde(default = "default_kerberos_tls_secret_class")] - tls_secret_class: String, + pub tls_secret_class: String, /// Wether a principal including the Kubernetes node name should be requested. /// The principal could e.g. be `HTTP/my-k8s-worker-0.mycorp.lan`. /// This feature is disabled by default, as the resulting principals can already by existent /// e.g. in Active Directory which can cause problems. #[serde(default)] - request_node_principals: bool, + pub request_node_principals: bool, + /// Configures how communication between hdfs nodes as well as between hdfs clients and cluster are secured. + /// Possible values are: + /// + /// Authentication: + /// Establishes mutual authentication between the client and the server. + /// Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. + /// + /// Integrity: + /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. + /// Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. + /// + /// Privacy: + /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. 
+ /// Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. + /// + /// Defaults to privacy for best security + #[serde(default)] + pub wire_encryption: WireEncryption, } fn default_kerberos_tls_secret_class() -> String { @@ -115,6 +133,21 @@ fn default_kerberos_kerberos_secret_class() -> String { "kerberos".to_string() } +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +pub enum WireEncryption { + /// Establishes mutual authentication between the client and the server. + /// Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. + Authentication, + /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. + /// Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. + Integrity, + /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. + /// Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. + #[default] + Privacy, +} + /// This is a shared trait for all role/role-group config structs to avoid duplication /// when extracting role specific configuration structs like resources or logging. 
pub trait MergedConfig { @@ -592,23 +625,11 @@ impl HdfsCluster { } pub fn has_kerberos_enabled(&self) -> bool { - self.kerberos_secret_class().is_some() + self.spec.cluster_config.kerberos.is_some() } - pub fn kerberos_request_node_principals(&self) -> Option { - self.spec - .cluster_config - .kerberos - .as_ref() - .map(|k| k.request_node_principals) - } - - pub fn kerberos_secret_class(&self) -> Option<&str> { - self.spec - .cluster_config - .kerberos - .as_ref() - .map(|k| k.kerberos_secret_class.as_str()) + pub fn kerberos_config(&self) -> Option<&KerberosConfig> { + self.spec.cluster_config.kerberos.as_ref() } pub fn has_https_enabled(&self) -> bool { diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 5f8d0eae..da1115bd 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -765,14 +765,16 @@ impl ContainerConfig { // Note that we create the volume here, only for the main container. // However, as other containers need this volume as well, it will be also mounted in other containers. 
- if let Some(kerberos_secret_class) = hdfs.kerberos_secret_class() { + if let Some(kerberos_config) = hdfs.kerberos_config() { let mut kerberos_secret_operator_volume_builder = - SecretOperatorVolumeSourceBuilder::new(kerberos_secret_class); + SecretOperatorVolumeSourceBuilder::new( + &kerberos_config.kerberos_secret_class, + ); kerberos_secret_operator_volume_builder .with_pod_scope() .with_kerberos_service_name(role.kerberos_service_name()) .with_kerberos_service_name("HTTP"); - if let Some(true) = hdfs.kerberos_request_node_principals() { + if kerberos_config.request_node_principals { kerberos_secret_operator_volume_builder.with_node_scope(); } @@ -823,7 +825,7 @@ impl ContainerConfig { ]; // Adding this for all containers, as not only the main container needs Kerberos or TLS - if hdfs.kerberos_secret_class().is_some() { + if hdfs.has_kerberos_enabled() { volume_mounts.push(VolumeMountBuilder::new("kerberos", "/stackable/kerberos").build()); } if hdfs.https_secret_class().is_some() { diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index 2a43a53f..45d4d024 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,29 +1,47 @@ use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, - HdfsCluster, HdfsRole, + HdfsCluster, HdfsRole, KerberosConfig, }; use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; impl HdfsSiteConfigBuilder { pub fn kerberos_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if hdfs.has_kerberos_enabled() { + if let Some(kerberos_config) = hdfs.kerberos_config() { self.add("dfs.block.access.token.enable", "true") - .add("dfs.data.transfer.protection", "authentication") .add("dfs.http.policy", "HTTPS_ONLY") .add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true") .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); + 
self.add_wire_encryption_settings(kerberos_config); } self } pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if hdfs.has_kerberos_enabled() { - self.add("dfs.data.transfer.protection", "authentication") - // We want e.g. hbase to automatically renew the Kerberos tickets. - // This shouldn't harm any other consumer. - .add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true"); + if let Some(kerberos_config) = hdfs.kerberos_config() { + // We want e.g. hbase to automatically renew the Kerberos tickets. + // This shouldn't harm any other consumers. + self.add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true"); + self.add_wire_encryption_settings(kerberos_config); + } + self + } + + fn add_wire_encryption_settings(&mut self, kerberos_config: &KerberosConfig) -> &mut Self { + match kerberos_config.wire_encryption { + stackable_hdfs_crd::WireEncryption::Authentication => { + self.add("dfs.data.transfer.protection", "authentication"); + self.add("dfs.encrypt.data.transfer", "false"); + } + stackable_hdfs_crd::WireEncryption::Integrity => { + self.add("dfs.data.transfer.protection", "integrity"); + self.add("dfs.encrypt.data.transfer", "false"); + } + stackable_hdfs_crd::WireEncryption::Privacy => { + self.add("dfs.data.transfer.protection", "privacy"); + self.add("dfs.encrypt.data.transfer", "true"); + } } self } @@ -37,7 +55,7 @@ impl CoreSiteConfigBuilder { hdfs_name: &str, hdfs_namespace: &str, ) -> &mut Self { - if hdfs.has_kerberos_enabled() { + if let Some(kerberos_config) = hdfs.kerberos_config() { self .add("hadoop.security.authentication", "kerberos") .add("hadoop.security.authorization", "true") @@ -90,13 +108,31 @@ impl CoreSiteConfigBuilder { ); } } + + self.add_wire_encryption_settings(kerberos_config); } self } pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if hdfs.has_kerberos_enabled() { + if let Some(kerberos_config) = hdfs.kerberos_config() { 
self.add(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + self.add_wire_encryption_settings(kerberos_config); + } + self + } + + fn add_wire_encryption_settings(&mut self, kerberos_config: &KerberosConfig) -> &mut Self { + match kerberos_config.wire_encryption { + stackable_hdfs_crd::WireEncryption::Authentication => { + self.add("hadoop.rpc.protection", "authentication"); + } + stackable_hdfs_crd::WireEncryption::Integrity => { + self.add("hadoop.rpc.protection", "integrity"); + } + stackable_hdfs_crd::WireEncryption::Privacy => { + self.add("hadoop.rpc.protection", "privacy"); + } } self } diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index ece8b065..92e07060 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -19,6 +19,7 @@ commands: kerberos: tlsSecretClass: tls kerberosSecretClass: kerberos-$NAMESPACE + wireEncryption: {{ test_scenario['values']['wire-encryption'] }} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 8933784a..9d7d340f 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -32,18 +32,11 @@ dimensions: # Requires manual setup, see create-kerberos-secretclass.yaml # This will *not* respect the kerberos-realm test attribute, but instead use a hard-coded realm # - activeDirectory - - name: test-number + - name: wire-encryption values: - - "1" - - "2" - - "3" - - "4" - - "5" - - "6" - - "7" - - "8" - - "9" - - "10" + - Privacy + - Integrity + - Authentication tests: - name: smoke dimensions: @@ -57,7 +50,7 @@ tests: - zookeeper-latest - kerberos-realm - kerberos-backend - - test-number + - wire-encryption - name: orphaned-resources dimensions: - hadoop-latest From ee6645319902c12d223e4a1690d51ed1dde04be6 Mon Sep 17 00:00:00 2001 From: 
Sebastian Bernauer Date: Thu, 30 Mar 2023 11:38:24 +0200 Subject: [PATCH 051/101] Adopt to new secret-op crd --- .../hdfs/examples/getting_started/hdfs.yaml | 201 +----------------- .../02-create-kerberos-secretclass.yaml.j2 | 2 +- 2 files changed, 5 insertions(+), 198 deletions(-) diff --git a/docs/modules/hdfs/examples/getting_started/hdfs.yaml b/docs/modules/hdfs/examples/getting_started/hdfs.yaml index 17e3c670..30a424e8 100644 --- a/docs/modules/hdfs/examples/getting_started/hdfs.yaml +++ b/docs/modules/hdfs/examples/getting_started/hdfs.yaml @@ -2,216 +2,23 @@ apiVersion: hdfs.stackable.tech/v1alpha1 kind: HdfsCluster metadata: - name: hdfs-test + name: simple-hdfs spec: image: productVersion: 3.3.4 - stackableVersion: 23.4.0-rc3 + stackableVersion: 0.3.0 clusterConfig: zookeeperConfigMapName: simple-hdfs-znode dfsReplication: 1 - # TODO discuss CRD structure and present in Arch meeting - kerberos: - tlsSecretClass: tls - kerberosSecretClass: kerberos nameNodes: roleGroups: default: replicas: 2 - config: - logging: - containers: - hdfs: - loggers: - ROOT: - level: DEBUG - console: - level: DEBUG - formatNameNodes: - loggers: - ROOT: - level: DEBUG - console: - level: DEBUG dataNodes: roleGroups: default: - replicas: 2 - config: - logging: - containers: - hdfs: - loggers: - ROOT: - level: DEBUG - console: - level: DEBUG + replicas: 1 journalNodes: roleGroups: default: - replicas: 3 - config: - logging: - containers: - hdfs: - loggers: - ROOT: - level: DEBUG - console: - level: DEBUG ---- -apiVersion: secrets.stackable.tech/v1alpha1 -kind: SecretClass -metadata: - name: kerberos -spec: - backend: - kerberosKeytab: - realmName: CLUSTER.LOCAL - kdc: krb5-kdc.test.svc.cluster.local - adminServer: krb5-kdc.test.svc.cluster.local - adminKeytabSecret: - namespace: default - name: secret-operator-keytab - adminPrincipal: stackable-secret-operator ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: krb5-kdc -spec: - selector: - matchLabels: - app: krb5-kdc 
- template: - metadata: - labels: - app: krb5-kdc - spec: - initContainers: - - name: init - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 - args: - - sh - - -euo - - pipefail - - -c - - | - test -e /var/kerberos/krb5kdc/principal || kdb5_util create -s -P asdf - kadmin.local get_principal -terse root/admin || kadmin.local add_principal -pw asdf root/admin - # stackable-secret-operator principal must match the keytab specified in the SecretClass - kadmin.local get_principal -terse stackable-secret-operator || kadmin.local add_principal -e aes256-cts-hmac-sha384-192:normal -pw asdf stackable-secret-operator - env: - - name: KRB5_CONFIG - value: /stackable/config/krb5.conf - volumeMounts: - - mountPath: /stackable/config - name: config - - mountPath: /var/kerberos/krb5kdc - name: data - containers: - - name: kdc - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 - args: - - krb5kdc - - -n - env: - - name: KRB5_CONFIG - value: /stackable/config/krb5.conf - volumeMounts: - - mountPath: /stackable/config - name: config - - mountPath: /var/kerberos/krb5kdc - name: data - - name: kadmind - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 - args: - - kadmind - - -nofork - env: - - name: KRB5_CONFIG - value: /stackable/config/krb5.conf - volumeMounts: - - mountPath: /stackable/config - name: config - - mountPath: /var/kerberos/krb5kdc - name: data - - name: client - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 - tty: true - stdin: true - env: - - name: KRB5_CONFIG - value: /stackable/config/krb5.conf - volumeMounts: - - mountPath: /stackable/config - name: config - volumes: - - name: config - configMap: - name: krb5-kdc - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: v1 -kind: Service -metadata: - name: krb5-kdc -spec: - selector: - app: krb5-kdc - ports: - - name: kadmin - port: 
749 - - name: kdc - port: 88 - - name: kdc-udp - port: 88 - protocol: UDP ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: krb5-kdc -data: - krb5.conf: | - [logging] - default = STDERR - kdc = STDERR - admin_server = STDERR - # default = FILE:/var/log/krb5libs.log - # kdc = FILE:/var/log/krb5kdc.log - # admin_server = FILE:/vaggr/log/kadmind.log - [libdefaults] - dns_lookup_realm = false - ticket_lifetime = 24h - renew_lifetime = 7d - forwardable = true - rdns = false - default_realm = CLUSTER.LOCAL - spake_preauth_groups = edwards25519 - [realms] - CLUSTER.LOCAL = { - acl_file = /stackable/config/kadm5.acl - disable_encrypted_timestamp = false - } - [domain_realm] - .cluster.local = CLUSTER.LOCAL - cluster.local = CLUSTER.LOCAL - kadm5.acl: | - root/admin *e - stackable-secret-operator *e ---- -apiVersion: v1 -kind: Secret -metadata: - name: secret-operator-keytab -data: - keytab: BQIAAABdAAEADUNMVVNURVIuTE9DQUwAGXN0YWNrYWJsZS1zZWNyZXQtb3BlcmF0b3IAAAABZAYWIgEAFAAgm8MCZ8B//XF1tH92GciD6/usWUNAmBTZnZQxLua2TkgAAAAB + replicas: 1 diff --git a/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 index d6b86699..04ae9a63 100644 --- a/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 +++ b/tests/templates/kuttl/kerberos/02-create-kerberos-secretclass.yaml.j2 @@ -23,7 +23,7 @@ commands: admin: {{ test_scenario['values']['kerberos-backend'] }}: {% if test_scenario['values']['kerberos-backend'] == 'mit' %} - adminServer: krb5-kdc.$NAMESPACE.svc.cluster.local + kadminServer: krb5-kdc.$NAMESPACE.svc.cluster.local {% elif test_scenario['values']['kerberos-backend'] == 'activeDirectory' %} # Must be the FQDN of the AD domain controller ldapServer: sble-adds1.sble.test From aa15c3cd1f5c115b45851ed18a37eb3dba02e6eb Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 26 Apr 2023 08:34:12 +0200 Subject: [PATCH 052/101] Fix merge mistake --- 
tests/test-definition.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 66b82160..496c9310 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -49,6 +49,7 @@ tests: - zookeeper - number-of-datanodes - datanode-pvcs + - listener-class - name: kerberos dimensions: - hadoop From 4202cbccbd6f886e758c4e7db71a94ffd1b30f3d Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 26 Apr 2023 08:51:14 +0200 Subject: [PATCH 053/101] Bump tests to 23.4 and respect listenerClass --- .../templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 | 8 ++++---- tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 | 3 ++- tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 | 1 + tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 index c688f2e2..88ef2ce2 100644 --- a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 +++ b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 @@ -14,7 +14,7 @@ spec: spec: initContainers: - name: init - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 args: - sh - -euo @@ -35,7 +35,7 @@ spec: name: data containers: - name: kdc - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 args: - krb5kdc - -n @@ -48,7 +48,7 @@ spec: - mountPath: /var/kerberos/krb5kdc name: data - name: kadmind - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 args: - kadmind - -nofork @@ -61,7 +61,7 @@ spec: - mountPath: /var/kerberos/krb5kdc name: data - name: client - image: 
docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4.0-rc1 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 tty: true stdin: true env: diff --git a/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 b/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 index 736ea8de..9e1e7bb9 100644 --- a/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 +++ b/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 @@ -7,8 +7,9 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" -{% if lookup('env', 'VECTOR_AGGREGATOR') %} clusterConfig: + listenerClass: {{ test_scenario['values']['listener-class'] }} +{% if lookup('env', 'VECTOR_AGGREGATOR') %} logging: vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index 92e07060..85774f74 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -16,6 +16,7 @@ commands: clusterConfig: zookeeperConfigMapName: hdfs-zk dfsReplication: 1 + listenerClass: {{ test_scenario['values']['listener-class'] }} kerberos: tlsSecretClass: tls kerberosSecretClass: kerberos-$NAMESPACE diff --git a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 index 4935280c..b79634fa 100644 --- a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 @@ -14,7 +14,7 @@ commands: spec: containers: - name: access-hdfs - image: docker.stackable.tech/stackable/hadoop:3.3.4-stackable23.4.0-rc3 + image: docker.stackable.tech/stackable/hadoop:3.3.4-stackable23.4 env: - name: HADOOP_CONF_DIR value: /stackable/conf/hdfs From 4836e06641337e43ac522fa77ddb4f8aa23fb233 Mon Sep 17 
00:00:00 2001 From: Sebastian Bernauer Date: Wed, 26 Apr 2023 08:54:07 +0200 Subject: [PATCH 054/101] Increase assert timeout --- tests/templates/kuttl/kerberos/20-assert.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/templates/kuttl/kerberos/20-assert.yaml b/tests/templates/kuttl/kerberos/20-assert.yaml index c9df5ef6..f1ea354f 100644 --- a/tests/templates/kuttl/kerberos/20-assert.yaml +++ b/tests/templates/kuttl/kerberos/20-assert.yaml @@ -1,4 +1,8 @@ --- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- apiVersion: batch/v1 kind: Job metadata: From 037eb1a1b04d1013b6610627c05973ae36a45cee Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 26 Apr 2023 09:31:16 +0200 Subject: [PATCH 055/101] Only run kerberos tests --- tests/test-definition.yaml | 50 +++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 496c9310..8300a182 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -2,8 +2,8 @@ dimensions: - name: hadoop values: - - 3.2.2-stackable0.0.0-dev - - 3.3.3-stackable0.0.0-dev + # - 3.2.2-stackable0.0.0-dev + # - 3.3.3-stackable0.0.0-dev - 3.3.4-stackable0.0.0-dev - name: hadoop-latest values: @@ -25,11 +25,11 @@ dimensions: # Used for both, zookeeper and hdfs - name: listener-class values: - - "cluster-internal" + # - "cluster-internal" - "external-unstable" - name: kerberos-realm values: - - "CLUSTER.LOCAL" + # - "CLUSTER.LOCAL" - "PROD.MYCORP" - name: kerberos-backend values: @@ -40,16 +40,16 @@ dimensions: - name: wire-encryption values: - Privacy - - Integrity - - Authentication + # - Integrity + # - Authentication tests: - - name: smoke - dimensions: - - hadoop - - zookeeper - - number-of-datanodes - - datanode-pvcs - - listener-class + # - name: smoke + # dimensions: + # - hadoop + # - zookeeper + # - number-of-datanodes + # - datanode-pvcs + # - listener-class - name: kerberos 
dimensions: - hadoop @@ -58,15 +58,15 @@ tests: - kerberos-realm - kerberos-backend - wire-encryption - - name: orphaned-resources - dimensions: - - hadoop-latest - - zookeeper-latest - - name: logging - dimensions: - - hadoop - - zookeeper-latest - - name: cluster-operation - dimensions: - - hadoop-latest - - zookeeper-latest + # - name: orphaned-resources + # dimensions: + # - hadoop-latest + # - zookeeper-latest + # - name: logging + # dimensions: + # - hadoop + # - zookeeper-latest + # - name: cluster-operation + # dimensions: + # - hadoop-latest + # - zookeeper-latest From cfb645c8347665a1bb25306f10547e14d4856af4 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 16 May 2023 08:21:27 +0200 Subject: [PATCH 056/101] Revert example replicas to 1 --- docs/modules/hdfs/examples/getting_started/zk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/hdfs/examples/getting_started/zk.yaml b/docs/modules/hdfs/examples/getting_started/zk.yaml index ff9451f4..60264116 100644 --- a/docs/modules/hdfs/examples/getting_started/zk.yaml +++ b/docs/modules/hdfs/examples/getting_started/zk.yaml @@ -10,4 +10,4 @@ spec: servers: roleGroups: default: - replicas: 3 + replicas: 1 From 8e548d2d0a1b4999139b1a5e616df3c1ecfc6f91 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 16 May 2023 08:22:03 +0200 Subject: [PATCH 057/101] Update rust/operator/src/container.rs Co-authored-by: Natalie --- rust/operator/src/container.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index bb202a68..b4ea4b30 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -828,7 +828,7 @@ impl ContainerConfig { if hdfs.has_kerberos_enabled() { volume_mounts.push(VolumeMountBuilder::new("kerberos", "/stackable/kerberos").build()); } - if hdfs.https_secret_class().is_some() { + if hdfs.has_https_enabled() { // This volume will be propagated by the 
create-tls-cert-bundle container volume_mounts.push(VolumeMountBuilder::new("keystore", KEYSTORE_DIR_NAME).build()); } From 87e8b26eb740f1cd94ad0d55693716315d389039 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 16 May 2023 08:23:43 +0200 Subject: [PATCH 058/101] Update rust/operator/src/container.rs Co-authored-by: Natalie --- rust/operator/src/container.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index b4ea4b30..1062b7bb 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -937,8 +937,8 @@ impl ContainerConfig { ); } - if let Some(Some(Some(memory_limit))) = - resources.map(|r| r.limits.as_ref().map(|limits| limits.get("memory"))) + if let Some(memory_limit) = + resources.and_then(|r| r.limits.as_ref()?.get("memory")) { let memory_limit = MemoryQuantity::try_from(memory_limit).with_context(|_| { From 06ca693c1205b2474fd24a3d4ab5a7a37b3d20cb Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 16 May 2023 08:27:04 +0200 Subject: [PATCH 059/101] Remove commented out code regarding HADOOP_POLICY_XML --- rust/operator/src/hdfs_controller.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index eca736a6..b0aee644 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -466,24 +466,6 @@ fn rolegroup_config_map( .extend(config) .build_as_xml(); } - // PropertyNameKind::File(file_name) if file_name == HADOOP_POLICY_XML => { - // let mut config_opts = BTreeMap::new(); - // // When a NN connects to a JN, due to some reverse-dns roulette we have a (pretty low) chance of running into the follow error - // // (found in the logs of hdfs-journalnode-default-0 container journalnode): - // // - // // WARN authorize.ServiceAuthorizationManager (ServiceAuthorizationManager.java:authorize(122)) - 
Authorization failed for jn/hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-expert-killdeer.svc.cluster.local@CLUSTER.LOCAL (auth:KERBEROS) for protocol=interface org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocol: this service is only accessible by jn/10-244-0-178.hdfs-journalnode-default-2.kuttl-test-expert-killdeer.svc.cluster.local@CLUSTER.LOCAL - // // Note: 10.244.0.178 belongs to hdfs-journalnode-default-2 in this case - // // So everything is right, but the JN does seem to make a reverse lookup and gets multiple dns names and get's misguided here - // // - // // An similar error that ocurred as well is - // // - // // User nn/hdfs-test-namenode-default-0.hdfs-test-namenode-default.test.svc.cluster.local@CLUSTER.LOCAL (auth:KERBEROS) is not authorized for protocol interface org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol: this service is only accessible by nn/10-244-0-65.hdfs-test-namenode-default-0.test.svc.cluster.local@CLUSTER.LOCAL - // config_opts - // .extend([("security.qjournal.service.protocol.acl".to_string(), Some())]); - // config_opts.extend(config.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); - // ssl_server_xml = - // stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); - // } PropertyNameKind::File(file_name) if file_name == SSL_SERVER_XML => { let mut config_opts = BTreeMap::new(); config_opts.extend([ From 5f9d6000ea7ad13661ebd9a767eefdf924b89ebe Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 16 May 2023 14:29:19 +0200 Subject: [PATCH 060/101] fix format --- rust/operator/src/container.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 1062b7bb..d8bbffde 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -937,8 +937,7 @@ impl ContainerConfig { ); } - if let Some(memory_limit) = - resources.and_then(|r| r.limits.as_ref()?.get("memory")) + 
if let Some(memory_limit) = resources.and_then(|r| r.limits.as_ref()?.get("memory")) { let memory_limit = MemoryQuantity::try_from(memory_limit).with_context(|_| { From 0856bc5a52d5b6f732124d995ebbfec6febec263 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 19 May 2023 09:35:38 +0200 Subject: [PATCH 061/101] Address Arch discussion feedback --- deploy/helm/hdfs-operator/crds/crds.yaml | 7 +-- rust/crd/src/kerberos.rs | 49 +++++++++++++++++++ rust/crd/src/lib.rs | 60 +----------------------- rust/operator/src/container.rs | 6 +-- rust/operator/src/kerberos.rs | 15 +++--- 5 files changed, 64 insertions(+), 73 deletions(-) create mode 100644 rust/crd/src/kerberos.rs diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index 8346c0a0..8cbd5489 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -39,13 +39,8 @@ spec: nullable: true properties: kerberosSecretClass: - default: kerberos description: Name of the SecretClass providing the keytab for the HDFS services. type: string - requestNodePrincipals: - default: false - description: Wether a principal including the Kubernetes node name should be requested. The principal could e.g. be `HTTP/my-k8s-worker-0.mycorp.lan`. This feature is disabled by default, as the resulting principals can already by existent e.g. in Active Directory which can cause problems. - type: boolean tlsSecretClass: default: tls description: Name of the SecretClass providing the tls certificates for the WebUIs. 
@@ -67,6 +62,8 @@ spec: - Integrity - Privacy type: string + required: + - kerberosSecretClass type: object listenerClass: default: cluster-internal diff --git a/rust/crd/src/kerberos.rs b/rust/crd/src/kerberos.rs new file mode 100644 index 00000000..e049fd4e --- /dev/null +++ b/rust/crd/src/kerberos.rs @@ -0,0 +1,49 @@ +use serde::{Deserialize, Serialize}; +use stackable_operator::schemars::{self, JsonSchema}; + +#[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct KerberosConfig { + /// Name of the SecretClass providing the keytab for the HDFS services. + pub kerberos_secret_class: String, + /// Name of the SecretClass providing the tls certificates for the WebUIs. + #[serde(default = "default_kerberos_tls_secret_class")] + pub tls_secret_class: String, + /// Configures how communication between hdfs nodes as well as between hdfs clients and cluster are secured. + /// Possible values are: + /// + /// Authentication: + /// Establishes mutual authentication between the client and the server. + /// Sets `hadoop.rpc.protection` to `authentication`, `dfs.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. + /// + /// Integrity: + /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. + /// Sets `hadoop.rpc.protection` to `integrity`, `dfs.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. + /// + /// Privacy: + /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. + /// Sets `hadoop.rpc.protection` to `privacy`, `dfs.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. 
+ /// + /// Defaults to privacy for best security + #[serde(default)] + pub wire_encryption: WireEncryption, +} + +fn default_kerberos_tls_secret_class() -> String { + "tls".to_string() +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +pub enum WireEncryption { + /// Establishes mutual authentication between the client and the server. + /// Sets `hadoop.rpc.protection` to `authentication`, `dfs.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. + Authentication, + /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. + /// Sets `hadoop.rpc.protection` to `integrity`, `dfs.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. + Integrity, + /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. + /// Sets `hadoop.rpc.protection` to `privacy`, `dfs.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. + #[default] + Privacy, +} diff --git a/rust/crd/src/lib.rs index 431d1379..be6c455d 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -1,9 +1,11 @@ pub mod affinity; pub mod constants; +pub mod kerberos; pub mod storage; use affinity::get_affinity; use constants::*; +use kerberos::KerberosConfig; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ @@ -129,64 +131,6 @@ impl CurrentlySupportedListenerClasses { } } -#[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct KerberosConfig { - /// Name of the SecretClass providing the keytab for the HDFS services.
- #[serde(default = "default_kerberos_kerberos_secret_class")] - pub kerberos_secret_class: String, - /// Name of the SecretClass providing the tls certificates for the WebUIs. - #[serde(default = "default_kerberos_tls_secret_class")] - pub tls_secret_class: String, - /// Wether a principal including the Kubernetes node name should be requested. - /// The principal could e.g. be `HTTP/my-k8s-worker-0.mycorp.lan`. - /// This feature is disabled by default, as the resulting principals can already by existent - /// e.g. in Active Directory which can cause problems. - #[serde(default)] - pub request_node_principals: bool, - /// Configures how communication between hdfs nodes as well as between hdfs clients and cluster are secured. - /// Possible values are: - /// - /// Authentication: - /// Establishes mutual authentication between the client and the server. - /// Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. - /// - /// Integrity: - /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. - /// - /// Privacy: - /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. 
- /// - /// Defaults to privacy for best security - #[serde(default)] - pub wire_encryption: WireEncryption, -} - -fn default_kerberos_tls_secret_class() -> String { - "tls".to_string() -} - -fn default_kerberos_kerberos_secret_class() -> String { - "kerberos".to_string() -} - -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "PascalCase")] -pub enum WireEncryption { - /// Establishes mutual authentication between the client and the server. - /// Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. - Authentication, - /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. - Integrity, - /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. - #[default] - Privacy, -} - /// This is a shared trait for all role/role-group config structs to avoid duplication /// when extracting role specific configuration structs like resources or logging. 
pub trait MergedConfig { diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index d8bbffde..77f7bc7d 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -772,11 +772,11 @@ impl ContainerConfig { ); kerberos_secret_operator_volume_builder .with_pod_scope() + // FIXME We always add the node scope here, as some customers access their datanodes from outside of k8s + // In the future listener-op will work together with secret-op, so that the scope automatically matches however the services are exposed + .with_node_scope() .with_kerberos_service_name(role.kerberos_service_name()) .with_kerberos_service_name("HTTP"); - if kerberos_config.request_node_principals { - kerberos_secret_operator_volume_builder.with_node_scope(); - } volumes.push( VolumeBuilder::new("kerberos") diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index 45d4d024..f1c9a327 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,6 +1,7 @@ use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, - HdfsCluster, HdfsRole, KerberosConfig, + kerberos::{KerberosConfig, WireEncryption}, + HdfsCluster, HdfsRole, }; use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; @@ -30,15 +31,15 @@ impl HdfsSiteConfigBuilder { fn add_wire_encryption_settings(&mut self, kerberos_config: &KerberosConfig) -> &mut Self { match kerberos_config.wire_encryption { - stackable_hdfs_crd::WireEncryption::Authentication => { + WireEncryption::Authentication => { self.add("dfs.data.transfer.protection", "authentication"); self.add("dfs.encrypt.data.transfer", "false"); } - stackable_hdfs_crd::WireEncryption::Integrity => { + WireEncryption::Integrity => { self.add("dfs.data.transfer.protection", "integrity"); self.add("dfs.encrypt.data.transfer", "false"); } - stackable_hdfs_crd::WireEncryption::Privacy => { + WireEncryption::Privacy => { 
self.add("dfs.data.transfer.protection", "privacy"); self.add("dfs.encrypt.data.transfer", "true"); } @@ -124,13 +125,13 @@ impl CoreSiteConfigBuilder { fn add_wire_encryption_settings(&mut self, kerberos_config: &KerberosConfig) -> &mut Self { match kerberos_config.wire_encryption { - stackable_hdfs_crd::WireEncryption::Authentication => { + WireEncryption::Authentication => { self.add("hadoop.rpc.protection", "authentication"); } - stackable_hdfs_crd::WireEncryption::Integrity => { + WireEncryption::Integrity => { self.add("hadoop.rpc.protection", "integrity"); } - stackable_hdfs_crd::WireEncryption::Privacy => { + WireEncryption::Privacy => { self.add("hadoop.rpc.protection", "privacy"); } } From 5011c2257774265798ec4e38ded3d5776dcbabda Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 19 May 2023 11:43:54 +0200 Subject: [PATCH 062/101] Add docs --- .../hdfs/images/hdfs_webui_kerberos.png | Bin 0 -> 169373 bytes .../hdfs/pages/usage-guide/security.adoc | 109 ++++++++++++++++++ docs/modules/hdfs/partials/nav.adoc | 1 + 3 files changed, 110 insertions(+) create mode 100644 docs/modules/hdfs/images/hdfs_webui_kerberos.png create mode 100644 docs/modules/hdfs/pages/usage-guide/security.adoc diff --git a/docs/modules/hdfs/images/hdfs_webui_kerberos.png b/docs/modules/hdfs/images/hdfs_webui_kerberos.png new file mode 100644 index 0000000000000000000000000000000000000000..9bffbf9d97aa45f6837b59a2eec8e1029a4c9488 GIT binary patch literal 169373 zcmeFYbz56a*EdW{TM9*5+$mZdiaWHF;u@rQp#*n#T3VnugaF0et+)m$?h=Aaad(0Q z0#ABf=h^#t@4wy;@XV1MN%o#uvu4ejCBMBAs-_}~gGG*ohK7bCFZWIZ4GjYy4egQp z(Ib9btG~BK~?+5X0xD;q;FVW=Rz0veE+F$hWB!p4-oOsmbdEn!V z;7dvEJT!VE#a{SPCi$u9148u|5Z38nh}u3Du~aUzCtTwZVXppjwRZ~J{9j^XmOMNT zX;^}lA3k`C=_@%J7qad#lA4sZh?C-XnHr0l`~S87CxQQe5@^>2dJ=QKLk43Dirf4h z(%|TKebVquLbloqbu$F7$2;Ngz0c2*yQ}~HLTP>I|NKyRDJLcqnZ4+J-)&pl)U`4D z= ztIeu^q%Z#M^jRU~icG*yw|iy8eeufR14@6L3r3ONGq7eok;y z{oY>p^zt>Z8lU;$zvDAc>EzhI4m<8`THj1IZ*m5C0gH?mHQ_d?8k0tnJc?2K>@2+c 
z1O0KQ22}%^^=wtHfDlSI+k=Fl>SzDbP|NSZp(sRd*ohAc$v$_B&kAxVMg(;a#F|(; zrlmFI9NNo}lNIk3=j>@5YVIl1!{?j1tj~sw6lTded3kjPM&c?K%OrGw#U6?5{1x}3 z*zf;P-pXGRw}-`bhIl1Jmvpwx?c`acpR*_So7CH^uSHuTx}xDLGt=TJqnImNzxX!d z@YXjrHfCKrJb$+~!}cyx80EF-fAKp^Y3i0~%TIJgFR7p9ClVg{E3mHm)?2~eW&~W- znJbZ3j8K&y+x{_ABUZzlcG>ktUPh|Xo*z)(S7&`Svbl;&P^wBe;>u55aj|?qs>{xr>HI6_ZkGc4}&>DP)9dSve!u zDkFd)Wq5NocSRb*W~sEu8d@A=0mZyNDF7m_r+_LPa7TWQFB zDJ8SC`?39V9=;EVju+9cR!rHR zEufC;w7;}{OZF6BQN1_0q{+Sj3`}I#?jNL`rL1apftBuP@*g0IA5xTSMI}lnNC9HY z>IZgh6udwai#%0~;efF6P0h%siCf309gxZcB`1`lI8P*BGU$t9V6;Ilr=Vm|Y{a3V zUw;$>-O;pPYOM6`KM?c)EY8)2`@1(8=$)KZbbF`zfB+h>!>R;Xn6IDTWWLt3?j9%w$GRC}ZDJ}zB^S$%>CRwy|l+{}KRs%b(-tMS9K~w2F zF+U>`f$91M(XQ@&T81KUC~EyX-$NuWAXu0Lg59n_Q6$QPN`Rx!u6(rTVa**9nNZ=CV&UKe_1mo zQImgWawP1Yf)F*Y(5Hk7giIv4fLg`8!_;I&>xpdwgI09dWX#Q>_5nEzSUrEfWgK9$ zf3vpXD&pq0#RkjX$s6eJA16APV;>?>9f1La~86l{;S-6eqATGqj2ylywji-a$XWKKc|&O5%1hTlEM#>Ab< z{DO3~itDMGw?)7&Ym>R16vk&bQUqas=!cr`;<@b>D-}gO4z!(m=RBNrJX$rT4HtD) zZ!N&NpJ;UMCB)qhw9?FK^(LEST>Q9KT*|ILK}0+n-ikXk>uUXa>n8MC-B{^HukEf? 
z8^m{;)iHN`Y0eMHtrRgBS2^;(3N01b!^{{IwLd7g5%B<^!!pn400p;o zy11g|%5<)rEhWFXlWnTK!I9ufa-Yd&vaU^Ud%$@w=N)ApVrbJ8I(R4S*3ChiEmx?66{&pwolkSZ}nk#QS zGRAvPUH&_>J^RG|!2N5w4_p~e>DcD43we)+(^^n?fU+no9OBZKVyGD-$=)->g0E>2bMNb|C6eG}_)FGpL>3*9gv4 z*00-L9&IfYJ@S(}Bj%hyzr+JG_=#LA9?aCr5-|A{eZdoR*5uaN{;*Z0rdxmQstez< z=C}V~jB++%)`k^3rH{iC`m`N6q~&|X+t6t|-~D;xNnC&@CMMv^vu9y-9MNmyo2pX; z(b%wcE8~*O&uAl!VVY-*8XVOhq za35L!IuAKN7|^frb)o3aeateRP)4}Hx|mjQg?yl)5Cz^%NV1Jx5ax-)W|=;wq(snEw+~_6*pCGzW2C5j| zlfrwC!N^1%Yn7~bcUDoX4oPNUp=$|C=Cdx82Usu*a;YXI=_U)g1(#cTB%$=@k8lp6 zBcZ2SZ3BGDsI9yyL@N`!kw~bDD&aE}aUaXh8?5!);!Mgff>Q`s!;{@m?)4^HBBid& z^5XJYd|DcS4|?!!B#*&vfihj|1US5K(V(h=tkTE_kelW`qL zlbcwx+<;_801_gvJ3Gsx4AAjt(l8DX)!*468yFcGZwU&ASJWkX*)#22UFt4$NF(;c zt;k>7ey65(&RDwp*rEZQ5DP*1s`kg_rNH-t$?~4%yk>pi9o_fBl{Ho@tI8<`WD-?m z`DWyxd9_1{5D;%w{Hj9CN?*L;(tyj{M#>gQMf~iYRNF=Z32F%L~dcR!n%5S*olXHaV{<3&mN48$USo zZ#a_#>1*88UYFM0&YEexmDZ|$&+m7=6C!h9gd5_Eyu+-($0rDH^$iqo~`fm3+)vg+^KHWnzj+y3+YxAZwBw(8ZFh;a#~HubyCyl zmg-zn9=$%=nQuIUBY$hcR!eQ6B;&FCoWj8lj98kg$FD#jMPeQ6*l1XsB@+?a^9NVQ1BSsRGMOp zZIi!&Fgg^hXE2`qtOu>xg{iV!TiN}w=seDi2IMzOz2wM&XJL_|nws_V2i_Zv1@QiL zF8G@2pjmQ$I^~^OB?n`@?wx7?wY}Z%S38^=t2?N*|&>bVbD+=bfsG_hcw>pfTrP@#KK`oF_{*ybZm@(rJM! 
zKZu0aDr+hg=3hlcgG&f~kOp(fFPL?L>xYzl?(hpvT6aNJG7#$#K<&cfsd8kzxL*a` z0)-o(tU^=#G6qaX)+nq|MSCe>d@$!QZKNIg=L~w8jkY2Y<`-g%E;5t*sJs zEx-~bs3z>YBPys_ujL!qn_iwS)Q%)?+ANN$(k-h(X>Os4otKY~mpccoz+t99uPtm5 z>D|BL*w@=Di;lRMoFFOzCngr~2bAg@B=fs^rA+Z)OV?UY3GFty7gQY){MpgTyiXW1 z#+o|8CWW(%k#4$!n|f8gc^1=g#FT`E$Vw*#75R7*(%p%oPW1zpBCBGZ`Z|XAA4Q%m zE?#xWV` zj~>f|CZN20R=e1c+XbkCf{M27!W-mI&(L1&jl@(?v4xSh@+Ho8LC58J;^4-_q4>@D zY04V7jB@mlvE_7X-W!!U@^8dl#>d5#z-*>=s#&0n@}Z%!=es{V+>Q+W_)KS>|A~8- zZyn0X-^nV9!2L45qx{HjT)*+|ax|`%qhky%b=Y)C&hofKbRLIl_x&cvSX?K~#5WIF zHV$z^m#zE4^N5J-i=_LsGT9JZjYJhHK9kkNxf;g&nWs_ z$`OaJ+uqiTRgI{0FRw&oGo%DlHflTc9_(-p3-ceM8LX*ji)DboCtOa?&H&bW$>x57 zvOX7wXWK~?r8h(K^NP`ur(knKy-D>Q28p~y7~`?T&zzT3tkm1dxe zg(V+99LuJg#;b^xK6G70l93oA0K0?_fop0OCAL0;QoqkM)67*MXl+4B`60wcPLcHUw}s z%tgWRdNL{Kb_iB6SugET^*^ltr>SilU_U>>3+(zNcj~fN9h}r;7m_h|h!@|RL}u@iT3$`eH{HU^EJ#cYpv3XqKxcnlo5$ab$|Qr?^sRyN1+=rTimr(@ALh` zhqU2*DZhBjs^mZ{!WuTwD; zYGL;MFHs@m4`P2&7L-5v^SV|Ih@AP_)lbgt3zpvlb| zRs|d%%})xL>AvThD1Vx9|Iy5AuzR_QGji1H;1r6_qGw7nk^y z6t=`cM(9DpyO^h`rP(hlU%qn{qI4FbJUu-HN$1-Ni2e_S7TG@(QdKrKH(7XjrM1JA zW7XBGbnWP%XsiiV3Mz>JS$zEXq$JvY)nIHgcTw8EbGIZ&<~@mzf^KM_rY$8-p)1yo zUwr@lX)10o?X2DPC~rx3erhVEuz13YKi2F|`|bM#&!VAk+y%Ei8U`V7I0{$+UVdtXzf_YWDtrnD|EkD5;s# zKeUK3Sr56guh+N#yRq$FQ7AH~0Fgk42qUW%vj5D7=!y{U%+oRA|{e23c`_l(i zM=7t4+S=lE7U;-`r4!EEfbz?1^jnz```BcM?APWIapD%S*m*v<=x9yV#*$^a%~REX zrLHRzLdeOai4x;liIATH!qG^#h&=L+$T_RC2UExhZ%EFPBzkxhF~+;(Slr^O{? zPtp?y#AF*Tl&62&)AQXJh_~}*>MToyg@&H6voSHfhafIa*#F&v5hrNAs*avsvPI*! 
zni@XkjMtsj5d=~(p7^C09{rW(v4lsAil%`>rJmQ+w^A#iyzwnu3L*E1MAK<^wbdv+ z9%vd5FOKf1k{6IYp_^(?c)=esiN&fFGbqsdx`9AK`3EfDj1xV2+VHDP6`lh(n8e>uPmb3Pm=qudg zQqPlX5*yZ(zIKRoSkt}`6pKBRHs4X)!M#;a+E0rkDr$naSs4$9|$jhbXye==F54Mgq zS?^D19(QTrZO{Jb64?`ZtsXME6>1f1k(G*V3~SDgBM$+)pk2N=h#u5hG*4t5``Nbw zyxezkU%$=MJ@PMelkYw>lf|jsnJ{=ka?B_~Az)kR^yxiGP$&wsYj~Xl`)MLQdiali zeRf&+7Tr4t);0g}fs`(phks@5Bz;)ufxzg)sL`3Umq0Ih3}o(SY_f6Txr{j@QFk}B z)fH`pb9kcHOjatHV4-mmgbi!$~ zEn#XAS1h^AA5dSzT0C1g+l4X*m}js#MdnBNsa8*Qu?~_KD>mUl``#RA!HNSY_l*At+CYyE%?JGZxJ{ArfMWGR-pi4AL88u z9m$EnAB3~gVl~z$ahfi!y+-0NZekIW>`IZ9NOO=|JoRZo_AX!# z8j=AM^dSHSY!mzKeV?#%@Xh`5#dzn=sI74RehF5V6>Ja^09~)7v7D-Cj<R*3+jknmq5=OV`#lS1o_6Ulp258ePx81g_z-@Dte zPPakcM>Hr=ER&T50S&lD$+cB=-~loKi~G!s)_b+Z>b8k(GJSB{X;INop0Sk|@)f_| z3O>i}@Ap8JoBG2WZms$@WLg3TQe~7savXz(ear*^vS~9HKLc+*StuCYx+|o~mnhd8 z%z*OpUJ_9_gb}|J%N*V0fQ8LE6#6X`cz^0~Z)9j(+uG@V0RTI#kq+xt8HDFc+cjHiy=WVcIb5-mGw3SeeyrpOKUhS$A|_mz}uGc8=O&w@r;%LpUu zvu)R5Zw(>Dw{`9@(QyaI4p?B*HvkWl0Hmd)nG_2UDVN`%!US zVOXFh4YFxA_e;qZUAk6LBnilS;WoWH{6^JM9<^e-6&~cd6_@U26bu|lp}pz14%57h z^is2f?Cz9bwn1C&mB%0?J`DNgi^&(~faI-dOIgri_i>gh5 zKfO+c2fsx>5Av#m!};#g&9Ak*5M8Asxz+~Oq@0q|D&nz2Cr>VFZxC8%1ugK2_}BTw z++h-gk18>|EA>QqAzIPQI`BFN{U9ej%yBKXO!n}d(c z45N*lBD^IlL4eapx5k0KB^6*7Kdo1| z7~5>xTjCr@zn3fj zP^mlmA)xtbh}ntyfep3Vk@p^a5zpstwr4^Q{m!q1vxqzsjYjs(c=4tnTz!zc*(_Q! 
z1UC|hxtnPu2#d?;j`2u~Lg=KKQGFjAIr2}v+5TYAOtN>w5jpAONCG<-F!Pkz4ZFG7 zt3t2lzdZW&Nc*(0CGpUjjH#LZv?L%h^rIvysQhm&fQU?i_3|Ab(q*H2py0|mzdKP_ z9I97uofj>M*ela03&+7{`&W#&~hs(>@t8(lOpB{K@)?;Q&R!x8$xN^&f0 zEOvz)oDvygQ~(EXenqKay7VNH#xstIs3xVH-Ae|@wg?VnAnVW~)MFp0(<^mO$!Dks z5k$*r(M9X2K*`m$w45)QUoV118Rh$<)EZ*{gIJ>q{c$jWEIP%3Z&> zPi)zpb{l=I_O#3R+!uu0_Lj=tp$azs(|*r{ak;Q=7)!=JuUvi3kp8Gb8y2s*#dA%0 zrIpam*N7KfXt-%)NQfvB$e#u;_LgnNimqp*uzLhb6Nl$^YJti8lOlfaqK=OG`!OQ# z^&;He`&G+f8e%4&H;YO)@)eR7MX7lHnJ4uI494M#6w60>Ss_t#O$)Szj0{R0OJy*J zxesp7*c-L-bE|r*nHj{C-utGmE^4TCV5OByW4O(fgcV;=R6Tma#_W-S4^?7e46HGOgs;0vVEXDKxad#a+okVQ!(p0^E6j#b*2K5ydwb;X%3e%FakrEo8*6bI|Dt;>SiL@_mdjpX8P-;lVDQ^#abvTv}+0dnfs?Ed;?hQ=NCUaX9@GOGZHXM}G z&f_SjmkI@zK(+QhZiTIHi~}n5Fje}8Br-G1ho(mYfr_ej_pkcgLz@;g0XF%*`OEmH z47~an3BK7C^~Tv0&6D8Xcr4X9rz3jVpKpCvYu)rg20R-V4Ovrj)7uH=^ZaXZz_4L~ zyp~SH!C8?W;NZ#P*wrmksY3^vpD42T)1))_+utmgvhzBHI#nf~W!YR^fefmOL6eqs z96|4VQ_#125VLZc7`q2r2Z^dyQ59t~&}+=*`1BWegUym3%dhL2Jt`a?D}cGdr4hw1 zJ*&SNqp+R3e8G&}9N>eyQ%r|E>S^OFnQOM7bRiomxZfI1<1$h%dI?d5U(I;*)6!7< z9B62;cLfNG{8<7Y`hgAMJ#<>UwFjNyshBTGxrAf9sT^Lthel{e*^967veS(iHq9U_SuyL7t)7r2OtlC_r`};;i^nRJ! 
z7P=jL-v{CfZ(v@$m}0;w)38Oe!CAaJO&K7&jC$f0pl-!UBU8Jbuby+Hp7X1y=pQ{o zLwn3BpjwoRO6OI4L3(dq{%Ez&NUh)i+aXo1S!5+}6S3uI8;OB^I^_ZU2?wDgr&jok zjf(@uTj%SghV?3CdUari?%rNDVHSB3LF@JLXYD&Rz%%p5A>Wu$(T{txp}>4m0QINa zR|J#w9yC)2|73ulUF75(#~x?oq#;CqldyKiEZASym$FRjpvPC5w?(lpvE~5>?GzF| zO3sQJ=C;?lo!(@aet-J=mjCMRO*-Sh+>eL^b^+9%W9xBfKdvj{s4Y4e&m#T5VH3Js z{eSzSp`of}{>?T})e#R-=z=*Z$t}Oc7v88Xs(-rue)|TMhrcjKv%njb{O4MG(C0si zG78lQYQ__SVuuI4eDTjM$2TuSi*wnD!%MYMh-~%a_8rq?5DJw*VH4flPYM5ZA6xvW zgnGcbb^ryy?l^l3JP=Y&QNpsgX0qKq3Nje-`E;-98I$Y-IF zmjidM#Gr7FpuC{J1cjF{D{^hMOIli5)}2L(0=8pLQ&k?y{O03UW*JSo7jtP$`vYl| zee?pD{{?SGEKpeqD@MsMQ;3qh|DT8Z9SuS#z+qs+x!<*!QJ$%v&QSvk{cX-~y<8Lz zs8?&Nzq8PO)tv3$1NHfg8YMVj^Qk6txQX23(fZW~bpHdO_{zpG zTXh6pr{}p$Tu_O+3f-xVkEZkDDnr@T(H*T7p5cfG{EH?0_U&q30cmJJ<(B`EBWiOQ zn7Mxkj*hdZKmL0S?SJk5zd#8Z+Gl)QR6n6aGjkv-r+|tq2qc#B-(hM>sOLu6#WMZV zz0=j0XrJTW#ytI<8lMzj6iFdi`vlHUDpCLYJ2U+7yEJ8gWdKm;?{dhyq`} zXcuNfQH1C06?xZMrz`)rMpEgYa9!$J#>vc6m3KoHSg1ZAQ%-XxHYWd z9X49#um2D9_T{hQp`l4({;x`~|4X+6hfx({@wzBjzo!YzV7Nb?LAZson4GK#0pCm9_Q~(6&@YjcxK#s9u6AG$jaS9XMnem(4wUDp za?;pHSlyu_8@Y^ZGs}uEVzo3Gn?fIw*8!wl2LtUEcp`WOb=WyLl({0j-zX+<0!mQv z!0t7PW*PZWkkQkTecltwcu=yr_Y_*HUt5K4j*5=^F?-_9&C!<4YOcJ(sND3=EIbx* zdP9O&%AmH4Hq;V-{xb6I?e5-Lx;fN=c*9-&rf*+waec_75_R9MR+Xex`mdl~9h`*{ z@ViX0%TG{=d#6rf2xkdlzHslbXEnQ_K2(52{~FnT4QWZR9?xtJH4n<$Oor_vwx0B zJvNv$SfTP6WXmm9s@D(R82T0(9qFlLWwk>tW*O6?`Xlo*T0kTf?^N}?*DI91pNl+Gr(me!wf^bp>FS*bIYmWi%?`BIY)8Xxu}KRw znfV6Y?Ql}w@8(=xO6s$zsVUz|940xhcSP@0Lkw6@&Y;pXZ^`dSJkKI`FN#{+RW_Ph zX0-ViSs1Uyu!g1?I9QxdlS8lOm~^E}O#D<*K~;6y=cv_1({H9Tx^H>exB`M0P=^zk zQ;$19Ei4`4nkRE~(b@`~-Z&q8$c#?(+{yovrUhl~Pb>YACu!N8;0e?Vgk}3UxD^^9 zeqk}zm050hMx`IW27emDio34A6FpItf$x?KmZX+T3M`2H^sTx}nKHs#_|P`N`6agb z=9RLc$uz&8XyJRW!$0VI=EL)boC5EQ7HJTl%O?BqErKnYx>q*lJHO4;_O*HrAqmOW ztY&dxA#S_G}vT2E*J+)OtClXs`LO>ymR-XjOSTr50+_i{=$eDhfe^Q$HPur z!#v@_d=A!R6k{DC4367MfSEGW^6R%s7!{NMR!d*hfjRgoNv!aThltwg)!CO}(gc)F zmFe3IC-al@nSGxs(_NcAXpxTTo5(LvR#J*Mq!Ih@6$57?&F{urAVaI!<7j-7BgJha 
zUWZkPxv%=V28E$Ty^kiN54cBLoUM;07qEOOA2!>h1t?ISp0tAUnH_)h6Z3CD)jx^X zJyGz6*G}^XbUa)fUvtN4xjDq|@RO2~y0h&nH{^GkFcZJsc3gDy(-B8b0%YN$F9OQ% z0rVN(bJYevqvg{#pj~+o#n!1hZ*f?j_Db`79!v$J;DQUOvApFzxi~$gwX*foxo&v6 z%lM(|S=s?n5=3S>!V8dr{AqMEVT%=MUqIcNKKV7RxSxr|V@Ix*1|gY8=(7lPK-T#G zm{f%+tdk4RwK<0^WgHuGERvQe`DpR)@JOjj&dMsI-(LqT49yhl=DgpI-$=AvIGvts z+(tK5L0)Tnz|$IZ%CncWWh9T|&GBqHrx+ML0|q5!%G~diglq{;=u5WcQ22?>F($DrTRHaLTk75^ zP+zx{PtB|%(=ckSPn!|j^YL>HT)K@CI=gv|M>}<_>7uTzz)|lqqh@(gynN<)yTpL_ zIqSKs96g=OSX!3LK7q1pw={txWwoZk^7g(b*7J=J@{r9o1|g&7A?$-|yU+w=t&Y5x zz+)|&T()vV;w}bJYX8C`JA+z-HV5%S&BVmFHip?3X`BG6`<$Z>ymnt(ubtzpFAs|b z?}QtH1B>t!|2b>G-aJNOWwidOc!<%j27>d!2pAS*?FoO4kAQYA^oCL}ZhE(5HmRw~ z7()hFIxV=IU*y+z5mvK}z0~6p&QX}pY5!46Ds=e|wBEi`Y1*wo6&B5<_h1KdDLEEK z#0JZf_K8k!uN1%h#r*gY!sgc*jgLUQ^+;-?xBn#ba0+j4^$wV5hmiPrF-6Ma%l?>(MV%A*5WefJP;&qIr^D&|vQAD;1;CApIGk5ForTBG)dy%i z79&nhhZkron7O*`JXkgz)3I%Wt%F3&>d8dFyOZ?+`D}f!0OIrS`v#Np{my#W5F-}A z%B|5|v1PaI8Fex;vh)p&>zf;!>xsN{U}~<*wqe7WtjYJ8#!m$GAMgv zFfn7C+vy@z*xO{1Inr~#5`QbtZEvcn+QAsY?;Y_z+Ju;Hz+%46F1hXFmnA=+1t$x# z_cAh8>rmAlMEUg2ifqW<;RAFPM39`fcknF|DK;1u7B<--9U?YZAiG3+Fqg0b;~bQ~004?~+SsbOq;qTBFLhhjs|eA8AhW%_i=yuw+U zov7mA2?Bz6kDcer{yW}5L@oBbEuBQ_11WIx-6O>9SEAF#Ycgf;yt)?L`+QX+PZ{au z((-FA4{%#!h9G+YK8t>*YBb_*%;b7MWvOHr38trFR+b69K53cnOPC(cH-y9a3mIKSp$( zZNNFzy`*R|GMw*-D^bsAT0iokORWPOPfutx>-Ul!k$wMo`G!jkUu#r((>1Hs<;X-j zEc_g43lR6N1-u*g;*PJftHup7F17ip*ht0rV?a(Jfj75xSN0JY+jf*k=aln6L4as? 
zR&8A6%w-|zM<)2U9#*Z~A$>?Zz8-+~vFc}DcZ$*12{DMKmNJV!sBfVsN?%1>w`2Z9 zU3<;$9Ei+wxM8#PW~nZ!5>p3Otfh*8U>kViQb$0;xzlQGB>>z8!y8wZRwA9-h=9r0S+nfe1F+J@5iZ{P1gFVZ`UwsvqA}Y~#3T?^b0{{L zcbaM&2#}8X&=D5$J?A~-_pI-TC9EmF=Dh{Gk`gMY*na&<0q4cDJk-ORgQnVa;O_=H zdU|n1U|3XCLaxH=HCW$Vg|T@BEZDDqpz?81Pac))!G_!V(LzBU)ZE4p1V9|!AQZK= z90DTg1UM&Xx^1X#N-R1d2=`NaLsx0=1&48oVZYZYR#C}jnn;N<4hO#fodBI>fCI#J?Hgn7C6Gw z?qvIA_8MB_p&DpyN3>sf z+k0248cnsxbb8;ZE(ahJFPcP(D`9yvu(p1$o15er%XDuE@VTn7JP#n=v8%}1PQTd1 zb^9bS!u8ees8)#EMoOIPB8{|LuXl-fhMHoS2qFMXM6mu(v^4 z*l;^LxYq7Vs{tr)(0eI+v*V5RgYt_4n#Il^yx3k&G7|e`1sPQ>fu={RTRzinoIp|(bZnXR+lFkkk z=w`L0B67R&7Fk(eTVA3sRPtJ0zmB0uuiA2BnIREWhb--ubkML(X1OmWDNzKexAvC62X*?mre7F9GCy?|m z%yvdw$D`px5P^XrPdw30NJ80_q9~q4-vL3ko-(ISfq)xPOy!$p-%4#Qdx&CMF;V`6 z*BpG!YyFd!C>9(%K!&1zw0v#@AIaD_yFa%0OD{h z^bIqk@FsF`65=B$f4ySR8H~dyk3%6ad0qqv4}=K2Y+C>NrmA8S^P#~Z)eG1f`ISIN z_ifi(3m&CZ5kI{S=lr~o?;t+!^B>tZObVb3<9;$@DJS^ebRq+lnCH)hjK29+zvkS4 zI&qIiqGtcE#RhI^2!n%?mWJBow%@WyO1Jv*GJQZAVli`Hm3Yv-Q_9e=q08i1h_jeY z63D=(#yY-U#1INd^Ggf6afu}!xl#A`Yu>|OMk3r+jGgOt?*dS@(_&$3zQ3G&Z<%=Zz@{eMrzQm;>bnH-Tl$q;q6O? zZ{NI+CQuw497V@{b7g&+vU%*E9jCvBG@N;QGKt3nG(a(Btz($KbEOD9Ilb)1EpuF*!8Y%Z7jVohz$oD;lu5lBkHvFP0ZPrIubhvAbS zKQ-00XNOLvNnW3H>*=N5lJQu*h?Z>q2^}ju;CpbpeR+>IWJXy#vpF$2_O#rdDp+Ll zwtGSPXcs`Ryr9*ngPt#;5l^B6 z#L_YIX>&iyP&2wIv`XjDWDXGrwt`1M4iay#D~mo9Jk>X}^aM(T%|D!`&XRd7SC56x#LnlicB4tLE}nQVx(L;=3l19 z**N_6Sp}lW@9)IklozUHJjt`tK~sx&V{{K z=PiUiA4das?M@o_w|-h6-Rr1m`l2h2W^9Ptl`f{@OO@@qOkd})(wxnyt8ox(ZqOTv z?#e9ul3Apt7y7+RG15@@=yt)CbaemYa1`WG@KTqW1zLPUDS=r;^A|4*A&X7T>n!@BBJAskI)YzrMDkr_{SDN zqe-^UvH)+L_!3<{E;8|)1AcgTaI4_V?$7=Vg*OE)yqF&KUL5MCocZ|j}M9MXBN&Wp_Y*hdJ52f?%hxv4`~+hdWW%jzc?w!&!N(fH$FUhl3A%4lV;O1Tokc? 
z%k&=C{aZdA7OMef`uez?7t!0NwBMdjr>jaJ!Bi@!p47Q_M7j#r3-5VTKe@SGfL`Z@L5$*?u(m z_2M2HEZm^)%vvPgWsamZh(wSJgSyswM@4I_Qc#~{{LZ;{&KV3$Os5NHgJ_=TC$DUs z%hIoh9c<%Tc|(r(emvi?Ilv?(?)>@LOeJBe4&xI-McPqGOj^HpAcB%c)u=}&HsE}fuWAGa&p)5@hJO$DaL z1^Ca6uO2#(2lS8HL%!(va!g_@cH20Jw?D-|qspQ23>C}|H2%z{nE~t_Ah|{&H-!8v zBgmE@G--Ke1cTF)E^9OvzvpkVwdc5r?+@-tVcU^Q(jl)e(MpWHzhL+m9bcaYowsFr zNO9282d`YLkV_~e4Irf#xD%;X4(jVQ#>JA~Kl?bavbNkNwGycFL8J$VQTc#;VZZ(L ziqn#R<5M39-U!D=>Rd}|fW-Cl9-@qsp$|c&)s2&fNRYIu#9)Z3j7dgwRGwr&uxGI+VC4_>@sCH_-*e&!qG+kF-1L5hyui%a zKFUJlj3A}ziLK28r*tUlckXm48OU-lG4)YBei}t3-4%+HyD32M0vO$6^RlyLu8wFZ zh8qv0P;5#2y9Hu7x&x%~_C&w2-!PJP91?nRBmD3%g8Ic!RwNud8uhP^l!xzlSy#C; z5_L>}6+k>H;}gbqF3+|n@h?i3BB9FlFZQ&DM(z*04-)3TTj5CQhl!L;4!rQ2OijzG z__r3I@vdY<`pJt13(5FeVoM~G)CuLUZ$GAJH4KA!y@qs`#2u5foe)#h{#3fm^1|{? zZQ~340*_Q5J5uz3uik3gOT6FwLGywbmXUVD%N&N6-A0^zHqp-nrg$RsabQ^*D}rljD~kk$e(d70jz0ZuaU=(e@=c9+$Ods-B{EM@!}Cu8tgeYNT#E6Y=0~7%>@5a z`K_5{>~&1xh5ND+9c=?m&-q(x7@Amdb$>aY+4?bg6p&<0Oo-CXinhDy<$G6s(xB%k z6Vp}8OiP=tRkHMHvM<)pdU|XR&cH-%F`6#E0L_}te!g{faZ0q%w*H3tL)EwX1TKu< zUl41E?NS9TK;n#ro3UM-vh zjH?CqsC1a_B4RJQGk5YlR_+pwvc>r0kz9~2)yo4L)g!yKqnstR@bGGSi@}#A6W^+( z334zRes8UH=M$n^DC#?&V=`Q9_6m8gKYIOzc+MM*87tEuxsBJ*z`*l3D+KKaj~Bk; zeNByG#nhnurd*4&w`C7L$?hEnO>TL4l$&wIk;^(#V`<1{x~y~3v`YC9R&LnbV0CCP zzaCD9R1#7D`ec^r*$HF^?mYo5gJ9p#iHqmVbKN3V9Y)1fm6Win&6g}w9)B2g_4a;( z8OOxyGo_U7g|M2J;rtv}P`}lvpjX+sgv*{#_i^a4n!(PNpS^&zEAsd(xRe~d*d5I& zu2Z)SiM}L{Ytgkm9znVutyq6V!&O*g&Bw-O)%q`UB$__ zSRH+b=wN#wEwPOX(j1}uj;7bpmZLk;lEa90T<&@M_5=WTC1yvNbuA90(d$_jl`y=zerX(F zy5186#XOo>0q{G!-+1Geyk785TViUMl=hE)p-x0ZM9B4i^vAo69tYj)O=4F)Q89Yi z*X^>3m0#1~-nE1=$@WaQ!4Jw@Z0pnjS|M>2H1-E1nqLc|J@8yUJY@F7RsN0o!-a1n zYk21Ni>?TAg+hyv9LMAQjJ-zDca_z$1yi5YA1D$LX)!6_`46SY&LyZ>S@EqIFMkiC zfN%Ae*Hv^QX?{q%8?o?)r~ad$NA>5>^YCxEK3lC7QDLh1~lB zxp7d!j+cg*J4rZ?j7fr@32f9)-{fP~-yxkOKS-%VEy1aj#puC23^jr4IKqS}7^|9U z-{mC}(7bMl{}XqV5p(_=L0;T0->W#uzrI}bsTjsjgETucvgts2>D#WnZkpbncXfB? 
z`OO#z3f8N1y!&~_c#kp_Ti(p%k%_+FgwnJ-j0!qb3Oe4vOdJYwvhr}Xv~&qQxC#^r ziX6&2J5<88OKJq9Q@0$TM}55P5ff&6Btt+JmMa{JCsF`qoV9dc&>!>A@G67hprk^i z%!Q?pRY=%|tXM})>;JI#R?%@R%c7uI7F%F3Sj^09v1Bnbv&GEJY=OlrihnVFdx zTdWpM?|tq*=e{@dIUje;y!@z@U6mCTnHdoo5g8dB5)YYKTS7yjn%8SFxS<%O-H&~P z_GqZC8JZtaUo~%uQ5$syI!>2gQv>FIK&y6LT%+#UUhr05)HYQS)yLTs!cYS6}4}4He%HBZ47-B$W z#~7G0%UF({(5-Gxowwmvt?fl^irXPxzp)(H>Vue!wiC^RVkqji(o$98VU9;Wwzt&l;Wg_wO@4Nl?bAz1fZwH= z*foamxP<#zkO_-bj?qnM(-##w%wPHg12s4C@=s4a;d&4z{<}iMVDhI?R!r0nTs#)` z4|nI&2y#Iwah%vl*f!Nxv{{L~dUF2ig0FXS*jq1=zsCW=C{{6KWiXSrFOii8@#C!? zh@HAsE-25ga$IM)mKNvNQ48Xycs(;)sL<0MCvNe4;w(cv477`ay5DL&o>ehqmtKC8!ty(CZVgN)Aj(k?n(m$M1o5vkSkZ{^acAjgyD8%`f?UJRuGa@mK2_^4b zl;pJ)1;2zu|M}PRnoX(5qfdwHERnGU8DC=f9^jP^%)Zg^s&1~t6OGi)RDbH^n8Eb4_oHqdB%7esHiHRY}!1Fz*6q6Fx zJUT`e$`i)OvMG0F<-Nzt0V*12EUnQBe7V+{`d08ATTF5JeMm4rE@G$n<)LvZe%T>Q zx@Tn~Mk@$hB^BdR@&k-=9P{%p z2_@6DPFQr)W`ug=g+kfxO>2YOD*NwB%B@-R9CBBgbMmv5C3PQyd^XA@T+wI(bm?J# z@TL_k+5KF}*~!ck2R>8nHMh%eTX_j+`IK;QfcA&MGwbD=_?3F|6*IRw!#;5^@t8ks zLbt!l)eM<%S899-;shSJsyp6ct~zgrT+!A-}(SQ))4f{rSU@lpHF}3_@$tj{pSc!E!SQdeMiAyg> z$}^s^PvWT=R*`Mf7!=AMvNVNhl&8P6{oRMdTsjjZ%1=4MElT;iqXb{*UnD+$xq;BJ zb3@SePtKINVG|lhdb-3@a-MYe1e++#huU^VgwWNWUbT4 z>;9|lA9Cal8}s z_dL1rFoDa&?QRfqGg=?|b23 zMm!;Icfik*&*N)zBNto3u|7~vcZ;;grx!6Dd6~E|w8TeevMcdw@+!Yct!C?ooyPLH zO(62J9?BOyF7d7@xw`vzUR7|%+HtrRpv8{5yK^ZX%!_svcjT^8~lj~b&kAXh9@(8xp$gn@U8{B*g+ z5ji(-K;*?>k$!Vh{M#F78Mc1uqH|<)L%^0fn+RKKS8GsWm-o9UZB-lr63D%MRYm&M zmk&1!d1~8UNx*}ka2v{2(HJiC-57&+h=IslpDrXs3ZA!Q7OcHnG1qLf&9Zq=rKIFcDUgWOyZK7eoivj8yxmuYQkq?%+t&GJg+uJQ zcX&7$_Gn{Gkt3me$@KH-MQcPV<8LC4AS4UsZSFp)l8#qono?ldXI!uY{M z@iOV}B^+(rhxz`6nL$Jy6JBjM7M2$t%b!_NYmfmcj~$gC z<`IfH?KKtY}ROdgu;0i+b^;5)W7<~=6FoR ziS9S#s5G9dcD{E7atH!_1$QF2^^$&PlRVE;Cn(QS9hXY$!6xrkzP{W~tvI|QwuV6e zzR;bxO7=x|x-v;GhB%ySZ?!PNh}OwDlI*N3xh8b|)2oGg>wQ{7c6I?x{xr)P9f6qGqim&^15g{Sw- zUQK~w3TV%>m_cK{V%U{d1FWSqIsT~$zq_!zeq=mNa~%`q!>|~B3U%9jk3?su`Jsy$ zhMY-OUlRDkm>_f%LhfQ+B$7H+h*-{`^k96EaYbi)?0zXMx0Jf@_dKw=*Mi5dbNBoH zb-;Yq@R21ntY6&mM<$|Xe 
z0X5ygS2V8)g90t1Uv zhx));9Qq!~g{{H0?kp?fRrka6St5+;!m?dan?D(FH705rg33rF&(@%zrzRn&A1B8c zp&L7M9-UDRBVtMIf~k-(%H8-#MxGV<8=|pz#fvOu^pEF{DEYcQN#e_(0$!*D-q~$V zYqp++2y5O1bKPRH5CSh&`*6aM?#0|DpAV|ud7THM$pL0Zg_oxDKnbSa`tY|5l%IN9 zK|Knxnzl3%IKTK|1Hh-Z1*y=EUPT{F)3Lc)Y?gKLj#}-yv%3xS%jAm{&I!Mlv_UMR zVw4qg++&11yx~k6YFPX&S%`UfT+gu!JaOo)K{_kQU$#ntFjK&6Jop(8y)RMzt!`=O zD-sN(yuiJ*k+BhQ9d2J(OjPt|QbNDv7C^}wWFpZXBZ z^R5QF>L6zN_%=Sa(8(qvm0&BLvMC`U15ak8oPrZ@njC=yV!iVinNX)L7&e8jXJR45 zJVn5&ZP@e|lO-Am=ev_F(Fl4S6<9wC&Q+aBkSNKD!mRhahpl+uhP>2i zU*>2_Tp;rH?@<`bit^N#3+Dh?EsPNnL(dymAKkQ!ALY`y(*{h}ZCf7!*Vos#lbqMO z{Ex474Kkcf1G7GeFsp)Z8SvB!qDsG6|?|2?XJh*7;^vYE;CDtKRb%pC5>|`fP^*5>VAQjn?U5Zo z7gqON(!o)Jzay~A=SKc}S%&>}l*#(Un7i9Z@S?}E3Y{7HA;VtRL$^+Uy8gx_LWC@r zdl-}VE=e-u8=Qj=q18g*iuIroxPrY)mEqdQTP!ESDD$q|@Ng+#`zstKQbV?iY_}|( zSABr#R9CkyQIf$W)^`=DL1m!=m->0+L;>KBYk$n?{KbDr9abQtvIw(T#UCx2@X zF^|iF8zWQBmRWG9*WTC95j;o*JuZi=v)45|zQVn8NywRj(lIsJJf}h;;p#xbFuP20 zt?q^&_P%-72!y~4fIs>QA*QqBoaMLA7-SjOPxD4nbM#=?e8YFLv)EaCv(jiq(1Fay z<|GbH*X4)3vS1*$oU(12=P+7q-DNfSR8JwXD@k-%DB9z4?QC0*Z?H5Y>)le{^CxC| zM}`nF-Cv_TDEy3_SY^)Dx7isO8c$I=IV*;dOL4;y>1C! 
z*^eGa;thm0+D9y7prJIiu#pwPr6o{+X#l*z^vSB^% zsK7wMd>$7rXKV~{ue(R0;^KeZ%>M9#12s9mKk^&;0!mo%&PM9FeGOJc^- z>>4%*{hgKC2;UjIeQ6X|9~4WmFrNPSA#MGeNB|fh`myhpbn*Vl*a_jSfMG(R z+~)N3R0D9BkwVw)oo5o;Pg`7Fnf)>M;|CJfbmS5z$QNjQQa^RGi@>Pk!hZ%I70Ffe z(QS%A(qzd?DgSnpLZ{a-%z;+}gR1X+LZ1A7BCl@qSxrYEEDqoBrXOl;-xdcEhS2BS z0Yhvjt=Lb*#DvVi=h@eHWlK@@m%6^kvlqW88inlPLdwU2>1JPlzbK(i79Q$X628pn zdh<2E&d2OFMoqBW_S3{?BU3cs8W!`UgjUVpep%4$gqKfTZg{yLET})9&&j*H9eh`- z@{D_eLkC-~ceC?q>wSer@Hn55cwuWliPdex@%qK~9z{E5z*Gb5L(%8}LSl$n=9n|k z(<6aQz=7;O_HKEPm|- zu;gPLRqLS^ZVE+Q<@BvUO;uqVwLugpuRh}8z`f$ zPJ60QP=zk_WF~jtDT+XDQ&X}&|9uh^Dzv>}fM`u8Hb|_qBqSuXJZ5__VIz4ztQ<0S0R^BV~Yw^6B2nKqqnz%DzE2r(Hnrac0#sF<1KyZ9gZ$O{WEEUP%+ zG@mbdn?_Q?pwRw9&YK`|rlMgmS{zT)T^&S(fdn}tgo$#mlv+aX~~ZDlS&m z(UFu+WdB*xxP(@MLc+p=c6QF1LK(iS=;Oc~rvHwmK^N_@E2YC=ERh*|RK&s?v;L+{ zB`F|Yke8n8D;K<8^Xcu0y95&<^6;hs(9vqdO{>;!nz3H+v7iV;{*x*|^czHkuuf&C z)mKI5uK}X}QkjB3@1~Rf^`nK0?B}PHCiyB51JaIpVBh)*jB}{|wHNyj6$EoJo@A5! zx8Hw1ITi*%{a>1a05FjKr22ns7+#*-oPq@%64EsCpO^AsK{wpJJU|-}pwDeZ{55v! 
z^gg-SchU7|GXJK+;NzgcUg25R$$XUgH8Z%sn;K=<)0dU9mo!;|#D<8E2)|f}n2MU} z%HNOw^@VWc_Kx;x*i#2rP%Jf?@FxI)drO<{twV#=JCy%_rs==BAZm^#nUp8{&*c5P zV;i8E`p-!GtCo)dNA@q9_P>qNpXn6}ohW1?Zlesc|7{5R*)P&_$k4iHW)vG`(f`x` zziUfXgM))s0YBHFY!4=xln*EvV-h0#u;D#JIufF_-}0g&ogJ@|4_?6Sc_u) z|F>Z{5e5M&Gtu>Beiyo3^|IOjN&{Fie-YI#@9eW+PuJyoZvkYMO+A>xxjA4z|Dv7E z6+>%H1|TRWaW_G9z1gAckm!D;Dvf8)*wwj%EPP;dT$aZrV*ohcVjg)L)k*qj^y=O8 zLq(DAjQNb@pA6pj-aBV(R8z03pu_zg41oSIAP?h75{E#{5WR~%^lB;3+@l+0L$JhM z?Ui*V`7gQ@m3MA}15I2%O2jVq$ovY5y|gtQWQUGo`sfIMTb9e&mXWb5QrQK3N=Qgu z{Z>sSS4JppJtS7vpbN~S+@G{>NdA!nN^w0uWj!~W&nKh`7LD4(QIg(eB6FmkJDa&p znLE%>k}+{{;fLv0raGg2Fxc+z59gTepBN(;eB+T!Db~h`V8rBw zd+o4wUQsr=l_iJP6dke$Hxen8+_xFMm7n`~J10%6VIm>y~&XO=L)0r$uG$ z5OfhRP+&r*j2$J}-eY48%8J@gvD@S;$SZ5Be=RM@I%M|mD#D+kT(;~kY1iUOp8w1U zzeO3Clgn#uP0CNOTk*d6dT=UMVaG6c``Z{tMU}NEndUofY@MJBX{if+Q&bc}{x}dfOi^+Z^Vo8y~-&wI9;WOWHQnWTc=V23eZr;9cIMnG2wj#D6)pIR9n` zK;HpV5-*Wo&{`G>0I<4sBQ6AOR%{SDurc8yU-k8F3+4K1epL@|l@STyV5HoAisKoU zF|Olf^H2lQ;(`xwODLk#-}gHIh`uRUml7$^Fl410hIUz|V4FeQNs7KwtY@I>0Et_P zKzJF1gX)Xm_8eAgw)I2l1&wSb^z+GHfRc3v05TslQ7LsQICti$2uo>J@AymmeF2<> zq>1$XBchg$;`6y+OXS}DQI)y0^1a6yhx7)sYgVmn$D+kGd`C(MDsF6+PLH?<>qD&X zlh>-T^@+&ylaPWv0FU*;=k%XgfKWZBQI0)$d(+|Yu++o!e$EJerR9pg0q&pQhJKu? 
zF#Paw^2njInG&Q&mLo=tN*}6paGk$+&0PQZwRTLibaBsdj#;c~&w$tn$8C&{sQMj~>X;+8$E`$&`-kz^*uT~A>he9Kb7 ziTETxr|#Kx*}Ym)Ab3TD8in3B?N>=fE-$Q~Uk6+1FvuR_e;L-C5=}O0m z=8cGNkIfGAF@XwVqZB^RJ%zrB>CCcaTS`W#KOoMrO`pW>^}v!twXt#E^k=q1s*Vf0 zpI%L>-8+zB5<IM;avb)WE!0cQq0E&cogOqNnV zuIyEq(eG|G?9bYc>2&_+LXbzSq}EWLlwRYqru#?LJLPjD*ln1@W0HlftqJKsPZgRH zw*3TEZy1r+y>Wc+%<(-+OW~d#D>P1ILW%G;BHL98nlZOpZc9?lBBAlryUz{`4^&HC z!ZKd+&05}GEwSu0RLJNWY^I+qC8p5$+-VeTt5Wgmr35vz+{wO+MZU`RH`(JcGs+Qn&I3w0Vvayc z>oG>#t3wqrMS$o6w?6!nePnx}byb)UI zE4vDpyejd^BL4g_8a=aqtUJ0oUV05aX|5i%S z7l?PHWDc}D69;Ck}|3zFNX9s&_%z#3cklba5#aG1_f#@VV3WKP(x8U?YJsH_CP*NT_Efy9wEHUB^5tAXdMJRTHbCtq;mv+6e8Rc+HS7{d^ zI?Ohl;i49Ccb^+{=D>;9pRb z{c-k8ATxUz^c1JjjctcSnSk<{&7^4JcHaZpldr$P(4)P}i9Y0Fz=k=le-&__{3yw( zUHwQmrhdL41NvWuPgOtIpP$cGembV!WKU`i9qIePnNsq7O5=+C^vF0;2_O}CH(a0H zB%AVMo(l>p8FPf)l#(y%h^)htT)z&9Mw#B5RuUF} zP&aAW9JoF;edR32op7TL>foa8m+ zr)%3gLvfqUsU+-cke#Wx38Ii3?rB+E{l~mHexm`EdYWvv2m(GvitCz8 zNNG`cZm;h*t+!s)!t#z!nHMt+%zKV7mNM*7w1zMGY|TJPj1#oN$mZN9cL$TJSKN=Q zleB=$&d2g;%-$p`FHK6NJg5TuN8|T$#(O(m@hcYc+y2kvt(S!3ZR+-*eWXQWid$o& zQaR1ZjF2zK>(zUqihB;*-_8WM(0HEA$8Wd>SADLO4Q>pNuXE}QRQT-(R-<}+aGmnK zrTAa@h;Zn*TA|QQ)P>&`mfbBTcNF9(925&aEoPJTRm{#R$=Na*lO?h%e}R$x3L^4Z^bvo6$cy> z(lK;`E=^`WqsWnU46c^JWOfuYdG#}5;)}`w9Mw2(VR9%(NPY%IVDv z`AZ94`%%~-)_gNHV!p6*;6_As*<)%uITbcdsVmx!M*Feg*XqyV(o-T-EH3{n_7x4V zm@|vemM@73sc|8kp$9;W}~OKaeU6;$QxPf%|~eVKg2QwPOz+f zasXE?@Z{TdD=dzFJ-dU-+vn#hwyMNd7Yo#p;*|QfykwesQqT#`)sHtO1LFF2geS+V zw*54G!1brT$~@pGKP{88o_4U=wgn7#;m_G=UH-Bj^KA(6oVQd)*UF+1L#9MyOlC@w zAWr4_n!5u&t57CWdx9;px5SB<*gF*c@pWdwBHJVQSWXERhdZ(c^R4|Yk1tfQ?@+B) zB|oYm*`dM)l|66xH>Ju3CeL7OV_KuARsBqKrC2RQQr$P+5LtZ((l>@mN&JtGiJV@r zLsVay71H1Q+Kas+VP8?)2ncT+>ys3Xn3(y#hWbLcQC&SXhPD<1L-kHetj~BOD3T&- z^)U8*dQazZPotsdaq(@kDd8sxFEY+ULVfd^nMw8_1=jWaS3KC}j4n+A3Im&!ShTnL zJd3sHQh8&3pNK#GB-&H&%xHXpXW~fqKO;LFlI?SLWxpbz?>^^?enNe$k$kb+l;V~~ zs1ikL%Neny)a-hsca+TzmS8Xm-HgaTb<*yvVOd_g{T?)=sxI&{s5JNX60|$*(VBhN z(79N;rv%Rc99U;w4uF(M0kkRx=^tmUb3C|4^KKnQE6X1>?s5FfhJD&1`^$uV(!;>V 
ze+4+$)J%Z!qMY-{FF~*+c;1=6wi+wx(coS^rV?R?=_}!e&`?#?x3W8eO@w@G=d^J& zJ?5|dN*pkD@{O?zTli9ZLWj)_5C7hC%R5Ei#_EG&%K9FcF|*^4E`p}SnC7W<^p zbbVyEMPA%EW9nu`;!>|~Hh3*2YThwi+S$;e3m+-yX%~_|kDrk(rFIB9%dPoML6D$m z-)>oxJuVov(|TDovg}0OTLwxq>_fClOxom?0QB;h?u$|dQVr$iVN`>*2a_$7T`>_P zMrNj5J@CS>zPWKglG#sWFwP8}s3tN~7$H)d51o{DxdR}Y@>UzCHLP=`+y1nxR+k>I zD%%c91HKuNGudq?K*l^5Fc3sCO;Nlp;0|HvkpsYSW2RV*VsuqUM`K0{t5E%|SL(D; zPC;`6X~bql6;`scHGYTT@{SKom;V{s2AscSj9=LIPWFk5b{bbw zCCNKBdsWV?1G?l;@(^w?25C?pQgszI`lNbqerAeG(bQ&M(CN?^8OYz~0vX5^uO_Kr zkH5LF_C)qVw?L!N6k@|&`^q*Cefv$A+N%M>eSTDuFH+z? zF#pZi2@+{yK}Acw1HN<~FVb@Q6wol|zY;i*I__X>wi1thYTx1ZruGd!Vs*JdFCCV_b&tm)?Yc1-g7K}Z_YWbW!i70HSuHKpQUSNrN zULa(m7#U>f2pgmE&gNsIVXge}K>~s&!j)}~pLw*-Lnc{oD?CFL-4hKr|FX>TN-9&$ zoD>{u2-kp^s~c&yxJHWZX*>K(E&gTT-TP5WZ>UlijFmghid$7V?mKyyqa3>6YEXX? z|2~J7zX(2c_CS`cX9GrV_K_a%`i6bO_fK<0?Su18IrPxyE4Oa0iw_4pK7x@!!iO`e z=amhM){#Csj4yuvh+@LJJ2IGq0>5BcSIoQiMOFD}ey0Bko%RpI$$R`t ztsbSO*uDPEI4V9Fle95(aY1Vf9XE=zwu5O3_)4`RY_tF+?sX@ktQ1_N55YWXX}!`F zm1*{+WM_H=DOfhGFV%N#MPad&Ovex=LH<3=KUM72`PAk2!TF)arlKRBbxw5Fy_y6& z__mz#4qn9+jV+3&OFGA0AeDmOPn#3!q|$GD3>@as+dD!IP&t!iz3#|*pp$zxv(<6g)ODZRBdOl7F*yQP>;3i=3SHh;&g-=$ z5nAI6J7Y6T&raH1?RIllWFmp_HY{MgbDd^OX-NOLHgy{R_JVFj5r_;`?1nHmo!HQI zaIk>oCM&6#3!fatf?Or=48F|4Kcev`mS+ghAr;QtoO1Ua#9^|fo#0cu4YJ}%an^Y0 zqqu{za41vJIRi9u)TJ0~TXRjdS*eCMg!G0uzALkM)v>f1_DkmTH)NKzI3lGIpyW}~ z_>;#FNFcOwyOIdB34#m-ctC)qwI44L0z)tq^bW=w?WPYP;^E%;A@(?-W0ffB@H~jj z<>_39{v-KfMLLz;^D7ai*)F#?ZeLmRjb2m4WSlyZK!9OO8}e?NY2u{MeYHz(6*mIP zz&A%Np3YiQz}!A6zc-afDPx@kYs9mNk;95tU>+*C2?d2-p@=!PMuro8QfvzPaHX3(EEjY3JF`GaCJK9 z7T^P#@`8oI(do!Y3qPR6*pbLuUJ45E<;^*9e>j%f2!>(OAPuBy`>AC@0%o3>uKyh2 zY&1n1g#{JitHajJTkmC@2aIQ3(}|2dI2@nL$*HN`mmZD&%B8}^P*G8Z#?Wb}WThsF zRdC~UF0pXAv?yciVC#SMC8#VeNp}?KYjrH)m@!$*U!cl;+By4?M^XkC!xRu zC2K;Vx_Y=8(WGQEEI-p&rOc;Q{>Hj#Nqs?OJ_`?b!N|79gr>Nx^?6%XSd8&ghog;d^Jqv$) zRo+ZjIp_3DdQhh~;e6sLiw0Y3M^ zH+yYVWatIViC7Hg*C#AH3@6;S#5h*JC)QV^Zn#)en>|TGWS5JOKH_YiOS@ZB zGvtlO#0zNxRVg{K6R|UH^HSQ&YeJJA4Q+qL;-NV?%MbrTbW<>8%C2z?v7k`QA2g5k 
zm%~dKskH(ywF+9kMSV`Igm#>a8Ar$s^M?vmQr%OU#Hw)J8ECV-i&BSZqo?#)5D{2v zFkW7InpKlG9*qjG*G>z0aX|JiYcIq+A#-zy_#pHjh9E`3%O*g0%^)8N^#oYqr80zO3~mH^6^l~d^)xfHl4Mg=f8#qRn4!jH!zGw+Y!b`c-Z>9a zqt6>Qctifk6sL(+CBXG&bN};{ryNDiv3wa6na6ufeehg~!FczhIb6eU zY0Q*;!?H7xU}AHo*{gctcksBn&e_MXl5Z`KgyDSrmZ=?C%2G$A$!Gs#; zUDn*oIkNUuR%~RD5Y5}j(|aa0@oDN7(`+10Y#&b`EZDR%Fr-s%b6wk;t+TH5&SHP` zTG0J&n8T0o>5#+vc0$o&ZF%BdHC6vnMxNLamQ6n5gi+8}n9$BJU~&;3Qlc;AF34SneEk9Kdyr1+s|9L_6Z@o|6VbUmBR#q zh+z4f?-^rb3IC$~-Jc7+x~dAZGFp_H`kT*TZ9q9pevdr7t^{`soK8tYw68^MXHr|C$8Z>wqu8+p_2TZM#q*h&`8gaGU9`T1?ukq2ExQ`=0R zRn6R0`61pFu)zjv`|Uc&Fg+EEQ0Wh*3}WBN$oR+ZTW|8?2*>0hjrH1Pi;gzk#%yxM zBQs(u|2zQ&jVJ~xDr$TjEp!A4NyMoUz(^nR2VXu)i*g!FbS z5A!8GN2_wnI&W-tfYjxWG%j2)Ojq7YN*g>pq&}!~gqZz{S{pZMQ~6~6DGT$R+m5U% z+P1*EARO=eri1aA^+^f(BPljvY+hqjxT8(V#_qR2sx7gy=0paI%K)gji=iDGJnCBT z(_wl(iQ$^gC-o|aDYxbC68gU|3t<=Sk0;ww*&9-cA*X6i7SJz(bukN}x!FXK$Tn4l zwL+GN{Oi3&EHuVTPE5&`e&2ZJg^Vo=dG<}EvXrM8BE7h`O?!=TLUav7%Um4n_rDjg z?dJN;fAb#OG%}ZJ<#lK8g}b$KKwe?6mKjGtD(N`J`!d?Pj%2d%$Ez9VJst{A!NZ|a zZl%$jx|TBW^CiNOh!1Ic@b$ORsL7q*@i(iufxTYnM);3+$jopEkv&7hPId)VKheB! 
zYXgu})EkaXm&BcXI9^!mxpf*6!ivVtu4`qGn~uqyNts?lSxHL;*+Z`4&meNHd+usk zJnqLWf9Qe<1Vc5QUIm&qb5U@GUz~Ee4hTpn#`HXe3#399^Ml8r08o8$T}ca@#B^<_ zCvye*pT5NxzioVqB!?J=&5ch;cZ4u90$9r2>Qw8NlJW{Bijh(>_25cMp1{OVbEiqhitt6u(5P6SBc^SamlR)rXNNshzxhW~v zrc|E1aP#c*f};2FQO%c>H{W}gG8gO<)_Gu!1ACcxqQNH5Y`G;qci2C)tp!uGk6a-23k_t_rEkQi?-2~SxwGkJ5B+p)k#nQ8fM@yv6m6Pv0TotVOiAr8Wp9u8E`Fw$ zVP+38Ly_6IJ{BB8iMpOC?;&yDdhxrd>F2DPp|3eEbtw%g(;uRpK>)&0r=12G^8@$n zDV=qFDm=w3Z9gn7->+O8DHDSuiCV8E1M@9)&e?lQWnJw+R<3v`qM%63gDX4%e~s@W z6_4$QF6~|=<@XmPkyeNl!NH14KQ}J>Xr?t=o9u3Dqk8g$ML~Rp`rLBkd8{EST7_o( zbuKKLJol9Sz3v{T>41o&c$W=YRjdJ<8NCLyIQ6l3&d~NZAK>|;>;&x4`HyQx2whX7qW0CF;r1f(hl1wFr8wJfRC`cHQmWo*C3K<3$6pY3fEc<=GzYuc@$be z=zHBAt+PAu1);+*8En(pcAcTT+W&yOs_+FEp$O5&(R`?WN#I1lH15b}J)#=w2_P3B@DEZmQ*zcm*9v%An;`9EJ`-L=_po{NP#Kh}?1^_1x4yIr=9wzH$c*Vhj zHs^@Paz-ChqL||(OQ^|(LVKYzFmaav?~)Y(=*eLlr!Gl>wzi$veeZuw}YSU$V^Xy+nyTPMC!;C{x`!fO)Dv=ZxVBqWP#su3`j^`&`(N!mt}0_O(sm7 zm*V!G$FrVbVSKm0HlT>r`EE49F)N+rFKgq&T{UeI8Ww_z@i|a`Ek=j-5Nr@BOV+Vk z=ngrCl!GKhiWG|__nC^atitsaFQ@9P1lipuhYK;C_Lk~a-q1vbiUOuns32{_&wnXS zkJKLi7AEd1#n2GY_i1lkVj2d~dYMCIo;@qh1S<}|3HXf1m0srF2y-fTkhtMuwV{5*i_~K_ z-I-a!v>ihP&Jedtr%2|N#UUBF;e5xy&gj{?bQN=u-~0PmrY_L*QsZ5J9q&u@ibikR zc?{oMTh~pGrJFnwXOz>4)v(?>KMWC2-L)yzewybI{sREzZMlN={5a$%9IiRm%350C zTN^b<+?Js0<#v0|II*)oc1C?{0XQUmy^XkvFEjRP+ykH2)!$V-s0|qiisb8JB|u22kBls{jx~LaoquI*;YD0d z%F>%C3ioO??y2rk?Q4B?dp=gdQ0CXJ!E>!Waffd%e)X4-z^viF=R6sO$05rcIx6f)y^U{1sB|3dP{0 zrW(*_5_CGcVLhoM@BTg99Pd7p?*___jnVutbtv`hDrmtumeqWd0?+?55JhhwaQf4$ zWo{Wu(1qodJ&;EqVp~LM6y;ewGUTAF_zy?ioW~Ske&j8QYlHH)YG?9eJ-Pn{Q19vOMH(>vYelEGD9{3jN`b)u;1np}^|880eS zaC9jM(xQrYB)-qj$VaI0<&U#jo$J&Y9d2VZ?BP~W`8(H%#Mb_|VOgvuqXp}og^e16 zCnEvg7d?n*7#bRu6_1jFC?tDA^1aNcq+mK^EKkoc@C|Nxt@@I2j}Vl;E5pr9eJ3X; z=r5`-D~VYH3CLj~7qNc{kyDTmNjl@wez4nKOsc+~Aj4`zIvO6)?0c{Aut~8lxP9aF zMoVKd;%VA!73BTCk&XHtg-7yP2WX+A# z#tYVY60e4U7c{Xry_ndVM#qcDq+r*q#uGRmn!waL&PUIBk&2p~dj$GL)}nfb;T{o3 z2#yq7@bhy(ec*1L^XA$!;ZTN6cUz|iz!}R$1$OmCU`3}k;4yZv-ub)>kg6-~vSEpw 
zwJzBcew}PJ>u$@B;~UjtWs8=ll92N#uQ$T8F=XSRp{4Wlu{D}L&mO4D+B*zS@UHHwGxa?3(=?JrdjJpgw#dHt!MhH^VLL!VN4IK4Ua5DMhTqL z@Cxn-^BcFrRRff#(H|i@I$S<=0iuAabkcgm32<(oO;nUs5=KvvrfQn5y~CI1Ou?d>5_#5;*1b^}?x0NNz?9kKrcb(Dz{Y^Yp-QxOrhtG0{4u0hlMl8j8l0}B=5L^;QIE`#Q&~lVZ>>^Fh zkuq;1y?fd5(8D@)J>cKT{)q_&LS|M_6>Ua?HXmO!@e}*-GP3tH>hu5BP~*>p_|ZE4 z2LJam#BMrg0!sJ&`uP@81dtayIx14J^}?eGj|w`!1EMXM{I80CRYdpER$|sXcHS;* z^po)tqF8Z0MyuC4y>&G(4*2@&xoz~Z{>HzxHq}P|^WW8fE{buW4_l^`KgoN4JPS7Z zX#NfF?~4HWJn6r{|8uXMO7brQ^*@CX9_?>T{%O!375H~Q{@nub4~GA=_#XlS+eZJ1 z4gS-onzc>}kPiFrl0#VJQTvf)s>#O2=6&kaAnyYXw%kGQNY~Or`QIJ>tMWg!)Bm5Dz5mmZf7nU%e+WPQpBedoeMaKt5!(*Iro5fFZ_OBY z1N5u*Di&Pf{v&8k7;>5zL8i)=X&rI3Qy32GwZ5JLN-kvu84nl!iBKY;9z#*`JM-)5 zc$rN0IFGr+h;gXLfBU+;S@U>Q~pd(U`Hv>6c!k!tQXPDkT!AE^%;R^;Mg52BH zV*65fds^K&&cEf6-Nz2!$&e+~Uoll@B>AamN8+{OrG;K|YV1Eyb;_b_R&tD*Y|0Px zL>c6bF+bez%3mGq3X3H&14zB85Y#FRM%JmLaWcQ9<{Ygcl?Tjb$~+WS?OmvEQm|KV zqVuE&$Gp(UzwQVnKu+0dc#hi^RV|8rll0F{{`Vlq+tSj~wq0L=a{eRsJnz8_%?}vzQ6Yvu|roq$n)#Mi7 zkw}J8hP~n6=nu0Oe4Ww8qF*N=nf+qEc>8*}fbSL_F17sI4lwl5UVZi%wYwVSw(G^q z*F%Ct0LFlK#Out@{XxNajr_l@xK-4$gybZSny1Tv-ExyQWzpzuDZZOMRm3~1dJXev z$s$3ZkL}j8te&8iA+P-HDk~A7b!$T|8eD_Y1Nc4#=#t1Uq19cS+mz4*p(>q;%g4O9T`j2>7Emgl|>6K;Qo1 z!ITv{|2UT2)N2lc)a1@AFX_8MB4+Ug8$ma#yN*4e!^YeMj^MRLb9Q;)bxGRv5B}cr zk*@{LC=&RUa$If;+J0c#{rUsjc`x-0!zg$Q_Qp#i-xl3W*b$R+Wzj#_)6K{?78MuW zJ>$y#9~vkxDWH%+MLmIO^Moa`F1;R)vPiHW3tC^aRagqy9$EKWvUo)V%lM;vRhu{6 zp*l`xtCtyF4Q`-V<3IqL!B7RtOg_74o!1Y+^eMF_7#eQ#<&b7`K)K%V_uX;&`1lZ{xSPy!@!50DDS2~+w_ECMbOKt#KSaY)W{oC< zoJU?^@x1txc9)Fgoe*#Gm*dC$42GPB;U1)jVmBN!Cc}oJg~HWj&@y9xro&0PvkQVH zCTHt@0tarB#f1h8%rwi$r5dca@#tE?na1mo(-KwX^D`U+!Wie`bZG zTQ~%(1p!`#tUm%2i4?7gxpBd~vD$$%F3L4fqK>P%@7`BF!Um>4%mk6GM`T7VKt4%6 zj}OdC8atO(;I7T%@J*|M5SeWCbFsuRNPlZ7YV`^2ZRL{K)TL@4&@6*dW5-%UvT$Y< zVRp)gGF;MG3FEY;WaUb-<2J*`t`0PlHcJ;T)T=^>2k_I>|1Q60D4)P-sI5w>`D9Ga zWVu(_sT0Oe$92eVPvu2QwA0j;kO6ugd6bHlv5BEr2l z=X7&Ko_3_GaFJ!3-bLz&DclVFnFiqc3lws$7`X8+sTk>4q1eAxEe0~p;)XV!X9Sx~ z9a`_9Zye280$j;fakb*l+%r2n(qzWQ^*2!2w<|%or!JgCl`nuwvJ10iFU)F|!%Ilk 
zzvZk?PUQ9UM^noaXCLtQir46lhZdvt`qx|p+31~ET%8rc;y9_GQ^cOh%sIkX(>+7V zIidysQB}DSSEbB8IIhTrSZn@FYqa6Lg8uL%5ktm@sg{^wzTQ&dCtgUVMbAK2<%B@T z^9;UxC$wfm{N17q6WH_IiBrr1{g<0*^XDw|WK3D0;T zG_9tP%_PMgwPCI2E4mk_;E*8yovyjv#JBW1U{hX=cWWlCW8I>m!Pap zp&&Z7+ipZse>zsmOJ0ZX3@3$X)9l2}UTR(aHxrH~)z*@5_!|cqQl9oq_v4YIiygs7 zU;^wt)hPVt!^cmT*B@^HoGI15)=7r}U&0r>@6muqdC5=b#td?8nO;QJ#8I82`dO>e zp}Q1y>iS5Cye0J3JT8P?Gc`q6zq?~*vwc%$oPNLZZ6(DM9r*Po{)qfO|2{c= zCP^ErL&7!JnzhFb2)Jw>RAi>3+qgHZg1r=2mnhQEj zLFAbsKdW6|pM!he@;%PXu~DT+X<0HGbO6pjgh(cN^YFqb!PfXRv*pUxqqAL@jJd1V)B>nPi+|2xuHkiemlj0{P$)a*WKr*)3G@7m_{!Wx{TkO zgXh)UtlmEnX$@2%rjo!o-Fd*r(-1RnF-fX8+FPf)qOY#M-Gmg|GRutKhv_UzpivLf zto@11q!KhtAh4_G9(YGS(qzbO8uE_ogQg^}@VHI%l4^eE6D{(nwW#FE2(_?hIz;%E za2*H>7H~LzCt8kk+wsgqR8P>6Z6oj(xD&T$Y%i~Gy6RZIcta}efi&^y{$mND{Td{15fLYx6lZ@C1B0$Wz=Dex*$t!5r(k)L;dCk2erwe#>rA^*ZZLN)y{{rq}Nz`dkx+Qu=86HYrF5 zHt2_nx^lNECL+w{VN-SYwWn;4Yh$rf3b8Ts0jC0>mYbqGSQ3)E;NjMo&S(p%LI8n4 zV85dg(`KkC0Ui4*phLMC*ehAuk^#?rf;ahqqQhKWcQh3GEF53=Y#!B`dh(fT*mo zkW7pu%R6kmovwFixvLnVL7`JRJuzfyi`By6uo|t@aSE!}^t!{c=U!A)Q9Eo(GqXPm zUHg@9DrsTOZfeI7N?pkEvM0?$ieWxwWwfwgWA=o9OD-g#*N%Q`vGz2M=}yq~&g7E7 zyDQ@xsll8bt@Ri1G&`@ZhP-uNAT>pNaZ|@fbZNt_9oB1f3m|P>Z-v3*%TuB&s)Rjc zyHtu^)@*cMkW@%hO1wQ#UxUFbIKm7hH$#ZG=haodTuUmius=D4yMv#oJh#zhV%s+|=rhkUm`p|^AJoBzJspZc=`c&Jd9D4kDTJdQ}rsdG~{-IbjkDbSEB z$$`U}o&T1Mx-mb?5bI0vm)_OtrJ$7#sc3Ps_@^3&7GH_v=I$gWboZUM$KAz`@N~h0mYe0eZ z9?aPi8k>!o9!(bGI~F$lRPyp5AmN74uos2=mGJK>7DCHliB2m*-q_Y1BUqFZCtYg^ zB}0yi(^m_h!4~_`^IETvHh401k%Tp=`_N&qVDGYy&viDk@k4~yIgy!i%8Q821?kE( zielhuGi`#*V0A$ECn>*N%3GHUiOjnx322%Sb@L+~lx+aBtFYsM@Nots)}1Q2wm)H( zA0771_(0H&p%#D4K;!2q!sf^A{;Ufa9D#JBS5mO4n8%!@%N$eY!#GL$Inl(Eu#MTL zMpT9ojP^lG2xRYPGj5u4!!u;t>ZZ+dfN|qEdQH}BX3|ft|H3%kkq8l;Wbk~03uuhU zZUnRQ!z_sL;fV{=tmR8#Zj6*E1D+HbVc0Hp-6AZdGgkKQl6=>hmMUE3$ilY9s5l~@ z6U@+s00vu%!28#O9cPCja@#340yncB4&`p9D=#!QL^u(fe8b?*Clda@M#<26+v?(I z=s*S$^EqS!&_~`93-I=)>i*&A`E=Y?fAgszl8fsQ_B42yS%5BDSnV=cs8nwjHg)+HyxiM`F29t@oZJyf}&0G7mNZiSCLk56CJ 
zI{w9Y+_@M&7_YhyLBwx-g+MUJ{QRaL260DAZ(&76_o@sQ3w%4U?Jin(%x|soN;ET2 zHDe@|Om$3~aQ^SbAQN~bRq*EQ^bXBvlgjnLIY!*wTgW+c{Z4IjRDk1VS53yLzoPU8 zF@fw{+DOV3IGu7arq325TnNctb8#?Q<*NaZiXDF@OyjL_L2ze!6Zgu>O#U!+6+~*D zOVK$r(`KmBy7xGyHDekhzr=HVZ2rbo>WFY8{{kC!CHpyXK_HatLu-RW@i3CCQ4z=> z8DVVkvn*hX;}$3RI|S%*=Fmk|p4BpedkFHgi-gbEebC6k z#^%_zcF1|-HeE{cM(|XPt%)ON0=jn}hE)FT2FiEc?rRw$31*+JciBG4a4s~+`tGqw znDNH<0a$61(HfGtb0!7x=|W@8E8nRkBXexQ*=@KkjwnUS`rH1Thw&*nVuQC-LUlpY zx$reEor9q#qG#?u-wsnUq85f4Lj?O#59>GHQMI@VBfS-|He@o3E_C4HPn$;W94Pl^ zy2TfopdU4Xwt|YM!1 z4KS1Y9H`0f&TJ`+Y`WIFpM;U48nc=E@suXhL(ty=6($OtqGNhf+7jXq5R{n@{fu^L zp{-@(GS0f@e&nursIT(&Z8P%HAIE^+qO;A>(V)hAU`F$`S2z83*HMv1 zdnaBJb-?19bRX~0XF*`>wYE74#s97ah?cke=qQBgxk(!f-=(T8ok1P@{A5`o=n^II z;ub^)wbwT`1>}MNSn5n9&(EaVGS;NNF?~=XDS)J4fAf9%;v*$pG=Uv-L3p&dlw(KB z*k8QwcXuB*X+d^(bW_gf>l?g6?6uY>zMKyPf%oDX$$ik{nKNjL!us<^p6MZ`F`7=I z+?K1pcRI?e#tRug$Jr;UKR3dWio9yT)MHlJupJCN;3W_4K;RZj(|LPIi=?xHT_zHHyo*)3 z$(9Zs9;dg81eMh&m?ireYpGw}@8Kgm!wE??3TpaAstlGqpo|$;=K6bAX--!?O|A|CN68;I*GA037xVXPXaJjfDVvW8t zn(yZv*!;?^YuXa^FDCkMygJisix+NtHZeMp-ea;*<+zIR*G6xwKRB5&jJuMiy&LH@ z#Ah&_y8rVxaQP@T9KABB$HCCK2Cf%dRI1U3l|*bpb1eQPX{3;tWTE7yjQ^P?2*Wn! 
zBNv&`=|Jt-o5=`++gJ-7W$k2LN#{ss_pDWuX5m)2jlrQ|<9D8x2(LBr-5pQq^%8OB z@GhLdhO@kGq_Izh=N#o_B~$3n&c`hK(zP$0m}1BPLL^`5l_sbkiAU zMBQHYx?kmaGW+j$kZ;jn%(zRG@-&~vcV)kya~Lf-r!U=MW@-Y%*tNMFt&Vi_(*YNA zDWe1GLOWvU0TE4+$in=QYncIH)s+6!eSL$`O{5e2Mmz}(HttGUqy<(zK@Y#_0m1iXFmnHOnmb@!m&flD z6fu$GL(#=Dd!>DqCxGO3%=E5JkCC|diBQ0uoPCQbg~b99Wk1;7DIw;0I5hQ_Z3oL%epOt z!Qhq8_G+1^i;dvO$wXwwq~mPG#8!LKeB2C{$OswqDrFcYlUz%F1f#86rSjFQvz(hq zU_*qq5oNl3TF|qumH=1HD;zv9psP{2H3Ss6>X8SLfxDv8?N?HrE?RyGTu@GQ0^{k~ z6GO9_gTVtC`V-pLANn*<>7(vJxXYa`h!?+v>A9-CdC-}uSB z314Q{cE4}NTfA@=R0x zEV9Z6Wp>xE`n1-Ox&TeuJm61zgy{AuPVky9>{e|8CTh;ECG>Q;lO=7*zIJsjl@Y#E zJ41#s9h1r#QfpK>VC&7)@jb8^g3w@IlI$i;}=)yhJ%eGbA@YEsiSgMXkKbO{I zr!81SACtHGZUZ`c_Esdu4(!W3&*cYn52&<8x?MAkZ!)b?t+z!Xu5*Hafy?b{!UN9H zw%j0#jct8sUGHCkA&C%_TwR5|J;oib2Udek^*h+g8UfIy z^@zvyk|!ai_g$0n-~rAV3cv9Or&=Md7RQs8O`M}&tq&Bo?R9}GNB|ExjRCWysG{gc zqd`r_DE1G}AMcZs)pI@n>5Vo9)(~BBv3#)S!AvDIkc(;|#Y*1!3U0-v`I5)JWeV3J zCE&Gz>txr#*MBn{pKS=atI%bixIwipq`&M{bX=9pLnrKr&~vDD{l;xjF9kQQx83YK zN3V9X(K*48C7K2DCLxh%B;EqCQ<+x(^hkb`8BXYZsF$>FaO*tm;^V?^dBjewzS3ml zE$(j>+(dzcV?j~S8!}tI_sn=|G`jt*)(d$Xogo@lMA?(DYLwa6(r&eMd`9#uNWo-2 z(4C$PEnR3_ur20v=ipX1-b2j-B)k2)sI-XJ00(z_nb&P~0~b%^hsM`O{Y!~=#x~8{ zM&DlZC}LYDX+#8uuO}dfmPx??hOa8O^0#=+6y}IB>N-{|@OA;1CR1UWA9*|+>Q+zw ze*bIDV*C&{`-|{1LT8AWM3ij)X>ph?yWMqW?+DE>0depfn9*RZ)BGD!4ZKXD8ILoi zE^Z6jMv!##;vdY75Tlb5d$BYwke>jko~!`%O`INkqOSQ6G7AhR*<%JVSB_-#?bBYl zfo~BdpzB=0;K=VnEQKrb-b~ACp-{jY_mSR8bYOKL3^g+NjgVMmM_?c|eD)?{D>TW? zW3J&R4yOt4C=Kw4!WQHV@I!3}QNgbi8sKX?4F72#6=(Wc_f>YPa@8?&=*+%4$ojM#}m_Qb1uQ z`y`ieXt3=f2k^>A&tghr=>2AJtHa@=s0x>MV`ky-RP=o*zW%@enFp&+jYD_$S72*EPHyckET`q`lYwFXk_jTu? 
z_gU(6Y1-86;8KB3xbqJ+-s(t$2Q({pMz|=7t)K`)b6GR)6G6uU2uh{A*Gfv}{2abG zu)(eg-}3>qpUF?B_YND*Q*?`PkvsS4(fXgdqmZod{L~aOq-s3v`jZWNPw6{D6+JY@ zWo&qCDoL$gtN&4of7{|0yC;#*FOAmTkrV`iYPY)?(h2i{iuKXu@?C~@!S@JN@40Jk zzs9vnYInWnTB2>CgVqLi8-YPO>>l)(o={XYbK7pq{&yzJTbg2Jz$CU&@v{ME()(ZU z&vLDPt}n~sHMdevKNiwN@MjI*#A8b-hF6RF4_0jPM@P1oF@Iw>6~IO2UCcP_vk~ff z9y7JcI+NV?Jv}L@eiQd+xHbjL&7Lf4lRcUE3cgt!UoN3_!Onrhhq2}6Zs1&X?34?nxc~AX`sEOE?(wX4v6a&G@q&u*zm^hmSelqFJP z6-!l|o;|?W2eVNVvN5cc_J$IX$<(3HxWH2fxliz$NDShbh-&Jt_EY+u5=Q~X`^De* zs|2}C*jgaQCgVk?*r(>U!Ke2%Wctsd8Ebqu;&%=bZr`F}{hVLXaAst?5)?-|YBAvQ zk;TIb;!*+UKN+)4bbE$HXB)byyd^vtNv5n?!Vj)?etWC&OASw9n3g1 z)U{TR^=iW+z(9%wC{$J)A^qolW~t0leih2RyoXf?0hhFve;KMSH9Pkx^vMJ}oT;@N zHFhfXze&`#mW-d14UY|D>qY*4lkC-Oaq%frAYDFAS7EuOJpQ$%*1R{@ZWL*P3(c>H z!O+GDsLc`@6+Jk?^R*Uzeu;gD-|*{%>qFT4)S7sAR>+lBOkhG|%#9?u;kCKG`O3t@ zD@3A_OqxOGgox@o77=TmpEwSl!Rq_Ojw5dJ`UY7UEmZxXQ*u0_qHp2b2w&YY-y+y;z zsa+u`aVY{84Bw2}_Ysfrdq2{1)jT0_1oy5U*l1=J-GPm_t%?gOrVcSwh)GsQm&rW2 zsACUY2BJA-C)U||-eF#O17W?{p)HA(TJd-ZMutiar#|kU+M`e{cH*PeZ}6_WvC#g)j{Ia|R2_G~<+|`g)oxmO z3i_<8rP;C}zeQPTQO;7RiGS%@Ai~PR9r`E%0Dxe(d1NkWO&~vkfEh)8EpVv0YOJEI zCz6M5T6u>-Y%7;*l*@9U`KP!Ac}|c_anTHeG)Hgu5AA zAr3%VtBd8mXRhLPr7L|&2DQK$Cv%vv;VM61&)Z}GODbT3o2n(z{4+Bx%k)KcV@TBT zO-+BkbPE5|NdVO9*YnC^J1TTe#NlR=RcEVg3=HqrfQisEveC;s7oS|0G8fE=PR|fp zk}eAK5ymly9SQ9PY9h;~!lP7qqs3xl=BY`eBXEbe!D=Xb32BVQlp#Yptt5i!oFs9K zA}g9_5lzjRuazw|nfIBGF7(-0(^CcBw(U=}NruVQgKP(@2S>%z@sirsvowU=oH9J9 z9-O-(hk-?0!{|J21c3ZhKoD)Vr_q9QcPl853o!?uK;P(>NnJO z0*=Yp~VgAWb8xG^*hgyS6$f0$GNyR zAdm$Sgs{-8PRz*}doQn$3^(p*+DNm7#Pa+z;o(h2TL-se-~wdUZni0QttjUrKf8L#a(CsPq4K# zKg9FvG+87E$Fjj4F2DJ!RgMR!VK*fD`Y1GDGhDyuaXNK%Wk<-!+nWq3&k8$-a(6YD zTm2dsAn3IzY$oc;`mJVMT)z|DnFUu)Jeg);?qFz)wF3NvkjuBEDJk58)G7v z$6j{fZaT(s1~i~;XL8riuI?6$s3V$>#ttReH@GrFjG2s6vnowZZ`WQ-OnmJoP(DgO zJ1#0X23}dmG;Z3q#j7oObdGs5xE<6+V4_g4ys08FKVa!QM^gYTv1|_tHHrA4?h{$)7H;`tGjl@Sw{+@Yy$fZtlVI=PM z*StPA{s;jN?41IK1nPsM<*JN=b==LWvlAkV}weL_&hV 
z^f~I3r#dc;WPK-bhqe;}Oi*g$dr2>O5+sa`2-+V~%KjSv#9d+w_#Dl4sU;!0yeooi;kr=lc8XvEg%ocPMy zgL{TntM^Ek^l?0TY3TqKTwo+?di?D;v}vSMeJ(`RO6MFk>I@Dus2MDE%hCTp3%6B&ar9gBF-q%*>zdozPur$I3uah~3`XQY zFY>3CBiV;rvO+loJZ4ktwj1JL1J!prReq#1a2U`-bKOVxi*Y`+)wGTK(;bw{%=SOc z9kx}C3WIuTq9w1TC2x_lH`rZy!v#LJJYOZoZa)ekml2;9DJbi0Zdg?!eR4BlGRh35 z%oB15!)DI&^8;;cC=MMy5Pc=7J)U;MYoj);ft{r=fM>spcVlCa3W)4B3ks;{P9!q> zLp^la;)^Wd&g2=dJj~g+1367>RJQOOWi%H5?L>lOmcM5XjqD zNytj<>nmb)TMg<&7(b0=*|PqjG>HOQ?kk#?i1wMVz$nWwT?R|q%5rLDTzSg5>vvgx z3RKG5b6hAQz~)Jb>OoE5amKeKPCaGvNpKp@_{&)U5mG8U+DJSb3O8l-_SdJoJwa#dg!__gWK6~^ zY?(Ad@&yIu#tQE(#h<)Q7qUK^Or^gqn-zN>po%9%ea7l2Ui?(++?;2c_kvCrmjhL zzj!7IgC2;oCR|QA4?_2;)!{ESSVqG!6_zJGzVJNy5)^Qe;n|upPe2JL>AO)iQ&0Q5 zx4G(Ca4-OK_NK!~h0N*uhR=!7{=OTNU-9@3%q@CF@*nRPS`VvufIuD8e(u<7*myp( z#@MEr_aKSMhZBCa!8W-W7Sq*1S0B5qWBLWz#!TahOw?<7-?4V+mlb|G^Vy3-!FJO2 zhTc>j&J|t(19i-s8~kg${k9_RFk&3>fFx1>^1wo_$far<8dU#rjyj8MF)Sjk+3XTT zT>M;71@7|lrvqv7pqpCf>80c44O0pMHoQRm<)h=ng}e(#?WsWG{v#h4d8zJ;kZjsf z$EQ0kle=;K}T=?x7^H5`j0lVDA0>B5<@X>JG2>Nf1Lg8 z%OqCY2SRh(99;yTMlNSEnc%nRE5`-We_AAu$hnoFZx&k5QV74nex0>i}|1vrMn9ax`A&C}a(Fei&fUYyS_| z+&<0`c>4;*;kJj;~4f7TbyF0L5el~aj)Zg?Mc24h^m z8}owA%!oT#yrCCqj=so>X12#DcUFBbBEwxHAokYD<2~j%EQe}ed-50QG1rd6%_n6V zhi&zHQN3)(ta=xW=L1yF3t{~oH{Gm9?c)~otHABHrzF5d{RE7inX}1j>wzV|#`{UO z4X?}I7q{|k&n*t=Q}UkmPr2)xyQTLOFV#Lj=O)49T2KpbC}!3^*k@@XkO>oli0PuF za!x++yhS2#w-^`i;W4EZX|U#Z2$H~%>h+t;kT@ zHt*{St}!mM{x5;NwG!Mr2s~P^`@5m=`|ntX?At7#i0kR1l-WJ3>W(M)1IK>o!^wz|FaFD} zVIPlMo@tXWoqE~v+fWOM9rocmvzC;1n=Jwyf1m*dAI^3C`6K7jWEoc^0aOXUA_s_f z@9YQr$*UK!TaRmdtfPk(Yv1id+l3MfA4@3YC^&|c@(N*a7V9U z@7@JMHZj_#gGHnPzCR_+M=q7e?Zr5MQYB53{VHyzXShA#WV0g%uCax1qf`M3*(ViW zXyW5%?S8C^IEzr8ZhP<7r|bVie21i^9d~NKqHAOWA+qFmOSqoSnsw}m-v;M(y1sP6 zkT7jEM=w7#Iy~ySIzki2t+Z;>KafE7zq~-BJr$P%3lYM&Sw{i;p;=y0nzTvQ(fziR zB8#T;0Y(%>%vf|_6cLY>=h-+{LC-> znlSthwl?N?DET;^C0Y0W;7O-NF&cp-ztW(jXbt~OaUbI_UH=<%{9l&A9`gZX#!3U@ z>;}IlA_8}UN+%tIxb^wPM?-=b%ZpN<_}7Yly*JLX81E;gFDvYIF8-v=Ui7}y{u6x+ 
zs}lFMh&akSs;{bDYu{DC2LmSMuZN!#KJopgNQs5;?|puAJBXhT%7fi*moQ6+=Xm}a zL;mLNoWp@qD|_WXKmV)*(ajgncpRAMeX$}-&HL~*Z+}Zq&#g3GNH)LxRpi`lut1o> zXP^MRL)e8>Z5xoOFXK3uMbf!htD_`=86;U`wMUN7gbB-b+l%yfq^vQiCt7TdHY{Yes?1s9{NG_2m)LuE^&c_s~)=sOr$F|e`qUD*1^RqR;T6=H4N2?`K zp3ko66T*$j9mS_dmyo<8=J^3#=0XX?w@%+Y_UgQ(D>-0}1_&n>b4$h+hZMlB-COAD z**Uh96$l9#8 z1V}mmSr%lzJS*`1f;z_%8w;;=<2@3U`SaR(r%R_P+$$-)*S19&)`Pt+&uFG_-?GwwCz-WQ*Rbn!e<|GCD+b0m@}oT`L#q-@*JHZFHgdSGAontyfUk(eT12bMp5YP8r=o>~dbF3#xD4LW)Vx+;;3coEEG^Y3Cdc zW!8e~``T_v(Nhsx5xS(7A8vj&b^H-R;xyuYxqV!ZdANdHfEz~+r=2E|4}TT!qz?|K z;6?au2Dqh`9q}m{&(}~wxzmN_qm9ioKXCpeB1sCC_2QxAe(&2HeVImAbn!aD(#gb4 zPxua^$Wxiq^OkP5--LzvXe=gw%OzsR|3d&1xFPE;N0LxZWTw8LH~CAuU;A+rGhUb2 z=DpJSxMrW#LN%hWyx#DN!-Uq+GfXH6bh1IP3k-?G7#LlXvsB?n=;kfcTpS7>1nVrc z(XT2GWD>|<6n2YgIH4K(3KidyFB229KrP(*nT(%$&Pl+@5bp4PN8#+P9|*wA2Y{BN zDb6uw`zdF9Mtj%Bb>L4)(wwGI=W&v67o$I~%wD+#%n*l7m`x^ZQxCqk7v*}s`o?A2 z@Nlpqpq9=4v!-E%r9vjFAv$z@t_PscxVeKWT;%4;!J5;-$p3L`9cO_{Ma+q zTUs(Ty5z0RsnD(@u>|hiMMlmGMZXe1w$3Z5Czu)I+(CxyDvpOFyotx!vu@8y6c!%! 
zZDBuwS))LZB(d-f#Ev03d3oV*!QZ#6H5oFoH)?9kRMrU4#F#s?PRo6^`c-a7y*885 zJ*neE6ot`TzUY#XT`zK=r9i9Px;bdie31(V zpiZG)g$R3$6nHDKm{k1KG$Mk`;a&9LnueJbVI7uEfz`(Eubj=L*;CYjnafJYx{=I> z;g|2e2!024uiv>lQ`u9}KQ&lbc{t@=?FN=Ac@8gD1-|8+aDbg03n*0whn&ATuqY}Y zMEcHBd$!oq6JyL}xymuGZFj&Ib<0Ow&Noxxe97G(t(n#kw|<8fOcH%LrVxe_9Z5CG z^1GlQ1w)cUMr~sEBv@QScj1+t30#q0rFNc>REBz!f0jymYxEBGnGWY3M)!TT5-C@Zc2xnU^=|=M3K-( zyNoOpI$c1tw#z~mPRnm=@R8fmJuU~SdsJ4|inO`;8;SC)vnZyAgeA`-{*rHk*OeEr z|9Cj8^nHAFJPJ9MMyIqK?Yw9Oljpz|^=`>|MC{!^5FCRK&QA06p*<7AAytbXm1oRq z4eo!b?N3lSguPteWY+p>k%j)_LiCX|xm_{t6GCsx}Vh$i~D z#h&!m(bL^gKa2ee&9l<=iGt)g?7II-3%e|S={{pnql ztZ{c(Aj3qK_zpT+8Jr?)XysCzJi;>zQz6m*>>L@dL`XOtGGPY!g*~lc5 zb2^SoYH#@!nZ^Jqjr@_0O`dc^%8+PI&YDBZ$!uwG{HM*HmBSvQ!d8KA2?X`mrzLQQ zsMWj@203$KZehxN5QbA8GaO~;&jPcUj5Hw-%M`U6UG;O!1TJiiJSs`e&T4}99D_nY zDC0xcBg6d+W1EW0@{{;mV}VkDcl@7rAg{|Ul?fe&@-G%_QqD-tV}MbAu40&vzPxqm zahS0Z%Xm{{Fa&C{5FGe}bI!g5EG9M0;7R9Zl<-^j%;Q7}2l|R=9TrbbP3wO3fFf$% zDc2c|{gJfEGlw_h`pir?$L9zx#{{FSc>URuDWCnJeU3WXwQffBD9~`x_}aOrghFL`io_R{pr{EJ z-44V*345IfEI2ieL)tCrZTUGCXh97=b8N&5H(s>qo?ZgpOS(SqMKrbVf-6n!%wJev zi}=i~ExU6baIjc2uFG_`n0@Z+Gyf|DP7(mUX*qKbbNT<6G5u4}E}-339c+JfG&Bgq za$&3c=G6f|lo^WUIb)N~Ih{Qej2i+kiGTvzuDk>xO#t1S@q?Cmdua~c*7Kl4hFmSy5k z!g>ns_9&L*FoEM(Pr?tIOKH59*8sP?Tx8t~S_%fguaew*sZ_VK@k;Y|yyTi$eim*t z{Q+6hzTU#HCt5Ns}Yj-NK`QuA~l#o9c)ZtUk8k&F^;Ox-tam##K3Zb2|) z8@WG&Hvc`vJujuggRn$B(b~5693QzBI@cJS_(msY@Pp=`>*4cS)xIyYdy`n1+HmgLmm&;RUiy8yHiv#H4k88Ysc zK3>ubv~fTR$@u2Gg@~rNGRjZWUd(E#!Od;%^v8XMxU=J7@kra4RAvFRxw!tGx9J%1 z3ZN;vNVlE;|5}vk&}NXl&W8w7+x?Rzj}(~`W3#-DJAqSWEbjDqyO>mOGET}Y?W>A_ zkT-fU%`2(G=&Fq>t|f+jIw?=-!cMVjax|v*Ts9={vEkZ!YMQJc-y5kSqf25c*^p>u z-~I6%^%rABIT}rnmL}MY!i@P+f zr9=6Abj(aI!0&X6GHTY7ZIAFZL-Jn(PAKj1x@!0RSOT4o`3MZ&?=X69Z(1?Xt?#!} zo0Zs{cH6LE6DJL%!#K;5$v8dE*f@^XpJ+OlW?sXs!Mle-3((s4UZ*eUMZ!NPVsMza zw5y0<@50kWKsEIYqjq-Zjh!3qnc@PMm1)u`2F9K3dFj+ZOl3ti8OuiWemq%O2YgiZ zawEOmmq~3J;=QfL#fw{eCa1+Ii~TMipEQJTV8y(J%Y;E&%Coc-dneW`~0c4eQwy%DpEuW_-+At 
zk5ynK*T%BodPpSdgSz5^Q4RnA?B4hHrtUVN&IVAg&cy+Fb$F1r2ba-Sk@Rab$ z?;S=Yt;MbKYeHKh`%xZ!>QLwwPuQ~_r{-+i$A`=8$2sEf`99=MZ$D_NsK)cb!6v@7 zbph#;PkinOpBAGUX&ARRC3c^@jEN}-ey>O}`<16#ufCR3)j)z?yn3V3QEzFE5QN9| z0wx&3?5Od_{jyHkyI_69)p4^S5FZrR)@ms%K4y7Hj&#E(%d9++mHnsCrGq zZ;$21)}Qmve_$7=&T-UfRV{z;d@bXX88y6LF8CQBe`0UCu?2)(EFsh(?bO_Cwhj?9 z{Zw^6%S)Qt7I2YZrhcDIERik^L^n}YbK>+(PfwnLvKWMbXz09|(d$jpTYu8Adzk~! zMZ^LkpHEo&@x-07MKvV>?Owg16y|cf@!A}-lMD`pt$Nn)DIG7RydL5AOF_zpB2ne&*-{{aWGiRTy#AXwWOn$!NxCP>7WQzAF`V}D#+TWDSlh%=YrLMQ@ zi7@LSiVw!(p{vOt@9oipIOeATao^NrKRP<%EY2u?n@dslxWsP?cF|(*?~mlc(>L8U z)e~|}EeKH0^6-sg^gjs0R#AMHGa%B!Ph8AqDXZUZc9W7s(FH5U_Fev6RldF1sA3D5JJQHV^G#Br^GjctBzcBVw^kWBP%U+?%K zBH2cKm8D;U%-;A!tQ{ab$;gjK4mhmMmt@kA)56oO*0$#)%#vE)DKJ6aS@?PY(=26_ zi+m@H&IPZ*a4^J21CP~^&udTZuY8BhT*}OjwNG-nqo|}$XL~{%Gb~13hR-g0!{j|B zhgeQ$X~qLc-jjVnAEk!0{^WjihSYq4HCZ$?RDp8MprF1X^KV>hv*OgylYVoORb%*2 zg8$w$`>YFi(nDV5hZnMDZAP@nFRS;`9fQ2uj;V2xSDKC{%HQnj915$Dg108h#=hiq z&QE>KQ;PT?v;0l8X znd?OrMOKIj_dFT*J|;~*2u;lw1?Z38}g9+x)VX9 zB5RYV^$_X|<@>{HwX?kmAFx8XswC?j78XueV>^M&49%+@-q3o!LAsIGZd#^{X%~@v z>V&dRW2S}875$OMX!xhvqv9#Y;ZT59MGEiLv`X8L#i(AauRnFzrjWjzq7!^_D++Tg zu)JI8J(jv9yYHu;VrPH1J|hKOaz7%okfkjm`0tJ4u1We|D8C@Zp0(w#V$X1YEnI8w z(p1|Rx~8ywK_<{%8}Q)IT+M%Dxst((hZrec1X%nY>`2ZyiiIGSkq z%l#UIN9RR)ckR9`kIFQY*|_W+xx`O|9eZ`f<+bei*<3ML3_TV=%4%#rGMhS2op024 zs4d3haAZ#ayRrx113>>=_ssTZ%Lz+%rW;>wY@q4q5y(ym(uQp1@jeJ5Ve z@!ReWBOgmR9OSum6)86qVliqeF1#=$fj@#%cmNrqN|vqQ&(g^9)ZBjevjS--h*{NW zLwZ`Y17(2|vr&3nG7@pNfg zClpfEjnY)@GqvrO0y|~!g(S?a+eI>;|B&$^MNFCvX}XWWt8xi%#ep^dQGlT^8Sy_R%#JO>aYtcmz*)QEG_gqcA&_#OtEg*ue{E-N3<|FaBQtYzdO@K~QS zb(mUjk&B4p16@}?%gSi3w^+F@u4R0lX}?TZ-a#%F2}7y0R+Mg2?nECsMG%4L%+He* z_C2|8>>|1G>ZC4h>a=8cC{2baneSplCtBn5IQM{r~|iKqAdEc<~0)~R5aj(ViF3+o*VAqx6?qmUu;_J%}5)x~Dp5@!L)G>5Y{f>I&7#;-wYLI@yOpYQ* zHLK`n>WFi1)%ryDj@yZb5f@TXn_%KbF&6NY^Xb>h)BRrHD}&M70&A_G#J7*I6gUA{ ztPZ)-b6=1n3}2A}&o!i%MgZTe&YSn!cCQo(_L4cJDdXMeI9D0y3u-rt5;}E zXU4gBknA 
z&}^_0$w_P@{%%{6E=oKF-@{9aGi=ghq|XE=Go#_qL&{c%^MbjxksFT2S$wwz_pQw^ScV$PbOR+H?WH%^7^1+@7X%9%j?b~-5E+n zM_G@sms>T=aVwEU;zuYGT_nQRrqv#$Qfhb#>I0m^Fn(3$;NO2KYujfcrc9QQCYGc= zv4TYTTKj~3xw04;>Z}k%tgnVW4h(pWq0-CCNhGw2G6fy~^h-x}lB9gu+=5bVL{aK` zg@r-(ch~?I>)Sps&{$OIj5N^aLHpYCr}_)nsNl3(cU8yaF1&Rtg_a8l@084;7@)+r zxv(Hr@jA9NAEeMhM?@HFxLtn@u-k&U3F2gmr{?%|a{2)&z4)gu4O5}dA*W~W)ilRhh_YDoy!zU%izVax(D29neGdV_Hd-T*|ELl)Qc7iJYse*yI6Spo?4YN( z8B!vH_M_lI!=ZlFSkC;CQiOr@MKsl*xHxwZJ5w+kOqKzzS7GDyOm)GLrV`+#oA0E} z54vP+GzbN3dIr9q(@t|VP@b4QQ&DY6exqNKH@W?Q!EVd&iTBGx2c(qw-^~duvkAIk)zFzx2m!YUhh#?E z>?as?^+|um3FPem8I~hhp$y@z)oj(GGPC2c z#d;UQS@T`<<}ED*QTmD8t)`!O7{(e(Vf`{DW%QYM=7Wqo0dg{{y6;+QEB;W1_LRjD z?Ab5IyFBp7((KxH)XXJ1xe1SF)vJi7MEta?55-QI`;^1H7(A7Lz^^NhYB=t+oL2VZ zz?#GvPTbmwVJDJoa9?X71MCG2C$DWe?)Uh{s#FqWBKP)(gJzEe@Vtx-U_M}>T%~7i z4K*y_(dTgrzT0_-`akhC=Um06B}l3?lyM+(0q<+%=9cgrpS`71P95@%7$MI)N79a= zj}};@K-o}#sB6^y{Cl)afe zTPC9c!nF#TpZiECMpIx@qvYsjRFUWpf8Igy5Ub!3)fPZSH=pfq7z`Di!c@`~}l^`?*1XuSHqztU0f(I%Sx5_@` zuiIY2FkRo>wG+1|hr9*|=6~TZ*!hGP;?RtGqs{$=tu3HCzVU@Wp!JSA<3X4ND2#}S z1Im4+QOOX2*^bEjZ9J~Je`x<*1!@yP?lcRPGDpAfKnd|`|S zn|X4!cgfJP=-WGid|2uverB&@0htTR`94Lva~G15J_k?1fd8h*ml_*8;US8Abub9| z+|%IQAasLOO;1-0o*sMXv#eKiq8qRH$;qcl{{1}_X!)1rZdK8b zrqC_8JJ*($G|ZZ}ysX=Z`x*NROroyaEqWtbn6nMh1)g>AvU8*e+pnN%D6ki*|F3;+ zH_FAFg6gsxmf3MEp(jB@k!6@tJPzDyN^X`M(X|3_&`pH5wc~H9Q+U*CLl7~)c-g7= zJacviHx>no`g3XBGgwjMZCetQD=6oz#q-dEV57~q!QiGRP`i)DZo~X84ivBCh7~Lx zA{wCMe*{HX)2P&*bK2vmZ@LPM!>@ zw4+aw_xo!f`{n7(fn#8v(TR8T6_h~E15US-UoQ9U-hHyXa=)NfV8TQqi}1%XjQ~SF zPTpmed%vynjiqg>PWh98E3D5*-!gaNcI?p8kJkaT>DTC6FuuqJ%{Fs>r;pc#s#oSp z{Z3~Z?p7al|7*yV5$-(xN}ePaWm|Ll#&Zx&#Bm9VvWKB<@~#?32D zF3+|zxhXc-8z9qC=)+V+Jo=1DuOSYuE8EQ@G>P1DlNe-&rh|Jj{b3ps__%7eAx1}o zNRc|F!FrK3Rpn2Wsf-KA$Rq6QRP7fmu5m@?*x6QB1v#gdE23pRE0R{qa!P;0Dy?&B^@(!^F(|A zk(?1!C!%W^lmR@Az>Mq1vW(40)X&q6EKK6O`4}mpeznnHJ!yl82&if1x!>*u^dkL( zk}X0`OV<3aM9L>jCA@So?WCg5`bxBh5fm9?b$m*{eEPzf-O%9(-McpJt{|fvNzbinnLPJEH5Z_pXN(_@c zvK=t~UzVG&?Kr(7t+m;i9d8%0Q 
zUnDwCieXh`SS&X?C=vvd(H%h^xRF;TZ?9t*>Aw0UpNB5fsmpk&%~zZ(0dAEK@B}1%bKgWQq|m<4|H|yOX!gb+W)8!ex26-JFQ!d6j<+kj z>Aw=X`NQuMY+1jP_>@7)?-BViUuxXUBISMO7E>8(D^7!w)`c|#T+g=*rK~b`&7q=j zG>poi`KVUcZG4sUgx3#m9VUXZhNsK!;gM^1coa&l&nYTuYJIBiaKB-~iVTI$?LOT6 z&cOO$x$|uoc|O`zqY=2L-v~ubm7Q}?)c%dI)SRu)NbHP%ERi!A>GVcKKmc|MI~BrV z_P)pY_r&O@jczc9u)4Wt*+_kJnH%z#-DI~q@V>t+;Wx&en|hJ!MfzDkYLhQRQeVS_KCCvzIRyikyubteHi>P*dHJ>FdkQcP+w^ze%~s9fU`?0P z;}sRclirM(k~y5}7a|kHnLqJ=wh+6MrQ{v!EKAa|5s}B}$gfG^1wwXI*1JZkLumX0 z-{mVv5X2gq`kpU_0K1leXZS$I{kBVf>Xa38*N+<9x+q=0#)gg#2%Y`n!8D_cS$KG~ z)NC|V$yORu1j^3az59mhZH-x-XE`3IcsGXMm58?d)NADTi~oyd_INp!#qeLGG(7`P zo9G?EaQ4Y1?T%UPIu`*EA6%H4lXa_?NEMZ4wZLn0L&4u+r`VG-*qp3)r)tmj+B|>{ zA>e@uVpgYhle@^P0%QBC!pdqmVwkmGWF6R3zE%Wwh z0gw2A*NtQ%yi=44=)}a-NZgkkUrSmTW)MFSYYq3*_=uLL^E?6s8=4s^wZth*PSW<2 zPkViVYoGKq?0P#%;{bbGlOjXn{e~hs>3ie#vmTc)c|Iph!aYkXFAu(%q|P^Jkbp;c z*x1a)Z$Z9`KSx+iC}%}dlO?F$2di>+_sOY1`SPPUPIMbSc2z?oAqI1~KML*dlSrDo+5`qzA)zcxZASk3 zGa_!3u*g<^1y^PqAd)7Fa(}kSDA+42w=BE*dJcN3vf*={*%>@4YpNB5{LUu|PfKGn zikbXhc`nE)OWCpzbgGbKlGW3JZ=ob~rH&w86oqinV{BbGZ8z{W%G$Qc)%H+=AOZV> z>*@CCcZ)p0=Ls<2Ia0k2Ug9C-_liW(l9&RRXcKXAYQOCe535^&CvaC&;O&wH7j(8e zLV8NgCX^%UNUY~bOPm&PEXT))nVy8r(~D zOB{*uv@ZGu4Jfh5*{Aac8}gUT3UptV+M5j z)1ACsz-4a~hT)q#zHgBU{9Q=bt(0yXTo0ZsxJkK=!(%(GBQxO0-_Gh0v;8~h>h>(~ z7u;84=xi)lUxB)7{z${8gmIaVzZl)g5es7$*Zfi}U)y6=Dxrht-U}Qu^65E{`xuj4 zNGr1+awW+2>92_bON*8dD{QC9UnV_a^jsL(Kv5utFhY>wFU z+>th1TaHd8L*rVweh>RKRkH4lG=JwiPbYO{;7USsB+RWMzUKc$8vvu{C~Mpza|7Dy zc+`c@MNw&&TYC8M_GK@1BCZgcdE3>fop^699YL(s(`o2I$|EIbI}C_h&&WXpXbniuG)(?DOLo8~|#4aPzM0@aZ6dVi%3c|%6pL}XFCfwZa*vlU=Gjj%lcNlfP z;lu-ty{KVo&bch4j@&W@*_#_pPMeHlWoUVMqeOqh0ef^uS#HqzY+-C&wsO|W9x=De z*sSVjR;Q)pT@meZ7~2B5tX;L7hJ>>-aUo$v(RWyYV7MQAx(`) zM2$!d(omRIHHi$x`isZSr-PX=f;&}^$csTrJ%PVI>Gi=dj54&SEP#THFyv3$fD?$6 zY`A%a^IXa1truRX#D#lLVguW&Ljykr>^82n>lu<|fCbo)jeNTxQ;Qb#Cs9`7D#%V& zu4yoLq_PBujyzAT{W5_CbSp`e+GI7 zfjt=gg$>-?wD*Zs*d(bqTze>7)G3{n1O=7+OZ0a{NKB9}N?$a{-CqR4`*&6vUCf9D 
zf=wuV=7o0qv#0D{u07np@OaJ$=Wk(R48^XLRDytt`rqy%U5loDOmKY}ec2`q$4ZZ7 zyknIAO?+sH(y5vsdvoN-yg&8qep*G3d1LXmp?PR8I(lrOY=JCT!e#uthlD*kV;V^L zJ^o#Oy+%j6svGfurwB!;E`jeOK+$7A&RR$I%4CHc_qNa5^HuLnVn#Rsu~f|Z*B zPn{gsvxh8wAASfqgInd(h+d-jHT2`#&Qd4HZ{wN&cn$Qv^`BXI3fA;!io zK}t$-TDsiYd3bP6X7V?PfW)k213Oh-v#tp=IDP8^n(?A}+F}I>%~{i_WRs|5AKwPZ zRFhuCda0F4OqbC(ahS}1|G`oVSeDS0ifjpePN~%RdRWF&Dt1%2ef=5`Hl70 z_y2vRh7}_5pT}hXw`vqMVMrz`@H38(*Mjw*{>%WhdxBx=ab=U@fBhxncK|QBfA#cr zp|zpn-rh|?8j|h?q@u>d&7?o_a$v3+H2!uXbtnmMYMi|+hqDgLMF}SQC!+-iy z99WBh&0q!*9Pw93A0ZPOGM8egUSg6zi{;9OIjT;+ElBhn|(zX69XF`HWQEQW?1Ai2cL8_sn zH|?(GYk6}_XcRy1sae|AVVXQTvyZNnHb*W1wJx3{qYHraqnh&3SWA+vo0_y*ZcF+@ zx0A{Im_rpd=x-oLqwd~WR;BIsup?PTI?zR&ugea$rITM5T2DNTf}aImjbg6iWfPh= zBCTpkfO7T0a{Nz5!_7j(@cOociIu?3J0$-D#IzC`wW7YyNNqpyskef@P|mJ-rD=&| zfqJ(a?M8+bqihuS-A{hIeNPen__67A)VB|4xe{nmXJ^WA5yqhMbWDi1QbfQ{L74fv z%h|yeK+s*iB&J`$Fn8pGzmQ{)-R}g8Xm}#uoj6uLWl5I+{G6BprJQ~Jrh+86+tovS zm#{N7kR8RWmaCb*s<4(O+y7o-{^}$KmEEovmFf&M=Bzkb;_t^q9z+)QhSd618vK@O z&~7>1xxwbr?EftaO8#O*UEIt2&`#4ntb_Xd^!8?UYnS_s*c0JDli|RMuZc=_NY~CA z)KW_ww%FUuinRBGS?reJUFhc4l0YDe9_A0V8gqFY?3MoG5oM13sL7?a!1RyZo&K~T zV0|gDOUZC<+gER(kl5PQBd3W6QpwnMgrfYt%l1phwJX6p!9ef$BL|Y6Ic)Fp)jGnH z@4YLY5l*YZq_r$bJ%tNr8wD4DpEQn&e;TYse}Bl%Xy!;2dctfEvIhcsktuPL%JlIs zOR9Iv>O9%Kj@2mM6+oNJPg)q;uq#>3Wm$aQ(&2ouCu#HM>n{F~__@Q?7eL~3q?cBV zU3v(QJ<@E8-R^=zp5}(r9e~Lz#GdG_QKGm2ngi9RB;=6{PdGA>hj0bxwR}?)l^*HsQZRRe%7R7`pKu-S`Tl=7UQ}-%(Lt1n>C@Hy2C6Dg;=D1$&Ib0)LJnMTa}5jDc1o5-NWoiQxGn*USu=`-7UM6j|v?MUI-?*>m}#{`Ln7 z+w@v5Q)rGf4hvD#!Q&+aQ7?6bYz{L0d=6wiP0&xt$3PbS&`a|xGeZTm?3hrZgW@u{ zZ*^JQpX)R#)yS{Jbz8`=M{&t=e%6q+vr!8|j{gN9?LLwixtL_1ULP-gZIks)!%6#> zqNjr>dV$>!AIYlKl&cS~-@gn_n#^&&6-eu+k-vU{N+#*b#qIth+tY^Yvghqi-xNUM zx%z_ggTwYcn%uDXss9mn_+3CEB`yCg4=##j9Lk5_cOQ33K`ab?IQ2CI$Xeh+-t#yUx1+Ad;|kR20;yV7w@Rr=JzsT35m$9OOL&HRmXbHg0&Nr2hxX1r zZbAg&dmH$nL5c2wpC5OGzq*oohupdXwsP5PoEIJ(okkUvou!X9SsWSu){(`4@j7bR z$7L#&r@}ES-FG~JHwcasV;qxgrr0s*r&J&J%A9MitX11#u`kVPxAL3Sk%)Oz_(T=p 
z8B)e1=@tjta-(u)TG$~tt<-r7cJ8|~QHe`m^@(64y>5&H6v+HoAbpoi4u)PH5!Bcpi;Xbwt-CHKyz=lNR|0J^+0ly;MFhJ6gtT| zG&81`w6lO^-}JugxJvELFqIyc6jrqGVIQ8Mh2rV1En7DPT1`gzuV@lHAk#lXMnAu( z?_S8f)tktD)1U|`iAggDV{$EmMpFszSh@YIoT)Tz2f{*tdlg`qdy|ziGjDwPe>7J2 zGI>ozI@Qa~Czx#4r7*{<8&g1?GiGL|Qwv{-{4QC852sOlK$ez8+0{!Zy&Xcl0OTyW z4zA;+xUDX~urO1{;>nrEIXWWatr`6BVSkYt)PrBo#+I&kIhS)HU0e^Ila{BqK~7yi zQy7WD8-24!w0km+CU?>i_;rLm;oD``i+LvKG2^M#+NjtIV5kB)l5jENP5bsR?|pEe zSU*Qq%w*ku9=v#70eU?b8Y)z12c3-JCQb+771i)XIds}^m@m4Cbo@ZIJz6N#IMz(E zd>gO6Rp#vQgfA7IPR|CcNP#_2FHY@|JCqAwn;{YAO8d`JotIQawqVh{X!goiA1c#Q z>Giqe)VrwK(})oT)X$rt2#~s)vKE8ASS~m$pTL0Su)ok_64imx1b$S~bu&Wp+yjG; zMy#Ndrg6*1lgHZ<#gb>cS7)K3H3OMUdD6|SlfqdDYutkn7Od6%v z9G&cF)ox?#O>uTC;a4-clboT3C`_-X`kj8#wQ$C>*6?s|j^&T(c72itW3}$q9Lrns z&T=(9E7P4^#=R+LJV9dwsgw&@)*Y{h(v`vouq68i^jC7Ji*h+nzh3lbF1@0mZ8JDc zMd-biimj^(LnBN45}xpV#61sF<;-9AlJCD9x@qoJC~gtJz@tk?$qlKJ6_BMerk@|; zz95Fu&%P*~9uf^jGy{k5Ee%0M0L_g{L}jx{136UV<(iI{^~RjpR^$^OliQ5k$n^RU%VWzc6!+v@-RmI zU-CesZ=KrzOzEU+*7aD4f3<+z`v1`dG^xjxNs3qc$IEzgx&6C`5Wm*wFBdFOB@6-k zCoaUx)$8!8dkOw)O1+3P8+P#dCQ=>Hwfv9t<^-f>9=v5Nm8#U!NhY7=q;4l>x>dei z@+@!(#O&?9hT%{mg%-p0yiS-lQy$JJD z8sOBk*&8XZX3ZbzpOFd_wY&WwJ7Yv8WBklb+|7mF&sXWzovT0b(u(Mb& zVWN7p(J_9#W5#eH(V%um__47wne{HAc}V49W;(X5813v>xCV^^E1p{GYm&xy$RT}u zInrgA7@cxbZ&w})zVUEn+r@XBm{vw?Ax;HEOF#BWx6iz~niTOR)&}-CpD(}no2|7N z5ipjC47@0J4qbn)*rxX+SbbU@9&krocKjP}6ouz+7-D-}v+8iD(eO6@=K)%{}Py@lwOWCb@gvCfZf~~;1fdiZdF2`)n|hGdC*?2@6%MPwf`Y6 zAcPT^k+wwhUeBTfbb{+%s1p+2aO^>ypfGwYe%tVXVASPGnw|%aeZ0w@+6eB61$Mg{ zL49k>NeDNTPneG@U#q1yU~NyhaBWUp3MsDJOeWV~YUD>g47;nBFbOWBt%H;TIxUb1b=+hFq~$%PkBmH0kLpcse|j z&ev^BYN(gGgTKNb_BF2Ud@fNtZu5i&Z!St{x=|K8O+VoyR^9o$0<-9;>R8K)CdviE2qJ0TdV{eit4$*hso(cL+Ab;iQ~#p!WBeg`#Dw)l2n!9_|9Dbaq7W9LU4bfP;` zR(CwHUOETBd(ZgOhXcp7x=_zSmV(n?xL!{$>_Sr{G!SY9X(_a5D7LBNPHEk;wex~) z2k!>|Z^WY043OT6wVaS zLmZcYMC*8p^g0+>!OxB=TX%IZ1{1uyBUV=X+9>F~`=hhKfQ~in2iMNblN6irWxJ5S zm~oen*&^6)E~3z-B>tn2NVw;q2#6o1dt(|7Y~l_5 zvFY-r$iMX|+vJ+$685a)hUvw&W__uv`2u_>Of2(f1s$p5#ni<;Fok!GbZjaH@#XZ?WWI{0 
zIH8M2I0YYnHstw)-LIYCeyS|JOyZAxi2`;=x@>R-MYq0}KuzRxCWl837 z@V+F#l)>p0F&U5dS@pE-28E=n1d+NAoo6DaCyB{g|C?dxEU8Z%`eb)6rDZA&k;G!| z4}8z~fT%sAfV8piL!Zou5phbuqOjSeMJy>8EnZd&Z zUsJewZ@U}%J;E4vRKT~2qc@G`PFx~IG6ev6yZ*?0YbSw{?i@VdsA0GHB8(C2?sqvV z$P_1`{cvCw0DkYB%DTB!SLAgNH;p7@eLZ2Kq*iDnRh8#mLc`O`d zrz--uHSYmaG}(M@ggG{{cUv?IPc5s!G*PjaJ5tf(x63jSG|VCL`h`Za-Ed5@EVV$T zU}o-c9$0T1ib@!6SzLw=anb!AM$)yTzu)|sy>IDTjbN3?8^m>0%-*8^T#Prw-5JEf z?ZM^<8S{EP@dv&-Md`!O^s**hy&6Z0S(qW_Od}lPEcpxuHHV)jiYZd~x;TZiaZG%_ z;^@1-TBoq;rXVOz78(pNGwC-Ze#C!%yd@m3bVi2~AXc9Ci%PLyiKsuSG#%Z>f3h@% z6{BvuI~%CjiBY4fz_Glwk$aVN)yY8VNt^U`opI1Lngyb8%c&OMJ9D*d(}3t7h-?QIS5#FO>NAQO&a&D5X852UdMS%;tgHX3b!g55y}J zHF>|7(M#^4@i#oZX8m^n)uW!V6I!^MKOG(*GF0P;vNI9yi5a-w=4JMLOHE1Q8-LfO zH=Jc(A#sax7(T0pno)oeUDHEP+Yj|#)?6NA%%$9gqwzAFOe2RwmjB~~93?Y1GKXfJ z(+Em1SqT25p~RkLJr`De6fz;@6DvJD^<1&Tgs`jjWg5Nw1{;&IA~kc+95td&+I#k$ zBbD~^JYs}}YVoj)nv6!X_c5~^!|f*6=)F|G?PYenf_mo!T0hjyTAV$+mv&_e<2(zx ze}a*?wIC``!TXrl?kB994#-csUc+UP3v==2=*wBur`ndGWo8bF{amSyKj!?I(IP+w zkyfqXlUOg|Ew&kMsm5M_{^d*6_0c5^ZR1(blBH*<;Q{m~>opgnJzH;Iap=gc{s2?& z*Gl21lxpqx7gbX}+*V97W^Q+7ktSPs(oVh{-Z#$d&&!mdpV}821bGea+!aLWl<`MT zPf*Du{Hk;M)0h-TURbxhHn+%ynUNQJk|Y_I9wz$SY#$c85Q@)MBy~gLilNv==^Xh- zaVI)J%sJmg{iwpOks_&BEp&>7kQoNf)|6VPet+LKzAvOcE~R4BD*o2(g3XZ~SupOr z6SeI5lQRt>afg81wQNnwyWdf@wzdqv)p_b%#^3n}3FLA?f;pJtG`FT(KpR&?^q+<* zb;_YBz`^sCvE=)J3)q>D$zArs2efA2F4CcW&@~fUgHLrx$9!>*Cq#-^e|-Fi(uGw0 zw*h@z;QR~vu>`%_WVkDIOuuV=oXG6B-Y(1O48$V%IL~?URSXH(l-%#*-_qMPYjj-qU8u_*`CZpJYsXy-5&uYCbE^~I5t*|n+Pi|49G{Dm>+&q>a>Ts?{Smh3 zroCmAYG{{}Wo|5yFQYza&~W%ZAS?=37tpKe2NM$%tDWJYOUvWDu}YW&v{BiEpVT=n zHG$PCOmz{@&`o>p7?7w;bWBtn|FbRIS=4oa9})4Mf_blPQOnEmLb>me>&j{=O}a23 z+z|gg8ihz5p0VaTauZZ?*!B6Lr{GeO+gHcieWJ8z@cWsq*c;^IDP(gxI1 zyuy1iw8Aex^zgiY%fg+ea{?|z3DBbB#yrk?n1X|X#=Cak6$p2#ybG<*ZC6oHNHdK_ zgXZVE`ICrblS2Isdh-&8#^_IOho&d)d$i~g`RTgzan zLdI~F$as5h)fZy)onYdakp=9B05aGlm(6{d_%|0d$ohBVvf=6Knhk$J{vZz&yCglu zRrtq%*h#Qq+QarQR!JJ4j!X4?{NGd+6`v3VlD3!L7m?;-8UE6?LP>j 
z2v`vb*yTF~Wh7}Z0b`~Lb1PSMBHI*|{|NHZgj7K)Y%zK^^;|%8jiuC^V2=(en7?k4 z-aEgvrqClC1wMWqz(-TBz}VR|iBx2e&A`%pyA~dsjW6>%wy=PlHct}OeWe|czRLJu zw~q#XVgAGy2=!-fOg=KGDyDn)$lRO1Vgnva*i3-@C8R&4*lh$!M7Jd+b~iQZ{l}){ zoSaYFk==dHSOQ(h{-@STSGcUZ&vn4s?>DVfsgQC=mA~V;ZkSAA0B=d3N>Dm)LnB4a zUUcWB^R;i=q?y0|+=;2Xc{20BwowcNus!hUKk_m32mD%rY0n=9>jWfz9?xx#Q@a^W zOoo8_8ZK zo4vnb=G`7Ck4OKQ6kk;)V}}!kC7drYn~#m5Ow&-it9n2%tJDR3|Cj<|8l_O+LrRPt z2Vy6)KAA?T?A*^h^Z29Ci+zp`4H|~En9x_eI7~(UC(k@1Mtn?REjz=GA!j#|in~BV zz9=zT$`mrTv6(`j8lqAgWnF`0x*ZAEdfGK&hxB?p>|jW6cPLhpgPztY5(M^l+&gV| zF#i>1!1vOWu;en8&!OAmM+@xJeHZMmLt8@x_2{&4nBEi^MF=QH|zA(761SO`S)%P zrP?(OT8)qWJocERhq9k$e}4Hm6S(^j@Pfy6j&PdWaIs#WQ5=}%#IM^>GdS8DetGH1 zC?l&ga2zG>eKNGq#-JYBxYjwjeYRI`v*poLW7N-zs5Y!qO!x3AgNwnUa8bx!na3G{ zjE}a|6m*y86T=@N7)lvk;**}2lOYt{lMwJLcE?t`-3A??=p8rGZ6P~`LYdM{->Eb? zMB7JVI+L>|qtl&IBPBHagHdU7JU%s&JdMD9SA{})NBa2sY~2}$@OR7)@v_-y>J89go-Ahbo5Z37w%x>_VdduI zv$9@E26gH;HIZzgh${5RW9@NY2tszAcdxET*eTS$=%{>nP>;@U`tY5&-LSOCsl!K zQqlHNn8oXt?2hN#52O0*p#fz?Eec>#^uWy>B*J;%>=Bm5*d*x%7!Js8s?JWp^F6CI zgTi}Sm41&gH14!#~osrd5 zFUG_dywW>RuF*&b|G=QqXxB?_sVDcwWITG?k^a=y&u%@X<90j~0oPqK`q^Je;5rHT zZh4pEw2Sdzx(T1T4dsizl;vqx^$sZ_1_VMM_!7G(lNMS2n0MpbdvCEoEDFSYUHOCq z`G38f_IET4g@TVj{JBb&hNG*dyR|PtI~l07U;k*K?oamjEBZ0H(egsClR;sBx;{d9^z&dsZ{J^vw#~o| zkuq@6Ygl<;sBmJDV9#~76(+;a9Zt7NB-eIuyV$ea18YSKFf^j?^2CqzZ1yf!x3SC@ z7?EkP_n|FCKq91H^{FGE0U2C!c5rfVGu*Hr!x`i>QoZ7>?5nockyt;;m;Kf8r0{p{ zowHJycq8NA!~S{_@3FlbsQZ`Ivq>x#hlK3d{|Qna7>x(~o$h;Wea=>#nf||pEWID< z{PzWs=;m^#5r5 zZQ1@3(El8k4Y9`IUWoNlzKCMAPcnDnu^#n{K`?T1Ysr&&$4@fKCz`*x`zuAIDo-S*DvlTk)AK#s3aK)*DVr zdw$xh)*yJr746#^c|DmV5>g;sV>W&WHTysm2Y1y2RVB-p@0Or)8a-#b8jBHzdK;`{ z{>Om0<)RPiaD^>=s11w%oT2UuCFzP6&7G2b*@!`MYmK?zd{toFu!TjJGpi?((WhBj z!?H8@Oj6bmV#J5_%!1e~frvG=E0xz27JcxW-yu%a$jq;=!NiE(XB*3SkDs|u{mwiH zcJCqQT8+tU>@`cD&)mPU+3xUsf?-g*i)F-fG9cS zQREh{CZH~IqArZT>j6eTP*Iv;`-6=64X-Ia`=M}G))S$i3s$L3k)gh;qSp7lW!II6 zwYTAARHsGP*0NF104DzJaEu>^B@{jk!%VqW;3H5c#IJzzjCAReFm-8z8xKA0`OGQy;iO1l6ji{sZ-Akw-&C5xHxjT<>hBo+3n9z~boE%M#HCOLgV 
zM@d_Ihh`K7DYrOkm?eoMDYt8SyREFf`oJV-?!78KuV6kC+kvILlB9Ewi_ioze})S2 zHuq;o*d3lXh1Pr6jruCY-u?cCv?Ea$M(zG~2d;H%@S`}eGlKyRKGK6Zdwi=IFtPGH zAj>BM=T>~8Qv!kJ*l72QRwqZ zZRui0T4lOl_Xk7b2p6%m(z_=0`zrYFaKbNDxJW*J-t8EYo~CD(XTrDJ_)!3bpRN&t z5pf*Dsm<5MirT+2h=@x%^h(FR*o&L6){Drf%eJ4{UXj9{&WsnUGlzhr16H< z(D%aDrLi-$SH5d+@8tJ^`;fT?YvwNywK|^1Lg$K3K1H8z zx+gsLKUy{Bxm`agKJi*|?eeAVV*y33*ZM>=9;X8Zsghsrp2LX@#?ixV>rv}f?pkE= zHJPumCaMu_t1R)SUCJ1>PD~(AbHlreBd(%M`Sh=S6qYxAtz#( zkEWFzg9A%qsL-OBc6LNoy{@yV6};~`lpL3W5v5tk{I}Y#9i2&7i7c#VpUb$n`b&~C ze{vngaP~$g(9{=uxGM3J;mes%_e;l1YR^QubveAN z;!$3&I`bsf#tlN6f4;^%$R*jni1}X6`a|&W1pf-pTUF3U=uuekE76cqulnSgR89DA zIs=L*8)lXQ%UrJAaYF?Ga*XJdBEK*Mjm5hfH-g!2-GQjp$$S?k3C*}Nqlnqot*tm{ zQ7Ut&Y=o2|-4dp!6jr%BG*O|bk}=CPt#Z*;qHYd*2Mhy{5I;kPU?C(V>aELUU(=RT znkuLE-3GC`?I*XjI?ee64@PHu8N~xfABPifQ4`yDy^#pKP$rb<FqQipD_p@=v%>@*U!?j%MpwMhn|3SAr;^ttJ1%S#Q6X^>Ju zETX?li4W!~kQ<{1MCra}+|=u2=*D6aG?wc8_L^4+{J*GstFSnhsBJVr2mt~FcM0xp zgN9(i6Ck)d!6CQ>3GS{lAy^XJ-CZZaVPFP#7#MWW!`}P*{;P9+p8wQE%j&1ArMhd? zTI;QM3tO($J6vYNh(p-xg+s(I@nS-0AG1iZ}@=-njRRF)osxJ+a@OZg!sI!GHV6^7ARY!vX8(SJR)CX9NoWjJ4ajA8g=PVInqt=-!G z&kqC^tp0;^&POKk?J{%Op^9yia;(n_Tz6ONelg@KEfRgG*Gmq|oBld6ZL)Lkq_(%% zx(gg4$HIr6(LN*3)m&&d#|mrn{_X@xIB!1JG0AF-DPM6fF}{_~4^ME0%k2^seulDPZTgWUWm^xGk; zY1rpm!fCS6Sy5mf_73l(*#z}*OOzuEGB=a#OjahR>E{}P?LW&r6WP?lNX@XOncv(# zukB1Bl-cBbJTS9gu{XP8(F!uiqgt!2Z$WSd0TI+=O(ex`pxox~y`67v$kf~SiJ>jb zs-jV^7Xk79t+*D5CH-*|!nL9)|1$Z;6GH=rO5c5hrFmBM;{{%sbt9=;3lsSn&(oQfX9BC5d?u(88TZzb^r@DMl#TzOiYRQde9C z`dj$t=uix#ZL!Hw%#?iEcN$oW5SBNweX8flJ*HeK;0P2VBIRh)b^W9Oq8#f(h8#APu4Qk9KgNZsv8W zUL!Pld}#okj|;0X$iG}EJg2zhgP=woLI-gxe{2|=0~Qt-2vvfMx2c8iZSGtW*vG&s?~`r zw#Xb4te~-2uykSDXQMOiCO7T52uNK<4&y3IPOIk5##w;iq?^Tw7h88BLGcL;|(h+TY$R_Ub$P`9fqzJi<0wI#+ zPuw@XkP|13+q(1*{=VH^YHuod6LNUBr#8qV9*qsJAd{4J23@DjNK|Y>T}Cx4HGQW@ zfg=L6ldGc!p3x9fzWi^5gZv%m*}rHwx&{`m*kujfz%oU=NoHvWB9UJ zdB}~`$ko>}4sVm3WUH;k`dr8df6saI)%QSGx!l9=6YKAJe}JuD$h@{vUm0hADN5J3 zZT8fj>W(854C*#RlWOHTXXI75T`?$zK(k#ydTpNJzxv_a`(K|bJI27l`R1Tl>GZ*d 
zV)H|#(m2%x0SNfOOO3Fq!hUVOnCtBUru2wPvl}J8@-DRrVQTlAel4|<)8QP|jzrk- zNHo(u>m{=HL%)Y)O!8B*sD>ybF~QJVC)T_cE-H^*6i=n#cTwT=)@-J4AdY3jKqogq z;qim_7x%pPx!WzxZc|N;zi?>-^uvWUbm=2ZYqhZ!u192zuC%d?kA3rc8e$2+VuKfw zu}c09{nW6W?dbdLn+axvPq(39UF~AyRExiL*9q??5&6v*Tez>58`D?Zd`Bp6fEaB9 zTCI-B(lb!n8cXQGD+*CyfUd+KjvAKyhZHLQ99`oWR9RW>;6Bw!h%(QmS^D@zI zkMqHrSzP_FU*9u^fc8b`MY+ld!XmLkb&Nzl`xNAo{dh6YSv8k8h><-{u|c2aZgFb8+-Bome$_CzAt}_SH4W=&13E2U8{$wez>*3PIX-XG5a8m z{bLwrlw3l6|L_00MRA|Nswc&|hYi~Z`3FH9fD)7bo-waB^piMUrys*U37+iF=fx_6 z1O`ocZcQdVc^2ep$5P?P#VQ1Z25%8848CvHM1=#SLaMyov@?UkV>5nOO}@q!2^_x9 ztYTziPS)|p**@3V`Ez_h-Y)Jhs%t71>z96(+UhJN4dxp2gfA*fcfF90Ar67>@Vcl_?r%cJ$3+x)?XAP<_4l8!>b@7tgEPGyEiIlwbK|*V6kk7ZqN~s)@|HA1f#PcdwwdPEnK4?HLBEeLpVk}onBB1C#wr>A^?Jms~i$z!c zhwHl+2f_E@_=UDwKemdsFFF+d8Eh3jvOfxD7FjYXre}O{YK`Wx<+41A+3Mfq8d0Ja zpWZhBOqea@8Q@A0O-2&>S zSt+r@%iMY-MQV@_J6ZfZ*<0ed?x%gWh`x!dvjJen$fpvlEySdSNBRaU0z(#EX-Kb` zh-*pPR?hv+l8D4YTr+QR+xf;60MB(Oo0?8an%K}-p-W)?L}AXrI%k@HPyUICkK_n}=hySGWnG75~(__L&aQM(+M48-^ih)IzFextVW(a_F0{c<*nbDcZ@@aV zFd1Ldp*&3sJcoXi#m1yVA$l(VK_-WYO*z@5_&uDieQ5X{=Ee_&9Oi%>L3MMY@~&okWID?@8^4mnLbvv(%s-15 z_%Nj>$SgoF4*d*LLqgCWxA;|(;@MXQPfyF)#t&<|1#FUHsBFANBEo%%FKlo!dU;<{Wil)Ia4;nv{SbG%-Lv5PxA34&^qvSGNs7F_E;3=He>Cgq>G@3h-_ZvZbPSBr zCn-ITj8<;U|333S>-Q8`jQ@WU+#QjHW5k#G=C`&>B1wTI>QAkd-1R2xgpgww_FGCQ z@Ze7~O!!z&tLp4A?}A+gGFJi_ywyY5_$%x;>pT$}n=#|S9J7;X@1p6yN&_rbC-|?8 zGc9ucLY}@UNLxo12wBHtsUlW_cRUd&kYVLZn>1$z2%D_ZCuAG+F1#=&DH6F1YaJTb_;&~&E=}6OJwFKs75bTLGU|lvXtys>Y)R# z&#e*EUG$MJ{^xt2Y$NU8G6pNPw2ue2*BqO z_v58=TIDTD;7z+MfRs>=#ywz+U&k3XM5`<7YXi!q{{;JfYis;79Tt%ZuvNHbo6`V} z|Gv-kbv#g5yvZk<*-?w>yk;NQKAfP<;lvrp0(-s$JHR^%aPZ;f_+uNiYXwnaWjvWgD=5 zDL@xwfyZh#dDK-$V z$hVLI9~u4TbSj)GFNw~uPXL?D9z;l%wh$h~fjPK#|8}8OwXhiF*I+12{evR$1-fz%Babh(6=Jkx7j^p6;?euM>Y=~)4m5|CN$H0p8~aGSy5zpucb z;;xvf^=rV*sEb1_{64=v$I&#}b#~@PvnRvb8Qg{}~!ipnCAB z$8(+Dcc9gaNyfo0V^O{n+g|9bz}n-SnGPPc{D))|RmfpvsaqJWEsphglo63B6ra|E z1Sw#x*Z>?`JSh;;r~5nFL0w3G;sF?Kes{hntpy#qKd=UMkFHcn+n$v8Kq(tS2Z70V 
zomPT{7&b#5NX0Z>X{*PS^gOmdF&|^gl0dI7!L(z|V5(CCSS~`}Nea7$^oi)I%Ek5e zJ89}|dg1q2u2WG9Y%+XBy^dYmS|Rf8_GA-LDbZfUY3j9O1V*mnOz++Z05_lS?DPD{ zIafMf8;k7%zde!c(2nhNT$3w!KaYIv5X7H^E<(X;aY~w2ooBsr?RE~@=*a>9UQ4@s zI~}#5{>Lan{bg^@G}A27H~U~{^r#AB+NtK-wxo2aKe3C^(FWgPDwbf`IpgW_0)Nh; zuJNhQgTtQnB?z$bSGM=iq!7HW;VZOO@=O&le8pd52}ubMwUwM$Pmw>2bH!CZ=wk*W ze6m6oAj{21as$T=C!@L7**_Ba+J?-6c&4*8Y;X#{JRH7up>HFvEiV)~4d*mXjPi2U&9+d8@()wW(y1G}$yFTW%K4y+HlE1K!Rojv*Qr7z{j`P4^pNgE% zdSx=(#De_9XsY?7pEh~SOQM3G4Um{_t{2d$_unwbNz@y? zVciuY@sQeouH8=))p99`ZChb>RZ|wOpMU!MTWJI#p;gsVfb!Oh2RX33@LnGRevb+=sil%(ZJ#o_lSeqX$J3qA^NOFB*^0Q zu=r4vKs9TC9E^ohzxOwPyEvZ@e@sIbm8`jo;J!p;b3cXY5a3Cxa;!P?Xvce>OUyz^ z!d-c&4j!0iTk_=gfbv`tP~X!dLZc7Umjr%j*2?=DPPgxC4lcLHf13$;=^xV}5RN~R zu^Cv$)Q|z+vUxA4z|?jwK3L(sC>exmd#{LaK4IZh$p?lk1opjL)=Ngur_UBKCytag zwH|NU@Yx!DQ4)j{QsjZIn#U0Wg`g#m)rmioA1j zv?Q`?2uAb~twC!x$PzocqQ>E1EC`u?eO(DC3)oIruj=#Gdb&V9PI zR_4h{^w=_1EK8{d*Agy`@1G)CT_{!S$?CxTor_qr9s&d!gb@yR}3}=}ufJTV>Di z@(bR^%v$7if_~KFNN2q?Frbge?OyJ25HZk%N!1m~y_G`mIbxL#WZj>kkErq}7_;5d z7h2U^pUPa2t}%MBp{NtSA4NJ``Fzd<#i;r5-KF{wRihjfexPY1DR7MbL8Nx2tHK(;u%|!`Jic8k}#m-fanD!wJd;SE6?43DvXt zYJtkj_wA7z0ascQID=OOZodBCUr%5J4G!q^Eu1)rE6g>r96q^4+DAXs=zW~Jjn221 zocvvhqHwJ=r=B;SgQgobyglJiVW2h4J&VD&NZ|XK92CDWyhiy~)c1C(pcsVmNyaVU@9H+ zIoWt^U}n^$ag4i3JS$dr#Sf0$_^bF&x z9(M)J$d84~xV;Zp;wnq_uLUeIq21NwJ3miOzmR|r)Gr*FYq!perXXn94bCiWbMZg6 z`({=3Jac@WF~t3;7FFlIP8vjcm~olP0h&6lscPO@>t-xf^@QL+n#s%uqOi6hTwUv% zup~qyPcn$}5)|5#`h7NldQT%)0XYTTEYXF6C#$Q$ZyTk`hQ9hb=t(|$v^M=#=+Vxe z=I}qLs3MyRCKLn>M;M#dZKkg01Ghb-tGut+PdN>u?>J45E99}}?Tfv%T_D^^;e zov&$0l$$N35N(W`ygRPN!yL4{?hiIzM|zW`Op^lMqa*}5jvQCsi6!&rT13tw$<_{; zd`@PH`wLlQp^Ya@G{C^R8Wxbcv9vyH{!daZ6$vlG^HKAWq%y*T<<7Kv-c zrx3r!+KUTTXW5J$&W9y*ZbKE#&Y9Ti6xqd?v>0jd-l`98FVgz6ivosqDe|X$xiKJi zEg$+lw2gfHSdfZ0WAc+nSh5KlQ|cr#PMWgcd}!1sk3yqu(DS5XHfavTwRNS42^nRl z?)D+-Pveu;^4z6Eb*Cf`o-eKot{z}A_02g4j*nJ(*I6rT#kq8utrgfk<1{aj2x^Ws zpP*QWKZ?0{lJ$pvH)Ndnl4GB*%n)IDAmUzO;E+RF(^E2YJ8mGIGoODpWpT_y1!zyX 
zG)cVP>b6$S5UM#&&3Ha@J^-Icytc4)?Y%{<`(2vB`eVH=nL)AEwZ4P&+nJ&pa>}V( z4-*@Q;bupv-f=fEI7|fj9sb43INGn%Z~H>D<$T|O6_$7a4n0WU?Q#@{Wa9O~k6#Fb zR6H++Of7C3F`ma6^;{ltkZs>4rGo+|o32a`8E;k2wy-~DCYM1k~t)mKwh0cVPt5lJ8CbDI{79zdbA~4#icNu*JPCxY`x}zGqRx4Y`&UV-RGF-eBkfK`)Q^JBvJGyfkfx_<-kTv&Rug`Y98yk_@eCT4(O{Ff zL6B-mVwK8^5E-A#X;bZXX`iI{Qgg zKOCVNKO1a`@yctweA)h=8ulFu2|}?-fqvzbt~QP_vFbK{#!ecB1AlD?T%}ll^J4tM#l)IzJ+TItvjZ>YbOdWC$s{nylj}5iRjF>P=VilHLW?o(4nGJR* z5SnNhv{-1}sc_rQUwkNUkaYXGn5v(8YzE@tSzM=eKaMOOr3Y>qZ7{h3cM0e@3=;K~ENrBhAiQs(! zi}H(r``X=JN?-)$u4EWm4 z{N@v`{a7x%hzn$5DI9aov4=>#OObQ@GcGIX&hXpyPT)toN$ca`ID(7>-KkLVeVo{k8-S zd1Ecb$G?d!p4%)s6YzIcET;Tb#?eF}p^TSRm*xXGi9RI0nb;V@)vKwM;p+OTaCSDB zux&?IBoA}m8?$kv01s;-{oy<|MW)YbYEtu{bj2azYO?|EAUs?qT!GVMyl5Vm9*bwp z0+mdrTMb3C9kH71JCe|&&eTBf+%uq}gl?B$sRSTS$|NY0;V$?oUfE#Wh(H<0LmpIRwEpU$*Z9e2O>Tr+U?Mt=S5576O z*eJA&DkE2DPril@3g!y&#%UL@9iW9W2M%I6h}(Sm^>%8L&8;ceX0})8&S#xjW4(vy zHeB&E2onTxPjwJ+Vud#9<%n6v$I(q`hkM_67dB>+5BHQ&OBY?vMIVY2M!Lk40^&|A z$7w~w4C4p+s=R8=8skOass?IdKUv>Z(b~>WoqnNsJ(9xavTi9U_yF3lgGcG!3(v6H59#lOvw0mYkFDJX_?f%JWL9taotYm> zP(=rS_t?H5YF~>1Wu0XI;>k& z+kktC0-(HE!+RUD(37!nIWlc9FTwx#XTQJw5jAW&YV?x^rFh%p@{Wf}A|+9Ja>x>K z!tlME`ZY!bN|xIdT1| zC);pzZjJi~{z{w7&mLjJ1Dg1L?Iq&Uq8U0+Bd@yP|`%+x08UlQ9d{nvou<0Xs{FVdjW)eVN9}RDFpSs+OY)Bcp4Se03H> zHey2!9*OZt9|Ci;j+*G2=_;+@(|2Md4H!97^$%37=QI-~wXr@hj-%mGBBZQv*YZQf z6*6eTn+%8fPdC@6zI1cnU}a~AcY^$dCEOdeRiWltu1>sZ){^YS?FC-L(S;iyji^B< z*U`i&y*YD3;gTUn)*hX92NhR~(gmcZMwdIWpY#gX;+ZM9R za6)_G6Z?0q=4L{4@@M(Oo2y^L04yzesZ2|4>p2^2cnqciq+}Wi{P5`lDo&xaMai=D z6bnPI=FPKy^b>Z?(3o%ZqASh)giHe8bv?dWV)~$!36p5Vw z=r{DB$qxV_si=6xcY2xh85akI@LI}sqvH6g!JLxoWQM}PHG^xjoWr1GXR;T4`!}_% zx1Te-PkQp!9&T4JrhWXtuZp+A0dW->c#6k$dm9-6w-z33*+#6|tfYUh=%a>)+trzF zr=vvsb1JTUE5Ny!g*W^bwL?8U33)9kYTq13Wq`|#67fDUYgyuBexDDf<32jT%-&l) z2Os9SJsEV(BGjh{FAczN;pjpZ6XYx0Na=ykE$P0JFZjzGjtJ&?OBa&KDIxXgEEC2F zGNZ~FlYvReBMm(NX#k$0+glAexd*jz;5B;M;GBCrNEg2LB`-QO2qmont-7a758qMk zMIx=!4Ni;^-a>YolMeIi$yH1))rq)M5~EgMTOgV@j)8}!>KLkx1FVJ; 
zId!EjQqnpbd;01xY)gGcLNP^FGurM1D_IDuM69O9E49^0M(QGf>(@BmcZMv_O}puC z*3f`MC{EbxpLl^PJC#MgxFNxU)CCkP>79aIvYy6n5f-ob2Frc%&I;v! zu>k2Fq|^Rz7$_YPu+8-`&uBiu^=>Vef`HOJ%zqJ)3R`48`!YZJ#jqWC0j>ghmIMEU zzQdi}YY7nAfFV?OBDE0rLmpe#Ni#P-^kwiuL1siysdqI(F+joeNRwk%OF;uPo@zlf|pySE^{Q{d04@wHm1B zZjCxF&*iD(+j@%~RzD7JAZ@Fc+ea^U7GKercB=l2WkFBHSeF0SN6cuvCvnC76h4!( zk+{EOlY|qMCfuQvynFhnl^g>mHXUX%m>nL>AbZF!2d-GK6hL&KYsiLeJ9)JO)y34M z0duvHjsB3vVs0HjaR8#bfgzlNnW`gpHJLe)yy*q)$>8roM}Rme(SWu{udyu;@c5cR zk=UTA{Hs$dVRJEw?4SDMfJP329p>o;$<{__Dl4K?PqfKNN-^*AhYqVZ2r+#hA7?yl zU0`paxwkST^x{(Y{HUHP^MQlRm?uH+=oC8DW3EAhhA72$J`64lx zLcWbH(Z0)0j0?+eHB|ul0L0H*iOVlzk&kV*3`_TyQPFd2WTpLbs!>|;er&MgnQvjD zo0upTOw()m(G!2{G9p!wvi9P^B*O)#wBNz{#0#u`-hft~*NTJ6qA5~}nQO7dMt#r3 zp*ufxmLIbzbnFq(hjlsdX>&TIQGW+)@%W5vjY_XA9-}Jg-5F8gRxP(nBn6jGUj^ye zP!)rVuC#F#}9 z)=ng9IfIDWKupf-G+5^3j#=ef8{GzsG3qCeM$ba(*0t5J!va(y^IA%UFu^x=bt5k^ zK;7I(%p-T97O3ZPY<;{Bi|XT9E6m28{%P$wH<{C4z79I|YRQvm^8ntq?0=}Udt&Ar zo){7stYOo|yF*BBuV$(Mbeh>{Gi!T%zGRViV4Q^+Dn9eilJnm zTe5i96vw^}yaEBT5sOP7S5`*DRNgFn`lXhg)<>^3aucxh$uDw_2~xLuwzUa|Jclnx z>j}YGYM(Nx6W?-9QRlgyU#=lJ^S=(P$8;d^k+IjS*%Q4jPEQQFT{zxnZ9OiwqNFtM z;}11NTWsElzbkJ>c@5s1I1^1i6Z%cW6IBLCONEVFKV@X zvybH_wyo!ZXx;+hB_=CKp__?()nkCl_2mgbUCQIf+u0_&={iyue@N0p?sv_)zCt-n zqbt)wJte%rkS9xoxmJ9@)p5%%;{|Nz*8nh&hUq8Nk-lz8w;!e2!mOb`mG@*KnQ9K3 z^Tq-v^H|}vZ7$l?k56{I7Pz#tZav{B5yUF(#17010VY0!+3>kqh*;V&55C;V;%=Kg$Kcd)tiOHX^KrY!a|t4!^<9@ zF+;P5H^Gp)-5T@K9 z*~p2l>Q?|PnHvk6arz%>^0`RqeY}F0JMb7G4FFP?$%TRM?YI(H2HmSIB0fZxtGfnK z{6Y|cLR-p!aWgB=$q|JH8p1tEC)DXy$JG!aQl`6BfnTOa{d-!*zruf7u#;Y{(tOA4a`C)AnU8N8!WFdNALO$otl5aU;DmUv0Nl8qC| zzQ$-G6gc_)VbNM-^UjhQ`B)o}5|NdAg_FSEH59Yam8=j+$%R{FWLy`G{3U&Zoro`; z+VTYB^7}>2&I3A=uDU1_7tmJ0NIoK@FMFyYlqTxGIKD4tw&dk)qB+@KyP&KbBjFPW z;`$#z1m_bV;_>1rbW7q4dK(TgZsMJjR@~SAFZSQn@OT4rq(_*JSSdfCUIv$x(^?Tf z9ZG(@ieuRkr9Bv;?8Kt~UzEh~*x&Il|NQ5V&q|hE_#7Pei~k#yJ}t?oqvZaJ$NkSz zNjBvFQbC{a1u8ziFW~77{UVJXmd;y3|KB@Dihukkd}zA)4~8fCKMc=LZ$MUl2>etMXr3@|$4(cisZ$N0sI6R!aHjJkdB5@_ja1q7<28@|)6q 
zFK=_CP@MGdTz+pnQm+H}>X=Z-gFqDFAft)zSCm@dDyfMVK@%&FGu|Z^6dX@UmpsPM z9<(Bpbab{@{(EDOx{g{rub=QnRn51G3-wk^;<))6I^i<7B0S346)H~*3R=?}(u=Ps z`xO*k6B5eHX$S1f;UhWvK~~#sv~_FhqsM-?VucHUf;Z}H8O*%aJfjXc2OsHN#K6Ji ziO5D%Y@M5ND$r&oqBu`k(L5 zhSqZL(Hea@!L`xQA5FBCt=VK(KDxU3vy<=6SONUx*BYlsPb1yeGfRF;gz2^n6#*%o zdAqNxb1q`ff0wU%sILNI2$bENWxFe*>_g?X&+Ak$sCS^>2kst@H;kFuPZddt@6P>(@PX~X^gN6R?}$NZXb z2V%t)}t}moL`B=b+WqhhVd5bKLo=_Ll;$Z zI>qpn1`|eCNb8(eT6Z3GF3RL1+;E201&^k3epd$HrxW{|W5;&J7ARbea2ht{=y$NY zx=ipgA7TwWE1wtP54iTSKzmVRc4T04RXIL^yw5tBW{8`iM2m~^a3qI*GrzTTtpQ(O z$Dc5aQ{FOUgsPJ_(kfj0;(~u@J;#L*PA4&L-F0>8dQSc7`~OZ zsWbuqj;1~l6VL4N2wJn>k4KTX4U-Qmb?dDYnAWMX?D?wO(q+M`b@h!YAftRrfPk={ zw1NfU;W#>wvFaH|)<#;|G*oACuc;tXgSa>WFM2_+^e~ame1`C}qN9a@azhbNPf*R_ zb$}pc70dH{0WiqZ&s{$w9e*-}Tvk7zjy|x{Xe@_(h0QC7B&<_HVMb*W%q|&N@(O(4 zrLsI>Dw?MUd~`eYZuT9D#_|kcOB=$`@ZRaVJA1qzaNF61FLRu~K-(!_E92tG{xg`r zArMh}Z6zjKT~UyCx5W-P2=1-_l#Bd)^rbbub#%(S4nUUex0PlwS+RNMX}+o9)BEnf z_H|L~3V8BYuq%k(?tpC`(Kyt%AOhY8B92QZ#BH6B!S%g+%=7nlq47Ue4)dUyc+j7T zAXC|#2HNgr+DiP?q#WKAy9$LksBbt9gV>2pp!8jCJNPS;VzbAdL+9>N=!>8r!Oyd? z*|+c%UOpqvTZQKz>Lw#OrRoy_9nccbG zLt4<5rppUxJr6{-!a4+_b{KHDU|I zm4sfZY|@T~Ytd-5W097)u6Mn2R?jM**ZbSTq=jd?bvt5guOs~xcZ?hVk)Etk?iXDe^-4m#AwoHvmBMpoM)Hr?*# zl7Wzf?Y0x%&Lcxom{`7^r(_19fZ<>B5w^)dp_ogxyI+qZIy8Dc?vK1G5nsORqe<@z zeOAi+>j0q1(55)e%UX=V+qv&Q{v~O*_y_NC&}p}6^X1i(p*+V@fmyzz7}nIv@2l}L zy8(5(x-Vdn#r*zeLfYam<4i*Bj<-gp@a190H;y4){?!hJYaxmrC~Mg8bS~Nat*n9Y zteZV3mb>FQ|>CRd@TtO{er{NE5L5p7vZN_VDAxd zCPBZc^JVGGg1h@5Fyin11O*XKQlQ>o*3-0R zT>mP2MAMIdZA7Mh?;weDvNC{l$cj{v#biJ8ycAC{2dP>X1=uf>x=_mcuEZ3R59>pT zsubsZkj>Fgjca=3OInBL{wx=^f5{E8h)1JOuh}s&alboVWm5&|e5LnwihczcEx|kk zBn31_Zef*0s+zfc2?|(t@^wclZokT%D2NhVvESndQdupP7V6!Op&@1+)v<$JfHW(^ zSyIzT!1C8?>~bn$U{wN)Y4C46>Sd;W(;*9uB&=QLd}3@lDr)^5+?$j(|1zxZCd>l89Z_mu9%3keAZ$fCZnqTN_30v_DuY}Knu2i^3f7gWeH({ zE9Z2=M>LAHBRexSk*`E!Tb`gg!A33yNP7QdN^LT4wwf7#sYf4$#`SCm=ZYVG)z?-c zm8PUuG~c8K4>JiKJWm*? 
z^s%d9-A2`QqMEM&JnPXGLibCFwd)fz}l81oy$+9Ou;ZVvsW182iJGe5Ibs zhsyrg5q)aq%u=g6^vNRcAJb``pY@B;PBW30xsfxJt>F{7qypEoALd?>gD3xlJ{RPa zAMOk^Qu-~c@Vx6tIwHSH`jh53hU3<8lU9~rZ=}gt)5+20tm?t-i%$8K8je{Yx|ZQu z@4R!3G^ifhs1#<8`3n|wQCf-8~8XwX7j zpQvJj@|;M3u+f|To&;8U`Mo%x!INCpq-A1VX8NUHo^J>w&2ggsQ=k26UgWmGl?SUG zrW1B93WxWls@YTsYP+--cirtxGRp&%iWKFbGo&QWpfx%kJL2{0j_i=IIsCgjZN27B zYi{W+c=x#wrT#(_fwvA$p=H;^LOoR~ ztI_AHhIC;upF8>zFsEBcMDu{NwU%md_y4hLaz|E4Q=S0eF8c5ly3K5qMtL?Z&aK?q z6uqirtLZh~xWlSH-iuh<6NWvtomHz=5?1gW7au;fT$y$)>IhE5;bo?}J?$luINJ{e zBC(m1);&Koe3^1F9X74GSJdY^Y`zw{NR#m7^xau+_?CSoKj@?Dl+NLW@_9TBx2r?o za7xjRsbmymXz{1WpfBZnsCbu%l^VNs@MAY{RnPUeYSn;)Ub_ztwpgYz$u)1Xxa-Th zp+xhgB3wUr)8}9HaN}K6j|>`qekvnr_&i&9r5jxsii2&A%;Ed3q4hhREk4ok#%bsg zC+viwY|*`goeQ;t*HX5?-P%>GA?#>}p$65hU`W5soz}bt>~qO4dCNv7M|GRR*Veu# zP=rY%TtvaQ`F2)Aq}stALz!8KJ!6a$zWh2^S{fCiv1RcMeEq``K&82teHFp4mfU6&Op<&Ab9e%Q&-+G+}Rrj>W!c3$OX3=RY5XpX$!@3+YpB)QT~%4TMC zAzaL^yL&Q&LfDMFyh$a2UF(%hp)6}J0LI2C=mM&nqxsl0k*ADm%WM&6lyhg1-q<4I?LhW;VTffp=0dCkp!9}wvZ zGweBkJ+0Py=W|81E@k~h*y3|zIZPVF{`?-54O;M^7@Zk{-3$8TNvSqVRW!X^^8fY& zXMbaYx)bJ;bO#UW6;H=b;>g7K1ana_9z*FAPw~sz9OI9Q;Z%zJYi#$SMX(sVjUd>O z0|M7B)`F2}b8ki%uKlj3_>=6eD_o6Cv^v|_4$fDvC|ldwR_A;4wp*nRQ;Ww9&@L54 zE@OS{_@p&DgzmwFlzfL}Xy4SI6^kd*h?Tx1LzohzG(UEUrmc5PiGhMs0N-1`Mxi`-{16Ud394|QDwXqf$rE|rU}sKRrY1qEsmNnpEPP4 zpB4R?)m-Ux%C7At9Kt4T0=&$c9Mo)fXF_E4Y#u5V7%>O6&?-8{%s*TF&>TSSp7X4< zR-4@(tlMPS>@QBlaXg}{8u{&@XFlpFl_Q>7@k+t3k*UL%*ny*U*M`di-Hktas(cv( zihXn{-%Fj@A4{3e#=Co>0e6V-#n}S|V}ly%SRS0{4WY|fzE}$%>J5kqa>E{f?y69d z^Bun=#uzWG@$W?ebX@W=7{pn;ODy4R^5~bcMK=&Cj~m-^+`LZpPM^b&=J!Pa_T%$< z%3Ap2xQnf{vPd>E@m~|m@5U1bbXgD>N6OWjz0*p;qvP*Qth3v7bQZbG%N9z-m^a~l zg{Hb!MD{|5;ozKT9NkXBo!oOn$w$#*sBcDJ;%m<3S0^t-R;of)=?X9;`Sa&J(RW@6 z)(h$)e&a!z^uk{+2CkGoVYOietpOo0S;2FlE8tG`!>$am1~ zZacZ270P@=cUJapjyt+_7}`{XyBN=;By_}euL*7mt~f(ABWR{7YR zA5XAWqQjVPn)d4LC2o;Z*SlovCfE$n5`L5i^o6@=ly@*k8DP@=VTVEJi$ZLR#=F5M zuK8xyUjk}6%v<#Yg!n4i)i2BUBAV)K3|qz0jZ+flM#CKDqq$G820%P$T9X86qqHNm 
zJq3DYEhuJ}oT>#oJv@rFfTjP3yT6Kxqix?eVF-aFxVr=o!QCNvkU$9TP6u~yBoN%) z8VDXNxVzIh!L@OMHZr@*ch!BK=XG44yQa6` zv7(7)eB|M@e7!iXmLhGa40ld{KE~%ESKkx&ATfI)6p<5@7&5^3YH;0w+>u)TgYjj# zeI^W#R`1R%Shcg~@wkJ5sGYA{(kYyr1Dh7K(es+U9;n&q72?^0_$G=zrhnuGJBhH6n^`=_kQ0Rd&lrM)dAx_ zXKa05Rc>3P!*&N(2l@(cZ1g*xgS7kTgWTw}LE~dDnSR#@i~C>ySbUD&2?)@+jiqxh z#C+?|mQOXLmr4T7 z1Vv#gr9UEgsmI1QjL<%q58AWel_j;}DHDe)|gL%#I}<-MT+R2h(j&h-IYl z1q6j*#%5TW1m(edIW4CZD{v>gI|tB4@%W_ud@K`rjy)I`*W z{BGL-g!=3h?$;%Y@{>-F3vaxOW*pQxU3`t1Ox|~Kl8jdruT1Myx0cS)Q?Z_OgIKW< z>t&mYHh;9s@>|VeCMvN^ujfzGACN!>#nArlKqO+}+4i{Aru+gPI*gHx+Y|TdrM@%0 z$PwZP7Jd()a*m8@o3I!8CsDjk{!zB{y+$yN=;a2(@XzwmW__pdW^kf_r`VwRky%f+ zpxWC-?6D|Vr>(cORcVPt&vh~re?6I|k}%=7a6ZHMc+ z2r61XKI14PkBqg?J=`38@UU**&p2AhhK=kxJWsOODr63R9Wxy0f7D`Ec~Pn?4l)bE zME}a>_n!)$T9uw9ka?wX4bOTIQfSPY>EI?S5BYpVcKwII|7HO;l^MDux3hfuntkG? zi(eY@-8H#hz8jQW{yc;s8B4WOifkJ}kni~A6`T3mbP6@ve9S|4{xxbm|1JLD_@HT; z^SUaBj`3S5KK%Cw(G~jwDwMiR_qr4jbd)M9iGzJD3uZ(I-&)_;BR`E#J()nC+x4_i ziN|!FjA6^0J=tfwo&i4Xji06qrhr@~o4-n!c>Yr4eqv zeti6hEl;;p3B8_&&fmd3Jw>*GVtb$o!M!yXs3`Ot4w4%?a}B*qWy1zt+*%zW`8~l! zRGWv-?d?N58%bzj-nOuE^D^!X4U7B)X`*Tn=#FGrG?Y8ni_@7-^r=0t-`tPs zmLoLVawEX$Ue}hm)+gEjtgQ<_%A8djVb6^SPKzo+jxf4%^MI==<~C$z*C#p#OHEA? 
zlYbD{; zGJBi$1trj9PN8p>A33!>fph}|!KL2HsU3cB&k6!h4l9*_N~qng|_-TUWNyYgfk`YfHfXpyVIq{F53W8I@!1-zVV0mayxa(;y51j z&1D;gFK2#PNv_qe@rn(Pj@-0HGp5-#89H=m?#?C`GkNS8x z@$;4!KkVmI@aq~+Z?^S?_`ZIOoMXNDX~=p|2kJh1jW)1y`PG1)J7g$v^kCr|rKr%P z)2=)X`Y&~B!&Lnzn9h>qb~uTvEhJj`IOJ?hlzX6D1HPW6-r`hXOlLDTHv_hpS?Mpl zIN}OBOJv}xad?$cm)5^8P{sB+>!8{0?fpw`A@>5g*cev>&u>Ju#X7GSg}+UN#{7VG zCF^_c#97n87>H}PX*;JeVvagT)wt7Wk>KvLjW0Xqd?qCO`gzfhI#xz$?sZlR#VU616?lx*j-(C%qTMxKcTGUyv@Sg6^x9*w z!UK)o-ZVcrvqgD#A9O(Nvd|p-@ni|(9ngZcw(;3vIsG(lpAK#r>6j@qKwvg~v%(3Q z*n{Lsrt;xOyixjkw=<&OOroIQa$c zh|DPU&v&w01jd2&0Z_JAc%;rjTO&lZ>^10arF1JdrjFjRx;wo$UdNisi|Tp0)k#wn zX5lX@MrywuCoF*M=;aGT4D7+o!R(zcKwXd}1mvVSb zi4A=4@LOrgbTO%18t5EvaK+2GyB~H(2 z6l3qfL6fg;sIc-sMy~U%z#k$@>QQ#}Sg{C0`M8MGCXAJ=+* zE_rU6+XW{UV~J5J`@_lqt=*@AK{nYTD$fdACA-;_O8RBtE9Ml3om2SX%T>?7tTH0{ zQmrojAjGJX8FD_N8g}!C$6Eb1;{i^9pqTx1;&}Zk=VS1QQwi6?mlhd-i2w3vcP4{A z+DuVzKz5SP)z=1BV$fPXv`yaJpeEb5^M3exezVGHSYo4e2W){sj5cL*Z=9{9JNn2h zu@VK#pRTh6QF4XnzfpXf?}6wHXz5CP;bpW~CLz6sz5E5pRgR1Yz$yT1+UVDMtF=hz zk={5c8*&pc{5n}4cgT(9tR$68)sT;{W#)9gR<~Y;m_dH3Xgi9fptBj`mrq7n1!d5C z6)k64@C}-A(MzMw#WZ}UG#ChzX?z{sV7#{}(~WAEKG!ZNCL8u(MqES;O%1#eVXW%)n7J zvEVI^+8##!D(Ll*h@z@PpSJD|aW^nhYFXuU-AY7n(ACT)0d5=NKR$rWda7jec#)E$ zr0;!K3Y1mw$m{~R6mu635>jSlfwcStCf*xv;kfn>g1T2eR~dW>71nMr z_Kg17YgyGZ__;{5d5K`u;+dj$l@ndgxSvv6max|vJj{@aPSXp|7WUg4W*$|$$KA4} zFR3(slj1O1z;ry6p>WJ#rPA}RUQD@j4Nawo z2%GC{T)=TytAl(gR@N9}GKj+%F_J{BjieSCoexucb*-zd+sOG^o(JV_)RU+}aw6T} zXX=gqLc!kKe@*=O#@RAIq7Ds~6mu*D92|MM4<8T=4J-LSuRq0VW5$;ERw11w9W(|v ze_DB(3wFTq@|BQ~c!Hk?(toEg;;~T6g_j>{zLS~Yj(?(0nasMds=24|)po$^R_aAD7e2!lSuya6H3kwJN~L%W6gk+y0>7 z$($JH#e7m;G-hf1zd-}I%G6}hKVtZk#ppA9wHU56-oLo1TEeJSDrA%P22MqwvOWDV zFDRDF;Sw3O?vE7WL*RcR3h>w|UPhj}lCXQXr~BMb&-VMVxNw~nxyg0lo7(3l#e_bW zvPn+{y9k;dk4Zc8+^SXX?6TJt---uL{|$C@N=Ri!3_)^pKh6G2_So)>!%$vy-b@14 z3_X$Nvmd8EeYo46{wqK{qa7sqt0MmvhW-8gh>D8)pPm02eA0Bt$j>z?|6@xC4E$e$ z9}*II|ML)G1wZ~5hRB!Ce+eZ2zoRg{dnS7Pk1Za*JW=I2A25%HkDmlg2y5GMHT*B+ z@(tl3c|J?rX{0sd5g&6Yx 
z+{u7H;roYJU58fz1HZ#QBd>BF~h;AFvTT(2m3hKd@90GQjh}`Wqi9!!tf1aFMLWwyc=3|Uw=|0qt zaJo#7SJmeu2jqtaIpK_^h)5{;yd(x9V67HZ4Bf_^UQ>IA1`3*|Sr(D@mQ;}>in@@ln<9km3=L&mEaQFK@fwqEupp^%5&N`a z#6m0RvS8s}sXbiu(1}ZI&7k(n<0r%oVYPI(3#AM8{oH8G@=8nG`7#%|@kO%LR4Wy$ zqQLn<;Ufhh4U)g{YQ!g?2aL-jTi0>Okhqi2cYnI+E#`zKp?^*y8DZ6Z=GMK_9cQ(B zGI4_&3RO$tJSm=p@ZCAMPw{4Kb!^g^?}Nx5fPZV+t&u~#5)kZAJZpPoh|T;KwqROx zH1Xjsc)8@zx+iV%%4>{&92VAVCdBpCUt)j)q)-tBW^R+`ToI{@2)4O~jm#QnXK<3LhebiF*W57#mOUP3 zC9n|pPWso4m~otDK@*= z=pYHoV$P-{@$`nGibKI4;FuH%TK=@7SRH_=y8e63#ZO808<*7SxusmAnuyAY;og0BJxClFkR!)tD!EFoO|O$Qv<#M9cm~`o zIS6y^EKhm?#bFlJ)bDRl0l!)k`a53du;~SvH5LKYEIDmjw|n1nhH!iP_0_>C-Wf+R zZce$4KUJk`NC};~?7>G5KmSQKF+G~C2Ts`#o!EJERv4)|3+Uj*uG;v%;ekAbI7zu1 zPg}eoJJ{>zy8)!ckPfcXci8Uh;gW4O`Iw9KGEh+mrK;7Cq1e8gb|J`I%1nG2Zh7J> z9P&+^ps>nD7aW_e=R;V#qiiCi^a!F!H-n!qFGi6GzSyfD`I<_00ws}qN>;Jr@kHE0 zTf}kwDz25%5(0W*vQuBw09vN*iOb-zD3-|-S2Yu2X{b;s?QX{dckUil!E>I zt`Q8DfKHqJp&8h7V$5HRN`3~iroFxWtswggt((^6H(%x(-;0+}OSSX`9<9yh>41td zt<9$~kv0-(+Y?fC_M~VQe-9UokongJThFLA^z#=6XTU6?F-1cFTt2<|dcyyVa2;p*H9P=(qB#P3GKDc|7Ob4}<48mbIicn;}I&^dJ%iqahU-4n#!^Nej z5|vP_A{!-p;5L6ZENP4Tj_X{39%YnV!73G7xje0_%FmLJ1uEH19-UT~%QYpR2v37( z!}@_N{l3-Uc*Zej=Fyk_+$q#XgVWX*)(76fF4l130d(gs8w~*LX=m#{bzu%j5Bkr{ zD39vzPG28cFjx7_jN0&AEZuSoYH>`d9QS+h+MJ20`#)N$z#E{iqJ&-$$O1<|A-{-O zsBI55o~O9=2d7tQs)N}JnXhzsYy|JdU*cLf432S&?_^y3&;r|xl8VL}<*}w+V-@uH zRk7U)W@^>-E)!3z=4f&`?6WD~h`*f^6P9_8Q<+^Jp=k-@FJ~2Jql*Q6y)u2?CbY-R$AS7%!(gPn|bNQ9um?ovpCWu1!`U2 zK4^lrLan$L^xRP{Ytx!riiAZ|SKq44!-0!Jl|3?Et$l{Zvq_b2%7ni0=9is`{V-jg(V>6IQvYN5LDPIyRGkrg^+QW>B%Hg*Y$mDARwc`3~!?iPm0Xv(zVp zs;@Z^_V`2kqJkl(hqIX?Q4ND-wraOnP$eCHq+OrJCOfuq09A6&G^S(exSU2x_52g& zR8rpn`7o4`@{7p|ZfO=&F|<}b)vimd468#YQ7NLeZ_TWcX)*5&GHz$^07Q8FU@>yh zX_GW|G=1o7oKY!#jT}N)t24dXU!F}N{Mk*<$gdL@sX6Zk-kR9NqpL7){YLacL@(yY zJoJ-x(=ko7ID5))>PlTBK|oq2xwc3q!JXgEEgrMJvqtXxX#9*7UsAk$N>ZT&-unun zzKS33Sm^CIjNlQ$#SoK)TlSwx`>MLSHflAC8NY7s!M z1Z49Rxo3t=D3dTd9>uvZ#>n_U?g@Gal^*i4Ry~9^c^<{su48NsoTbwNrNN5UVbW}6 
zmhzsb_6Q+zqwzNy4vSWHCHWjxkMv-KkFfiSQCrZCQcuN@afOZ1g7nJRICa=lWEqZP z)8lUh4v5bbbmB(6TOs(m=+AMi&|0*nD0)Do$=WKF6<)Ugg-4ziw?BJ%#6#sh_YVir z2%#5(!qb(DvA~uc1gug%+A@hmmpo?}01ojdF`)||kohlZEAmjDWg?&F4&Y(MyM1Catd?lo z1yer|_3gw!pBR@pOKZ>sfd&cXLp}MTXadKdQ>`=%I2YpFo8fGs*C1-2^I71RZX(*r zAJ1qPdy!O%y(R9dQL^^SkAGhyALR;e$qc2X*8+wTf&NFFmS*k;*Y|YP&xw1tV#ob9 ze}7`Wb5VC?O7N1|H*JD)`K(gCh}O~GN9NpA?x&jc(vB=l_Okd{yyt-}{;R z?NFR8E}OM6M53Y~=sOXYQvXvw7u9V!+Wfrmw9haQg-ggswPyeAuKh~yT6BKNjUz?k zyy08Qs&Tb%5hs9!niFzrhOqW?+Dt5&FB?J6h4~zog>zkJz}pxy?Jsr`xP;l{I)nTh zvtM)EuJsLdRy=Qhg{nEzGz?5P>aoD zHCF^Rdh34kYNo<4n)Fh4(&e`%TZm3qTzV;zR&|r2$&tb};F`Vo#>tkz)~NPDUWQ_M z)MR2D*lfYCXW2-5es*?Y1k47g=+%8EH@R)f@dPXwm$vE{iU9@6$+~KaIjP^W+B(UM%rL6E%3FiqVY(Kk9ut4NN17vS!Pf; z6eyS%Q=^MNBXM$7!i6t;Cyvl2*gSfC=@|SpztHfA-<-*pZ$7-jYvFaDqYt0{)+5uN z^YofWfUvM>0*}oARk<1XpEb|S^V)11GC&Rt(&Hqo8dr=5I>6YTibecvdn8dr6nK9u zGg=H;+L4Zb`?UNfsF$kV#PZ-7?PVb1l zta7yT^`kE-l5w}I_{+Ury!1zTdY}RKy$uyUpVtu72AO}WnamZ7JNV~{0}l6l$TQ6^ zm|ys)bjbhPpC6ekjbn68IOROdh$1o72^_l;hQj*quhiXn5t`3@t%?A7&21*40ANN< zT@x?yDwFPL7Ba+c&-Q_`Z41>TwXynsnXC^GOFi(SB-$l?u0crbG5*tCUJ0+)yRi`l zk(V4E*?YCP=e>>xF+$SPO5_cOey&+upe3E?59s_z4mj?nL;!D0Ga2ap!pchTwjV7? 
zx&>>bUi)ji1eR!V&1my_H^6QqIJRPcW~5)%C?h6AX6h@y2&m$9r&~m2CrGN&1Z8dv z{!X6#jEH5wPMmiDMD@GfO{c9QfF*i=euE=A$x0!ARyJtW}y6WA%z*rwphU<^zk0sL)ey-FLU1D8qFZW{o zgotn|`HlB%kVyBtqbz$|AoH3tJqH?mLUVf(Wui{Q9O( zZ+U)8UN}H%p1?jAP_>+%ag?E5xkk3ms?O{5zNIt$mDbSA;3bZHnUe1h+P_5GNqtpX zk0(;n#;fJ!bO)(#)rF30nWfJ}uokKxUQ_K=D)1eCd~wZgau_V|jwF+&4qdz>{ut~1 zRHNVUwOGLyO1yzYZ2+vJ4av#n+oW7>n*4G7e+noZTK)HLtHQzxx?x9oWr)3*ZCrqh z++8^fxc9*X$Y3r@b{S1AUq-8?{#O71)-sWH;>gN@8`<9nm&utlR!!h48x%q16^G{A zS5-<6Qu2bI+6D(ZdjbWkBZH`&7>Nx~KNo6#qjQtqcBoC`%jG^7vCrZ!kS1vfW^W-Rd@fAD+Y5DDO zci@!Bzkw;n7lsR{M$8B{5q?}D_7Y})v>ec6g~`a#cnYRk4e!Y_-n^QY zXPEQxGGehx&Kk6`ErYwix$J z2E!4vo7t(HO!ccFNB~=OLJa1o4P+l zprUu&9+ap;kgcr~9|7`}FwGLsk>C`T9Dmvu#nfxk+QR;zL0| zQ&Aq3-&-g+o*mraT>^~u57OS^aR6ULmqquF&tXDIS()^@LKNkFATQFORe%QxuDMLP z1wDmyjxUwu^e4L8mmv7BU?!3X^H6?E+eYQ zGc_gOIQ zpO5w`v@aR&t*Sg|)L@}94+fm6&FfTHA?Vwq$p)en+ZBMl4f)*#NR#`r5fWZZ6{V^z?_G} z&fYIBT)eABb^W%=Uu`1vx0Pe|n}W#a2Vyum1fM&*<1+EH_AjgvG&QI3fe%2!iI zWK{&$D{BjSdy3WUA6@k11>_REm646BCZbY3EZ-7&p%V3;$odZ0Jq7A0KK5Z6;+oV~S-MHy6hw=eZ01P-}%|Gq1K zxok=Ge-`Z6xy-#ouOk5K3ngzmblTFF)N~UdYY1ttJMolk8_1jJbKcn4mSYfy?fIQb zjW(G8y}z9!G8lqt%)+C1)-^ArygyaS9a0TW`*rcJ++Fe@7Abh@(qCPM$1o7$vc1*y zN*H9M4H3_8qDoG8_M?|4b{y-Q9>g(s{kRjc*ZF>zFLZBoJJXn3b}s^WYRG^dAb5cl zljR*Cw-~YQF=e6X)v8VOYt)BL@5XXVJ_FMW3KeH22+)4fP_bdG&e@_h9x`Q2=Q6Hu zuB_W%ir{7E;dr~EMyl!3gP`5x;B;6m+_6|5cDIF2Wc!|@-imX@7EwvW8$02qoQK=v zsZANAVuvcKlFb!&xSC+Db2JhC_1)%;t@qMqrM*ROr6G@@8pGKNl5yYaGTO3mYBR1S zb*v&R3uok#q8AR75joyfF0{(t7Hz}TUKHFwPlFzr&sY9Zx79$f39^gu#W`2hT}IW+ z76f-2vKd8(u(0c<3vn~l3UWqP)2jHfQ$BcxWbP*1kRmFrbU2YoHDC>y*7d>*8`4U+ z94U-0KjK#R8yS05!qZwO@)hAn1q%cpR-!)yqG!*HThb+avPEB!RZSdxI-yhh==q+m zNblPUhlB|;0$OaP!B)JQvU5{PUQbQsS3B=&%IWAz5)l|fbjn3>Mlq2N+jh^RA%aJF z&1bRXJq}%j(d##Dl-~*+du(Ex#Lqf0vy(+Rms-}-M|KLmHeq|c78_RMVz#j=n_Ol= z_Rj42uPD>Yhcuh8*~-`*1yO5+5z?Cd9%taLQ4e@)4IvSMtVW)t=@)M2+8P*!OAJ`LGQiOw@mc*2s~1-v3&ghRsQ=>l={5<-dY| zygJiAFmDe|%e~%-rK;9JXSN5CNWS{{gz4Vwg~_{>*mq;6zO`_U{-FSpcc1B~_(IvR 
zo2=-9C<*!<&#=YCeBDdPI1J2o7>yiSVguI=ZZKHi@zbWIG?$NEFhPfsXL zu*VYDZ9r>J1U8-RQfKVD(*lFcGzg&)b@xNj6P7CLHmQ_17CxiZ!OQ}QWnwu5bv?sL zSuCo6CTwi^;6T0=@#duD(4H!M6pl@Uws$0r4(9vuFY4G+DGmoy7u%2Y0@3OUzDAtS zGi&zCiVE0%o!)z+2 z={Qe>h3#Og&X~w4R!@;RTVRLSNBwFTKFoa(eEMBH_Lt=p=c`g>mS|R4AX&q1dnJIw z-rhb0s87uNWH$(<7{4lj=l)4`Bn-n#(3ORa?>MZI7$R zjuq$VG(ItC*+x@)-rQ^^U-q`Tl~{_$6}nt)t`>^e%t z6@5Kk+Wym$lCFmalW}9;tn{C_x6|s5%b6GC+m}BSz*lJWHGbni)b1%vK1!`G+z_u* z>)+jY_@$IiFl4uj!dBo!Hi(`;erZu))j3~yHbdQVeB{qIyEK^lR2Q=CEl0}g1` z421tb#@oL>S~M5o;c#kXZLKTV(ocYBsNyiT+GRw(BqMn!kvh4|pBQ|3#(}b$*7qAJ zRp82i*o(Q&!a1upD#Pb}ZbWzVi$Y%k*LA>EcJ#n{Ty&d;c%dQ$3oje4eu9U>^Tv3t zG4*xZ1*0JIOwSsEdw(mZLpB;(>hbzQGjx%_CRpt%P`y+wE~{`A0kqFgV;VB+`4j~%NNU2gm4@4p^&$Q;ngaOSqoy z9l`F8)pBuLxh$PoHOx0wMiG>?1E!*8O!-3Nw0)pAIi)$=?Gd!E8p-z_r#<;%|MczB z)h;Cc=}jrDI71ii*(Tf)BsGan1RxI2QcV25(nt`wdoCWcvS&7S+>^Pn||FdDR4MRskn7UWD=xK!Mqqs7@Y zYw=k{1vA=iqK_nwrA~|UBUDqPt^H^f^k1DA!!r{GZ z8c$SlBH?5$oWhjC_ubPOT?jCZl{(E87%Mh?$KgAi>T1tJnR!%bwUSSSVPEdsfx4WR zmCnbNAdluVZJrX$B6BcV53o5I^@g>LchW^~89OR=`a10qPTF_bQ<9ON4c%e#qOfjE z>x#**ZN-BYs*=g*#Z0WbX9CCShB2Bn$%c|z)zbJHkQAxNoY=oaN=&z?a!J*c{%8Dz z8@e%VR+xG{zUBJItH#C-_!|^}5l(ykZCA0Kqu9A9y$$6cMtAUyxr$i+_P;aJx9y7l zuUq5)@0rs7hYme9cHIA&L(k4$`PZSx$1nK*JM^DFfByf3L+@`vZ9cenqwzBUQWexk z$No67b^%-aaZ@{Ls~?VG-?r@A)bo#9#i*c869ZgU^ zS+#JR8Vk;KCO?|G9oVfE6L(T?-Ivdhy zD|3w@k(TqRrefgxgBiHJ8t<6Mh?f*cN{Cijr;SNJ7conSzaTDgqfiJX2q!aY+;f{$RbkM3B?W+=OVAA{8RC~M#*3}m ztY_Vmo4(l?`aX^>{5e3?hw%$7tugAxO3Y6A(iJU{eW;luSgLv8A9Mq{{e**0h19yS2RHR^!yZ}1%hc&`CmLjbg=du+9GI*(RW>h&@5EJLWbfVAOi`xO zdH8y-G8EhFP~qguQ2HOMZV6OJQFf%{~7n62%c$+pj-&@h*fH+vo5V;e;Xh5n_*P>C{&y z%_?+pB0HuJ7WewJvc{XnrpA8$IGSn%wMs5-yuTPW`q%-qk@UFCKP$lNV<-!!Mw(>= z4ho3tv6v9uc+auSQ9UFw&gs0!Y60qEP9f{kfc3~^Pp8F@-qy+tbwW)WlCFUmU(}ZJi5_I z@4#9=)2c7i;ZoYao;)Fl9xyddKSr|NuM{7Dd`sO>VfqU5b4biuQUa@Q< z?Lr1egj>g?hizjpS}-m;f53&s!>GuT{i?^>1?%D z2ly!L7v>s}-wT6=#_%6+$j6>Y%(U7<<@%8M#?llzz0$R(8qqOR>XuWP+b{p--G=n& zFO5c@Gb$)LYUNujDJ**ZMRw;_$7aFwCxodFy=qsO3iv*U9Fd`}1yis5Zf1X~9I{(h 
zoyQilA7n}iwp!t~>&_O37!j1tTFK5oCuh#RJUZy|Dr-9A?A~jX%eD#@d%;SRNqw#O zOx}#lKT8%rPzA6jj<*Kk+)ntvFP1OTAdO0SfJkrk>oSF7hAfiiO@VHwiT4Hl0cIUo zf9_SCY|)LATN#cslW1HLRG30hb83zEo#$*HLdMiG;^z5M4uy+rPWN6T1apsJ>0O|Pl{PqNun~$Ijp%c|a&vRLd3R(qE-+5nDRov9 z4R*jtqr!vDWVQ9f+J0_reM*U%XdxHd?E%XrQMCtj?0^z|zwxYp&SRB&)lg(zt#3Yd zr2LC(?Ca;H5Mc=qd7(I`?O=ka#UtrrHiD11>?V3m+~=uLKQsy@w?|K8au2fLO-kgc zY$(`R*lrp1%urbOLbz+GY7{8_87{jWk+^zT$L_}2sC?GyBDbq#6A??j7(NBep@n4i z=I;~Lu|uVa@ge;XZ(I?57QU zOaCu)7EoWsccEJLJ-@wC^K~`yeiSn+r-K~Fl026c?+pS2xm6q1Ug0}?^f1}r9L%vt zN}bxB(0fVfQsx-1eV4-jGaExwvTat$qT&GGPiFLbL@g~w zWBVoe4wjd!=9BrsMZl_6LZV`2GAobApx2={;+W2-ek4LyR!U3`ST#hY5|!Pbs_#yi zF~u7SmOEVM|2x?eJm5a-G_jm`UQZ0&8-yH|3;;}@FZ<@-cEFrP*Oc8EwDV!^tRLb* zb1S0t$yP?_0KGVpuwH*V|J#Yx{&ZnkXMDE6#s@X~LE#P_a|+@v{L#noAZ=H+Ko*Tx zT!jDxXmdBe%zRyiom;x00sL1ujzOS7tGro~q zd#P8WaiBa;%0@gEE8x7&!l@nB;uf8$Lfh=m8Yfpm|J+*Yqa_XWeiRE}_D;Y0qAWCp zN88K{Syt-6XtmrIvenK4SZp}sm;8k7bNx=wKdJyY#>I~8{)R%q-i5 zdlfeMhs^e5IjyDsaPo%+)0jz)h8OTt zqLfLq(d-NELX^wm(z2bx08Zp{JV{Eup$t}&ck?&mG0wk#(Jito7pnD=-e{D`EmN1l zd3mZ?{R@4=k5C)UF1nVs8)}V}*V1kfexWR={rot-XOw~wBl9jrI7*xiqR2byn;sk| z7t7fKpj9jlWvPI9_PltOM(#A)$~slnwwo#LFONSph(8_NqZHS;QcWi+P|l^Psq=51 z*~>V;0*-_vUWfPh<#X#T5UHnS5QyMj=LCmAQaJ1ZeBLo%ziTbYQB~1sZRMgn=!Pk! 
z6o^{k$z*LU2&yQU3k`+2U;aGN3JQ6)94?(VF@*Sne`;gK_fs*GvVAT8ED5FM4x zdEzXjLt)3;I{KB|f!P-19E>Eh{HxD1d5)o%_lU{>`8@?0=Av4omkbCU+kfl5f5y2Q z&r{^t3oiWkvfpl)pNlRAaVb94)zhXY&{b(>HxHR$_C1B{UoNFe!~Q#GFViRIL_E*g zmi@5lnzUrrJl0!)You;1@Vwxu8B=Z@$cYYSV za}cWq(U@bsN^A?q!rRl?sEUn})i-j>&Wrs1+ z2E#~8F7CM?rq^17;NQkL7RPuDBwYRT5eQAaNC`Bbn+ z?YqnQ-XS6>bY z*HIitI>QMp<@=$)KJI^#037+L6!0%b(k-6-)^_&q&|%0WA}gLES1C*n{!=CHm%nsYT!@rT~6KX zWp7*_jhRrC7W)2-Zh-Ve6E>vw63tfM4huJJ@_*%18N4obsl{m0li9rJe%End&XSIf zz^ShzoZ>c@!%=J@X8Wm=K~G9(C*kndsFXf*q!K#d&5wNdR8aSB%Nd4}aAHm^<7< zU-3EAo=WU=#w3%@RrqdqD0pDXOiWRL52d!4p!1X6LPlj-X&%cKhxl7DW`M+B#SGHw}EA)7xhGr$nrUrA_z z5({>iBeqt`G8k|or~*1Hm!lJSY|{4nT@KJOZ`#zdzaaabUNK28qWBU|&4mU)F*lzw z9E|eyFhW?2#txFv(yw$#0#mPkK)wAp!!~~(IN7=%3=9>C6%fft!yPQQzfUI-3#7H^ zLn41MNRxQN~JF)?p~`H(l-Oz z*S~<2ShQIxV?H3Vm;iKyY$-jgqkm}i_r1Mz8?JCZo9at0H>^*9ZwV#r^{BUCu0F71 z&4Qf$JG@dRxoveGXs4hJg)W&P>aML_OYBdv?RPP;c}q{?k4v9kQ-64yH0eW7^;RZ= z=40ji_wMb4N2ZlVs!#&;rn~@D_iD4;Aq@!${5I_>W2uapx9#(B<{p0jFC~X01GW8A z=g+BQUx5UJks zeC;!m)rMrd+yjuil##9$ldR+yuM8tk%8F?Khp#e2Tz3K!`}tfT?4s?$bo|Kmr5?l| zR4lGU8x%0cO=Y$|Wa!hY!T&R2TzY=r%# zyv*xk&_t$+|A%=*gQ05@XCUn_ugf>%C2cv%$T3#p%Zt0=|Hyr=)bJ#9T;q`x8n-$S5RXp99o;K zl%+!HTu6^XcugOtFE%=W6>_?+72Wide)tvi(iN&uJa4}}>?7;cr>Hgc79iALujFON zC2@yX$DRnmvP^TmW23l_`a}ymP$Jr4d&Qc%;Qi;u0x~)BlyX zxWAF`&1U$WXYmn6)Bz+V3H#z`Dfs&KdS?yi(i6U|(+1Zd9qAOwY`{8RWPJO&W2er& z(q&*$RbOb+Vm#{Qhp@h+qkZ>ty!ZNPJsai<+iW?C@8rRG!rGmzZ!`E(UQrng$kS5o z^<=#Hz~7S1lThYBI<$Zi!~~!7*KXp7JZx=GuJyrw=_n=4z}p#9wspSj$LwI(Nt?43 z`va?U=f084@+12fNd(oDM*vSw@&R~=u;2MMM6oCils&MkG$g;v%~$QD<@BB|^qc4V zy4H9`m9~&$z3DeZiVP-?cp$1$$w38^JS&G??hYJ~20df-3KUK+|9q2%5(P=i-*O|T zA1TEmAf~r+U8$*J%he0s9A)z+Z-n7eknl5SPZNTad zJ$c?Bkl4UCYN3ld706?yL=UUAiuE<%m`UH}6$-k6l-&YgZ(Ipi~&L%XI z(%jd5WlEnFRuyNYW>%VIkW;%&&(TT2#Bc{7=%5Xp-IgnW?vJXsjS9#u`V)9)2$s|y zU{@v_}6Mdv5_0*R#EgMu6ZDG`PFFLvVNZ;O?%I;7)L7aF^f?!EJ!x z?mEF;2QI&Jl5=0(diCnQTXmmQ{i~*?X7=>%X<5DcTVJoe)>g&P8l8dZqSnd7Jgc{W z|9d++HBz~sH$2tOgU6@ey^kI3PnAgKUkXBK^oy}5vLDhN=7EyewTmQ@158#G3iOKi 
z#w@2wge37J8o4@adry7yTVXn2xzvtK%etfQYzu;+L|9bHj{y;nNUeEzUB){Mx98V! zFN42qba%At8aIx;pyMDFNS?nge1rfNlhO1Bwb3J4xJUYdT!KlI@@2Jl7b=Z2+!=tb zg)eX2vBLM8d_OaO{dPdz=JRS0%Ds~T3;~`6 zkj!Ka1%xtNWOuD6^kWk$B3bH<``NXRs`mT=emX^H7--nR_u~ROIpM}1%^CUfI*CGh zSnEPQ3@PoH#`7VDT8-{}!p-!9zy(Vo)u&*Z>DSaRUA&P_F4=o!mqLZ+aq6J7rtBOo zlZQ-i^7CM}gSi&8)2_Gy$@k;i0ZnBI%~o)-GT9T6WaH#c6LBi#$_=(`Rn6BJo$XHa zhW!3Q4uk@Ub=G~^9DoljMIw$5Jkef&gZXyADKtG#-aBxlzp0f*D*Wn1o_;Cn4xfPW z&qg5C<<`P!9T6u}8VJGU(8cRC-`U&tj18?T6;V~9xNu>Ujn!xxUy4$f?Z&C zB8I?Bkk?J|MEPFh##|v6cU6AX*PXAkKV}Rw|G{&NVBlj9vpFz7UEmq^Zr?5HW6?+) zl62-7%mk!~ai4=u@oBF`&exx`{4iU(S76ilM=(yc;url(n}ej{9=k!!e4K`QQij+H z;r<<1x=!3)lj^G6-RP_mt9rGDNlGC9AdP(;u4#Vb}@>I>k>sO1YuC{-&)8h=!c21RfsfIq?~GoUsFw4?w-q z=wJ0gxc)24TB; z74Lgl^7S)B$Z}y{$YM+I=v0odRW{BoziLct6nYs&t;_i!Kc73{?y{b|bsPr#tkiIW zXyTR`8XQ$}k##gJG)&o}dOd}o0%N~hEV8j}IGv@cJvv*q_cw=f_w5UBr@l%DsgA2+ z&-;dil9@*-nGZJiCzUCw$!_HHbMiS_rMO3`hAAy^J9pf9m^MoJz9c#1!}qqaA(wpq z{7%~Uc0#F_@5QwF?m3E}sLrdA&2EvZBMAp-2qYZ$hl(woN3CX%_bvvu6T&h*Q7Yp_ zKmUx0wpqjkidinRcH#;J7$rnZT3FtIpeA+hqnV0*?|<&6kiY750u5gftu&yST33_t z%SjpPrbxeZ<$B1Ch2eNV6*>BnVuy6eos%Bd(OVUXn5~X9mwP95v|T))9FVTMz@vdX z2R6FeHw=L3ow)z{z!!zk^Jm{-T=vt&uREw-b6oJs6~ntL-oH~;&gPo<1vV&RaGY4K z5l*K|KSRgDm1T2&&plVX+{sYAKOf`?Qn7Z~g2r|k6K60kx{bs%Ixn#Bye@B8#2HEE zdD^dOvYb4mE_;uPy7+`}*@Q_-(}FKqZ?EvUX5nNVDP!7|h$>z-m+yT}w5V6j*@lo@ z&{ZneiR;ctg%YSl9n(WW=hSzudHyR&pl+V{aaPzkY>P^7A}~c@d}y$0XU4D*Ri0M% zc#n$h%+O*716v%BHgo_EcYW|^Tb-sn!8D}LRws(u$S$7$VP%mTKO2(^x$Zq!E(Hlg z!IDH!7P1{Cy>0*D$*@cYmsp|{N-6N+;_P1X_!(zRG3Y*F;CyvJUQBYe+brMNPKGB0#dwy-W#bEJEdRi3D<+DX)o|U~!-X~38Hv^S)JRm8)`otni+Y-VG0a}ZFOmyH zUb*^g7_$Zm$eW}mjXOf2yKH-5_J7uJAM=UAv)JL4YbF;U^v=kbMp!W5{T2=67k!P| zwUA2qU8|P$S~qK=tNKVZni&-imh>Yj4`%s2PwFk3=CVJu7@77F4eglWJ4wSEVlGLd z-G01@FFyz5&}20C7ZhFw1F^XlCa}%@QNr1OKRP_)_@@>>8207A`XhrEvRBy7^sEQp zmI}1EY)}&VJ-lV~w&z%;kqCwvbmBQ=^HvQ?`l1a`d}N3!6bWi!;UP#jVm~-GPkW_t^WPqczGhZc2@>7AXb!RJNL| z7N7%U?7>v@Wf5{A7K4l^!iz0 zES=+$^EGR#NbWBqU_Ry(h08yH=HIVSf>&7cpKmOPX#WB`|GLo6ES31TSUg5AvGRY4 
zsUZIEyopTEA&?xrL2uZ}?~qW?n>i2t?*>Ac;CAh}@u7uLckB}WHf(B>4g;Rxh9_M$ zL*_s2i!l05{hvM`v*`e1+lSv`(}bTLGU^@Uv1q^8d&#*n#-pf{q#f zYA2hf02s-Lk1UAKXh{lg8UvZFfhF;zCR-|5eii_T2bA{;sArj~5cKe03S^`^v z_Hd=>&*&6%Rrhw1qh~IuP34Ykg>L#a@AR9AeN&no(0c@#=qVLDuEjjUPc7Cm$T>UF z{Z%F4(~(m7N~y#GQEdIuBO*;SXks~V47QWGFbU~r>bRXPFTB4>=fQ{NpK^^^mCH96 z?4hMvmGHqgi_bRD6+}K90;fP}7}WI=A_sntx9SL~Z#fP#Lb(`6Bv)q=&pTCG*8&>mp% z_7zRG_!3ed`t_zXDotD@@Wn^f^4#myGFS4@!du3Dh=m`l{WH%$0)1H`iOeS~ zhJPd3t~DlD`YOvLSISjNjB(fKI(Ro$$i=nu?T6KFHH`Au4bii1mpN^^I5^t%a=)3H z7TmnXHn5nosMBtB$4i_1dS~LW*MNZSJd3ogg&wRVVq))mC_P@K_45M~s{ZJLfAPi7 zeK1szl)cdRa};;d`yVd`fAoHVP{6D1B&a2iz+6q(fU;d`4(@v4AAXx=C3{C8?V zwcj`>Xk;dFqHOWQso&SUDs=*cGQfHh1vnp)x06CT{<4<(_G$fWn0~S5v>dc@hH%+= z7wpAG#bW;CweoV-P*1I{REtSsCyg5yrQgoAfdwP42O9T7Zc9@~=rWq^ak#U5(sk`{ zp@y`n7b*>~t^p1dV4{J8$p@X=3+)#cod)-8hKe{7sRj@nZwpCU^L?+HJe&I~@P|(i z4~SR};eG!0+Pc~)oS|-3E_cRLhwXFE=7bOXr8Bf88|6rs**c!_nMSI`G=jBaCD)bk(kx~lW=I27f{w$1V zW@v%BL~;VDW_q{7#3etH=(ExK+j}F6Y(aqNBxdBD^OJy{kSi}88*!(n_;14L%w&ew zzy;STYpn5qAg*em z(+v9rpYUBN%#m8oI#_KBo+Hm^UZ?LZR2ss>LPko7vS%hTSJTlrTlwl$>PMdM@SFrs!1fbnG&m+GW}h^FZ+^+c=)K~GdauG z5b*pV^~es$5Z7oMrJC(5W>Zc#NgQ=`V4kdcE26^9mN-qqVy!1D#VyZ1yra-x`>=Db z-{*8EoM{dnDbF_h9L;3UZM%#NSq(Xr;qs8mYoq*Y^M&2P{0r6td->?VfRBKWqq@Rz2a)@#fGNR3ZuIAw=_6Ab}I=3;=HL`z9e!C><+Y0KwDUl4U3V zwt~))N6d6y{lRLWgix^)XjlNelnd>qa+8(7(Q3wM0<+R1N>oXXW?EzW6e-v8gQY`e z|8`MATj-JEB;$c|lvR{`9z3*&oh2Ka>_%hQj@n%{W;;$VgYPtRvXm8)A9C&|@!53q zL%Yza@o)B_{YHHKeo0k-VzxH?xS533UspdbEC1k{DF<`$JaPyOoTS0fwE*o$qIUuG z8VUq`LgUQ=-41@k5m%{jY%hcX)XEW)7XwBsKHewH$iy*;WD}b}h+od@C;v$%pk<>OVh!)eD zyl+4#*Ttp~2KVu8&+s)*$4j)U%&_2>A1+uxfUeT86_(3j`w)ODxOC>;K%&D@$d8Ys zK;&E`=&R!r=GD2?>?_|ETvHPrvDNX_v(y=y*-37@Iu>1>hJM*95b)yVN*^A zgjzNEH{^3H6A0_8>pg6!YiO{q?3{jH--%Gf7B9n5xD4iEa?w{8&EJp zDU_OYv48K6Fl^fZyUnsxQMgY?B|Wwa%Gx}_s!Yz~*BkB4N<7(gh}5|)BBpn(Y~T^4 zNW?_45Sm}A?9~K*$-ak8DfFArz(9Gmf_co))=x&3tzxN1h0=Qz7k9(@)K8^SLTWaP z{~!fOHu4p{M!f3}w6dw|Y-~@XjeB;cEn8ch?c3XCo_fI;=cp1c*_~fouC+1gEo>JurOR9pS0; 
zy$O+S_zJDzoy0Eizz_F?@*$bHq_@5rPp|D6`M$U|K1*^i0n_Nm8GW~QohS<8)!~9&nMaH zUqBPdeHu)Ko7Vn86GvaMJa|=l(I`N8XQLwA`xBEG+cR@s&>nPHhwvh`Pj7}4OXA_hDSwtai-=W@wZj2IH%;?I}={YqPp}`XKn;6&lggB+Zl+r z+oa|0cU{NJ(O^hHFK1-Q619(*4zo$a>H;<;&Rq|@uo_n zv#VHT;g1jTPl2{|-rnhH9!# z9^Qy}=ph_MP%l~kEsplk6B4c zLa>fjB;>|FnP6W6bgt-%IR$*7m$A+-%T^N-Lu6g_GD7Zy!alnabXBZehkgiGO2iUa zPDsxpX9fJ0q>mPH9j1~qjE8_jM(Yc0E(`oT{;&WALf>CuD#R+U;`!u*vvY+?Qb}TB zTRXj2Ts#f=NvW3JJzHcWHmZ@`V>}Jep7-wh5!nL=$8`58mpL5nKIK+i^CiW~<>usE zl6Ix}gMOL$hhgo@;`8APxv6KqtYGaoMk7t9BHO;Iiht0`gC7pL%oGW{dzHmlav(|? zYqI>8p~&^)Yz;O!FjgfG>4Z$fIme-rpNH=EXNv80{cObsBVM773k)J#W(j_++^s@s zntDEd#5rb%=aBcIKeXvQPviFQ_9k?`vOdGMx|PfN!iItNFPZk3O$GvvCWNZs3EJ@c z9rP4b|IDaA<|_rmE7l!xzOs;yXL0+8O(xMbVIjn5v>MZ1W0*)_v&Vd#^myPi43RTT zAAT^#(sVr}EfVw>;Nowy=nx`E z41!^|Z{CNj_G?Zc8H1jVy^0v4Yt1$>2oF8r-V5p|`o2O^Kr{1f+ttQF5Gs^5DC?PY z+JD3V$WkRi#XCQdz$^P#{eg<(0lD1Y}$emR${(cRjHczL)?}Fu34h|8fIb_kLf0TMO`Ws()RvSwzyC9gyiDxY(UMA-HGrRf+$) z)wSrx|G8D_Z*PWQnJ{GaO=Ap2SmC*~H%wvXwI-u5?zW5H%eU=wxn*V!M|PXme}bcB z@N>xd#hec_J~a$SY7_wj`(XMGnigq7?skW^^6f1(jKv(2~#yw#VPKJc{ekG1fVs5%BDD#6?N zWdHnub6RKR;fO;!6UInTPPlOz`4zxP1X2Vq^rvXeR)4y-q#x;L+p6?-R!JL<(np6D!4ND$ zAfWw3_6Np3Tw>mFKRTj9eKX^aFX#$AnBezY^Wx_bLJ*P0UkC8VUeWF%>Hd3y9uk5@ za(ka^GAWSb$sy6E&sT&ASmtk0Y3Bl}sER(3{;g8>AM)?2sU}kU)c3Ov6tP_RcH2~I z5X&F3u%PQ-ydeO+dk-9yQ5!=2nL7ztKv#v{f$FaoNcdL`9;mh!b9IX6lY7*tq`CeM zM>nt8)3VCl)BZ^08nq1sQx0-qX)4uqBcuhcKh5V~u6hh}1 zXqr$aXG^+?b!TrbkzM?c34evn_R8Xd6|rbDWuntDwc{I;8NlCD^^$2N*$eF^>Uioz z3F5On|00$3uh+<*QK5v_IRA9=^_1ifZ>)Vu{GV(8Kju{X_U*otW7=*HM0;iYdyQOi zCCA6dH%Z57OMf{r@g;p*h-D^af(nM0yD2Ckgh(-PkOAP0=MwoF18#Sf^3OQ& z;{R*H34*fNaNHqpIxSt1Ad;NvYC+@DeXE)=fj?C$~BuVVC)+*XC`Wn$ZVlkSZANO`}Z#E7uVkUzl zJw1LUWzL;@>eO>*$8<1lKTBJA-7GSg;3S0VO9Bt4stWTIxHRY?9ql2GgeQ@{y`+}iye>a$hihn3uRE0{>iAjB7){G&7r+0b{))zqc6M6{XKrD&%Og_$#+5FPwHtM&zMZ&0&+)Q7!gBM1!;V-RRx!2 z9u7#x-XsXGQkyHQ z*>Bg}Q!6)ZZYy{p$=XzbXC(g$r?-&mqn~lFjzXskjhRY9d@Fp^!a+#8K5QF%2J8+C zUv*01ynOm(Y-RQv#nX6p*=i%v^69fVqj#@)e^og@Hw8**P%3Xp$uJ5#m&(jIEJkxS 
zm`vcs)Ba4+tBpgRSZFkeV70L6 z>K)ylNe2^?cdZMNf+N;z;6gzZicNQFc?gYEVinv7{2Ev-2$iQD$KS0V7St1bYsK>^ z7}q^l6l{}v0jA{TN*x#+jmhz8yMXpDCs%u-X2jao4O3J?pRbvaT%eC;}R2wBY;x(9z z#r1GhR8?=EX;m|Wl+cpOlZ~g_y((e`@WbxzJZo85U4otXw6hyyQmde0gL_cJPudK9 z1$f=)2b+le2HJ7hw3dFnw;Lrms?0$x#B<%7dOLAB(@&9hGZ0!a#8%>%v@{cC*p~?n z_QA`iOJ#GwOR`qqpO*ywF(C@NM!E$BlF4_`w3*{93^RtP3@6OH2NUv`t~dwb#_lZx3a0yc=xgADzwL+UY+s$|o>6FHBEy*is@WLGBlT+6SHpH?{KgI8-o6_KK1H5_M^W3XL`-*_hbIk% z$S){dsM5H%mBRPoh}b}y?=@KO42g%+-nZkeL&=9<_e2vc^Q8)Z5f;<-t8BQ!ikznv z_}#s8;ID~~=6jDOtG+vgUTuCiD?oz{gNFOda^)wq%sV?z-`=hY?VS`0NA=A@=Oek^W z5Qm|=a4WXP$%;|9MBXEZz`wHb-pN{3npX}AXL+&J1l>gf#X_V20i}__OuO|@J>akZ zSQgB2EGDSMD}p$*O*=#{t?@vx^&E^}2=r`44g{p?|>f z-*5kKxf}f7aBL{(68o8aYOt{=jXkmqg}t^kFA#>r^|g8=Rj!uD>dutRf$d0 zN#u%6wzvHqeB`))&iNw#FECP@kT#uGBWZYkHGg)Q8M1Zki%?KJ&mO3po6_O&l(?2P z+A!cjZ>f{hEb=Xst(2^ZZ1A?Fhb#0*-|njDpDDi5eOiMidohF$ARbz#l)evV^!pY$ z4*A~56uXu8`;m?Uw6Xuf4?5%O`f?IKYs;jTo!@3C`*OGx*Sdje-+YPFDYYy^{$>l$ zaqK|(Dc_qHffCVTsATx5nOCOt;kbFD2#=>ym_@|&74ujyH~*T=Y`R3s_B@s3?9(ie z2I{udjcOPWum^Z_>Pu_L__gXiD%ykfC>}FNyWRKXKaGgcABN208oZWWz&in?yH%zcpOw$)M)J^8KpHXNVLmsD7Oo6@P#Z)}Ms_2gK00oXOKuf>9^(s~dV9Ki>mE2BZkS}b5d6WBE*R2I5$w)wfr#~I1)#5>$$ z%`felLG!F{Eb!iQIvZU~>tevMD6uP1a{bfbWfP})f5~#oM(cby-}YMr9IkZX)h|;y zalY+H#3M_$FVbB`2$qgxuxq$N=|(9U7IT5~td$0v(W%ak+}!T`e~m@mQTjO)L*N>j-441^<9$iU1^_?% znzFY}T$WtK?jabxA9U|+rG!o%+wrjl`O}-$y#w%Kt9B}+DMn1ohBWyBS> zo)tTd_k!9EmQQgE+H+~b-Iy${K-G7y%_M)76(su~<)(yTHOz6FB^%dNU2xmd#Oiyr zPrn?;r;M1e)XtZcF<0a}Y&t@;8kq3Au+A0io!8>M8&>sY%4V7da1)*BW-Bg}Tp?D6 zz*wMfii>V>vann&qRGhD#51chs$ATw?OGM5u;P^BEWX^Ds8~7TWB~+CrXk$lr@)G7 zo;~BSNeLdCSMioIVye(Nixu0n-0z6U`QQ6#h?77@J?_7_$TD{1en1*YB5hF6t!l17 zFLXIU{t*xHpiEBqh%%-oTmS>^|M#%MA8$-i?0z_&o{pnh9qHx0U0%1 zJo(~sOCOX4ARz#*Pk*BDvw=b4vL%txe$%5~rpjT#@KY>;{59gHOkrlE7j+G6xM_SG zv{-VDr8d6XczM#HG4OL1Ov7deP#ZTKOwWr%O5{YWn;mcR>>6){lNg8RV^T)syznB<2hm4C$?xBY~eCqN~J=_bHhhJxf`j4 zTPb|V+d?Ueq`4F3-S6qWY3!~X8`Pour{mIqcmKHmGoX!3{n!p>KXT;lkT4j>d@niq zPhAE=Wnx`)De 
zUJ683qu>A&V?Qwgl>2fe8`yF-TXThX&+KQt7fuxT^-XQD?%1f+(0>rmN8Q({dbk!5 zFZGcVh@Y!Awj+-WuIscIWGQyEzjP)K`SyjtX71EOW!lzBy;PfFB%b1sIxStsy2_A7 z9Qj;@Kr?HJBac4yl1-Z@8V3aIcvCi;XRVjYnnV}>!})Dwsw_P$d*}hnAvkw3x412(y1gHd?qB_Y9pX(!#4wKKob%CA~$DhUzl&Ol~FF)te4LJMF8>~P! z2OyeUe~kv@f4muApA_SZjRkuXaX+U?i`fc`Nr|A!9;s89)_xBn4x>n4D#a4-?__Ey z_p4k|Kjy2fY^0T8m{rkb2#5VhB8ss+B){k^xsFTO`qDCPUbc zOv|D@Uri^1^C2Ku+OY1+F?B1*S1!JgVb{9J=qu1^F>^|(`56|gkHL7Z$DqFT@tthj z?HK`g?1W!MLW0-5J`S_X8@6JG0-sUoo!WBgazVdy6#ax&>`D{j>Amg%ak@`4M(ma+$}$*viZOmDQ*p`H?yB|;q(qA-%$GZ$ETzNCro-S(&z~>hWd*4 z7Szfln_Aw(QpeS}?x|t;p~+zJ=#m7XiopJT<4IVhKLZ4mrTYe?{vR&?Q{XOk%98yx zLFLjR{pJ7g6ROCQq!g+0e`@(tU)(oxwBY}-D9(Qs{9hZMEuuXBr?g-{oz3IlB6bts zjvkyi?VhL{k9cx|e$*y>0NTZfvG{NEQEwht{(kfuUKHDAC@P)g10PN%Q_Np~Mg!1W zE0#0GpyuPI_|CUy45s@*sLmg40sJ8ZmHj8lsFFgadbtN zc7*jpLObD~CUf}JD$_rC(Iad=7Y)xjwa~;Hb>r|`c{v}F&h3|}8=Lka(_062^UiC* zlhv2(0{#Wiz8@#rxx--&`<&X1b(BT0$kS}@m@5Ifqn|ogI?^3uWUJS*6XU;^)E$-d zKZ|1R>%$SgO$tPga6Om)^~tvAy4NK1yZ4A`k8Dg)2=7a3R*-z6?ChAVmvl``bi?>> zuEA?<9}CB#5ncer{TlO06Y1P>qn^{c=b0{4{$w|mAXUifU^?Z=jR7S9w{NfH-OlKw zoM8-nhv?Rw-L9{H=@XoO*}k;@Zo|a=>CMd&-}(4NW_uPm>bGve{k(Vp38b96KHq{2(L(`DLJ=0cZD)Q^o9Z8K9WB7$X0-H|3gIuiZFEmYpy4LfrI8JUT-yH zM*AE?7P8=N#gx>iH{A%IKahssA?Rwz9=8T4ul-!>7LEr-D_y0ucnDmO>ajXc} z4WS5fGWAFO)|uTrdzpVZH2Z`Lv1a99Ocj{+{`l)xR;$~p)k5nB)tk`WFK$WMVTKcN zGsakkZq*X@+hdfLAoS)IGx{tD|6~itPOrbA6Mql)-P8e5rYZPW%-!tt=HBtdpvj!0 z;BIrbtKTzz!Z~L$9Et7md{B|7hYJ_Yeoe^$zR6{$n6J)k{&yII+f4Z;6z!%^w4RaI z#EC0@QR05O+m$Z;byk`Kb}i|{BbiahSyvn6bP}$)Eh%`d-qH7t8|DF+pIZjw?gQ_+ zbKtIBdE3)HOr~zT$=^Jb)7|ZCSGS<~1jP|Hl&DjE7uUT0t}oZs-4HU6xeur?meMuk zcO_)E7K(Bh7aC{Zd!Bdto}G;+bVOEQtqB9rN>I5eEM0Y{)~9{yz7c3*)h56kjXv$% zshbmQZu8oF8HQ>!kZy@dxbt(T<7Mm^N~+8UZyd&00PVzR;Z~lWWdnGddgfI_GZbyS zC!XUTs|ATKO%yj4w3vLUvRDsLuJs;yQDbvXJ@bmLa>=+I_n{EHn{u*sU5bFRYQD}Thg7bHEj2{f z0lb2N(7S^8!wV<=Gp@D0MV8WQmnc71DIXCMbE5DMELZ+v!5^^SPO(^h*b2-xexPw}wY28S}R zbbMSU7uor~m@~KzTwLwCH>NjnK4O_VBRP$o&4>J!D^7^ZRmIUz1GUFZZ(Vi?A3Kj? 
z0Dc6#r^#2Dow3U_@3ljgd(648(pm|b97jzF4qEP4H5fH4!vQTjQPp5xIp5L$uDLtV zyG}7g!T>@^2PIUNc9A9~Pw3I@4F?^@0u&D%7IuYZwg>7T*@(|Gm3c(r=Cye&*(J-7 zEuP)6;l+U)<(v69$rjLbZA;FkCs4@RNam9Y9u?bUE!Tqt7Z_Z#g*HQ~KcG045c6dl zSTSPS`nd8#E_ZP?Id98Da6Ye0&}XKE=Hwbq&l_yc-3&)%vWAW{m`{RqM7L%!5ea-? z9|h|crkbJPxH>Iv?y~p?*YA4c`?JlTbHGnJF5Hnt%|_@hanaLMPMnh=v;$_UC6k$% zd@&gfOUx&S&KNX0xMLm1QTXl9hZDs$rT$I^&zWQ6SJRineVsAIB z5X;hm`yQvcPK?rSO>XVJ@tsx5KqqSrRCK0RX4@MHJzE23yLoy_nCR}OW$n9V3a)fP zPxSnK-pS@$e8GmgA~8Z(1&^GX%#3v)Q#IF@l?x}e21JXOC?iAyi~gk+hA;E%MG%AR zVY;F-gJ-Rtm-f%5lg2!EvoSb@?*~|srFgrzE=>d9`tA5P8W8qjk3MYYK-F0cSV~0G z|Ku9w+ctgr2B)dT`-NL5@i|l4=C`T{-0rU)I7Ix4T4O7Vk#xGE!#WXvcCo@^Nw1+; zLu6yCqfk)RQWTmw3;vK1{Xmw`xHcO$WoJq}t8bvo_%FfNrY33o)b}8N_5(fN+gP#L z(%KASY@Dah+w+;-%QbPE$tp_BG zhS{>U=nf2=#$k7PE7Uwz31^y_|w%l0sBkd%5t)}oNniH1I%~VKz%+s zwynt7s2^U#yYdP5#zRmQT%exyxsoG3)DPeire-g)fGzcMC2|V$22Qm~jVS1p>I*#v z#ctPo=A?t(1aQ+11p!snrYuzwo-P%u@tJEx1yQUf$(!IaB1)G2PQk1$JsIi4tp&@m zTrG}tmnjqw#cG3FbyCVfY zvAkIdh)!#_f@Rb;^-F60pto?tAQ``MupKW1iC~ZBKpLnfyKx%?$vrgFKZ+mH&-HX1 zpGp()eYA4sch6}xc^`4i-KQ8=2}fTAjX@eqv|Cn*?MK(KGeoTl`34<>xmSC1Kj`|@ z0;h}B4iKOb^_YZWY$P;2{l)q-}s=>t{8 z(cNB+hu{0mBOK1X8VDsU+E=C8{Vo5t*|zk}pNb=1VHsDB7~xLFD$_aKRjw44iy2Rn zqdl80k>?a$$=lrsEhE)Vmeu#Pjy5kO_P^C%UD$FO$N_KZ;Fpu~ldHMZE93w_((?Hj z+Sl`4mFeJ6_N@wDP z?bZj^TB!>UU2`2qRZNrb!};1m-z)0hyMAV)#jYD@^z{=w+{{R(W)`1C*Kg-YfO*_j zDP%RX8*6TKAp@u#5B%Iqs%fY#O5885FTei&{*fuZWX0^I2uOp-6YA1q;cIgxjcyD zqw7<~PViZAf;;Bup2BNZ9&2md)=1LR&N~_$AXzxYt?u02R;7CJTRIwZ7LTKMc9GDA z4v&`Oval6bMyfenUFFR|T@0A3c`kmpNTGPbN^cnXFi*kr>PWWkoyDTdl~Ks7(aR$@ zbKbz&PN__Hj|R*z5>jvlLie_1_9tKy+W0flAoZF(v7OD5YRUKa4SXKmvM=b5;|29a zGDnvYzPv5i(bgJ8&Q><{mNjlxGWj~Y)CNNlTc;eo%RDi^eo-fK3Al^OV0hvAcc;d# zlxVcPyGg!vam=)up%SXG0^oPPmqa^I`8H)%zV-X^EwYY)ria~byed_Qn^S*jDg(v3 z&G4_zYp+@pfirCM(R2Dq;rJd5#t+a&f2k50Kkgb`U|C#*Sm6DC`MIO${$i&8GRTU{ zjy*a$p*?M=bBy(T39^+j%3C$;%FpR?Q)m=EiqDH1r#CS|rhwle!g30u#tImFCYJPz zhbi~g2XAV}7&jJ9HaG~+7I>D`qx4kQ3WyU}qkd9K(d*`dbjHil@`fna&<3HuiX(_*R4#RM*+Ski!TFD7K5U#?VM=se#nq$8eHg* 
z$ETLOb6P=&EcybyUq7q>a-`RU$>-be$UCDxVuVAkQltK^L9Cfp%WXY@m&j%=(XvGJ zsbVjRVrNdk;F&e>D~yYZEJa5Y+2k>c^n;jvXqs}TYDx`Z zzXDR%=cR_r!G;|}Kgu|(^d|}$tE{;_ok!=yVF;4ZrgzDgPYCdFBu(KB9)$tjrX@w+ z*^-X3ou22&5^sBJOBIt+(d$0BT?n>XMgA(Rpr71f$GR3<14w0W^|O&%7=}7MnywPk zVnF}eQd(j;ZfC!zHmo6k5cTlN`J9$G&!*&w;T@WCQ^j3uh?PRm7!*jCQnOn;8q}rq z%W}&T4H!EoV=f(PYn+}^?N!;4#~@RR<~9B7U=ZS5xM8Wq4KF@r0HE6XnfgGhnRE`N zJ`2o>u#>TCZ@lqx^(SE0%XQC93hkrTZcK4I{*UYCX|E5gLHU%ev3yLxc z5>DPYG`DNO<^q!C@m)@`$%PE#P`A^b?5(Wet^uqE2Gt7EBh?!NG~A1?BIDy=4H|8Y z5;<_6-Aa4$Mt)bX{>X;*TF&~&{%aHqhT+JTS{%j16n^hRCr$ceH|MFBC71mhQFx7t zMDXQoJ&HTqb7TTJhrUSD?|V5h#2f%-SJr3|dD#m{^4urXf+t z7F4>-YUvo~DzetV@P2?a%3e&l*oCWb7r2Yy!t?UHg878D2{mhM89zE6oXFZXLL{14 zQqLTWe2J0%1btw-cqYQXS+_O)c&S&brR%fb16|}p?3P%+GX33CU{@kJOF>zVo9)=l zJ%N)<)~9#%Uet*Q^dn8?7ZODFgfFjwK#Hc#AnpO3+44E64`9IV36=!1mV95!6$6R|Z8YXfj1dbn5 zeO9S?lUn1tG|vCWh(4<9HGKrUL}(HZ$34HCf*J0SSi*A8)s7{+x;RI*+%E;iPEiwsEJl~6$)k~v?iGf;3+{)dY~FC_c*|xwh7R<-!zIIR4l?G|?{TAH)HpI8 z>6NUe+!~CD>+VrwVQy`q4cnPn@xJ{oinBzOaU(^S-1%-sGhu0w zATqDy>ws8GN@f~fUr;2WTxJcS;-If3Ix@?OMXr4!!l7U7v_*JqqTTE- za`>Mnu?nO}kQ9l8#N|6bGUQxbovTu7EgUo!y@4;Y{wCX{9(5%JbRAXy33FU-9f~^?cMTBSf=hV#{bgpJ zd7d?EUeB8I;wCG}&ARs<+h_03+1~(z^dfQU^v{3WJ|K2z5B}EdtD=i#ZAV&oDiPlu z$ZUyojHmOuFPTw~7Sz!weDBNg6Q8n4a^FV^J~IuKuHyXzvbx?QN_-EfVYQrkFND9f+W#M=T8 z<8v){%+&*V!CR5Q%Q6koWs@o6Wn+eR>CW5Tnn;Vk+ryqhY_nDPJ8G1>rngeuA};Z= z&TFq#zN|^_KKX72{T@8g#6{*#HVOU&tV>bJ9WS&z|<~8I_W%!)el-^(Jgvt9vb=&GE7WK~^OORynUO z#f8JYc~&zC-Kt$aj6$?PAu?WZpMNX}6UdDtm{l_wMD;z9zwZ3vp+fB)CASRfjW=wEpy6+~yP-ToqJ{ zAZxBxN@GmZzd3XrQi0nA{lj|`KuV`)4M&7*e*_D zUR1-!Gs`Z&>~9#}qhIqakt;fzSF>s*K5nYr!OS%CiWTA82TzFc&dk4fNA6z_N<@r{ zk?^?#tY%o{9uEK$*vq2J>NBw~0dsV_p`n^adXwWETM%FT4Edkpi6&bRPSj5#i!nK{};kr4DWP1`?z~Z@0nCY?wber=G=^GG_&iSA8TNPeEN= zX3PYX`M7GA^s;u0OCpGz6S{*bQvLW=-Mrcgv>#Lt^bpoxyq(n!1F^kI=KnhUNpWy& z8O81<_Tfdn(z=pj2AHb7J&YU@f^G&)cZ?DYw0YO8hZ7>_Ay4fYAWBGKE2qUA$eTr5 z;p?YSKgEsWeAtHIi~nQXQ)u=88b)1oHq@J+!0 
zxV#-Vf_Z&wuln%Oev1G{WGx;@As~N^nmuXysq%NetKSq}nMBuObHUsFqw~yS3BxS~6_rKmL*QEA{PFsJxEm5fmctEpP zQ1yBXWjh)1R#!cXcC&o|i&~|_vxI7GB|muo;{7XnwyN7OdWvTEsKLQjZPD5wdC@gV zC2&*IvxkB%Htm5MAWH0cV+rYH$zvNRO$yW!wA5YO{$>k^{<6JqRCdp~%kzRTeYaX6 z2@;*-@K}~Ok&+^G_Qu3))SI}39wgR3osE0^GJ;*7n{0!r0@JtCj2`3_z8Hog)!?xaY9)*MGNj)Xx!x z_&nki5zRG+qrg8Tq)fT%y!>8DA_vSX=Zzy(9^k z^1FCMS@)?=U_s%msAr8VcTa#w{R;6W=!@g4Ed$?USJeLqN!J2pd8wTcQaYs(%61w~ zU=pX!jWk^x`w*gH;6}27@3&|f2o5xdZ|idpW^Iz|uRtd&xI6RmKy|NXz}OSBaT{Ab zz3F2ND!YsK&~K|w0s{P@r|7W9raM7zpu%+lS5Jeh5?cG{ED7>uRCALxlgCMiKr$d| z6{|4RNLr{dtLKK$SDY$CqpwBTOfi{fpC?KgtdoU6%_=&Y)>W}?P3X;@j42gmM=vSc zr%GA$wjptUJ@Y*`bLvAP~O{CkVDqc$)4XDPVG zV~f=mxolWES0lD2kK+Jo?E%3u-nWT7e!YhjYu@3apQ#B5$$Q#IlA6=>#j4}Aa+srn zU9$^K&vQu)*8!DI_x_6JQ=`lfX7?Q&)Wq^16YOeeI6i}U4xer=mEi|4F#Qzg;6E*hCdi!EB@<-o3ur)m0h2L7h&;+cj zK53ynH$RY+j&L}@+yTtfUME<9U3Megta_o~Swoc?=Cv$URPNsOwSn|SAesk(Vnuhp zTjvmoWW#6^L2sM6NuUds@jX<2v208%rlcJGVBMc7C>+DME7#n`I6T7WVEzd$InPiz zA6;1V^y6)MVZX$yzuPWGe@0)8rZ+^NurkJsR4|m^U-pSQM%YaP%qWpkcyWmUnqApA zguzM|26~eTkv-f$i5`dulPN(-cVWLwiScFGmYM6wp^@Z?Ge5L&E&-*U-3x4; zf4zp~9M^IBn5C*sC5uCMsu#Qh4nxX|(hL|Lu#Kjvh5)zOItWCql7GGB_|OB=&q=w% zd|MDr-yQyTU%S}{njr4hCZ6{VUM!U1v5z#V}POea0218|w_0;n?Do&>pj-@g8$owAE+o-J1sEmXGli%tl zi2-u=smK;mlSaGXSb!CiS}QivWB5qH-z^f3WZfi2&K3!=LFHik2zOwS)$7BbNB7fP ztbrWcOFxL5&Wse`xeJ0G=SoRSa;?d5WUh%wbdKoXww!e zBoPYPzQ(71Sovei$HTG~j#FR3Sqc4IA=>b#!x=$YvLUaI=r1F_-J^x^c&q&PbRw1@ zb5~pv#lnlqsX#j)dnP*S@hpKKBqMrJ^mhrO8oW-FAezh=(@g5+8Xhw~=l83^^1=iB z^W+gJ1@}!Ai!W@BU*# z!6Uf$slLM3QmyaBt&ML^GTB>1eT(5GMY$&DuA z{*rl~;9y#tQCj?5G+Ts$@0K&bvg|oW)P1e0R*Bl)^uewywZ2OwmgO0Z{)lhjbB5VA z%hGZIb&#VZ1XpC6@}K6Pta_DxTyAV?Wtu;kBZ?cZGDl{b!e561?LYivAv2scqxO zW6w^p!6EL0d1)`2sF^`q&IQ(15A$ZSb;kOr4jo!;S@BZzsWkt2Y`vZHt&gDkf5HPV zNW~Jj{)G#6xE|5x;Kq-BZn^;*l+z2Gg0Y1m#&4LFGEitu{vJN9^ardn-I?PW9XdnQ z`j(S=^ZPPxJz-$WHQXOZB-CZkk)unkpS?yWrG4e#5@JlJ-dK)ztrbX^%rC6yI(et1 z_}0HEkzk+7=LsKyon#@}|2t?#5^F*?TrU5=pk@sJKWg?rqh`Sr(l4D0?usS&rR|&{ z20WY>2RNF>u*rzLXJt48hns7^7zk^W!&Y-XyNb`0n;-g@|4lNzIC!5Cr0Fo;_mWF* 
zEG*o+Mu~p%MVtpx`$roLu)r#{T`?KNx~ekxHp*ppVlsJEUOpsLuX@Tws1o zy+M_gI91^};di`!6nl-)0kqEQ8?H`);4*_r3n3qG16936>$Hi-kNzQBp43$& z;bI-xfxk$0LzSdd4#!JPP3}{FItiZ&ZJ@*CjL)%6Rrj*I!WMt(%GayUOLb?MvN_q} z``rGyHpBDaid&D?&4aa{bM1w-jJLC#Y0o3DqJs$2U~u$ce$M*Y>En`*h*l(@>_f9ME#I#%(d5hHl|Q zKitcT4i?q#F0Ornw3ynkHYcm*cBJojHX)=?*0Y0GCSH2EVOy|35H9USRt&U ztG(Wn?n`O5Tb`#t2@*;7;dH{{_}!~*bn}?wXtn9rTFR0cfw(XDfRI!CFiq(8!dnUO zwrL6=jbg2tgi8%dI<|MC5!o!c0` zL?d6?hL31@hVY%j@$M}l`y&RqoY#R(jgKCQL?{%*kI%IBon{lNK)AUOt3(|&Q2mkM zCX(Igant}{w>12RY}`?!DTXKnrMB7}i_sWYUpB;if&8gV>hN!D@u6@qFug6J^UoAq zFGtm-8|&S*5avVphUJdpKFEhVAr$IIhkeKO6cSC^fP;Jop7_+Q<(%#RiJyK8&%?X1 zyJB#6`vyY5x^VmDAF-@8quRmZ*3<#Gok&W2VT>x8JcqmP;G297- z8#(C?(|T*3vnCxQk~X$6^fiwthB0!w1=B~8`xPaQnCi)JSV%=c)KhXs*EF*T4k&RZ zsQY@W13o;q_aHOR0TsLeUt-D09WvTJr(&Q637=i{^LzPEe ziH*lSA7>pk7tOwehHYK5AEvb0q5&;kxMt@Mu3`zF!Kp&m3-nY56U`zsGdr9M1TVUc zI5A(#n)aQak{&8EU7X>B;E4WEGU}^>!1Ke!uUWb~+eWNT9^?A@eTLia?kQH&1DxhE zb`7Ie0ajDg07~x6taQKRHp!sXFwq4LD~zXIBKeqg~@{*L|K*D>pycD)MrSIMLI#yeHP ze0??S3|yYYgj%R`f0`crL1uc`cV7K9Eqbh@39mCHv<=p`Qo^ZI(zp)YzioE9#{7pC z?tMn;SJrtMr`B53tvzq(v+A$AR>v@(6&b3oh8@1yi_6TIsfHhY(pR=wam>7Rq8JJs zVUKNOd0%#PG|;;`pgz;cvnHS>l;&)-l2|&#vdWd?R1}m1u{q+u;8;DV?H%!elI4c~ zmj$@nl<*?^4wTWM`(GKkgN*-|kP&$qmTXf`m)jHC<<&>%%YY$VSoLJS8^w61{uzw8z7F%SK(2w|bU8UgOR zHps6eW=p&aQ&r&$ZDN;?D=9A8`5Wf4rl< zYsrIM=8ZI!ugGys(CvdiHRDQgHMCOmWE@p7X~y0>BH1>E&>Wq}eg`mZ?TIhe7%4v| zR5x{3kI=saJ$T(BT#`!ha1_jpMiQNJ{~l-YCrv~(=mtz?+uF<{d#ta`%bIpT>&ze?+y%Vftp>DqVEZv!!XSCB0Et@M1aOw)t$N#ZnfklAq#3 zc_FpZr>}%}5EnN}>XR(923@=bgR#XUV0^KSKWn{sP+yp;tt%T%TOa#7cM$*i(a+s(zi_W{#bi% zH(Sr`*qG2!zAJM=`ltD#TpNw!HY!hOMXX8do#In8Is8Iy*ZMk9&abQ***|YW7lehY z=@U_jv^sSk254JzEB)Vqs&yDeLj1hGySl=Qwony3=9nYOEm=s;L-6Ry$C`Kv9a``h zc@)n-ZQ1Q?aaXOhxt2n0%Ct;)59p0Y?WS7r+X_E4=_XrentAUu5Wo2I)T|$c=SFv- z7~*YqJmqzQ*S_dIE#RgCItYF{GeDuy@v43rz~FrGJJyd!6}ha6P5*$Ag6v{;Z+Ial z`vG0k$NuF{bf1S2|Us$rx80ZXs4%JF~L*P z>5uiqeQw#6(Z6=BOcORwgDT*VgA>L|h`Ar33nBO&&lpMQ~%^_ZM;a-@u-x0(iRPRDH5i 
zh`8_tD!k+S_}&s`8ffF~JK=xj*$GB}D%IA1I@v~FJo;dVduuh_NFi>mp9Y_;{52z?$a#wToR|OM1D(Je{}0DhJfrUat*{f5jw7cgvB;|u8L4R1 zJHU(Yni4`P zxtmKiHZcZ3$J;OJF+9A{&ssM%&}M{zP0EvHk z>23{a$~fNKj@&{IM-QFzCt6=!Z+l#S?XG5DK*JVV9B5%8tYbEM*G5a#jAl=o-P8c` z>hvOaF^wimA{{N>F5nG2g}@b{J{&%$NP$`T5vekEv9ZS@qcKnjS@vt*7{{*7!bk@; z#MYxOk$qm91|2&<=oiU1(exdlGk!&pIA!KYx2QQDcN2cYBGnh;no0{&i?x3BxT~hv zW;(-PxV4^#JTg3&xoxCH?0N(ynOTU<(=sXi@df?_qgP*r+k_^nVb^UoPSN3v!#5>$m*hJgNa1XBVY@v#ZPg`3z3n z>a(YkHq<6ZR>#FtjtIiS5WX5HG1lBxe&Mf(C~BV1*XLrx2er_`1&1<@h$m*lf&%%c zV#TN$16*#*gUInoGM43%*NzsUS9e|%9?AAJWMv0V`9UW9p^J%<^Bu@4aa+YxA}@ik z>o-nCpY%WK*dwpM`;Kcq%Gq!f_M+5^kgDpGZF+9+>PW4C`T#6jcBADXE&QMOuQwHY zvONSz{Epl-{G@q8TfLJkZ(!CR^Fhh}GmP}P`jWb(ztlAZzfR8LPS_Lu%(M^$T}aJZ zV_E8wYOz?zxOvO8YU1)&J=289d zg%$>(QVCFAEz9jBLg@;;zYq5kqy0aCRSp;0ee=SVzrQ+{!DA5N)Zaio<}x3ZNluk8 z_pLhuD_AwBN{n2b7qG$S8!dFAshpV>>)}TAZY~UtWzjua^~^;?Jf>6ca0gtnJnIGI zH?QGdE5*~{?g8(_co=kc51p^h&d&bGLqF9T6c%wXT`eU*o z>F1kNFH9LeQA;>!>C1M$>x!Ql<-!S`4_r+WY&aE9zQHpcpwKq#PW@3S>+hx7uRIQy zxJ$C4fI;6>-!S_>UJ|-wkZ%~dW~~FZZ`)o0rCc;!JEit>Ro?(KP(^PDgj&3Uk`Piq zyIzw28aVD7Rc}pkgH0&&m{tvS&}iUny=Qb{1&?WwT5wUT$^Bi|mq@yAI&{2sOdhW% zqfMS{Cu@EMv!5+0dk)a?5DQWM6oCu9t+`5%3}Y1eB+*R$c`xknNSH(sX12`#f-rz} zy4ATIsS0&Te7>NVqPP0$B_9YA4#q1= zX?DmLM-RDJk(erv-S|><-eZUQIX?cklH>`RLP%jPNb2nsg69*ox3F7=#LGkJnbDos zv)rTh^mgE76mKUieR+Am`=M}x@inKQp=rlgnaFxP(HH3^6ZJDB9KRH()$?9aw2S7g zMiN;H5y?#qkEe)Kvg%Qa9>9=2#;-S#K-f@c>i3POfD)}<`@trTX4mIhqwtI2ppxEm zx=R2#G%8o^=tm;1b04z~!6kjpd)yD(N9iY|zKL&{l?Cj`L8nZ&s&XEyzvubccis;a z;2P9Vr8EndfLj;{(fF%U<>$kuYfKzuRs{$9y0sLioi`jBpY)lBEw!l@|*zf_W*)peKitE8;+ zfR^--js^yXbBDi;KZtuiRy5(^RA~1nX0Z{%KYz*#fk=CbXp;XB7?Vlt;;jExuP&(e zXLphHL|A!4288?!sb&M1My zF5FGL)!Cjq_Ek3wIqn~Dvg;v!nvp2;zQ{aeZU#IX?-(=hw@jSUz@h<4lM^gGKp9}d zG)(gS{UbwS+1kM3#s`AQ*vm(J>DkMnrxq0M0HHn5U;>VqNzC3k@*!DTsNWm@;2_IG z&HmIz;M@Qch>J&CVMU();wcz^CfQbMWAvD5-Fs@S1}I*h~pa zhZt0{Bk~#UsTmKQaIIkK3Gx%|=~2SmF0^iBb3ID#pBxF<)8?v3pCLLox@@cu_PX=; zNQ$a5w!^&_c}kVYV2PHc?mBl`w;!KMI>{)xIJ%W|_Gxw6Sw3Nj)k0ujjN4QUll0ctLLxxmBIxnY4IUWeR$w 
zRFm;x2R{ENhI>f1cI#ljL+ZS&bD#s5G&-TL~K%x4Vh$cVOMsothf$x zh@~yQDq?0%)@?78_K0I>447o*V9h7v&3!ECp}>9)?uSO&4~?f~W2EHwh3`)?=6jVC zd~|9VU6rh5ky@zs#s)8!7mn}I%UP!5{$C(8j?otgZ9KOmo0Cr!0q9T*IMj}7M5Ct& zL&*Q2)l8O{GnU*0!4kHZV>H6h@^X%K5~Q)C+=v1*5W)fp*sP2p389|c4d$9UW`MyA zL1JV~(iL8nxGb8v-8R?jgA|jL>LOn~KOw9ohtOp6st0kUR6zMp(oXWE*WBCH4Ta)E z+izJT^M{q$_6g)W7y2i%nx}sMG|+gfGWL7yeXG8F;xz0V&E5V;hg|#q*i3VV?f4IS z&F0zwX@}2dRh~?%! zc#JojjGKr+d2h1D)4W8DRO*M`s5fT=a+0g@$ zEU(k;d(^^9qgy2#5%SJA)Hf?65a%=Bb;m>2E<ezf)e?$)T+5b9YpGl)cJ;76V z!fov-{adC~4M21?+bLVw9Vxv%TSeY>w-9pZS1ZhJPpQ(#aC-ukMsN^%3CF5yX~{KK zI557_=8`t|OP>j{jI6KVQ9Ra+yK~BSMR6L!f#zECA)Od8Usw*AdLP!L+OY9LXSo`h z?%N02B%fUID8o3TfY+Tq3aEa9zSfdh^AxrmyR<%r_l`6Vh;IgAKhti20M3Q&u0B@W z4TXISo}wQZc`wSy6h3!4E1jeR!G04Q-k*HT`CqYf)0*1pwfJx+nU6etWj!`fOzeTB z^+vaHSkucgP}$r}eYfo#$RX0J*|Y_f%*#?gzQO;e#B_FEuDeKxZ|_&z6Rv8RDF=`NjcXA`YSitxKE<_sxgVE*4jQD@P7s-P3Xuqo21aEDl@Vh9PVZ^!&qn) zoPM3x9~ncIuKY8-GIu*dG<4^}>4(1#?-D>O_?75pMTqEJ3JAkAfQbM_jb(>)p$x}L zUId5Nj6R);A@=!ke&(c{E%*59SAf==!1JDt^$(l#$rq$!$7DxUwSKp^P4Uv({w)5C zl~%XOlmR6uQ(nEE#_fq&!YrNi-ACfZToNH7E-4G2izmWlMNqMLE8-n_?BJsT@pL>R zNNjqMGL()(v7~YQ$8*LsysXAa${dvBJBluBQ`O?t4(AM1x6X7;O|wG{OtSg*=oC-; zjqx6+sg(}r_K0kKwZoRb1{NIyyl+#TEI=0yi~5Lhh}!%HoGF}X5a}9t9eXQXw>J0pqJN9XIgQKsLr5xo6~t8Hz;7bfsI=|!^yzG^pN3(ygR zi$2F}rhep#C8wCD=Wsi7mO8J~f@4UJRBjR8qrD*>KF50aws9)+KgkeXFBFnPgxq4a z#;uz)P}#ZkKWxa>3V?2nA!NLsgz`PRPSQ!!@$%0%K@0H(o}|D+#ms5}XFGpNQSAI- z4I?$C0TJs9y{XsW5%lI>KWQ8^enjIC!OezdG|Hsij)2z|1niO@f_c- z3EBKxK)K@DA0#l2Vkx7V+xB$&=*hR!k(v0A)Z|!*))%(>Ly9$7KFVcybhef_m|Z!8 zdFbt&@3(r3Z@2euY?_oE;oIAf#{Gu81`NKpw~qA0|8Cqq!IlNc;^NzioGth>YiU7; z10059?CG6EL$ffc#d>?%&b3ay5)Q zMroe!{7T&T*rB`9P3Iet$B4g}eCoYm4fr0h!kuS;#VcVRDU(viWN!pc~I?*v0c-HkLF%1^Dc(tST|1bW3ml&_?Z zJ${@zwo6;Vea(_}ek}=NR;z+GT50i7TfCYUj%3&S6S@5*BxTSqZQ)*-1iz8`Y-3j= zGCi_8Cm3@aMXRLVd;*w6%xSr092uy`$h8FsddVhtSnSG?*w5aCp{5#CwXs2;n@ht- zMbYBKVM)zsZ$mgiS;W z)?cxxyJ9trI$1ZPIGXMhelYe_i$UhckqM;3Gb55B?RRdaIyO3nv^wH{M4+Rdh 
zJ9k5-4EtGy2K%&>w1we3gaxMzinY$%UlQ7rLym@xT86JD+|Tzb6ZYYfUY6_`+r1%# z>3nvv4l&h#XuVTbBL7&!_>2(KE_jMAY>6?6pT&Eh__lJ{VUSqa^K4xiLYQN6JcZj}Xs>JWCK$3FpSjZOBZ zq!41NLh)(+%ptZ9=C=JT3t@Y+)9SK>siH}r1{u=er?{LgQ+2h})fS9g;0X=umB{VO z@qM|aTeS6Sd4i;(v3TsAwAUNXeDVCP6G~(2pz{u^0*>8wvMsH`?Cp#C=Mh0nBMthql^~FzY)gjThb)l`k;pK&g7B?ZmLUGXB5%7W zblHFkV4al-EZCImO zwDU5BL$2y6Uf@K^3xalhdsM+1P|Ces&vZJ~&pMLg{paIm;3pwLBNEgV53X!EhGp_} zvy~g#NN?T@(68N?(yyGu?)6ZW&S|Eb`&OS=p6LP(nk_1HoB$@h%C}kAmx}3UPBsGz zc*)m&+9-}>eBBEaxO330OekJALVRl3 zrL&FU2dg?Qe2xCofy8&Y|6>W1@>6Ur*0pmj>jYF_JAWXYPUg(;!Cd6DPFRvoqG;0b z17y7<#+Y<4oPQR(z^==R!ir2T0ZS@=@zTd9*cY+GFNNp?w2Nu-Qe;Hc3+I#RLHFxz_e3F;7jxEh|&ul=TE zEw$s6+h_&{fpF8Jp92<)+apQ8YErjL-uB$ha!K8ccDh)p%*lus~V_+U1Isa=p^)n=+Ug}!~id5>mSJAmn_jNN8z?^9_> zR8oP{Z^@{kaPCNx{*7_D=F^{PQY<3m%K;d?KZjb@$Oez!jN}Alah+<@;mTcaJCuJp zP6Vec9cxe^1O+0c`~~`(y3e6Jmbvj$uZx;9B*E`FcaDEI5xFB6(>uJEe2JuvEA)X@K)Invmo z!lQHuiW@1cnS9ervfgX-b*EGMzgG~GuKZ3(TpZ3oaBH>8a;fW<);&=ViHmxQ=cJ=i zlvsgR*WK$2Hq90;pJlrK@QwrB%}>`$ivO!@OYd0#TcmRTb0PnF`CZ^acR&^L@8Rt~ z$JkfM1QP!~<=;m-%Ky)O0DlEgI^2h>`m*=PnPtUNgwb0bGdkj2^w)kuumPZnNx`T` zJ$>_qHP6Od&_>O&9tgLbal1gSSfYJ}UXtS5r?ke;A2b`&M%jIOIu2}vjXASS)>zgq z)k*JyDk*&qFJ5Z+rH+x`e0hU^C$_clAT4%q>u7R*#~p`;E|omP{Bsi(-jYHI3#i$j>gyrk~Rgq`UDM>WjpVt-S6^|H}fnXC!;@`t7GAdtu?0`AI!B zs=EpML3ePgY(2#EgoK68zC5Tchjj@;VjDg1h=#Y}8D?t>aV<{mrh-==(2LhtkP@|y zsOEmPWcn>!pPl;|TR(Cn;p&=6idCZA)}M6kMHuv|m+9RuK%Pqd)};hP6dEH^&Lk=~ zHCk~#<>AIIm=i3kS>fUe37IPwRxn~CX~GV&i#;9K+!!;HcXo*iJ)O z2|WyV8ix~dFz8|!4+tQn)Eu|;x#i~KEPHLFmLH5V(q6d_O}VZui>j^v{ZzX8vvJ;- z?;&aJ1z8n+?TeL#nx2ZQC|vtuqF2+dkwsXcAm7vz5S0*(HJL;f=+0 z#fP6(uYq@QUN`SC(u;|pj6#79Ui*#B zjbj#3>YW-hSn#fb4c~IrKD45}R8z+ku(I43a(Wb|^nKufi7UGDT%SlSs!O0BEYO<% ziI(j`F25IuN8AwXa_=c0Gwwwqnk_(|ZYBMPGtKuS;--a&(b64alctqvM^$*2-~8s| zfz&9B-S6&bUXE%q3cr~MSRRVk~S$wJExK|tt^G~o<) z^uQ@0(IxE99EH?nJ)va^zptXGWA=tgMz#oyItS6loRhArdY<~T4&cwc<5W5 zG1Y;|(W}3Tq;;JFDwrf3dg`e#`(@SK!^@qz@J0caM31AiOSaM#^9}RYvNQ75%d^l` 
z{?o4$@6E`?s6qt_+UR)5wPm&;>U+*KnoiwgC6pSqn=QUIU~Bb>pTI)lA{mvvp)R%iM6+bVb>+F;>A@QZ&)%`xAbta7|2EQ$ zmFKs9K%V8BT;nMpNY9PK#;+8vV%~d95{l~9mRNLIzhsyu^3jyYGqus+j?DROhClp~ zw_eCWLl_pUC+32uR+p@@0bhssXjw;yaa~fA1}!v!Hpk|tMee0~9bkj=fR(r~oc1)4 zKKCDkwnAq%;O1w_+CrVJ#R{vU!~0u9wtt4ui_?pl4u+TBf3xtl!%0m@!AE+qA#ES% zUzS`!tv{DN{u*@ITWW`?`{arrlpCx2&n(3@Mq2MiU5n~WSM|xO?QwbPCJbq1@Ic;S z&=Z2Xi*q1uE5`1LDERj|?6fJ7qB;aJIuG7k-jP7`6gt1Kkgt?(ZAy)o#B20Gdr_(} zIbAYvG&_*@QuNV1xZz*d8DXCIlL>tyxApP(AXN_J1~e{Ey1$U^ZA|G02jI=MNh8mT zB~0!M^PfIxx^fHlnY}-%*ErW}Ao7NY6=opu>0bJ)K4u%T{Nwi{A#Y zd6K6w`qGc_DS|;sXG0dOrbqkIJsS#bH#;5i_GuHqN(;<>O1^=)$x8eqh&;5oc5nI& zx&O(seLCva?mLHw7s#GPg+cY3@z>=EegmfuTRs#LUm=PM-LVv`v zaJU_Ke(mrILIFJ&!{Q9h7+dM{DZkvNgJR11DAL9TM$+ACO_1yvMa>yJZ$OW_ zIlxn=>)+`XcFe9W(71WoS|76yW^u|?z(mt8v^RR_t8TO^S|OqxDK=EMTY9u!`&bId zOuGl6rCMgpZm*|dJMIgT+S~RFv+OWuV+KzMo;=CKwwKNFGf9y?5FQEZ#?DciLtZE6S=$wUBrQ>?yC1EQ z7>a{`r5l0nOO3(O7TwqGk;eReYEvAj;4^kL?j#6Ek#}KAE3*j%CvS`P=OZdQ0-7B5 z?NJtU^gEhD(P87t;0?KgYYTZl6XtInhS&!@UIFunGK?tV0Nh)z9#i+a>w%H!pr6iO z$&X_JzjNIxg(Qwy~}nt?W0a74$${63$^GeJ(lcg%w(yq6961xE&G01?WY6w(by;m%GpNRio8x6G@+CJYht> zc-CLf%4^|Iw%UA1o-3utZr9bHX?y?r)@u;wFoQS4P*RY+g{>)hr=BviZDm(Wlq~~SotwLZzsK?Jsq!B3qwdG!Q)s0iucnAkYvswF+~*$|GSdQ%);U5! 
zTD;xUweMf*9c-m8W8Si}rj+P;iP0sJ>5AGhnteCms-JZKIn9Z3%w5jd79f@IIVk1P zT+2ONYa}MkM~9*6pXqS$ITZ25xb#U5XlfH_n{E~8^oH|4oEUR061n`+IQ9Nxyi63X z{vs0x6Db(#Nw}nLO0JD!WCApzWeC0GZxk_JYqq5>6P`RUoVJNJ9i5^NhEPD*uB%-2;pO zMyre(4oQl&IqPew)ZlI5*>#e2@SNa<&9VaEb0NEw?RLBfxVY6f(JT}EHYT6efZaIF#}C|~k79N%IWy06U)eu&^2aW?L4??WR} zG?xm|jv&2SouF7*$T9)CXZ;{LInf#Ie~8#8`Thn%&FWI4j>D=qgdBxa#~eqZ%CXwf zmyxDLGR5_?KLW|^d=~fC{xl)}KIv6hEpe9}a zv8=JOIA?@%?Zr`UyPMIJ@|M$WUv(d;x5WX}80&itZY&j1xLbx-hSH{vN5K4tLGaWL z(Pi)1=%nrhPdA4d?K!K}LE=>m2mNmDju3ZG6nX#3^wC|!N`e}5Bo+qeUbj;dtaLYS z*u7a`sITvv7dy|xM%0@c)8*QF>w%z)+pbaMD^YF-rF17d?Kx>puzXI~U6c1~AZjGo z<>J0sap?wHW!+B+zTV(pG3N{3uVtJnS5fw)@YWx=nH*Y99zQX#5tn!*PZ))dOc5rK zhr3~XEwDBT9M>L8{9EZ-xWy!rRTU^}jC9xjaFJ>A+M(a(UAN`tT%&TbcNv2hD#}*u zW~5M`9U22$S@IwEpoVbGx=bFu4*9!v6RyWnh>vy1l;zTfD3}5fePEaK=?r+8RUi2B z1Bs!$Y1hILJm(f?#veQGM7YHxJ2K;5u2Ba|b7AC}XuxxUZ&#nKl*+b;DRR4oFkp}Rz;Y*_#+iq&#{n%a2FLyhh zaiXkRWw7%`;O(t%2w0WS8s)_@;NXbm*qDRx2cASk3W=h%LkP3L;YKNpdEi`?##Sh4U zK+EQoiod6nX?T~xbZzNCrWU%h)u%O-Wl{Y-*Q(PwLSgDoH-tD(2T}rO%+36=kfTqoXTTz(3ctlg9 zN`tX%Rhrt?}P~(r;DoJ*3wc^+R9iOUU+U}JViPk;e z`h?CLr3Y%@S3i@^l=Wey4L&8w_vHN~!gCm^xUm@7v#&*u2FIYD;KkKA_e)0R+aoEsl3TjRc)t z-T+&^(Pa5musm$YMm_L+C&wcU%c!fNF??s<6Yq;JF4zJ!|Z5M!RQuQri7YSwn@E)rH#F;ma-=AMwGRBhfY%e`>i0 zT~=mG2BZJmmyxsc8{pCu=30t`^Q`%syrq;O!}k`O7k%_fg%~?*vV-iBXCp&TJPmkK z6~GU4Cz~=!0YWG?mi;$Ein~+?2xHHgU3ty0 zJFZ#RZMBt;IoTeXy)!=#f-_zq4U~D3*(=WzbG_ESy)h*ef0MP#^M*k@$n!y-p_Ghp~kb!j(m2yG2+sMZz|GNfODj!zL*>_tCSG+64EBfO2&plY<|7APYxT$q{2 zh#8C;{a|sii;K-n0z!Vre^=>iE@9V^Y-#Y1@v9iWV}C9#M#+!O(mc7@#y$VMNZF2* zrC1>|JC+_U;QtMG?q1bJ0q~k_yFx5!bApi18?y#JATEAKrf9ihI5l!v;XMgXO4l1# zjC7<{fpZmoS~JdsuT6uq8&3;p0e}H#aHBU(6xqm4z8rO*FeQ@)-bpIVmY{-NM$a

wG`G&2MRAhSN{9BJuMA}I#0>9r}J56brD9{$DXVS50j>w zGSfx5GpPGVLy0{r9<-1bK-KcXNPGp!_}x|^mZF#Q<&(t;!O*yyAzJH7^e5tjw%HG- zuEq<#0>oo{vtRPU2(!5pO-Jo7#UHBYKR4T8(NqZeEZ!JPwBvQ^@=G3F&gB~`F-obIZl>+q7;g zhU(Y87Wuw#0i8+}17eX5tMBF4`|01=`xj|Vhj3dUB@utGbZ{HWW2+_2I~Yt)pin50 zB{whz>@~cbC(%@uYTV>}PLr(o^gWA}0>EDBX9WDsiTq>6rH(Gcd4o%JPj}Sny`^Hv z@13ugcuZ+OaRaJo3uJZ`{AdUyB?+BUIvIHdfdQgSl2YFrwBzTc!Y8T97V9WtxI*am zlbzOhgYqbD3WF0#@J#4h>TiCer{oT3RK=^+r=XtffPR?Gv~VyO!iatgy*g}&)+ij= zerOIVPO*a8SfYxnw*--#=6Agcs+A{xB**5P1fYcH1m6xT9Pg~-OS2lQF-LOZR90N0 zrb}Axva;6`TvQ;<>lO(l_1{t(do{Y@^*=1F5+|Yq3X=dh&C&GCZiT%`$2QMTy!!ZG zaJL%NmjFyoyN?tKA+803kNs$A;n6X9+S7u=STI?Yr(=#Q45OFCC+c_&H`6FK!Iv#d z3+~(jhG68Q73c7wu4pH}ipGp&g72~OW4T(gcQ*r6C)5FCCrn=u+f(D;wUKpjKfz5G zP5lh-^+q$5{vmsBsPs|sE1`1#+gItzJG4nYhA2F|JArpRn~WyQjE|B)GOhr!SpI@| zcoaB7XI{fYZ48Z)`~5uxMCd6vtxv*jj#9hEF&rrGu*%O+KHS)uk0n<)4rZrgU#~b0 z-5<06a9uNf#e$uam_FmRRBuXafaGQn(P`tAYU|CIb}__&cadBerB=DJw;i0v8}SA+ zm7=>z4$tSK=1L2TyVlSL?hE!GzC^y=tP6REgbQ^cwk`hss*90BeVJ@Fe19DoFC1D5 z-r?V+MsZiP*?%p)TTN|@>SVV1TAOtZvf;vI!6jB?#;vUL?Pgk7|3r4{lt^!R{dIR4 z%SjGUU8&i*x0*Jg!ml#xRX&m$`RIn7TOXZkKft&c1sFw$&E>%u{H%&~x_ zS*cl_ja@}YBBoSq8hjGfTQN*spZ<7PnoJSua9D=1R`Mw!Huc4AWEzJm%x6&62G7QJ zim}Fm!>k^fEF}E$5l_|+0OJi`5>6Gc?22#^I_5@5K1;#f($NwtsaSEI+SIcRq4Zz;LjZywhcz3ej(~8AaA@cm-#CFi4Y!%C&G320%Vcsq9da-Sya& z_M2W`4+vrTQ&#gd;x6Y0&gTcQC{ zZUBRAJSalon95vPo9k4svUzMde7Nm(1ZKKj%2<$Wn~zU7FnF>{{mS7WA)u;J14davmMx(sJpZKRe8@S?aD z-iWasz}pVg7K1;{+KzN{b*2{j;o4L$jrF5oU3iDBF14_lo}lk`Sk!vyjXvaDnU03) z`N?>g0ISxe^WrHB7|!4`s`(1Cbg8v>20iWE8^K6{y7tl}-LA*|HC7YR2{ zyd~*3Jo(|}^J>W59ZL3YgpHzxGlEC&Dnx1|`kXY;Nc(r7@Y8C*?Sk?%EH#6LjHfxl zra!(c=(bFyXGF{(mU4C$d#dbkS^8Cq%f!#C=SLpOa}VJ!wvVw|X0A_fHx{3DpOl03GoXu|WvByrjTIrGytk&Y*b=5h_LslfLX154Pi+nYj zjSXQ?LVNwU^Vify5(vaMdU#^;d$uJJUapYE9Baip z3WMS2*ZD3xH2tbmZhD7aK&mD*NwiA4c2DqINP;%T%eB=zF@kDdW(lO0CL{lA4shvi zvOM~Ru5>zns`~KfAT35*TCt}WMpPmMhlDXC9}vpaFDhny~;dzvt_+7 z0<1ek6nqdE@kKUs@L@M&O=n5?Y&853e z48|^7P*kSDocdz&!{#J$+Ayvj!oeDBgDs|(%d8Qn<)it1jxc??-^~A)SBPo%mscpw 
zh3R=g*^sf~Q1kp*1PoZp}uwC)7UIa=w*GomV>CW|>ZBXgCC!d(ghkpNR=; zAnCF>V<@{gy*>%SLOHRgr5$|x^w6X^LSk^JT1_}0-89k=b8g*;9L-M&hu+oQlg?fF zEEq=TgKBK1M`3tHx!A{OHoaLnd@2iw0Fv!Lmh_Y1$qMfdH$skEP$prm(XiIdzJ zE_27&oifmLXI!4k*AK^Kbexe=rQ-T0PvLad<~&W*Cq5j@^9Hf}x3mmIY(T^acOFUY`rIp=E+ zbt_j?G0KMBE@9MIaII;LAT4^==7}yC00jL|CCkOoT_Nx3`$sn%_PJ_3nGPMgvUfC# z7;TqhSe*A#js!eI1_nUEL@1_x<;ba3T?LxxjUE{Zx5I|bBNJ3L2yGc=p+Kj2HL?=O z^#!AyK2PV7bV;K+Rce5fmV7V_F5Ll%+z2~zYI_TAm%K{ycPL0%r+)0YdqEy5zdSr~ zb|@y1u`^y@U?Z4R(}z4&lXDT7dDd8)>WGZybgqu25$it^yx%f7RZ4`aY-(6wZovLS zA_iWo$xzt+;7&z2LHbg6hhF6Z=+_RRL&C%+7C$l@{hc?6X|x-;R&}yu!0Ud@#5ysD z$nTbAVU82QgAZM5>ze$jK!t}?qUw+}3VY|RN0|1swxmNXg)Bh$qZ#57!?O#5R1KEC z9W!4&(?Av-g_bLA#sIyf7WY@;Fd0sYU)89o-Kn=$q_#9qVx?V(Pzgk*QLa0btpjru z?MbU%$Y2LK{wBE`(>mDpaN9^+p8_F4F_{Q5yYeZPyMaXxOXIk9H|f86t9xw~qoHnz zjdtop!4^L-P!gQI+`s7<$+Ni}5IUBhj_BugJnR&Enp_T+X!`?x)^lj3ED=^EyQkH! zC)nE;$z=%r8w;|I0P(Tj>!WP_3(8eQ*T1*`y@SZ;y2!%7dgf4;OAochy3(}II?{ir z?((uvy~A4lzgCxpXD&R8wV=F5vrWhUVm$^m+v-3}S1kc4zIk33U8N(B4v!q>-F{ms zZRrd5hm6A#8fk7f6mG|2W=FiB=%u)fRo+Z#0+NlTcEnkF!e@W>W~`n*r}$I1EQ*6s z#*<#{YD?>SRWzjB-YRi9ZRf<2FJE-|Y9scodrH|ttIZi^Grp}{);LPg#tqUuk6nxU zD{wviuKNF>GS=$(_fofOp26bSt!@tGm_`|RWMIe#4gQ1_nEfY{k$MZMb5BCa+n%_b zq~|+lhr_crrN$VE>D*BCW17#)0L@j=LE(S%81+kMeT^4W@{@v`2rjV;eofgbki?R{ zL#|43JBMrr(aBz?b!)Nj%9eBZ#p(-Pd)O+;8ezwne9;^g3C9o1zdj`n(*#)!$8mZ` z%zDMSQ&*TA?~x+m)sU=h>k0@n1BQ|@HNu-+zxmb((Oi8dO5=LsQ~+pG||kXi^ABa&3N}FkC|8AF3a0VhE_QPecD^HtL0~1IpB@iioAR( ztr!h_#=qTDv1<=qp?ApHNg6hIW(=LcfBmcG`m5aMWPBe`F)^rO57PFNz4bKvE$la5 z{#uV(u9L^{qbQ%^W-^`dj=&O&-)4zwbx_olUlzv0?^ae*fg2Tb^otv#@_t-LCap~1 zN;XVXBNQGQ+oHgotVY}(QiOEsqwG0EQ>oj3RVn4o6i`6Fn04v4TiViYLCq67lGZ{vE}vU)MHLGQOqzbrH`d7P1iH1>pYh29XDLj-jZfQrblL7%1>>=|qRKOj zVmX$4t{EjN7C1JW#BPANcJUNuhHncs(Wy7VtVmqv*9(492fsz}Jh(gwb_cMy@!8kn zf73hHZoP9O(bmbS~x6kvyQHyiSWUl!0lUzleK zztv*bs!YXTDF7*ff+6@hByHWb6plPtw9;x7H*#4s-@r9UNYpiX+Ebro$`ShVJdYVh z!$S6mL@k?OS#YaH)B=>p?k_i{8_q{ky1*uLqfP@po|v+3{F5%#2KP&yraE@WW-70D 
zx4CS|4wk5Fyw*n~M^xB&G4TcuU1Hm6!dOqOu+SJh9le1mCo0~T%EeJjG9M+l(>Thy zN@YVVPE<&P!a@C;ygbE3T?w7g{gtePV8-YE{bZ5pg&>Ct>vq5MxJ^4Pf28MO;pOvV zxeOI961=O>u8F1g63!0;*Wi_IN)9&G>QfM>F+8`)u|k*|zi2T@F2S3bVN z*$M}8m*J*+_YjfQ-^s2zH5JcrTf*UAucG{F0X8`fb%m`nEtu#Dr-h!krMQl^*Ti}a z?q9nyF-*F3X0IVqp#L z=z>Q?THf>@33a#vzonGDeXHA9mz=;Cn`LN$czgx)pPi2e^lG^>@2`NDQ)~Z0eC+sd z;v+o#3tF`3vkZQifak;51`lOjDUgu&E2U!B>+qLhH=hLd(7>+NwWTDzo))$PBa|i( z$4}7Rt!1){aL>^q{ms}VkS6oa#>ih^(^9r8x4`4M&#o3pC;7m++Jr%5Tym?C)$KJ! z3j_F@3Y`G|O+nEqjg5D!eDdElp6-J+e0w*-X2wPH!|DUtXo`s2di@R0vxWlXZ+R|N`#pdmTZHwF)7eXxFu zPna>QpQmTHGrvD-{2?>03jd4D=zecw0wH-&oDt)A_Yd2$BRvXx1^OrCl(paK#Sews zKdWb0FvNnhM&Sk&8xEyNrcs^1Ot8B1H2HiakpCQYF0ZZrW&0%^NcK+~xC=f(*@y^+ z?R6n&OHmuVtHAn^8~0!^pWWgmWGLlB~Wz>g~ff z?ENQiF+S}i?w2=Z#6REo=PCvFhk*BQCC=sll^ZrwLZsy63;gaUx{G1|Vq0$8m)a~; zTasX)$Jx;Spe4V6Z)$2P@Rj=We@aOvgZ9$%{6AjxJWvXA(VyE+pJdbj^B40W{Z|+K zzkTCTw%1^Bw0HH`;@~hS*5A1~bz+F zLbu7oR)Y-l%*^KUApcy|_x}G3Z-u4o!n=cLLU=rK#|6`&UrttJLW&c7%E#n3Jw&uONaN7JlZmuy#JTbfTVT!AbfJ$JY?coqt&D`dNQlT5YS1UBVnz+rRU0QE<(3B2<-CJVd^3roc> zXS%V(S^0L8>3ADNce&t3nc>%)WJk#5 ziL{}9?y2ulB5Yg2`?>pz$l|S0pBg(Z3a+j5hVFdk?<_`-r|4u=$Qs)0-x;=z<|V^E z4<#p}U(S@F9J0o*a1Y}WUaGYIg1x2axpORmbv5v$I^Olq*umB)2O6>N_MNuj`7D!K zk`MTQs5`}_+unSx{FtczSM+;4u&+VkF_KP7fa`~_i!4c`Vdx=YM% z_y#1G?B|}g#Kkm?aL~2B8tup#TI1QvlAOu)4MMB~$7vMlt0$z%|K{?HD~cbXy{Ca~ zXbqjg{myklle+va4utr&c=bv?4LD|a73!far{Di`vC0PT>UH*UkXO(g>v_m)T!sd% zmy<~7)2pJ~>|87wlR}wP4$C(9L!q7Jh7*avj!Y(6FPo8a$x(EF_Tkc#@7-kEvtAwu9OiFD~e0F7Yb{Z z+Y|fV?iQsfpb?ormD3t^ExUQ3PKR|Ki#DM03W>(WZ#!MmT*DcPdx{;-bQkYpk0O;OE~&h(ZO3XLn71n!z7ZAX zV@4xcYmFZK@A^edi)YRSL+j}Kzvak7EF@o#p=!!YC0Li@at4ZjMNDNZ@mwseUROeTRVf*f{MWiz0aMg^{@*bEw7m6G=Z|_Yv4=>4&9PqFdflQoL2ca=sxiX z-%IrM;pEL@=q=f=X2ErdNSG(y6-(L@|ASsh{n>8?E7V$Gj&AqNt`Fv9M#eiUj~TH_BWrBc`6Qm!|h0mkjQ zSoB$Uuv~NT1)e+OaQ9&byX}2Jfm+W_a(@@+bNS@cs*;#kR(xvq-AyOaFwltOc!HA^ zx<4AEejH1Z)s30*i5~xY%3?JWt6Ps%WHpnU%5-GDU+EXMiWjnT?hVLW;PiXN{tiI< zrRq%+vF=8SS{`o4KAahdLas@6J#Fq4oTSW*^(b=JwL^mOxoB6N~7Rb 
z0SL@UH{7w`;q@&VRG=Sf25;V&YjXM1d;y#UggdvmJ8=;oS-b8YQ`=GQYU2+cJ`>JF z&?v+%?8a<#pA&2|X%KXDdSUrv0Z%+daohGoXP%H4m)H2e-R@?SDO$Q`Y?V7%>^Ov$ zwFcm^@bF;}xdrY8CKaEajyb0eh z&5>$0*wG|jd4h(a^EXH2!fc({pSICa29HYzecDrdofERkk3cJxQ2sL)0fCk_!YC(5 z69(opy0}v1PjXV_d<3Zq>40%r{NUP_ho`t#tT>cLFCN*>Jd)<-?&kJR@q?}W?7CH+ zn`QwUYEfrgww6oH)Ku2B1%PZ+b~9LOH)@z4t%vYjz|?sp&Rp>i@a8duWL&Jt6q2Kx z!%lioInxBGIRy_1dmIY#-Keul6&4b$jPCR)bbw=omy@d&(W7 zcx#|=dfJp!;D)OCGU)VSsyiBNys`3E@8^L<71v!@)1umd2aW7Mb1X>rS@k`1a|-jk;h7l3=7~@#?;Q!#A0DW<+b+UoI88&zc21U0w5*U z?bo1`3h;CMHQk;RrPT~!^09n~3?0)G&7~Yxe+8l0+-;5kryuJWxMg@0GRC?K;6~8p ziBDVgv|w?6=0GOtcaY0uSRWLRqWMHjJ}xJGnfua=ujYf=8ARUoJm2(5#wrc; zYLXFP&g=N`E$FOJWaeV~)qpFSnk?^~mCEeU!j^h5a%?S=q7bZugd@jCbdXcE_iT?# zT2gVYV2ms)b+NgE)9h=>a|f%p`7cOFNNr&v^@16Ls!wppprJb~Tc$i6JUJsbrrm4z zlDzJ` zZ`rqk>Iw>fz~vLhZssH2PO4)B)!s-gP+r%-O@ zqkD`5(}GiT-(v|bVeRNl*^zuzw-LV#z#O04t47DM0B$_BH3E6ud}c3=U#zX(x=*;P6)9=Wsm zWyLGyz}Erqhp%FLSUQ^r<}kCj+M+cERr1i7uS!4XARDlq^o{YlZe1Pb*v?Wv5_5F` zYs*+8GY`3|tm2}NaTu3B*8a3SBl44n&d=?aG|8&^D8O18dJ3kL+hte({E&5~03D%j zr)gpK#>TfWMx1s)5l%Chy1|_shgm0;Ci&vvu;21(mf#A>5ng+udO}Bl&qhEN)HVnk z!fr4TWAHmR;v}J|{I7xesA2Nd0`2~1%234*R_bS+4q4lvg={CDb8VEJ?TdYtUY;(Bd@S*! z^{7X{F^+ua_GV0WxJ#g5~%X5JAt@}i(* zWC(l1U{+$>RP(bDv|KMdA=0J6_Q-5L&a|qQXvBJ5AAG6Mw-upJBpqMs9AOR!gv%GV z*#Sp^1sm+q%yqf$nt-1N(`M)lYor!0V4S(KXc)J?Ct-+QEr58K#n6)sjd*@LtOX>i zmS{%k>Nf4mLH%UROK6m^)aLF-l#6fD{YjrQ$jJ{rCXb|etq|usCLslHyEr|`!Zkj? zD-XOfpEj${7`zjhG~#jOdoH}~K9NF`Qi+!yee{taKBy~E)Hj-M+=L(1$x^kOKzNB- z8!#LGJX^8YxiPQf_h%V7%~fq=P2rHB1*MX7n6zm{^L^0UMKePCDDHSGYV$(1;q}tn zOepW7>-UJX=qMrS&|QPIgKWS**V!V+Ipmo|x@j`|oE1%8cc^@{%AiB(5)-zg!xobT|2^Ii1)q(sK7Y&7QlEMqFcm@Rgg zTeh!%s-myi1}OIRy0m2>T=Gt&mmtP>u@kREHLB6SRlsYXwo*>eO`ml-$}8Gy>0oGL zCN??UAMOz$PW-6xx`E^dqt1jdk2$G-{uWl4+QfWsAdF9s?~75-10lqN@+q&o_{SP0 zwG~cFD~ZWOqW7RmGi!9DU-tvLFl&<{i8@?Qcu>F8nkjf!K*XEvR~MhYF6Q7Q`t4<# z!7ERjlqL@$xV48>C?@*(jmN^gyC;pT*(kUcOx%7afPZL)8y=Ld5BfzZ@L(;c! 
z;EMh3W7a-U;~eYsLJ=`N16r}z*O%t4 z3kkjVc(IpP+$ralyx(@lX5I9R{%Eu(A7^(WGh{D2K@l5!p|PI6yIc|1{t0|dD5L-) z_WO&Y6Eiu#7My}M>xm{`{VWe?ey00(`T9AUT&?`snc~{#4wn2kkvnvmS4_w_oaB|p z;ul(Ai0R;KKdF*|b>;FN*!mw2p1B}26o1$E6y#nL#df@}eOIKSBhBW*O-g+L);g3l zy4&3l6Bd>4{8a$)Q`=5#zTn^G1T23MlL7x=vp-v1h?DBx*fbz-Pq&2c(Q|J2`U%@%YtA^?;KTE7*dhAX|-n1U$xhl8it_4qQk$I=tjg5_4WmW^Ehjz`*cmtok#{(&RNVEPF_IO4+QRI*J zA4WcmefsU{omF-F0cb}53Di$g8C%`y(39`G5!Wl+ZK;p-piUz2sUo3c9?{vJu7g^g4w=)vNq+~iz(7IT>ckLHwS-9b%@4XL7 zK!T;d5UEiW@~=m`h2~|TK}5ylZ|A2vGNhkRHV3)`-FxV-ET>X|wh!!%)?~y!u2>@% ztND_~9x9&eD69Aj?{A4tFE_W^_Gjy8&|*`Ey}Wsj50)+9^Q&#W+!q_{$J&TU)^n;z zoC~`tEP-I|WU^Vv3gO>VzR=}L@%5X4z z==nmy=2iLw@srH5S3~y;8@my)uXhJ1PJjr|Zo;GXGE8NB8sKUQ&kiRr+TVXU={F=9 z^(%|Q`U6zMhX~^k5k0LPDMDx5qog?W95M`y_&QCoZEie?-LC1D1Zw{8-Qe=g4jC--!Kp9S*b+qVy6OtybX?Jr2$i>lDA_q5>8eFY0k zmGNZo-#Y(w*%bNwA3^2c8pN~a-1%?74bEb%3f`3>9}qXQd%k|qsN6z)YTbQ@#Lyz^bP2XZS(wmf z6yW;TV@rPJWpp{mues2PwG2rErawLNr^LP3$CSQ>MzVrs1Y1I4S2Oz~P`$%r8qVl& z<@PH>m)F3b58$4qg+t3cW`})36zlc6$_(*Df|k$d)l&1-aGKIAz&<|s*oG#8r^2MR z50rc;kKd=LaYIoX2%G4zoX}ADgnI*$sm||O1bou{S`W`H z47yDrqD9Qcpe@C!YEFyAIyAK44|m3M&nlXfG?YIQT|$C;Yopi_kabed7B)z=h2O`# z2(L-~X*=`oTdpzBB+G0Y>W>paokO-B+xwE1SbNEJ)cH@{i7~KuXNicR-$vqsbl(OR zD;2?_p&or|(tUC1m;OgASU=~Z+d$28yvu2gsQs7MC1U{GEKh{9OVW*_&s@GcpGQ{P zYScTNXW!;3Qhi!J&*HlP zv+>sW5Gwk|OF+qd(9G#M_vH`||tuCv%B6Hc%~Qd}-O+qbcd0XmL`6L_Dq~qO_`#VzUnA)9*Kexeq#j4wmo$ z;8OeVHX*DG0^4ucvzWtXsTq7&vh`X!Cv(aymYUz@b;x(%t1~Qtg~Xu=MG)-gv%V#0 zRBtA6saC9Jq;MC#M)cw3$f&GzAbt^mL+)(uo$Q|kh5BZu7R!%#I6g^pwezONpjuBs zMxJ=2x2{691g2VuqOIm~6{ccP2s?t?&eiXqz9kyddmiVwaUfKP)|69ddGz-36G;Eu zn2Ru9(;4VDU;@{vzpVtM%-8xc?_X}(Ij_|o%h{-8rlE}*^|-5eFbBJZG%{?x(J_@qdl|!{?4Yi{ye& zC!V3)k3aCNlEv!AFMG?3Yw?Pnx=2HACWPwkU7iacJ8gO00lO-T= zz#br!E1bE+m19@tfM0tu_}!h`X%yg?l;^ysUxSaDtu5U=LE?EJkTv#V^LOO_XW{@r=WT^R z*Kl^`o1}N-{Ye(}ejG6xDcL=>@s7JF$sjsE$2 z{Q5b#V?6^gpUCj!mm|Z1UmE3D;9U!e*~XEY?xP@TrCi47)e5Aw)!FcshQ0e5$|$U} z=Rru4?l4YZhdH^qoA2n|s-EY;7OhVNHjw5T1HL1Dls=Mz6^7o9s6yzT!+DN)*J_nb zm3DJ|dNriUa`fCYS7Z8L;h7N7;?Upsar>wcy#eb 
zPsX!gteV5=y%K~tP;jb*w-qyd*_rqh11e(KG6M30O0zg4Qj)VlwFWtTJ9QNo+ZjFE z4A@cjLv;0wUNNkZli7o3SCr44`WP3<1#UHFsv`tFSrvusMnj9xgSsv$L0O)ODLD4z z61Ed52WJ-kQ*6{4XOH^`gX(9VL~+!To>z#e3!Hnf6ruLKXVPs9zy)Qz2Z|_ zolyyx0P~M&<9x$i`#oXd(oq}Z8|;5}ZZSKmV_NuFFO_nUxI_O|0$zHhVokz_o6JF= zUmtZi4R_@Mm~v>YSSG7J%gyn zQuvW<^jNq5`HlT=7ymSKtHy7x;RD0mHJb}}*Rj2+Zbv$b8IK;T4I5=z? z!%PXX;AM;JBLQMd<+RsX(xt?0Q3I;*A0A?iq)HxtsN>N)QG)#L(@2=>a-R0>A-fhx z9wDqI&vL}HC|@q&$8C2{mO=rufC`VQHT6b^3}Y|`nZ3N1+wbo|pi;fZrQ72~{w@4M z=_-ADHvdnE%Cpfss~ZMRbX3Bb<~{@CN(3#1f}dVOXubd1L`qaI+oCwxk>r7_3UL z)Z`+ONoiwMN=+&=^e*&qWac7BHADIm@-THA{dBb8`J?vT+lm+EhKw$k^mC!0p?Th? zL1UM!%8;kEa_Zlb!Xigx0z41aUQ0Hbh0;UJ?Jk~oq6){W;`udXJF!gW=gA%n6$dE3 z{?wVUHb2~wO4Q%kJbLX0l^F`cT`lw+ki2u`)!-!d*`@o<^GdFDF>!s%J+|`s;g~W+ zV<%;XpwVIx(QY+D<|J!Av~6U&8GKfPUQYGL`)NnTo7ISAAfxL|hT55&-r7XAYlK40 zeaM|(dA>6{?TzYC7;m#v=oFjcQ6PIJfAEjK`pET1QT6`-8battiPDK$%oiixNX0zr z>;4rl+@}2e@oVrrcQwrg3wp5)S3`X6hrGU5os<$9|#j=Wl!k5@-kCx}6PxCzr!6B$*V zUvjzXV5j`wekjL}$hPG!Vpr?Hqu+Ur?mb0dby+;}%QImgcl{N(cxg2yiWFT3Mx+G0 z4`x0vkj^9scMK^w>GlmnU?8l610g+Q5=Vv@Q8f7o8v;t13_5%On%eJ_uJakMQ4%4M zD7F3jGCwMQ+adcgRSP4)tByK@eCgpBG{8Nu$Y$O@{tgQh^qNIVenhi@BZ$#l>*EJ^ z*q`fyNr=_98+*kT3jAbu%W%7LzF{A9VjOOqkj#MGKNvYfI7oLiDJQ7sBWHew8UVS= zDJ1W2W$PCZELXC;cjb-x@6{d&!GCmAR$l;np^h!m(FYv zw;)l=IKmjYJkz&xBu&YtO)uVryX3w}Q3xd=M~UsN-c}-srua^~Geq+NYeG;AhJX9zHijp!|Wz_I2fl z?v5X*qk7aAdw9cV-R10X7<25t+bHc3*M*a?#-5GRESA4q}CuS$Z^_R|1GzPINZ2 zW}}*o_+>7W;-4&r4V!=zfh@;fAi$xl-vo$(3Eu>IXuiRD#WE1b9q_Zei6wx%i;6C; zK5W)l*tDlpyY^~O+u*NWygh68>>dU+WWRt^-Q(DVn4wUYigEB$uAb@=EuI&+#zvXf-buyF>^7 z8|>tFCiBhi3>G|!7yvT4-Mosx0e$;Q<#=KxWW{0eDF_bLV*DqQ8hsM^02J*M{viUF zPqflq4@Em$%(;u~JGvD4bo|nUXJbF|#?K3aIk6=RO%L1MfeI6oeU9u5qepjuEz5wU z&YLoLsUC^vO|os)&aa9j)9eZHb_|aY?QIU(CRiPnJY`FtCZ{R9V)ad8ztwD%)C`CxKaE`F+c)T)bTJb8ebqaKez8Iv zs3lV884Bh8)~`=sKHuR%;otCV1#_R3DJI89)kpc$kd-*0Qb4?z@@_?5To~Lf7bQuG z<_iRWR4Yr>awXUr?>C-vfa}`xK+gP;A2gg&>_j96BazuN$=6#!cMWZ#xjI%4eKnDR zDD_x)kFhA+vM}ClOP=DNtZPXKDvLV9eTYkl+K4FveYeA5dj^ZH&RJ$S}pILA0^ 
z{AMxR;aJ4B1_hxCZ7iszr_R^#8^HHe2}(8^Z5Mo*Yp#r)%0C0NAuhWA zK`Sf*B5y?Q|J8g{hygGW*J_$JSvxN|EZ7`3=XJAQlI!5(k7a4%f0JG&C7E?}{?^y* z*l;#jv6#OG8iksEO}=21_i2i_R_}~G`@}mP2CcM6EQb9^&;f?t`~&fxDk40WPf*~1 z=@PIYdTQ@w+^JQ-n$5`Sbg5xq1I?z$B&BQlVZ=d5@P^U&bgei2^IL}wuWvQroWbp+ z*7w?}pO^`}neENbYj1gyjJZK6S)I)b@L++_Fh!TIl8$H!r;((1%py8mWb+%+ai83y zRH{n-UtD)jq)kCZ^x=ap=-vSATDnVC`{O0xcGhi8wU}Kwp>SZ$$P$BJsaRope$59Y z^6{Q^hvB)_=~K$jH?6fE5Gp>n`Lo}vS-am1&bq>RqB5WsQ{^XMfH}rtAH|4Q9;lgxG*gUjRE`#iP(!RIjv);m?P^`Zkg(d%?Yk77O$N3R9tKqR>xj#Tw*|)1 z{Dwc;m5ZX+ney-!>M`DCekpx1qbQ2J+MemRPM{#zQpiezySP78oD^aZKIiW+?$fYd z_@MKh&lQxPeX1VgR}qsVS`f&Cq2Rz!MG0FNo!fPaKkZcH5=*nMIS)+AUNh^iwpfO$ z$NVOD(?;_In0Vv0467+OHT2|wKsA0Aqg~2oZ29c)y)Svpimt!KKHqwf-x5)+vg7DW za0x{+psYD$dAU=I^3Re|HY7y$$2w9^B&?^66n~kA;XOPb_!Xl%l-<3$b9v_xhK44G zFOO+37rF3Shi(1Y5QALNVS)WSzM#QoACq|H0@CFDU`{PVQu=&ZHp@#*4;CIr2-%E) zeZ~>3c!e}3-h8RR+cNf}ryWupXZ6A_NrBv`-l%KTC|=c6rWcEi9v&q932lQrabKp| z1W*s}zd}P*GS#O+&c{f0ZxTO>6IZk*(h>b^Nc*Fv43aA5p|{SmZy?}@yfKa*Mv%eh}3S~jd$!l%`%+^EAhSdUn zvF$>rz(+tt9Mpb1HP_nr-qCY*#bkE(3M{~pJsa&BcZ+8e%kkDW3p*Uv0?o&9o6xAK3guOKR#tRT zx=0#219;4+>2G=s^(!necstOpyL;FV%pOao;)?kvUp3-G7%R|D#UBcxIz&F_?KN>X z$axMk)9ZlhT+N9H-v+W5k>lqw?_CZrIHaW;d|*+$gj9Na zdVDpxcTUAwxL>6uwpmHNkJg@W9XKo%bUn67xD=KvG-$!DwRmBf;?{3o9jo8;Gt^D# zzuWeDP@WeqLCeh2>kE;4GWk1YnmGTOFT4azuR%)XE?+L3LRYMXwr3>o&zFZ(=HLS+ z@fvsNWSTB^=nUEgc0eiQq(v~{>7KrOJVJ{e!92H*SQjDlc{-4rB23L21HJWZQ(Rmv z(4P_6R{Yv$zcrH5-jBDJR6Le6RIa7S?rZ-AZd9{7V&~GM-4Y38klLvM_UEUQU3?MR z!tPs7x!vt~-N5gsA|0m1;l*$miDtgJ&f;XwF}>^0T;cObCo2T`-ZUlQ>>np3q#Y)G>dxrxy8twRc`oO>WV?52CV_;#LqunhMg37z9F#O7Ci< zhAJg=1f)btKm@jc^j;E?5;_9Xq)R6Rr1u`c&;o>B!U^tk9?m#-+}C>_&U{YB$TwHk z{O0_x-0l1m%-6IN-94~PW1 zB6xz1`baE#3Hosv(BoEkV$I`sfX&gvz8Uiu*WQJ>B}(Ww6i~IODcywUT`t1F=M{C- zI{e5}fEcq&3g^-*?(h0mU1Cobc0L^X;%gzznNXOC(5K$X4g0raQYxwQ@9*TA!en3O zI3_lSk{ zb*kE=s~Md-i56&c1u*x`Jj9Gct3_!;XW!tj6U9s$q~dcE`gZh_B&H>D_Z^j9H5M z^Wv-i6@F3srb7RE7&bnya2KY9Y-^QR^jAEc;G>bkeWtj>KFloU2*?Ol&QaH*R2_K0a7%SoJ$?zk_uISu9pNP{_6AjqY7bVZwZhj|rWpTaFevw1f!aoSQj0 
zk6$#iEl?w2b1c$_o0GmWM#ucKg>qrG82J|1%u1W}k;=GitX!l|XKDD_WT?k-8$Jl6 z!u*qZt^VRrA$qp&f%yrv?$G@uGHpBs;C=lB;XW0NIC0Ua+Y!{>TW_sB(zr#*J{O-S z%jY>5@#ZlVQ@BM&rpRDzLhJMqks#woulwL+8H}xSF8<|XC&BLpqup$hsYzr#~JfdhnWWZReua$0Ic@CdghF4R2U<8f@jZ3iWVi6)sm;9PvAhjH<6~glFX8}9? zKkp=5m5rqy`2Z1SZe6G1bSD`>1I=nzD0L>7>~BtEW+?kxQ~l@Vf47i@xrxQ^C2^@3 z??%hzUo$dt&NmR2Fs8_SM_b4c{TfcqWOY2CSRPGuZ`A?BWJb9Dyi7b*YbW2nL~Eu0 z1`=U0LSfZO`U7vYTvkJ`MYs;-#ou6=Gsxk;T?Yh5@=#>Z+TK|`FIdXD_*q=uJ;Yg{ z%YxtC2xbk<#AJ5>qaK}4+leVM``rq~?&?s~9KCuH2*FtlS%Y`7C3Tdw*>a>rM_;$Tig(TTAmo z_c)Mic`x%JbaxA;T1u*@w0H4Y8Umq)s;Wj; zzD<@=C>7huh}z2yCkk0Mcg%iUL$}rKU`!UQ8x^UVF0D5^-Vy&s2M137)cwLrH)4H? z3UE0~qODz9eSL9s?5eDxg*d+kW}U^Xk~EWii^j^nh5L!=55#Swru&dCL>2twMAd_w z0|977Vavx5Q_EL*H{cakNR6Y(+u}k7&Hb6SG`1DE_jy(ZTbA;=x`+?TY>nuIG<2=Aey*j zK%eR?lrpl_tb*t`PIwhj40CsQxf*(4H%c8nWsJLn z|F(pxw`PZpb&q72G>2CMA0~{6nX*OzPU{`kRU|t{*tW$=Qp3LIPOob}0%F}8ySp}0 zB`xDJbZr30(a?(=k&DHF!R|s-7D*YCUJel_%Y{dqf z`_I$2bN0Ez2y=SLMpoh?V)gypObPUZABKj$3(A^PN_cvzX#LoJo|Y+T6~-O|rpYAcj4dT27+u=kb0Qm_9qE8K9()}nteK%&(zhKQI zYcX{t??tv4$)t2WLZzkb=%tt>PsGB;Zxa}^$k`S>ru#O6MX% z(PP9F5aFAOx=cqvJmK4}{{0kLg9n%|AD{JpxYFf<7hDZ&qD}%XMcpm? 
zjOZ`mkO~g_+<;BKl>+BfiG>KvSi6@t)Is_w3(eFnqpVC7ZsjcAqoV&{%Mo?6z(f|4 z{Bi0sV>MnW<{~yci23wKZcbDEnas)RfxD+iy7k(5$ba9Mkm)B%aHyx)F!^zfjit5B zyic?@T{2>t$G_Wgq90+#6gus|_Y&%NHxl_Nvr|y+)S4xvPl=v@Ons1{X*&r6q!%o= z0$88LSM$u?@-ry^fYR{vp_H%joU_<1!Ch5)W+)>;-%)4a^W9E5Rrj_~(@Ow=!Bkb@ z+3U@g-A?r_S#0iIGs?Q${ofcGRiDrBwXc#M1Ca^ZT3KhXmZVaidgfqfQ8g+Y*Mx5H zo0>9W*2UB8)Q*Mr@3pO{q=`m~PNUsKyR?db-nQ92RJc04bty&^E0(5}NK1eIfbN5I zgn4HKNyBejp1*D;@YEmvo|#ihh?2&^X$B`^5>zRok>i}ZmU4T4A^V!@QQTRy)@pny z3AF0H)JGE+Tr{?vqAz|ZTK65M?Q=>LUz?(y>S+LvAgF_lUMW#s}e z9B3^Tc6INK)o0J-$*RMmMCE#mymjPwV8+eqBO6!g9oq}4srV)vo8|=;^oE?9Sl($j zp0zk7p5QM0abcc68CAlxo>Xt`t(?Zh?agt(jAwrC5Mpe{@@~Qj$M^4Flv&Y}7lrx1 zc7E$BJVoYt;}`cH6Av2LNDD&cdI(cvN8b|FK%?$4xkt@~?Mx0iE0JlVP6C+&CV7P-xA; zn081_U!vjo7j=PB#cDb^`xih#!Az(2##P7U@3OT11vdqf z#j{w7Rzv7bN|-ll!R^)KYm(7q<$`%&43y0_+>$?Nek?rDcl8lSypiciOa9CA8uu(q zsXv;RUm$Zw@fCSn=!r?r=8yi`?p_yx?MDLKG+!ZuKR%8+D@NWdAAi?-R3IghRcdcY zm4BYEtsmIy%r@7~OwJj7yM{7kZ3cPM?mn?|I}h*iZD8rih0Ex#jGh?_I8j4>Jjt1i z6KXYN6h(oxyxQyYbxNLX1Q-22{dH&NY@0!NTI0|99!-ME7|HHI^|`XI4C2FTJUyJB zlcG^|;;vWXGgOMj7POR0*G{gi8`g$u_SIt#bpysqMuBfJ(70C=Gj{-$ruhDs_XqWUJK#QRb?2&!B{;t(V9tT66AOl~oOLd4PLT1R zMcqp*Xd0{TW+|B`OU(JWhE{FDXV~teScw%G1?B@&J*wEGFc*dz#3hkgrnG)#vA2En z@9G8hz{Sq`Lrp2F-)FE34@4tgRURLcH$BzZ4usc~W@W#=Z&w8RxWYFB zszK%H>O?&0@mc|jv&(4bCvsS=&2%(%uzyiAVaalYE!0yUi4g$Y2S2(Os`S2{S>6JL z4QF(8qVXntqs|{~=r-}-SUyKrb4ig>-;uw9>#Jgi>3N9mdtKxP#YM;f=N5F***o{C z$LZ;nYdn={K+)?nn5>5ue;r(cor=f@j=ln z@<<2Ge^|H_eKEDb<#JbVF43GDiu@AjgQ#cUX}Tz$c9+I$u$^<85^6qrzjb7{Sq-jg z-W!h?$$nBdbPjKjNC9rvqw`k0*R(J0uj6KUj}EOpj`mkR_nXvt622FPAWpox=^6~u zl?D@0SvrcIUmhJq_=L)*cYO0A28I`{9jzVfUSbl-zWFz!TGs;H=p;dxrT`}LbM=T$ zWS=(%Z}r{bEpnlNDY(4T0s!m~%YLyem&M6*bVm8tTL8r~1|hE)9aHSm7&HBU;-{yX zL!3-Ej|xD~e-AY>AAN!iN;{X!&2$@5`e&KW%$4%Y;!i8;QpA1oxOD za@?P$Q_U>q!N4o8H)9z3sF-f}b%cwSk0(!8pw{7e1B7c*m&Zj#oR=p*i%!)kN{sE` zcuJ<5LcvYJLj4!AMROeA!lG1DoTFBRpbs)J(P1fEn^y=rl5Mo9zpspr8l=;mlH!&T z55B_E3(0lj*>rvtE&yc`_1IGI3$MDj0dQ0PY|*tp$OUI?ojrR*4J%oAlM_<9@Kw@h 
z^}3!G8#n)^hxP^OxZ5D2%k-hte5*Ri#tZ8(dAX(N(i?0dzd2pkjCy(DIhfdlr08 z)}OL6W~93hR&>@uI@|XfbtZuK3FCc4Il@U%Sfp0Ts;ioF=>xnpa`px4xSeevUne%k zl$f&n`!x!Udf(9X_#ibd5jMUPe7Z-4Ui#fa1?v>$flYZjVWt{8O6;v+%j?49Xl#jF zKc6ygE+7Q=yZ<~Mx>EFh#jEVpj{TT)lw9`ChTHd~b;xP>+EI>c%T7<@3Q4AXu!&}3 zPd1*P0^olT5TW3I0EHXp?{mZg!N*S`7AY9AQ5tcFK|0EsLfe8N_E@c!tDIUxB<_XUhR*M zFi&DVR8bS5xSIC5Z-?1&15SgVTuo{T18Li>?b({t!&JDbJc}~ts>R1nL(V(dEXzx1 zrtnVZW47dI7%D!N?suAJCuMz@tBGmpgKXFPLUweW|F~(Z&F%*knwn}6-|(pu>Oqa~ z^U@dtdpp}u(!4_D;_3KAnar6WDpet~Xz1Sm6$;k>P)~T<`!!UV# z)HqGVRPy1p-+R@ol~b-wTuaA2^_w4zybo4c_rRHDdYB?~Mm!^V*kVf#I>}YM5Q?&v zjp$@*IM1eWsNb_EuhoXS?(MlVU6aI$&*{M3sp#?g@nFFuxrc*Hj01)Ei42wmT=JDY zJ|@GQS(0zLe9ZbUi4(%`c>^Z!tU?I5BB{yxhHPJpki6@yo((qzaAR+~XRL3epx)v8YKKzCrTTVip%{3HE(V zPa;&rb+48CjOQ16o4c3@^2FCuS-lwT=8DJ9AW-in)_Zl+i_dQK&7XLv>I}93034Uw z*g^P$#qxvGPawZ7b7mP!uLjXt<*Vx>f<=bT6ZGChiL5gxDqPcz1Jx2_tz}7|cCVfY zGkFp{p_^bLnEl<(O854AtJXtNP<4(@O4aW)N;z*sIuoLqpTvP*AFn{r^a`Ib6+G$+Z z!Ovv6#Tq%ol>^nve%z^gJAn%HOD!nsx@g@ zN$FIPn8Qqa@!Ix%--&GfD!gsR;d0HJHt)#-D!|;Vkk$RQnhhXqwG zAt(Xo3_@kI%of0E-<^rT(Up;-&(6sF-zjnxLJ-~(t!YkTnYijmFQ&y!7j5JaJuzcU zf`iA_5^$anVT=fQZO1%bkQSRfXII{y4FcUpIm60+#BmMd8%)o4ZIE#+rhRJ#RT_77 zveYA;Y;}39#yR}rLrlgcW9xR0OI#l_l+9VBL#<{^!9{6kQ+ z=6%|*^R)3krdNZtdi>N&g<0FyE!F;mGac=cs_90(~`WW#;3|B zGK}q_PIcTb=MICNaW|&dTP}KD2W$nr)*$LS4*tlG(j%avsnj^I_c>-NudK2ljicIv zgz+<_Nuisy*`7q6$7`kW&KisYC5CZ|0#aFS9aRDcw;{Y-K_ir|n-a=stSmhcKa&k5 z^o_lS%d0JAWn^r#aZgeJ0F!~_FusxdZ80ib_H6%@BvV5S?)a{f_UJT8274T?Gp%Y7RH@<%6XSK_Qc|`-NX+;Xq{xqkh4m@xAkvD)1IjP zpgIWHGDb{pkLa^j03`0MdfeNoXii^cPr@I()s;k4$~M)B;ie5I81o63dhu)PZbpf-d0@%8_R3*2&(gPv zstFdDJ=PQafb*(IAEg`Pa`p;RQ!%x8+;hwYw3Y^eoG^kipBGSo&Q~#m`0llxFmO*_ zjA9@9tLEss@77Duh8wE)zOvlyr>I|=OJP}zwI*iVij~O6VEuETIJ><(8E3}f@oe}BK zN{ay=6wSB-001-TSRKt*!ilprx2epi5hs(gub|M{C3AAP=Uz|+j5SPo2OtzOUK}=! 
q_7`qnC4b|8?SDP+fAPRKArePE6ZS3tTP5Vh$5a)y6pEjlefSUE6hMCf literal 0 HcmV?d00001 diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc new file mode 100644 index 00000000..f4d7f758 --- /dev/null +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -0,0 +1,109 @@ += Security + +== Authentication +Currently the only authentication mechanism support is Kerberos, which is disabled by default. +For Kerberos to work a Kerberos server is needed, which the users needs to provide. +The xref:home:secret-operator:secretclass.adoc#backend-kerberoskeytab[secret-operator documentation] states which kind of Kerberos servers are supported and how they can be configured. +Our integration tests use a mit krb5 server which is installed on Kubernetes, but is not production ready, e.g. because no high availability is configured. + +=== 1. Prepare Kerberos server +To configure HDFS to use Kerberos you first need to collect information about your Kerberos server, e.g. hostname and port. +Additionally you need a service-user, which the secret-operator uses to create create principals for the HDFS services. + +=== 2. Create SecretClass +Afterwards you need to enter all the needed information into a SecretClass, as described in xref:home:secret-operator:secretclass.adoc#backend-kerberoskeytab[secret-operator documentation]. +The following guide assumes you have named your SecretClass `kerberos-hdfs`. + +=== 3. Configure HDFS to use SecretClass +The last step is to configure your HdfsCluster to use the newly created SecretClass. + +[source,yaml] +---- +spec: + clusterConfig: + kerberos: + tlsSecretClass: tls # Optional, defaults to "tls" + kerberosSecretClass: kerberos-hdfs # Put your SecretClass name in here +---- + +The `kerberosSecretClass` is used to give HDFS the possibility to request keytabs from the secret-operator. + +The `tlsSecretClass` is needed to request TLS certificates, used e.g. for the Web UIs. 
+ +As an alternative you can + +=== 4. Verify Kerberos is used +One option is to use `stackablectl services list --all-namespaces` to get the endpoint the HDFS namenodes are reachable. +Open the link (note that the namenode is now using https). +You should see a Web UI similar to the following: + +image:hdfs_webui_kerberos.png[] + +The important part is + +> Security is on. + +An alternative is to shell into the namenode and try to access the file system. +`kubectl exec -it hdfs-namenode-default-0 -c namenode -- bash -c 'bin/hdfs dfs -ls /'` + +You should get a `org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]`. + +=== 5. Access HDFS +In case you want to access your HDFS it is recommended to start up a client Pod that connects to HDFS over shelling into the namenode. +We have an https://github.com/stackabletech/hdfs-operator/blob/main/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2[integration test] for this exact purpose, where you can see how to connect and get a valid keytab. + +== Authorization +We currently don't support authorization yet. +In the future support will be added by writing an opa-authorizer to match our general xref:home:concepts:opa.adoc[] mechanisms. + +In the meantime a very basic level of authorization can be reached by using `configOverrides` to set the `hadoop.user.group.static.mapping.overrides` property. +In thew following example the `dr.who=;nn=;nm=;jn=;` part is needed for HDFS internal operations and the user `testuser` is granted admin permissions. 
+ +[source,yaml] +---- +spec: + nameNodes: + configOverrides: &configOverrides + core-site.xml: + hadoop.user.group.static.mapping.overrides: "dr.who=;nn=;nm=;jn=;testuser=supergroup;" + dataNodes: + configOverrides: *configOverrides + journalNodes: + configOverrides: *configOverrides +---- + +== Wire encryption +IMPORTANT: Wire encryption can only be enabled in combination with Kerberos + +The following modes are supported: + +[cols="1,4"] +|=== +|Wire encryption mode|Description + +|Authentication +|Establishes mutual authentication between the client and the server. + Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. + +|Integrity +|In addition to Authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. +Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. + +|Privacy +|In addition to the features offered by Authentication and Integrity, it also fully encrypts the messages exchanged between the client and the server. +Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. +|=== + +In case kerberos is enabled, the default value is `Privacy` for best security. +The security comes with a cost of a potentially degraded performance, thus wire encryption can be disabled but is recommend to be enabled for security reasons. 
+ +You can specify the wire encryption mode to use as follows: + +[source,yaml] +---- +spec: + clusterConfig: + kerberos: + # kerberosSecretClass: kerberos + wireEncryption: Privacy +---- diff --git a/docs/modules/hdfs/partials/nav.adoc b/docs/modules/hdfs/partials/nav.adoc index 266f4f44..4db08468 100644 --- a/docs/modules/hdfs/partials/nav.adoc +++ b/docs/modules/hdfs/partials/nav.adoc @@ -7,6 +7,7 @@ * xref:hdfs:usage-guide/index.adoc[] ** xref:hdfs:usage-guide/resources.adoc[] ** xref:hdfs:usage-guide/logging-log-aggregation.adoc[] +** xref:hdfs:usage-guide/security.adoc[] ** xref:hdfs:usage-guide/monitoring.adoc[] ** xref:hdfs:usage-guide/scaling.adoc[] ** xref:hdfs:usage-guide/configuration-environment-overrides.adoc[] From 3cd03722ef009d2f2c4ad3a6d35899fa65357741 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 13:45:34 +0200 Subject: [PATCH 063/101] Update docs/modules/hdfs/pages/usage-guide/security.adoc Co-authored-by: Natalie --- docs/modules/hdfs/pages/usage-guide/security.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index f4d7f758..f9ea1621 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -1,7 +1,7 @@ = Security == Authentication -Currently the only authentication mechanism support is Kerberos, which is disabled by default. +Currently the only supported authentication mechanism is Kerberos, which is disabled by default. For Kerberos to work a Kerberos server is needed, which the users needs to provide. The xref:home:secret-operator:secretclass.adoc#backend-kerberoskeytab[secret-operator documentation] states which kind of Kerberos servers are supported and how they can be configured. Our integration tests use a mit krb5 server which is installed on Kubernetes, but is not production ready, e.g. 
because no high availability is configured. From 6eddc490c8a4ae1e67b05061f38eb39a6a9775b9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 13:46:22 +0200 Subject: [PATCH 064/101] Update docs/modules/hdfs/pages/usage-guide/security.adoc Co-authored-by: Natalie --- docs/modules/hdfs/pages/usage-guide/security.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index f9ea1621..44785836 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -2,7 +2,7 @@ == Authentication Currently the only supported authentication mechanism is Kerberos, which is disabled by default. -For Kerberos to work a Kerberos server is needed, which the users needs to provide. +For Kerberos to work a Kerberos KDC is needed, which the users needs to provide. The xref:home:secret-operator:secretclass.adoc#backend-kerberoskeytab[secret-operator documentation] states which kind of Kerberos servers are supported and how they can be configured. Our integration tests use a mit krb5 server which is installed on Kubernetes, but is not production ready, e.g. because no high availability is configured. 
From 58738de654f439b04dfbacb73804303bf1579c26 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 13:48:05 +0200 Subject: [PATCH 065/101] Update docs/modules/hdfs/pages/usage-guide/security.adoc Co-authored-by: Natalie --- docs/modules/hdfs/pages/usage-guide/security.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index 44785836..d3d4a989 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -10,7 +10,7 @@ Our integration tests use a mit krb5 server which is installed on Kubernetes, bu To configure HDFS to use Kerberos you first need to collect information about your Kerberos server, e.g. hostname and port. Additionally you need a service-user, which the secret-operator uses to create create principals for the HDFS services. -=== 2. Create SecretClass +=== 2. Create Kerberos SecretClass Afterwards you need to enter all the needed information into a SecretClass, as described in xref:home:secret-operator:secretclass.adoc#backend-kerberoskeytab[secret-operator documentation]. The following guide assumes you have named your SecretClass `kerberos-hdfs`. 
From dbb11bde20a194525c372fbff836c27d5e96d967 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 13:49:42 +0200 Subject: [PATCH 066/101] Apply suggestions from code review Co-authored-by: Natalie --- docs/modules/hdfs/pages/usage-guide/security.adoc | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index d3d4a989..524287ad 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -30,10 +30,9 @@ The `kerberosSecretClass` is used to give HDFS the possibility to request keytab The `tlsSecretClass` is needed to request TLS certificates, used e.g. for the Web UIs. -As an alternative you can -=== 4. Verify Kerberos is used -One option is to use `stackablectl services list --all-namespaces` to get the endpoint the HDFS namenodes are reachable. +=== 4. Verify that Kerberos is used +Use `stackablectl services list --all-namespaces` to get the endpoints where the HDFS namenodes are reachable. Open the link (note that the namenode is now using https). You should see a Web UI similar to the following: @@ -46,10 +45,10 @@ The important part is An alternative is to shell into the namenode and try to access the file system. `kubectl exec -it hdfs-namenode-default-0 -c namenode -- bash -c 'bin/hdfs dfs -ls /'` -You should get a `org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]`. +You should get the error message `org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]`. === 5. Access HDFS -In case you want to access your HDFS it is recommended to start up a client Pod that connects to HDFS over shelling into the namenode. +In case you want to access your HDFS it is recommended to start up a client Pod that connects to HDFS, rather than shelling into the namenode. 
We have an https://github.com/stackabletech/hdfs-operator/blob/main/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2[integration test] for this exact purpose, where you can see how to connect and get a valid keytab. == Authorization @@ -89,7 +88,7 @@ The following modes are supported: |In addition to Authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. -|Privacy +|Privacy (default) |In addition to the features offered by Authentication and Integrity, it also fully encrypts the messages exchanged between the client and the server. Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. |=== From 083d02d2ea33cf60e834c1fd7b531ed8cfd789d7 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 13:52:26 +0200 Subject: [PATCH 067/101] Apply review comment --- docs/modules/hdfs/pages/usage-guide/security.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index 524287ad..e630827c 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -42,7 +42,7 @@ The important part is > Security is on. -An alternative is to shell into the namenode and try to access the file system. +You can also shell into the namenode and try to access the file system: `kubectl exec -it hdfs-namenode-default-0 -c namenode -- bash -c 'bin/hdfs dfs -ls /'` You should get the error message `org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]`. 
From 61e08d0bdac4296318f875f016242f81e75104d5 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 13:53:10 +0200 Subject: [PATCH 068/101] Remove sentence --- docs/modules/hdfs/pages/usage-guide/security.adoc | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index e630827c..12474a2f 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -4,7 +4,6 @@ Currently the only supported authentication mechanism is Kerberos, which is disabled by default. For Kerberos to work a Kerberos KDC is needed, which the users needs to provide. The xref:home:secret-operator:secretclass.adoc#backend-kerberoskeytab[secret-operator documentation] states which kind of Kerberos servers are supported and how they can be configured. -Our integration tests use a mit krb5 server which is installed on Kubernetes, but is not production ready, e.g. because no high availability is configured. === 1. Prepare Kerberos server To configure HDFS to use Kerberos you first need to collect information about your Kerberos server, e.g. hostname and port. 
From 693ceb28bc69c67d46213606d463d52f3c8c3745 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 14:55:57 +0200 Subject: [PATCH 069/101] Re-enable tests --- tests/test-definition.yaml | 41 +++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 8300a182..46da5464 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -2,8 +2,7 @@ dimensions: - name: hadoop values: - # - 3.2.2-stackable0.0.0-dev - # - 3.3.3-stackable0.0.0-dev + - 3.2.2-stackable0.0.0-dev - 3.3.4-stackable0.0.0-dev - name: hadoop-latest values: @@ -43,13 +42,13 @@ dimensions: # - Integrity # - Authentication tests: - # - name: smoke - # dimensions: - # - hadoop - # - zookeeper - # - number-of-datanodes - # - datanode-pvcs - # - listener-class + - name: smoke + dimensions: + - hadoop + - zookeeper + - number-of-datanodes + - datanode-pvcs + - listener-class - name: kerberos dimensions: - hadoop @@ -58,15 +57,15 @@ tests: - kerberos-realm - kerberos-backend - wire-encryption - # - name: orphaned-resources - # dimensions: - # - hadoop-latest - # - zookeeper-latest - # - name: logging - # dimensions: - # - hadoop - # - zookeeper-latest - # - name: cluster-operation - # dimensions: - # - hadoop-latest - # - zookeeper-latest + - name: orphaned-resources + dimensions: + - hadoop-latest + - zookeeper-latest + - name: logging + dimensions: + - hadoop + - zookeeper-latest + - name: cluster-operation + dimensions: + - hadoop-latest + - zookeeper-latest From 4a2afb92659f448ee25e2290459a67e73afebe6c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 22 May 2023 15:25:31 +0200 Subject: [PATCH 070/101] Rework CRD accoring to review feedback --- deploy/helm/hdfs-operator/crds/crds.yaml | 40 +++++++++++-------- .../hdfs/pages/usage-guide/security.adoc | 10 +++-- rust/crd/src/lib.rs | 20 +++++++--- rust/crd/src/{kerberos.rs => security.rs} | 13 ++++-- 
rust/operator/src/discovery.rs | 4 +- rust/operator/src/hdfs_controller.rs | 2 +- rust/operator/src/kerberos.rs | 32 +++++++-------- 7 files changed, 72 insertions(+), 49 deletions(-) rename rust/crd/src/{kerberos.rs => security.rs} (93%) diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index 8cbd5489..e03cbc5b 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -34,13 +34,31 @@ spec: minimum: 0.0 nullable: true type: integer - kerberos: + listenerClass: + default: cluster-internal + description: |- + In the future this setting will control, which ListenerClass will be used to expose the service. Currently only a subset of the ListenerClasses are supported by choosing the type of the created Services by looking at the ListenerClass name specified, In a future release support for custom ListenerClasses will be introduced without a breaking change: + + * cluster-internal: Use a ClusterIP service + + * external-unstable: Use a NodePort service + enum: + - cluster-internal + - external-unstable + type: string + security: description: Configuration to set up a cluster secured using Kerberos. nullable: true properties: - kerberosSecretClass: - description: Name of the SecretClass providing the keytab for the HDFS services. - type: string + kerberos: + description: Kerberos configuration + properties: + kerberosSecretClass: + description: Name of the SecretClass providing the keytab for the HDFS services. + type: string + required: + - kerberosSecretClass + type: object tlsSecretClass: default: tls description: Name of the SecretClass providing the tls certificates for the WebUIs. @@ -63,20 +81,8 @@ spec: - Privacy type: string required: - - kerberosSecretClass + - kerberos type: object - listenerClass: - default: cluster-internal - description: |- - In the future this setting will control, which ListenerClass will be used to expose the service. 
Currently only a subset of the ListenerClasses are supported by choosing the type of the created Services by looking at the ListenerClass name specified, In a future release support for custom ListenerClasses will be introduced without a breaking change: - - * cluster-internal: Use a ClusterIP service - - * external-unstable: Use a NodePort service - enum: - - cluster-internal - - external-unstable - type: string vectorAggregatorConfigMapName: description: Name of the Vector aggregator discovery ConfigMap. It must contain the key `ADDRESS` with the address of the Vector aggregator. nullable: true diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index 12474a2f..ab7a5562 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -20,9 +20,10 @@ The last step is to configure your HdfsCluster to use the newly created SecretCl ---- spec: clusterConfig: - kerberos: + security: tlsSecretClass: tls # Optional, defaults to "tls" - kerberosSecretClass: kerberos-hdfs # Put your SecretClass name in here + kerberos: + kerberosSecretClass: kerberos-hdfs # Put your SecretClass name in here ---- The `kerberosSecretClass` is used to give HDFS the possibility to request keytabs from the secret-operator. 
@@ -101,7 +102,8 @@ You can specify the wire encryption mode to use as follows: ---- spec: clusterConfig: - kerberos: - # kerberosSecretClass: kerberos + security: + kerberos: + # kerberosSecretClass: kerberos wireEncryption: Privacy ---- diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index be6c455d..8ee4270f 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -1,11 +1,11 @@ pub mod affinity; pub mod constants; -pub mod kerberos; +pub mod security; pub mod storage; use affinity::get_affinity; use constants::*; -use kerberos::KerberosConfig; +use security::{KerberosConfig, SecurityConfig}; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ @@ -106,7 +106,7 @@ pub struct HdfsClusterConfig { #[serde(default)] pub listener_class: CurrentlySupportedListenerClasses, /// Configuration to set up a cluster secured using Kerberos. - pub kerberos: Option, + pub security: Option, } // TODO: Temporary solution until listener-operator is finished @@ -613,12 +613,20 @@ impl HdfsCluster { Ok(result) } + pub fn security_config(&self) -> Option<&SecurityConfig> { + self.spec.cluster_config.security.as_ref() + } + pub fn has_kerberos_enabled(&self) -> bool { - self.spec.cluster_config.kerberos.is_some() + self.spec.cluster_config.security.is_some() } pub fn kerberos_config(&self) -> Option<&KerberosConfig> { - self.spec.cluster_config.kerberos.as_ref() + self.spec + .cluster_config + .security + .as_ref() + .map(|s| &s.kerberos) } pub fn has_https_enabled(&self) -> bool { @@ -628,7 +636,7 @@ impl HdfsCluster { pub fn https_secret_class(&self) -> Option<&str> { self.spec .cluster_config - .kerberos + .security .as_ref() .map(|k| k.tls_secret_class.as_str()) } diff --git a/rust/crd/src/kerberos.rs b/rust/crd/src/security.rs similarity index 93% rename from rust/crd/src/kerberos.rs rename to rust/crd/src/security.rs index e049fd4e..1a66a274 100644 --- a/rust/crd/src/kerberos.rs +++ b/rust/crd/src/security.rs @@ 
-3,12 +3,12 @@ use stackable_operator::schemars::{self, JsonSchema}; #[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] -pub struct KerberosConfig { - /// Name of the SecretClass providing the keytab for the HDFS services. - pub kerberos_secret_class: String, +pub struct SecurityConfig { /// Name of the SecretClass providing the tls certificates for the WebUIs. #[serde(default = "default_kerberos_tls_secret_class")] pub tls_secret_class: String, + /// Kerberos configuration + pub kerberos: KerberosConfig, /// Configures how communication between hdfs nodes as well as between hdfs clients and cluster are secured. /// Possible values are: /// @@ -33,6 +33,13 @@ fn default_kerberos_tls_secret_class() -> String { "tls".to_string() } +#[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct KerberosConfig { + /// Name of the SecretClass providing the keytab for the HDFS services. 
+ pub kerberos_secret_class: String, +} + #[derive(Clone, Debug, Default, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "PascalCase")] pub enum WireEncryption { diff --git a/rust/operator/src/discovery.rs b/rust/operator/src/discovery.rs index 964e1b02..7f732a0f 100644 --- a/rust/operator/src/discovery.rs +++ b/rust/operator/src/discovery.rs @@ -58,13 +58,13 @@ fn build_discovery_hdfs_site_xml( .dfs_namenode_rpc_address_ha(namenode_podrefs) .dfs_namenode_http_address_ha(hdfs, namenode_podrefs) .dfs_client_failover_proxy_provider() - .kerberos_discovery_config(hdfs) + .security_discovery_config(hdfs) .build_as_xml() } fn build_discovery_core_site_xml(hdfs: &HdfsCluster, logical_name: String) -> String { CoreSiteConfigBuilder::new(logical_name) .fs_default_fs() - .kerberos_discovery_config(hdfs) + .security_discovery_config(hdfs) .build_as_xml() } diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index b0aee644..c0c61b31 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -448,7 +448,7 @@ fn rolegroup_config_map( .dfs_namenode_rpc_address_ha(namenode_podrefs) .dfs_namenode_http_address_ha(hdfs, namenode_podrefs) .dfs_client_failover_proxy_provider() - .kerberos_config(hdfs) + .security_config(hdfs) .add("dfs.ha.fencing.methods", "shell(/bin/true)") .add("dfs.ha.nn.not-become-active-in-safemode", "true") .add("dfs.ha.automatic-failover.enabled", "true") diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index f1c9a327..173361ff 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,36 +1,36 @@ use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, - kerberos::{KerberosConfig, WireEncryption}, + security::{SecurityConfig, WireEncryption}, HdfsCluster, HdfsRole, }; use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; impl 
HdfsSiteConfigBuilder { - pub fn kerberos_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if let Some(kerberos_config) = hdfs.kerberos_config() { + pub fn security_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if let Some(security_config) = hdfs.security_config() { self.add("dfs.block.access.token.enable", "true") .add("dfs.http.policy", "HTTPS_ONLY") .add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true") .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); - self.add_wire_encryption_settings(kerberos_config); + self.add_wire_encryption_settings(security_config); } self } - pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if let Some(kerberos_config) = hdfs.kerberos_config() { + pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if let Some(security_config) = hdfs.security_config() { // We want e.g. hbase to automatically renew the Kerberos tickets. // This shouldn't harm any other consumers. 
self.add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true"); - self.add_wire_encryption_settings(kerberos_config); + self.add_wire_encryption_settings(security_config); } self } - fn add_wire_encryption_settings(&mut self, kerberos_config: &KerberosConfig) -> &mut Self { - match kerberos_config.wire_encryption { + fn add_wire_encryption_settings(&mut self, security_config: &SecurityConfig) -> &mut Self { + match security_config.wire_encryption { WireEncryption::Authentication => { self.add("dfs.data.transfer.protection", "authentication"); self.add("dfs.encrypt.data.transfer", "false"); @@ -56,7 +56,7 @@ impl CoreSiteConfigBuilder { hdfs_name: &str, hdfs_namespace: &str, ) -> &mut Self { - if let Some(kerberos_config) = hdfs.kerberos_config() { + if let Some(security_config) = hdfs.security_config() { self .add("hadoop.security.authentication", "kerberos") .add("hadoop.security.authorization", "true") @@ -110,21 +110,21 @@ impl CoreSiteConfigBuilder { } } - self.add_wire_encryption_settings(kerberos_config); + self.add_wire_encryption_settings(security_config); } self } - pub fn kerberos_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if let Some(kerberos_config) = hdfs.kerberos_config() { + pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { + if let Some(security_config) = hdfs.security_config() { self.add(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - self.add_wire_encryption_settings(kerberos_config); + self.add_wire_encryption_settings(security_config); } self } - fn add_wire_encryption_settings(&mut self, kerberos_config: &KerberosConfig) -> &mut Self { - match kerberos_config.wire_encryption { + fn add_wire_encryption_settings(&mut self, security_config: &SecurityConfig) -> &mut Self { + match security_config.wire_encryption { WireEncryption::Authentication => { self.add("hadoop.rpc.protection", "authentication"); } From 5396e0a98e460b2fed69b5df56dc897365dec959 Mon Sep 17 00:00:00 2001 From: Sebastian 
Bernauer Date: Mon, 22 May 2023 15:29:38 +0200 Subject: [PATCH 071/101] Rename kerberos.kerberosSecretClass to kerberos.secretClass --- deploy/helm/hdfs-operator/crds/crds.yaml | 4 ++-- docs/modules/hdfs/pages/usage-guide/security.adoc | 6 +++--- rust/crd/src/security.rs | 2 +- rust/operator/src/container.rs | 4 +--- tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 | 2 +- 5 files changed, 8 insertions(+), 10 deletions(-) diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index e03cbc5b..ffc70eeb 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -53,11 +53,11 @@ spec: kerberos: description: Kerberos configuration properties: - kerberosSecretClass: + secretClass: description: Name of the SecretClass providing the keytab for the HDFS services. type: string required: - - kerberosSecretClass + - secretClass type: object tlsSecretClass: default: tls diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index ab7a5562..e9530516 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -23,10 +23,10 @@ spec: security: tlsSecretClass: tls # Optional, defaults to "tls" kerberos: - kerberosSecretClass: kerberos-hdfs # Put your SecretClass name in here + secretClass: kerberos-hdfs # Put your SecretClass name in here ---- -The `kerberosSecretClass` is used to give HDFS the possibility to request keytabs from the secret-operator. +The `kerberos.secretClass` is used to give HDFS the possibility to request keytabs from the secret-operator. The `tlsSecretClass` is needed to request TLS certificates, used e.g. for the Web UIs. 
@@ -104,6 +104,6 @@ spec: clusterConfig: security: kerberos: - # kerberosSecretClass: kerberos + # secretClass: kerberos wireEncryption: Privacy ---- diff --git a/rust/crd/src/security.rs b/rust/crd/src/security.rs index 1a66a274..1bcdb415 100644 --- a/rust/crd/src/security.rs +++ b/rust/crd/src/security.rs @@ -37,7 +37,7 @@ fn default_kerberos_tls_secret_class() -> String { #[serde(rename_all = "camelCase")] pub struct KerberosConfig { /// Name of the SecretClass providing the keytab for the HDFS services. - pub kerberos_secret_class: String, + pub secret_class: String, } #[derive(Clone, Debug, Default, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 77f7bc7d..9dea52d3 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -767,9 +767,7 @@ impl ContainerConfig { // However, as other containers need this volume as well, it will be also mounted in other containers. if let Some(kerberos_config) = hdfs.kerberos_config() { let mut kerberos_secret_operator_volume_builder = - SecretOperatorVolumeSourceBuilder::new( - &kerberos_config.kerberos_secret_class, - ); + SecretOperatorVolumeSourceBuilder::new(&kerberos_config.secret_class); kerberos_secret_operator_volume_builder .with_pod_scope() // FIXME We always add the node scope here, as some customers access their datanodes from outside of k8s diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index 85774f74..ba6e629f 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -19,7 +19,7 @@ commands: listenerClass: {{ test_scenario['values']['listener-class'] }} kerberos: tlsSecretClass: tls - kerberosSecretClass: kerberos-$NAMESPACE + secretClass: kerberos-$NAMESPACE wireEncryption: {{ test_scenario['values']['wire-encryption'] }} {% if lookup('env', 
'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery From 3a50250f243fca6b85730a564d1ca151b444aa7e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 24 May 2023 11:51:55 +0200 Subject: [PATCH 072/101] Adress Arch meeting feedback --- .../hdfs/pages/usage-guide/security.adoc | 39 ++------------- rust/crd/src/lib.rs | 20 +++++--- rust/crd/src/security.rs | 35 +------------ rust/operator/src/hdfs_controller.rs | 2 +- rust/operator/src/kerberos.rs | 49 ++----------------- .../kuttl/kerberos/11-install-hdfs.yaml.j2 | 6 +-- tests/test-definition.yaml | 6 --- 7 files changed, 26 insertions(+), 131 deletions(-) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index e9530516..d8f0844b 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -20,7 +20,7 @@ The last step is to configure your HdfsCluster to use the newly created SecretCl ---- spec: clusterConfig: - security: + authentication: tlsSecretClass: tls # Optional, defaults to "tls" kerberos: secretClass: kerberos-hdfs # Put your SecretClass name in here @@ -72,38 +72,5 @@ spec: ---- == Wire encryption -IMPORTANT: Wire encryption can only be enabled in combination with Kerberos - -The following modes are supported: - -[cols="1,4"] -|=== -|Wire encryption mode|Description - -|Authentication -|Establishes mutual authentication between the client and the server. - Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. - -|Integrity -|In addition to Authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. -Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. 
- -|Privacy (default) -|In addition to the features offered by Authentication and Integrity, it also fully encrypts the messages exchanged between the client and the server. -Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. -|=== - -In case kerberos is enabled, the default value is `Privacy` for best security. -The security comes with a cost of a potentially degraded performance, thus wire encryption can be disabled but is recommend to be enabled for security reasons. - -You can specify the wire encryption mode to use as follows: - -[source,yaml] ----- -spec: - clusterConfig: - security: - kerberos: - # secretClass: kerberos - wireEncryption: Privacy ----- +In case kerberos is enabled, `Privacy` mode is used for best security. +Wire encryption without kerberos as well as https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SecureMode.html#Data_confidentiality[other wire encryption modes] are *not* supported. diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 8ee4270f..ac842209 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -5,7 +5,7 @@ pub mod storage; use affinity::get_affinity; use constants::*; -use security::{KerberosConfig, SecurityConfig}; +use security::{AuthenticationConfig, KerberosConfig}; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ @@ -77,6 +77,7 @@ pub struct HdfsClusterSpec { pub data_nodes: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub journal_nodes: Option>, + // Cluster wide configuration pub cluster_config: HdfsClusterConfig, /// Cluster operations like pause reconciliation or cluster stop. #[serde(default)] @@ -106,7 +107,7 @@ pub struct HdfsClusterConfig { #[serde(default)] pub listener_class: CurrentlySupportedListenerClasses, /// Configuration to set up a cluster secured using Kerberos. 
- pub security: Option, + pub authentication: Option, } // TODO: Temporary solution until listener-operator is finished @@ -613,18 +614,23 @@ impl HdfsCluster { Ok(result) } - pub fn security_config(&self) -> Option<&SecurityConfig> { - self.spec.cluster_config.security.as_ref() + pub fn authentication_config(&self) -> Option<&AuthenticationConfig> { + self.spec.cluster_config.authentication.as_ref() } pub fn has_kerberos_enabled(&self) -> bool { - self.spec.cluster_config.security.is_some() + self.spec + .cluster_config + .authentication + .as_ref() + .map(|auth| &auth.kerberos) + .is_some() } pub fn kerberos_config(&self) -> Option<&KerberosConfig> { self.spec .cluster_config - .security + .authentication .as_ref() .map(|s| &s.kerberos) } @@ -636,7 +642,7 @@ impl HdfsCluster { pub fn https_secret_class(&self) -> Option<&str> { self.spec .cluster_config - .security + .authentication .as_ref() .map(|k| k.tls_secret_class.as_str()) } diff --git a/rust/crd/src/security.rs b/rust/crd/src/security.rs index 1bcdb415..fe869ff1 100644 --- a/rust/crd/src/security.rs +++ b/rust/crd/src/security.rs @@ -3,30 +3,12 @@ use stackable_operator::schemars::{self, JsonSchema}; #[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] -pub struct SecurityConfig { +pub struct AuthenticationConfig { /// Name of the SecretClass providing the tls certificates for the WebUIs. #[serde(default = "default_kerberos_tls_secret_class")] pub tls_secret_class: String, /// Kerberos configuration pub kerberos: KerberosConfig, - /// Configures how communication between hdfs nodes as well as between hdfs clients and cluster are secured. - /// Possible values are: - /// - /// Authentication: - /// Establishes mutual authentication between the client and the server. - /// Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. 
- /// - /// Integrity: - /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. - /// - /// Privacy: - /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. - /// - /// Defaults to privacy for best security - #[serde(default)] - pub wire_encryption: WireEncryption, } fn default_kerberos_tls_secret_class() -> String { @@ -39,18 +21,3 @@ pub struct KerberosConfig { /// Name of the SecretClass providing the keytab for the HDFS services. pub secret_class: String, } - -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "PascalCase")] -pub enum WireEncryption { - /// Establishes mutual authentication between the client and the server. - /// Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. - Authentication, - /// In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. - Integrity, - /// In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. - /// Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. 
- #[default] - Privacy, -} diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index c0c61b31..d110c931 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -461,7 +461,7 @@ fn rolegroup_config_map( core_site_xml = CoreSiteConfigBuilder::new(hdfs_name.to_string()) .fs_default_fs() .ha_zookeeper_quorum() - .kerberos_config(hdfs, role, hdfs_name, &hdfs_namespace) + .security_config(hdfs, role, hdfs_name, &hdfs_namespace) // the extend with config must come last in order to have overrides working!!! .extend(config) .build_as_xml(); diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index 173361ff..940decea 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,6 +1,5 @@ use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, - security::{SecurityConfig, WireEncryption}, HdfsCluster, HdfsRole, }; @@ -8,55 +7,35 @@ use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; impl HdfsSiteConfigBuilder { pub fn security_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if let Some(security_config) = hdfs.security_config() { + if hdfs.has_kerberos_enabled() { self.add("dfs.block.access.token.enable", "true") .add("dfs.http.policy", "HTTPS_ONLY") .add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true") .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); - self.add_wire_encryption_settings(security_config); } self } pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if let Some(security_config) = hdfs.security_config() { + if hdfs.has_kerberos_enabled() { // We want e.g. hbase to automatically renew the Kerberos tickets. // This shouldn't harm any other consumers. 
self.add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true"); - self.add_wire_encryption_settings(security_config); - } - self - } - - fn add_wire_encryption_settings(&mut self, security_config: &SecurityConfig) -> &mut Self { - match security_config.wire_encryption { - WireEncryption::Authentication => { - self.add("dfs.data.transfer.protection", "authentication"); - self.add("dfs.encrypt.data.transfer", "false"); - } - WireEncryption::Integrity => { - self.add("dfs.data.transfer.protection", "integrity"); - self.add("dfs.encrypt.data.transfer", "false"); - } - WireEncryption::Privacy => { - self.add("dfs.data.transfer.protection", "privacy"); - self.add("dfs.encrypt.data.transfer", "true"); - } } self } } impl CoreSiteConfigBuilder { - pub fn kerberos_config( + pub fn security_config( &mut self, hdfs: &HdfsCluster, role: &HdfsRole, hdfs_name: &str, hdfs_namespace: &str, ) -> &mut Self { - if let Some(security_config) = hdfs.security_config() { + if hdfs.has_kerberos_enabled() { self .add("hadoop.security.authentication", "kerberos") .add("hadoop.security.authorization", "true") @@ -109,31 +88,13 @@ impl CoreSiteConfigBuilder { ); } } - - self.add_wire_encryption_settings(security_config); } self } pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { - if let Some(security_config) = hdfs.security_config() { + if hdfs.has_kerberos_enabled() { self.add(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - self.add_wire_encryption_settings(security_config); - } - self - } - - fn add_wire_encryption_settings(&mut self, security_config: &SecurityConfig) -> &mut Self { - match security_config.wire_encryption { - WireEncryption::Authentication => { - self.add("hadoop.rpc.protection", "authentication"); - } - WireEncryption::Integrity => { - self.add("hadoop.rpc.protection", "integrity"); - } - WireEncryption::Privacy => { - self.add("hadoop.rpc.protection", "privacy"); - } } self } diff --git 
a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index ba6e629f..1a8d92f5 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -17,10 +17,10 @@ commands: zookeeperConfigMapName: hdfs-zk dfsReplication: 1 listenerClass: {{ test_scenario['values']['listener-class'] }} - kerberos: + authentication: tlsSecretClass: tls - secretClass: kerberos-$NAMESPACE - wireEncryption: {{ test_scenario['values']['wire-encryption'] }} + kerberos: + secretClass: kerberos-$NAMESPACE {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 46da5464..f990bb7c 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -36,11 +36,6 @@ dimensions: # Requires manual setup, see create-kerberos-secretclass.yaml # This will *not* respect the kerberos-realm test attribute, but instead use a hard-coded realm # - activeDirectory - - name: wire-encryption - values: - - Privacy - # - Integrity - # - Authentication tests: - name: smoke dimensions: @@ -56,7 +51,6 @@ tests: - listener-class - kerberos-realm - kerberos-backend - - wire-encryption - name: orphaned-resources dimensions: - hadoop-latest From cdefa5996edd8a4a8896a78eccfbaf429c450f77 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 24 May 2023 11:52:23 +0200 Subject: [PATCH 073/101] charts --- deploy/helm/hdfs-operator/crds/crds.yaml | 57 +++++++++--------------- 1 file changed, 20 insertions(+), 37 deletions(-) diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index ffc70eeb..ba38b861 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -26,6 +26,26 @@ spec: properties: clusterConfig: properties: + authentication: + description: Configuration to 
set up a cluster secured using Kerberos. + nullable: true + properties: + kerberos: + description: Kerberos configuration + properties: + secretClass: + description: Name of the SecretClass providing the keytab for the HDFS services. + type: string + required: + - secretClass + type: object + tlsSecretClass: + default: tls + description: Name of the SecretClass providing the tls certificates for the WebUIs. + type: string + required: + - kerberos + type: object autoFormatFs: nullable: true type: boolean @@ -46,43 +66,6 @@ spec: - cluster-internal - external-unstable type: string - security: - description: Configuration to set up a cluster secured using Kerberos. - nullable: true - properties: - kerberos: - description: Kerberos configuration - properties: - secretClass: - description: Name of the SecretClass providing the keytab for the HDFS services. - type: string - required: - - secretClass - type: object - tlsSecretClass: - default: tls - description: Name of the SecretClass providing the tls certificates for the WebUIs. - type: string - wireEncryption: - default: Privacy - description: |- - Configures how communication between hdfs nodes as well as between hdfs clients and cluster are secured. Possible values are: - - Authentication: Establishes mutual authentication between the client and the server. Sets `hadoop.rpc.protection` to `authentication`, `hadoop.data.transfer.protection` to `authentication` and `dfs.encrypt.data.transfer` to `false`. - - Integrity: In addition to authentication, it guarantees that a man-in-the-middle cannot tamper with messages exchanged between the client and the server. Sets `hadoop.rpc.protection` to `integrity`, `hadoop.data.transfer.protection` to `integrity` and `dfs.encrypt.data.transfer` to `false`. - - Privacy: In addition to the features offered by authentication and integrity, it also fully encrypts the messages exchanged between the client and the server. 
Sets `hadoop.rpc.protection` to `privacy`, `hadoop.data.transfer.protection` to `privacy` and `dfs.encrypt.data.transfer` to `true`. - - Defaults to privacy for best security - enum: - - Authentication - - Integrity - - Privacy - type: string - required: - - kerberos - type: object vectorAggregatorConfigMapName: description: Name of the Vector aggregator discovery ConfigMap. It must contain the key `ADDRESS` with the address of the Vector aggregator. nullable: true From 800656432708b30f484c548cd55b6185607cd90f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 24 May 2023 14:24:38 +0200 Subject: [PATCH 074/101] Re-enable all test cases --- tests/test-definition.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index f990bb7c..a2c1f100 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -24,11 +24,11 @@ dimensions: # Used for both, zookeeper and hdfs - name: listener-class values: - # - "cluster-internal" + - "cluster-internal" - "external-unstable" - name: kerberos-realm values: - # - "CLUSTER.LOCAL" + - "CLUSTER.LOCAL" - "PROD.MYCORP" - name: kerberos-backend values: From dd333a605bf85494c240c4f5a1d55da8e9bd8837 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 24 May 2023 14:58:57 +0200 Subject: [PATCH 075/101] Re-add wire encryption privacy settings --- rust/operator/src/kerberos.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index 940decea..31919696 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -13,6 +13,7 @@ impl HdfsSiteConfigBuilder { .add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true") .add("dfs.https.server.keystore.resource", SSL_SERVER_XML) .add("dfs.https.client.keystore.resource", SSL_CLIENT_XML); + self.add_wire_encryption_settings(); } self } @@ -22,9 +23,16 @@ impl HdfsSiteConfigBuilder { // We 
want e.g. hbase to automatically renew the Kerberos tickets. // This shouldn't harm any other consumers. self.add("hadoop.kerberos.keytab.login.autorenewal.enabled", "true"); + self.add_wire_encryption_settings(); } self } + + fn add_wire_encryption_settings(&mut self) -> &mut Self { + self.add("dfs.data.transfer.protection", "privacy"); + self.add("dfs.encrypt.data.transfer", "true"); + self + } } impl CoreSiteConfigBuilder { @@ -88,6 +96,8 @@ impl CoreSiteConfigBuilder { ); } } + + self.add_wire_encryption_settings(); } self } @@ -95,7 +105,13 @@ impl CoreSiteConfigBuilder { pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { if hdfs.has_kerberos_enabled() { self.add(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + self.add_wire_encryption_settings(); } self } + + fn add_wire_encryption_settings(&mut self) -> &mut Self { + self.add("hadoop.rpc.protection", "authentication"); + self + } } From 3bd31709e1f4d491ba84f61bb9c5604382e7160f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 26 May 2023 15:41:45 +0200 Subject: [PATCH 076/101] fix: Only add truststore settings when https is enabled --- rust/operator/src/hdfs_controller.rs | 60 +++++++++++++++------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index d110c931..db322ccc 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -468,40 +468,44 @@ fn rolegroup_config_map( } PropertyNameKind::File(file_name) if file_name == SSL_SERVER_XML => { let mut config_opts = BTreeMap::new(); - config_opts.extend([ - ( - "ssl.server.keystore.location".to_string(), - Some(format!("{KEYSTORE_DIR_NAME}/keystore.p12")), - ), - ( - "ssl.server.keystore.password".to_string(), - Some("changeit".to_string()), - ), - ( - "ssl.server.keystore.type".to_string(), - Some("pkcs12".to_string()), - ), - ]); + if hdfs.has_https_enabled() { + config_opts.extend([ + ( + 
"ssl.server.keystore.location".to_string(), + Some(format!("{KEYSTORE_DIR_NAME}/keystore.p12")), + ), + ( + "ssl.server.keystore.password".to_string(), + Some("changeit".to_string()), + ), + ( + "ssl.server.keystore.type".to_string(), + Some("pkcs12".to_string()), + ), + ]); + } config_opts.extend(config.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); ssl_server_xml = stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); } PropertyNameKind::File(file_name) if file_name == SSL_CLIENT_XML => { let mut config_opts = BTreeMap::new(); - config_opts.extend([ - ( - "ssl.client.truststore.location".to_string(), - Some(format!("{KEYSTORE_DIR_NAME}/truststore.p12")), - ), - ( - "ssl.client.truststore.password".to_string(), - Some("changeit".to_string()), - ), - ( - "ssl.client.truststore.type".to_string(), - Some("pkcs12".to_string()), - ), - ]); + if hdfs.has_https_enabled() { + config_opts.extend([ + ( + "ssl.client.truststore.location".to_string(), + Some(format!("{KEYSTORE_DIR_NAME}/truststore.p12")), + ), + ( + "ssl.client.truststore.password".to_string(), + Some("changeit".to_string()), + ), + ( + "ssl.client.truststore.type".to_string(), + Some("pkcs12".to_string()), + ), + ]); + } config_opts.extend(config.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); ssl_client_xml = stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); From 0842ec7d294ddca4520e116b459943e7c7fd062c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 26 May 2023 16:01:35 +0200 Subject: [PATCH 077/101] test: Switch image to nightly --- tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 index b79634fa..1a94ea8b 100644 --- a/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/20-access-hdfs.yaml.j2 @@ -14,7 +14,7 @@ 
commands: spec: containers: - name: access-hdfs - image: docker.stackable.tech/stackable/hadoop:3.3.4-stackable23.4 + image: docker.stackable.tech/stackable/hadoop:3.3.4-stackable0.0.0-dev env: - name: HADOOP_CONF_DIR value: /stackable/conf/hdfs From aab00c7e3679542ad598821b9851cf0cbc146440 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 30 May 2023 09:52:03 +0200 Subject: [PATCH 078/101] Also add ssl.server.truststore.location setting --- rust/operator/src/hdfs_controller.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index db322ccc..630ebb18 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -470,6 +470,10 @@ fn rolegroup_config_map( let mut config_opts = BTreeMap::new(); if hdfs.has_https_enabled() { config_opts.extend([ + ( + "ssl.server.truststore.location".to_string(), + Some(format!("{KEYSTORE_DIR_NAME}/truststore.p12")), + ), ( "ssl.server.keystore.location".to_string(), Some(format!("{KEYSTORE_DIR_NAME}/keystore.p12")), From 2765015d34e767a691de5439922add446dec8c2e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 13:13:13 +0200 Subject: [PATCH 079/101] Add comment --- .../hdfs/pages/usage-guide/security.adoc | 2 +- rust/crd/src/constants.rs | 1 + rust/crd/src/lib.rs | 1 + rust/operator/src/container.rs | 260 ++++++++---------- rust/operator/src/hdfs_controller.rs | 13 +- rust/operator/src/kerberos.rs | 88 +++--- .../kerberos/01-install-krb5-kdc.yaml.j2 | 8 +- .../21-unleash-the-chaosmonkey.yaml.j2 | 51 ++++ tests/templates/kuttl/kerberos/22-assert.yaml | 11 + .../kuttl/kerberos/22-check-file.yaml.j2 | 65 +++++ 10 files changed, 309 insertions(+), 191 deletions(-) create mode 100644 tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 create mode 100644 tests/templates/kuttl/kerberos/22-assert.yaml create mode 100644 tests/templates/kuttl/kerberos/22-check-file.yaml.j2 diff --git 
a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index d8f0844b..da13604f 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -43,7 +43,7 @@ The important part is > Security is on. You can also shell into the namenode and try to access the file system: -`kubectl exec -it hdfs-namenode-default-0 -c namenode -- bash -c 'bin/hdfs dfs -ls /'` +`kubectl exec -it hdfs-namenode-default-0 -c namenode -- bash -c 'kdestroy && bin/hdfs dfs -ls /'` You should get the error message `org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]`. diff --git a/rust/crd/src/constants.rs b/rust/crd/src/constants.rs index 1dbe84da..e252d8d2 100644 --- a/rust/crd/src/constants.rs +++ b/rust/crd/src/constants.rs @@ -12,6 +12,7 @@ pub const LABEL_STS_POD_NAME: &str = "statefulset.kubernetes.io/pod-name"; pub const HDFS_SITE_XML: &str = "hdfs-site.xml"; pub const CORE_SITE_XML: &str = "core-site.xml"; +pub const HADOOP_POLICY_XML: &str = "hadoop-policy.xml"; pub const SSL_SERVER_XML: &str = "ssl-server.xml"; pub const SSL_CLIENT_XML: &str = "ssl-client.xml"; pub const LOG4J_PROPERTIES: &str = "log4j.properties"; diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index ac842209..11327a7f 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -573,6 +573,7 @@ impl HdfsCluster { let pnk = vec![ PropertyNameKind::File(HDFS_SITE_XML.to_string()), PropertyNameKind::File(CORE_SITE_XML.to_string()), + PropertyNameKind::File(HADOOP_POLICY_XML.to_string()), PropertyNameKind::File(SSL_SERVER_XML.to_string()), PropertyNameKind::File(SSL_CLIENT_XML.to_string()), PropertyNameKind::Env, diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 9dea52d3..9da1f40b 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -161,16 +161,22 @@ impl ContainerConfig { ) -> Result<(), 
Error> { // HDFS main container let main_container_config = Self::from(role.clone()); - pb.add_volumes(main_container_config.volumes(hdfs, merged_config, object_name)); + pb.add_volumes(main_container_config.volumes(merged_config, object_name)); pb.add_container(main_container_config.main_container( hdfs, resolved_product_image, zk_config_map_name, env_overrides, merged_config, - object_name, )?); + // We need to share `/tmp` between all containers, e.g. for Kerberos ticket cache + pb.add_volume( + VolumeBuilder::new("tmp") + .with_empty_dir(Option::::None, None) + .build(), + ); + // Vector side container if merged_config.vector_logging_enabled() { pb.add_container(product_logging::framework::vector_container( @@ -181,14 +187,16 @@ impl ContainerConfig { )); } - if let Some(https_secret_class) = hdfs.https_secret_class() { + if let Some(authentication_config) = hdfs.authentication_config() { pb.add_volume( VolumeBuilder::new("tls") .ephemeral( - SecretOperatorVolumeSourceBuilder::new(https_secret_class) - .with_pod_scope() - .with_node_scope() - .build(), + SecretOperatorVolumeSourceBuilder::new( + &authentication_config.tls_secret_class, + ) + .with_pod_scope() + .with_node_scope() + .build(), ) .build(), ); @@ -199,13 +207,40 @@ impl ContainerConfig { .build(), ); + pb.add_volume( + VolumeBuilder::new("kerberos") + .ephemeral( + SecretOperatorVolumeSourceBuilder::new( + &authentication_config.kerberos.secret_class, + ) + .with_service_scope(hdfs.name_any()) + .with_kerberos_service_name(role.kerberos_service_name()) + .with_kerberos_service_name("HTTP") + .build(), + ) + .build(), + ); + + let principal = format!( + "{service_name}/{hdfs_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", + service_name = role.kerberos_service_name(), + hdfs_name = hdfs.name_any(), + namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, + ); let create_tls_cert_bundle_init_container = ContainerBuilder::new("create-tls-cert-bundle") .unwrap() 
.image_from_product_image(resolved_product_image) - .command(vec!["/bin/bash".to_string(), "-c".to_string()]) + .command(Self::command()) .args(vec![formatdoc!( r###" + # We need get the kerberos ticket first, as other containers wait for the truststore to be available. + # This way it is guaranteed that the ticket is already there when the truststore is there. + export KRB5_CONFIG=/stackable/kerberos/krb5.conf + {export_kerberos_real_env_var_command} + echo "Getting ticket for {principal} from /stackable/kerberos/keytab" + kinit "{principal}" -kt /stackable/kerberos/keytab + echo "Cleaning up truststore - just in case" rm -f {KEYSTORE_DIR_NAME}/truststore.p12 echo "Creating truststore" @@ -215,11 +250,14 @@ impl ContainerConfig { echo "Cleaning up keystore - just in case" rm -f {KEYSTORE_DIR_NAME}/keystore.p12 echo "Creating keystore" - openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"### + openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"###, + export_kerberos_real_env_var_command = Self::export_kerberos_real_env_var_command(), )]) // Only this init container needs the actual cert (from tls volume) to create the truststore + keystore from + .add_volume_mount("tmp", "/tmp") .add_volume_mount("tls", "/stackable/tls") .add_volume_mount("keystore", KEYSTORE_DIR_NAME) + .add_volume_mount("kerberos", "/stackable/kerberos") .build(); pb.add_init_container(create_tls_cert_bundle_init_container); } @@ -229,24 +267,21 @@ impl ContainerConfig { HdfsRole::NameNode => { // Zookeeper fail over container let zkfc_container_config = Self::try_from(NameNodeContainer::Zkfc.to_string())?; - pb.add_volumes(zkfc_container_config.volumes(hdfs, merged_config, object_name)); + pb.add_volumes(zkfc_container_config.volumes(merged_config, object_name)); 
pb.add_container(zkfc_container_config.main_container( hdfs, resolved_product_image, zk_config_map_name, env_overrides, merged_config, - object_name, )?); // Format namenode init container let format_namenodes_container_config = Self::try_from(NameNodeContainer::FormatNameNodes.to_string())?; - pb.add_volumes(format_namenodes_container_config.volumes( - hdfs, - merged_config, - object_name, - )); + pb.add_volumes( + format_namenodes_container_config.volumes(merged_config, object_name), + ); pb.add_init_container(format_namenodes_container_config.init_container( hdfs, resolved_product_image, @@ -254,17 +289,14 @@ impl ContainerConfig { env_overrides, namenode_podrefs, merged_config, - object_name, )?); // Format ZooKeeper init container let format_zookeeper_container_config = Self::try_from(NameNodeContainer::FormatZooKeeper.to_string())?; - pb.add_volumes(format_zookeeper_container_config.volumes( - hdfs, - merged_config, - object_name, - )); + pb.add_volumes( + format_zookeeper_container_config.volumes(merged_config, object_name), + ); pb.add_init_container(format_zookeeper_container_config.init_container( hdfs, resolved_product_image, @@ -272,18 +304,15 @@ impl ContainerConfig { env_overrides, namenode_podrefs, merged_config, - object_name, )?); } HdfsRole::DataNode => { // Wait for namenode init container let wait_for_namenodes_container_config = Self::try_from(DataNodeContainer::WaitForNameNodes.to_string())?; - pb.add_volumes(wait_for_namenodes_container_config.volumes( - hdfs, - merged_config, - object_name, - )); + pb.add_volumes( + wait_for_namenodes_container_config.volumes(merged_config, object_name), + ); pb.add_init_container(wait_for_namenodes_container_config.init_container( hdfs, resolved_product_image, @@ -291,7 +320,6 @@ impl ContainerConfig { env_overrides, namenode_podrefs, merged_config, - object_name, )?); } HdfsRole::JournalNode => {} @@ -330,7 +358,6 @@ impl ContainerConfig { zookeeper_config_map_name: &str, env_overrides: Option<&BTreeMap>, 
merged_config: &(dyn MergedConfig + Send + 'static), - object_name: &str, ) -> Result { let mut cb = ContainerBuilder::new(self.name()).with_context(|_| InvalidContainerNameSnafu { @@ -340,8 +367,8 @@ impl ContainerConfig { let resources = self.resources(merged_config); cb.image_from_product_image(resolved_product_image) - .command(self.command()) - .args(self.args(hdfs, merged_config, &[], object_name)?) + .command(Self::command()) + .args(self.args(hdfs, merged_config, &[])?) .add_env_vars(self.env( hdfs, zookeeper_config_map_name, @@ -366,7 +393,6 @@ impl ContainerConfig { /// Creates respective init containers for: /// - Namenode (format-namenodes, format-zookeeper) /// - Datanode (wait-for-namenodes) - #[allow(clippy::too_many_arguments)] fn init_container( &self, hdfs: &HdfsCluster, @@ -375,13 +401,12 @@ impl ContainerConfig { env_overrides: Option<&BTreeMap>, namenode_podrefs: &[HdfsPodRef], merged_config: &(dyn MergedConfig + Send + 'static), - object_name: &str, ) -> Result { Ok(ContainerBuilder::new(self.name()) .with_context(|_| InvalidContainerNameSnafu { name: self.name() })? .image_from_product_image(resolved_product_image) - .command(self.command()) - .args(self.args(hdfs, merged_config, namenode_podrefs, object_name)?) + .command(Self::command()) + .args(self.args(hdfs, merged_config, namenode_podrefs)?) .add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None)) .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .build()) @@ -410,21 +435,14 @@ impl ContainerConfig { } /// Returns the container command. - fn command(&self) -> Vec { - match self { - ContainerConfig::Hdfs { .. } | ContainerConfig::Zkfc { .. } => vec![ - "/bin/bash".to_string(), - "-x".to_string(), - "-euo".to_string(), - "pipefail".to_string(), - "-c".to_string(), - ], - ContainerConfig::FormatNameNodes { .. } - | ContainerConfig::FormatZooKeeper { .. } - | ContainerConfig::WaitForNameNodes { .. 
} => { - vec!["/bin/bash".to_string(), "-c".to_string()] - } - } + fn command() -> Vec { + vec![ + "/bin/bash".to_string(), + "-x".to_string(), + "-euo".to_string(), + "pipefail".to_string(), + "-c".to_string(), + ] } /// Returns the container command arguments. @@ -433,47 +451,43 @@ impl ContainerConfig { hdfs: &HdfsCluster, merged_config: &(dyn MergedConfig + Send + 'static), namenode_podrefs: &[HdfsPodRef], - object_name: &str, ) -> Result, Error> { - let mut args = vec![ - self.create_config_directory_cmd(), - self.copy_config_xml_cmd(), - ]; + let mut args = String::new(); + args.push_str(&self.create_config_directory_cmd()); + args.push_str(&self.copy_config_xml_cmd()); + // We can't influence the order of the init containers. - // Some init containers - such as format-namenodes - need the tls certs, so let's wait for them to be properly set up - if hdfs.has_https_enabled() { - args.push(Self::wait_for_trust_and_keystore_command()); - } - if hdfs.has_kerberos_enabled() { - args.push(Self::export_kerberos_real_env_var_command()); + // Some init containers - such as format-namenodes - need the tls certs or kerberos tickets, so let's wait for them to be properly set up + if hdfs.authentication_config().is_some() { + args.push_str(&Self::export_kerberos_real_env_var_command()); + args.push_str(&Self::wait_for_trust_and_keystore_command()); } + match self { ContainerConfig::Hdfs { role, .. } => { - args.push(self.copy_log4j_properties_cmd( + args.push_str(&self.copy_log4j_properties_cmd( HDFS_LOG4J_CONFIG_FILE, merged_config.hdfs_logging(), )); - - args.push(format!( - "{hadoop_home}/bin/hdfs {role}", + args.push_str(&format!( + "{hadoop_home}/bin/hdfs {role}\n", hadoop_home = Self::HADOOP_HOME, )); } ContainerConfig::Zkfc { .. 
} => { if let Some(container_config) = merged_config.zkfc_logging() { - args.push( - self.copy_log4j_properties_cmd(ZKFC_LOG4J_CONFIG_FILE, container_config), + args.push_str( + &self.copy_log4j_properties_cmd(ZKFC_LOG4J_CONFIG_FILE, container_config), ); } - - args.push(format!( - "{hadoop_home}/bin/hdfs zkfc", + args.push_str(&format!( + "{hadoop_home}/bin/hdfs zkfc\n", hadoop_home = Self::HADOOP_HOME )); } - ContainerConfig::FormatNameNodes { role, .. } => { + ContainerConfig::FormatNameNodes { .. } => { if let Some(container_config) = merged_config.format_namenodes_logging() { - args.push(self.copy_log4j_properties_cmd( + args.push_str(&self.copy_log4j_properties_cmd( FORMAT_NAMENODES_LOG4J_CONFIG_FILE, container_config, )); @@ -485,17 +499,13 @@ impl ContainerConfig { // $NAMENODE_DIR/current/VERSION. Then we don't do anything. // If there is no active namenode, the current pod is not formatted we format as // active namenode. Otherwise as standby node. - if hdfs.has_kerberos_enabled() { - args.push(Self::get_kerberos_ticket(hdfs, role, object_name)?); - } - args.push(formatdoc!( + args.push_str(&formatdoc!( r###" - cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Start formatting namenode $POD_NAME. Checking for active namenodes:" for namenode_id in {pod_names} do echo -n "Checking pod $namenode_id... " - {get_service_state_command} + {get_service_state_command} if [ "$SERVICE_STATE" == "active" ] then ACTIVE_NAMENODE=$namenode_id @@ -506,7 +516,7 @@ impl ContainerConfig { done set -e - if ! ls {NAMENODE_ROOT_DATA_DIR}/current/fsimage_* + if [ ! -f "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" ] then if [ -z ${{ACTIVE_NAMENODE+x}} ] then @@ -531,33 +541,31 @@ impl ContainerConfig { } ContainerConfig::FormatZooKeeper { .. 
} => { if let Some(container_config) = merged_config.format_zookeeper_logging() { - args.push(self.copy_log4j_properties_cmd( + args.push_str(&self.copy_log4j_properties_cmd( FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, container_config, )); } - args.push(formatdoc!( + args.push_str(&formatdoc!( r###" echo "Attempt to format ZooKeeper..." if [[ "0" -eq "$(echo $POD_NAME | sed -e 's/.*-//')" ]] ; then {hadoop_home}/bin/hdfs zkfc -formatZK -nonInteractive || true else echo "ZooKeeper already formatted!" - fi"###, + fi + "###, hadoop_home = Self::HADOOP_HOME )); } - ContainerConfig::WaitForNameNodes { role, .. } => { + ContainerConfig::WaitForNameNodes { .. } => { if let Some(container_config) = merged_config.wait_for_namenodes() { - args.push(self.copy_log4j_properties_cmd( + args.push_str(&self.copy_log4j_properties_cmd( WAIT_FOR_NAMENODES_LOG4J_CONFIG_FILE, container_config, )); } - if hdfs.has_kerberos_enabled() { - args.push(Self::get_kerberos_ticket(hdfs, role, object_name)?); - } - args.push(formatdoc!( + args.push_str(&formatdoc!( r###" echo "Waiting for namenodes to get ready:" n=0 @@ -584,7 +592,8 @@ impl ContainerConfig { echo "" n=$(( n + 1)) sleep 5 - done"###, + done + "###, get_service_state_command = Self::get_service_state_command(hdfs)?, pod_names = namenode_podrefs .iter() @@ -594,35 +603,27 @@ impl ContainerConfig { )); } } - Ok(vec![args.join(" && ")]) + Ok(vec![args]) } /// Wait until the init container has created global trust and keystore shared between all containers fn wait_for_trust_and_keystore_command() -> String { - format!( - "until [ -f {KEYSTORE_DIR_NAME}/truststore.p12 ]; do echo 'Waiting for truststore to be created' && sleep 1; done && until [ -f {KEYSTORE_DIR_NAME}/keystore.p12 ]; do echo 'Waiting for keystore to be created' && sleep 1; done" + formatdoc!( + r###"until [ -f {KEYSTORE_DIR_NAME}/truststore.p12 ]; do + echo 'Waiting for truststore to be created' + sleep 1 + done + until [ -f {KEYSTORE_DIR_NAME}/keystore.p12 ]; do + echo 
'Waiting for keystore to be created' + sleep 1 + done + "### ) } - /// `kinit` a ticket using the principal created for the specified hdfs role - /// Needs the KERBEROS_REALM env var to be present, as `Self::export_kerberos_real_env_var_command` does - /// Needs the POD_NAME env var to be present, which will be provided by the PodSpec - fn get_kerberos_ticket( - hdfs: &HdfsCluster, - role: &HdfsRole, - object_name: &str, - ) -> Result { - let principal = format!( - "{service_name}/${{POD_NAME}}.{object_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", - service_name = role.kerberos_service_name(), - namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, - ); - Ok(format!("echo \"Getting ticket for {principal}\" from /stackable/kerberos/keytab && kinit \"{principal}\" -kt /stackable/kerberos/keytab")) - } - // Command to export `KERBEROS_REALM` env var to default real from krb5.conf, e.g. `CLUSTER.LOCAL` fn export_kerberos_real_env_var_command() -> String { - "export KERBEROS_REALM=$(grep -oP 'default_realm = \\K.*' /stackable/kerberos/krb5.conf)" + "export KERBEROS_REALM=$(grep -oP 'default_realm = \\K.*' /stackable/kerberos/krb5.conf)\n" .to_string() } @@ -630,15 +631,16 @@ impl ContainerConfig { Ok(if hdfs.has_kerberos_enabled() { formatdoc!( r###" - PRINCIPAL=$(echo "nn/${{namenode_id}}.$(echo $namenode_id | grep -o '.*[^-0-9]').{namespace}.svc.cluster.local@${{KERBEROS_REALM}}") - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $namenode_id | tail -n1)"###, + PRINCIPAL=$(echo "nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{KERBEROS_REALM}}") + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $namenode_id | tail -n1 || true)"###, hadoop_home = Self::HADOOP_HOME, - namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, + hdfs_name = hdfs.name_any(), + hdfs_namespace = 
hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, ) } else { formatdoc!( r###" - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $namenode_id | tail -n1)"###, + SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $namenode_id | tail -n1 || true)"###, hadoop_home = Self::HADOOP_HOME ) }) @@ -743,14 +745,13 @@ impl ContainerConfig { /// Return the container volumes. fn volumes( &self, - hdfs: &HdfsCluster, merged_config: &(dyn MergedConfig + Send + 'static), object_name: &str, ) -> Vec { let mut volumes = vec![]; let container_log_config = match self { - ContainerConfig::Hdfs { role, .. } => { + ContainerConfig::Hdfs { .. } => { volumes.push( VolumeBuilder::new(ContainerConfig::STACKABLE_LOG_VOLUME_MOUNT_NAME) .empty_dir(EmptyDirVolumeSource { @@ -763,26 +764,6 @@ impl ContainerConfig { .build(), ); - // Note that we create the volume here, only for the main container. - // However, as other containers need this volume as well, it will be also mounted in other containers. - if let Some(kerberos_config) = hdfs.kerberos_config() { - let mut kerberos_secret_operator_volume_builder = - SecretOperatorVolumeSourceBuilder::new(&kerberos_config.secret_class); - kerberos_secret_operator_volume_builder - .with_pod_scope() - // FIXME We always add the node scope here, as some customers access their datanodes from outside of k8s - // In the future listener-op will work together with secret-op, so that the scope automatically matches however the services are exposed - .with_node_scope() - .with_kerberos_service_name(role.kerberos_service_name()) - .with_kerberos_service_name("HTTP"); - - volumes.push( - VolumeBuilder::new("kerberos") - .ephemeral(kerberos_secret_operator_volume_builder.build()) - .build(), - ); - } - Some(merged_config.hdfs_logging()) } ContainerConfig::Zkfc { .. 
} => merged_config.zkfc_logging(), @@ -808,6 +789,7 @@ impl ContainerConfig { merged_config: &(dyn MergedConfig + Send + 'static), ) -> Vec { let mut volume_mounts = vec![ + VolumeMountBuilder::new("tmp", "/tmp").build(), VolumeMountBuilder::new(Self::STACKABLE_LOG_VOLUME_MOUNT_NAME, STACKABLE_LOG_DIR) .build(), VolumeMountBuilder::new( @@ -874,7 +856,7 @@ impl ContainerConfig { /// Create a config directory for the respective container. fn create_config_directory_cmd(&self) -> String { format!( - "mkdir -p {config_dir_name}", + "mkdir -p {config_dir_name}\n", config_dir_name = self.volume_mount_dirs().final_config() ) } @@ -882,7 +864,7 @@ impl ContainerConfig { /// Copy all the configuration files to the respective container config dir. fn copy_config_xml_cmd(&self) -> String { format!( - "cp {config_dir_mount}/*.xml {config_dir_name}", + "cp {config_dir_mount}/*.xml {config_dir_name}\n", config_dir_mount = self.volume_mount_dirs().config_mount(), config_dir_name = self.volume_mount_dirs().final_config() ) @@ -907,7 +889,7 @@ impl ContainerConfig { }; format!( - "cp {log4j_properties_dir}/{file_name} {config_dir}/{LOG4J_PROPERTIES}", + "cp {log4j_properties_dir}/{file_name} {config_dir}/{LOG4J_PROPERTIES}\n", log4j_properties_dir = source_log4j_properties_dir, file_name = log4j_config_file, config_dir = self.volume_mount_dirs().final_config() diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 630ebb18..c1b21afb 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -277,7 +277,6 @@ pub async fn reconcile_hdfs(hdfs: Arc, ctx: Arc) -> HdfsOperat rolegroup_service(&hdfs, &role, &rolegroup_ref, &resolved_product_image)?; let rg_configmap = rolegroup_config_map( &hdfs, - &role, &rolegroup_ref, rolegroup_config, &namenode_podrefs, @@ -395,7 +394,6 @@ fn rolegroup_service( #[allow(clippy::too_many_arguments)] fn rolegroup_config_map( hdfs: &HdfsCluster, - role: &HdfsRole, rolegroup_ref: 
&RoleGroupRef, rolegroup_config: &HashMap>, namenode_podrefs: &[HdfsPodRef], @@ -420,6 +418,7 @@ fn rolegroup_config_map( let mut hdfs_site_xml = String::new(); let mut core_site_xml = String::new(); + let mut hadoop_policy_xml = String::new(); let mut ssl_server_xml = String::new(); let mut ssl_client_xml = String::new(); @@ -461,11 +460,18 @@ fn rolegroup_config_map( core_site_xml = CoreSiteConfigBuilder::new(hdfs_name.to_string()) .fs_default_fs() .ha_zookeeper_quorum() - .security_config(hdfs, role, hdfs_name, &hdfs_namespace) + .security_config(hdfs, hdfs_name, &hdfs_namespace) // the extend with config must come last in order to have overrides working!!! .extend(config) .build_as_xml(); } + PropertyNameKind::File(file_name) if file_name == HADOOP_POLICY_XML => { + // We don't add any settings here, the main purpose is to have a configOverride for users. + let mut config_opts: BTreeMap> = BTreeMap::new(); + config_opts.extend(config.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + hadoop_policy_xml = + stackable_operator::product_config::writer::to_hadoop_xml(config_opts.iter()); + } PropertyNameKind::File(file_name) if file_name == SSL_SERVER_XML => { let mut config_opts = BTreeMap::new(); if hdfs.has_https_enabled() { @@ -540,6 +546,7 @@ fn rolegroup_config_map( ) .add_data(CORE_SITE_XML.to_string(), core_site_xml) .add_data(HDFS_SITE_XML.to_string(), hdfs_site_xml) + .add_data(HADOOP_POLICY_XML.to_string(), hadoop_policy_xml) .add_data(SSL_SERVER_XML, ssl_server_xml) .add_data(SSL_CLIENT_XML, ssl_client_xml); diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index 31919696..e30efe28 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,6 +1,6 @@ use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, - HdfsCluster, HdfsRole, + HdfsCluster, }; use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; @@ -39,64 +39,64 @@ impl 
CoreSiteConfigBuilder { pub fn security_config( &mut self, hdfs: &HdfsCluster, - role: &HdfsRole, hdfs_name: &str, hdfs_namespace: &str, ) -> &mut Self { - if hdfs.has_kerberos_enabled() { - self - .add("hadoop.security.authentication", "kerberos") - .add("hadoop.security.authorization", "true") - // Otherwise we fail with `java.io.IOException: No groups found for user nn` - // Default value is `dr.who=`, so we include that here - .add("hadoop.user.group.static.mapping.overrides", "dr.who=;nn=;nm=;jn=;") + if hdfs.authentication_config().is_some() { + // For a long time we tried using `_HOST` in principals, e.g. `jn/_HOST@REALM.COM`. + // Turns out there are a lot of code paths that check the principal of the requester using a reverse lookup of the incoming IP address + // and getting a different hostname than the principal has. + // What ultimately killed this approach was + // + // 2023-05-30 09:23:01,745 ERROR namenode.EditLogInputStream (EditLogFileInputStream.java:nextOpImpl(220)) - caught exception initializing https://hdfs-journalnode-default-1.hdfs-journalnode-default.kuttl-test-fine-rat.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A595659877%3A1685437352616%3ACID-90c52400-5b07-49bf-bdbe-3469bbdc5ebb&inProgressOk=true + // org.apache.hadoop.hdfs.server.common.HttpGetFailedException: Fetch of https://hdfs-journalnode-default-1.hdfs-journalnode-default.kuttl-test-fine-rat.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A595659877%3A1685437352616%3ACID-90c52400-5b07-49bf-bdbe-3469bbdc5ebb&inProgressOk=true failed with status code 403 + // Response message: + // Only Namenode and another JournalNode may access this servlet + // + // After we have switched to using the following principals everything worked without problems + + let principal_host_part = + format!("{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}"); + self.add("hadoop.security.authentication", "kerberos") 
.add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") .add( - "dfs.web.authentication.kerberos.principal", - "HTTP/_HOST@${env.KERBEROS_REALM}", + "dfs.journalnode.kerberos.principal", + &format!("jn/{principal_host_part}"), + ) + .add( + "dfs.journalnode.kerberos.internal.spnego.principal", + &format!("jn/{principal_host_part}"), + ) + .add( + "dfs.namenode.kerberos.principal", + &format!("nn/{principal_host_part}"), ) .add( - "dfs.web.authentication.keytab.file", - "/stackable/kerberos/keytab", + "dfs.datanode.kerberos.principal", + &format!("dn/{principal_host_part}"), ) + .add( + "dfs.web.authentication.kerberos.principal", + &format!("HTTP/{principal_host_part}"), + ) + .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") + .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab") + .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab") .add( "dfs.journalnode.kerberos.principal.pattern", - // E.g. jn/hdfs-test-journalnode-default-0.hdfs-test-journalnode-default.test.svc.cluster.local@CLUSTER.LOCAL - format!("jn/{hdfs_name}-journalnode-*.{hdfs_name}-journalnode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + &format!("jn/{principal_host_part}"), ) .add( "dfs.namenode.kerberos.principal.pattern", - format!("nn/{hdfs_name}-namenode-*.{hdfs_name}-namenode-*.{hdfs_namespace}.svc.cluster.local@${{env.KERBEROS_REALM}}").as_str(), + &format!("nn/{principal_host_part}"), + ) + // Otherwise we fail with `java.io.IOException: No groups found for user nn` + // Default value is `dr.who=`, so we include that here + .add( + "hadoop.user.group.static.mapping.overrides", + "dr.who=;nn=;nm=;jn=;", ); - match role { - HdfsRole::NameNode => { - self.add( - "dfs.namenode.kerberos.principal", - "nn/_HOST@${env.KERBEROS_REALM}", - ) - .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab"); - } - HdfsRole::DataNode => { - self.add( - "dfs.datanode.kerberos.principal", - "dn/_HOST@${env.KERBEROS_REALM}", - ) - 
.add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab"); - } - HdfsRole::JournalNode => { - self.add( - "dfs.journalnode.kerberos.principal", - "jn/_HOST@${env.KERBEROS_REALM}", - ) - .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") - .add( - "dfs.journalnode.kerberos.internal.spnego.principal", - "HTTP/_HOST@${env.KERBEROS_REALM}", - ); - } - } - self.add_wire_encryption_settings(); } self diff --git a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 index 88ef2ce2..d8070ec6 100644 --- a/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 +++ b/tests/templates/kuttl/kerberos/01-install-krb5-kdc.yaml.j2 @@ -14,7 +14,7 @@ spec: spec: initContainers: - name: init - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable0.0.0-dev args: - sh - -euo @@ -35,7 +35,7 @@ spec: name: data containers: - name: kdc - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable0.0.0-dev args: - krb5kdc - -n @@ -48,7 +48,7 @@ spec: - mountPath: /var/kerberos/krb5kdc name: data - name: kadmind - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable0.0.0-dev args: - kadmind - -nofork @@ -61,7 +61,7 @@ spec: - mountPath: /var/kerberos/krb5kdc name: data - name: client - image: docker.stackable.tech/stackable/krb5:1.18.2-stackable23.4 + image: docker.stackable.tech/stackable/krb5:1.18.2-stackable0.0.0-dev tty: true stdin: true env: diff --git a/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 new file mode 100644 index 00000000..23a7bbde --- /dev/null +++ b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 @@ -0,0 +1,51 @@ +# Tribute to https://github.com/Netflix/chaosmonkey 
+--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 3600 +commands: + # First, let's delete the first pod of every HDFS service + # Should trigger failover of the namenode to 1 + - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-0 hdfs-namenode-default-0 hdfs-datanode-default-0 + timeout: 600 + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m + timeout: 600 + + # Also delete the last pod of every HDFS service + # Should trigger failover of the namenode back to 0 + - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-2 hdfs-namenode-default-1 hdfs-datanode-default-1 + timeout: 600 + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m + timeout: 600 + + # Also delete the Zookeeper + - script: kubectl -n $NAMESPACE delete pod hdfs-zk-server-default-0 + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait --for=condition=Available zookeepercluster hdfs-zk --timeout 10m + timeout: 600 + + # And now everything +{% for n in range(3) %} + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + timeout: 600 + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + timeout: 600 + - script: sleep 10 + # Delete just after they have started up again, just to make things worse + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + timeout: 600 + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait --for=condition=Available zookeepercluster hdfs-zk --timeout 10m + timeout: 600 + - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs 
--timeout 10m + timeout: 600 +{% endfor %} diff --git a/tests/templates/kuttl/kerberos/22-assert.yaml b/tests/templates/kuttl/kerberos/22-assert.yaml new file mode 100644 index 00000000..374d2e79 --- /dev/null +++ b/tests/templates/kuttl/kerberos/22-assert.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: check-hdfs +status: + succeeded: 1 diff --git a/tests/templates/kuttl/kerberos/22-check-file.yaml.j2 b/tests/templates/kuttl/kerberos/22-check-file.yaml.j2 new file mode 100644 index 00000000..6477d384 --- /dev/null +++ b/tests/templates/kuttl/kerberos/22-check-file.yaml.j2 @@ -0,0 +1,65 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + kubectl apply -n $NAMESPACE -f - < Date: Wed, 31 May 2023 14:37:08 +0200 Subject: [PATCH 080/101] Remove redundant set -e --- rust/operator/src/container.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 9da1f40b..a0eb7f4a 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -515,7 +515,6 @@ impl ContainerConfig { echo "" done - set -e if [ ! -f "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" ] then if [ -z ${{ACTIVE_NAMENODE+x}} ] @@ -529,7 +528,8 @@ impl ContainerConfig { else cat "{NAMENODE_ROOT_DATA_DIR}/current/VERSION" echo "Pod $POD_NAME already formatted. Skipping..." 
- fi"###, + fi + "###, get_service_state_command = Self::get_service_state_command(hdfs)?, hadoop_home = Self::HADOOP_HOME, pod_names = namenode_podrefs From effae5accbb6361c0347df4620d2d8e926af57da Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 14:48:53 +0200 Subject: [PATCH 081/101] We only support Kerberos for HDFS >= 3.3.x --- rust/operator/src/hdfs_controller.rs | 6 ++ rust/operator/src/kerberos.rs | 93 ++++++++++++++++++- .../kuttl/kerberos/10-install-zk.yaml.j2 | 1 - .../kuttl/kerberos/11-install-hdfs.yaml.j2 | 5 +- tests/test-definition.yaml | 3 +- 5 files changed, 101 insertions(+), 7 deletions(-) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index c1b21afb..1f5ee225 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -4,6 +4,7 @@ use crate::{ container::ContainerConfig, discovery::build_discovery_configmap, event::{build_invalid_replica_message, publish_event}, + kerberos, product_logging::{extend_role_group_config_map, resolve_vector_aggregator_address}, OPERATOR_NAME, }; @@ -154,6 +155,10 @@ pub enum Error { BuildRbacResources { source: stackable_operator::error::Error, }, + #[snafu(display( + "kerberos not supported for HDFS versions < 3.3.x. 
Please use at least version 3.3.x" + ))] + KerberosNotSupported {}, } impl ReconcilerError for Error { @@ -174,6 +179,7 @@ pub async fn reconcile_hdfs(hdfs: Arc, ctx: Arc) -> HdfsOperat let client = &ctx.client; let resolved_product_image = hdfs.spec.image.resolve(DOCKER_IMAGE_BASE_NAME); + kerberos::check_if_supported(&resolved_product_image)?; let vector_aggregator_address = resolve_vector_aggregator_address(&hdfs, client) .await diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index e30efe28..f8fd7b4f 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -2,8 +2,20 @@ use stackable_hdfs_crd::{ constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, HdfsCluster, }; +use stackable_operator::commons::product_image_selection::ResolvedProductImage; -use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; +use crate::{ + config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}, + hdfs_controller::Error, +}; + +pub fn check_if_supported(resolved_product_image: &ResolvedProductImage) -> Result<(), Error> { + if resolved_product_image.product_version.starts_with("3.2.") { + Err(Error::KerberosNotSupported {}) + } else { + Ok(()) + } +} impl HdfsSiteConfigBuilder { pub fn security_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { @@ -115,3 +127,82 @@ impl CoreSiteConfigBuilder { self } } + +// IMPORTANT: We only support Kerberos for HDFS >= 3.3.x +// With HDFS 3.2.2 we got weird errors, which *might* be caused by DNS lookup issues +// +// 2023-05-31 12:34:18,319 ERROR namenode.EditLogInputStream (EditLogFileInputStream.java:nextOpImpl(220)) - caught exception initializing https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true +// java.io.IOException: 
org.apache.hadoop.security.authentication.client.AuthenticationException: Error while authenticating with endpoint: https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true +// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:482) +// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:474) +// at java.base/java.security.AccessController.doPrivileged(Native Method) +// at java.base/javax.security.auth.Subject.doAs(Subject.java:423) +// at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) +// at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:535) +// at org.apache.hadoop.security.SecurityUtil.doAsCurrentUser(SecurityUtil.java:529) +// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog.getInputStream(EditLogFileInputStream.java:473) +// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.init(EditLogFileInputStream.java:157) +// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.nextOpImpl(EditLogFileInputStream.java:218) +// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.nextOp(EditLogFileInputStream.java:276) +// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) +// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151) +// at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:190) +// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) +// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151) +// at 
org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:190) +// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) +// at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadEditRecords(FSEditLogLoader.java:243) +// at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadFSEdits(FSEditLogLoader.java:182) +// at org.apache.hadoop.hdfs.server.namenode.FSImage.loadEdits(FSImage.java:914) +// at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:761) +// at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:338) +// at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:1135) +// at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:750) +// at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:658) +// at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:734) +// at org.apache.hadoop.hdfs.server.namenode.NameNode.(NameNode.java:977) +// at org.apache.hadoop.hdfs.server.namenode.NameNode.(NameNode.java:950) +// at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1716) +// at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1783) +// Caused by: org.apache.hadoop.security.authentication.client.AuthenticationException: Error while authenticating with endpoint: https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true +// at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) +// at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) +// at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) +// at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490) +// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.wrapExceptionWithMessage(KerberosAuthenticator.java:232) +// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.authenticate(KerberosAuthenticator.java:219) +// at org.apache.hadoop.security.authentication.client.AuthenticatedURL.openConnection(AuthenticatedURL.java:348) +// at org.apache.hadoop.hdfs.web.URLConnectionFactory.openConnection(URLConnectionFactory.java:186) +// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:480) +// ... 30 more +// Caused by: org.apache.hadoop.security.authentication.client.AuthenticationException: GSSException: No valid credentials provided (Mechanism level: Server not found in Kerberos database (7) - LOOKING_UP_SERVER) +// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.doSpnegoSequence(KerberosAuthenticator.java:360) +// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.authenticate(KerberosAuthenticator.java:204) +// ... 
33 more +// Caused by: GSSException: No valid credentials provided (Mechanism level: Server not found in Kerberos database (7) - LOOKING_UP_SERVER) +// at java.security.jgss/sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:773) +// at java.security.jgss/sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:266) +// at java.security.jgss/sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:196) +// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator$1.run(KerberosAuthenticator.java:336) +// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator$1.run(KerberosAuthenticator.java:310) +// at java.base/java.security.AccessController.doPrivileged(Native Method) +// at java.base/javax.security.auth.Subject.doAs(Subject.java:423) +// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.doSpnegoSequence(KerberosAuthenticator.java:310) +// ... 34 more +// Caused by: KrbException: Server not found in Kerberos database (7) - LOOKING_UP_SERVER +// at java.security.jgss/sun.security.krb5.KrbTgsRep.(KrbTgsRep.java:73) +// at java.security.jgss/sun.security.krb5.KrbTgsReq.getReply(KrbTgsReq.java:226) +// at java.security.jgss/sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:237) +// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCredsSingle(CredentialsUtil.java:477) +// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:340) +// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:314) +// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:169) +// at java.security.jgss/sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:490) +// at java.security.jgss/sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:697) +// ... 
41 more +// Caused by: KrbException: Identifier doesn't match expected value (906) +// at java.security.jgss/sun.security.krb5.internal.KDCRep.init(KDCRep.java:140) +// at java.security.jgss/sun.security.krb5.internal.TGSRep.init(TGSRep.java:65) +// at java.security.jgss/sun.security.krb5.internal.TGSRep.(TGSRep.java:60) +// at java.security.jgss/sun.security.krb5.KrbTgsRep.(KrbTgsRep.java:55) +// ... 49 more diff --git a/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 b/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 index 9e1e7bb9..da745dd5 100644 --- a/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 +++ b/tests/templates/kuttl/kerberos/10-install-zk.yaml.j2 @@ -8,7 +8,6 @@ spec: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" clusterConfig: - listenerClass: {{ test_scenario['values']['listener-class'] }} {% if lookup('env', 'VECTOR_AGGREGATOR') %} logging: vectorAggregatorConfigMapName: vector-aggregator-discovery diff --git a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 index 1a8d92f5..99faadb6 100644 --- a/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/kerberos/11-install-hdfs.yaml.j2 @@ -11,12 +11,11 @@ commands: name: hdfs spec: image: - productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" - stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" + productVersion: "{{ test_scenario['values']['hadoop-latest'].split('-stackable')[0] }}" + stackableVersion: "{{ test_scenario['values']['hadoop-latest'].split('-stackable')[1] }}" clusterConfig: zookeeperConfigMapName: hdfs-zk dfsReplication: 1 - listenerClass: {{ test_scenario['values']['listener-class'] }} authentication: tlsSecretClass: tls kerberos: diff --git a/tests/test-definition.yaml 
b/tests/test-definition.yaml index a2c1f100..821a79ac 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -46,9 +46,8 @@ tests: - listener-class - name: kerberos dimensions: - - hadoop + - hadoop-latest # We only support Kerberos for HDFS >= 3.3.x. See rust/operator/src/kerberos.rs for details - zookeeper-latest - - listener-class - kerberos-realm - kerberos-backend - name: orphaned-resources From d69b2dd27dac1ae0eb537350e6bd0a90808e1c7f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 15:28:11 +0200 Subject: [PATCH 082/101] Add chaos monkey to smoke test --- .../21-unleash-the-chaosmonkey.yaml.j2 | 4 -- .../smoke/05-unleash-the-chaosmonkey.yaml.j2 | 47 +++++++++++++++++++ 2 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 diff --git a/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 index 23a7bbde..aed2beb4 100644 --- a/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 +++ b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 @@ -8,8 +8,6 @@ commands: # Should trigger failover of the namenode to 1 - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-0 hdfs-namenode-default-0 hdfs-datanode-default-0 timeout: 600 - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper - timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m timeout: 600 @@ -18,8 +16,6 @@ commands: # Should trigger failover of the namenode back to 0 - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-2 hdfs-namenode-default-1 hdfs-datanode-default-1 timeout: 600 - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper - timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs 
hdfs --timeout 10m timeout: 600 diff --git a/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 b/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 new file mode 100644 index 00000000..f7e1a869 --- /dev/null +++ b/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 @@ -0,0 +1,47 @@ +# Tribute to https://github.com/Netflix/chaosmonkey +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 3600 +commands: + # First, let's delete the first pod of every HDFS service + # Should trigger failover of the namenode to 1 + - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-0 hdfs-namenode-default-0 hdfs-datanode-default-0 + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m + timeout: 600 + + # Also delete the last pod of every HDFS service + # Should trigger failover of the namenode back to 0 + - script: kubectl -n $NAMESPACE delete pod hdfs-namenode-default-1 hdfs-datanode-default-1 + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m + timeout: 600 + + # Also delete the Zookeeper + - script: kubectl -n $NAMESPACE delete pod hdfs-zk-server-default-0 + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait --for=condition=Available zookeepercluster hdfs-zk --timeout 10m + timeout: 600 + + # And now everything +{% for n in range(3) %} + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + timeout: 600 + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + timeout: 600 + - script: sleep 10 + # Delete just after they have started up again, just to make things worse + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + timeout: 600 + - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + timeout: 600 + - script: sleep 10 + - script: kubectl -n $NAMESPACE wait 
--for=condition=Available zookeepercluster hdfs-zk --timeout 10m + timeout: 600 + - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m + timeout: 600 +{% endfor %} From 42e2dec09494d119c9f74728c8545aba84edc4e4 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 15:31:21 +0200 Subject: [PATCH 083/101] docs --- docs/modules/hdfs/pages/usage-guide/security.adoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/modules/hdfs/pages/usage-guide/security.adoc b/docs/modules/hdfs/pages/usage-guide/security.adoc index da13604f..8926c6e9 100644 --- a/docs/modules/hdfs/pages/usage-guide/security.adoc +++ b/docs/modules/hdfs/pages/usage-guide/security.adoc @@ -5,6 +5,8 @@ Currently the only supported authentication mechanism is Kerberos, which is disa For Kerberos to work a Kerberos KDC is needed, which the users needs to provide. The xref:home:secret-operator:secretclass.adoc#backend-kerberoskeytab[secret-operator documentation] states which kind of Kerberos servers are supported and how they can be configured. +IMPORTANT: Kerberos is supported starting from HDFS version 3.3.x + === 1. Prepare Kerberos server To configure HDFS to use Kerberos you first need to collect information about your Kerberos server, e.g. hostname and port. Additionally you need a service-user, which the secret-operator uses to create create principals for the HDFS services.
From 93e0a7c955ed7f67cf2a39250cf02a52b48fe9c3 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 15:53:21 +0200 Subject: [PATCH 084/101] Use constants consistently --- rust/crd/src/constants.rs | 1 - rust/operator/src/kerberos.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/rust/crd/src/constants.rs b/rust/crd/src/constants.rs index e252d8d2..55a4463b 100644 --- a/rust/crd/src/constants.rs +++ b/rust/crd/src/constants.rs @@ -56,7 +56,6 @@ pub const DFS_HA_NAMENODES: &str = "dfs.ha.namenodes"; // core-site.xml pub const FS_DEFAULT_FS: &str = "fs.defaultFS"; pub const HA_ZOOKEEPER_QUORUM: &str = "ha.zookeeper.quorum"; -pub const HADOOP_SECURITY_AUTHENTICATION: &str = "hadoop.security.authentication"; pub const STACKABLE_ROOT_DATA_DIR: &str = "/stackable/data"; pub const NAMENODE_ROOT_DATA_DIR: &str = "/stackable/data/namenode"; diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index f8fd7b4f..b7a7a36d 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -1,5 +1,5 @@ use stackable_hdfs_crd::{ - constants::{HADOOP_SECURITY_AUTHENTICATION, SSL_CLIENT_XML, SSL_SERVER_XML}, + constants::{SSL_CLIENT_XML, SSL_SERVER_XML}, HdfsCluster, }; use stackable_operator::commons::product_image_selection::ResolvedProductImage; @@ -116,7 +116,7 @@ impl CoreSiteConfigBuilder { pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> &mut Self { if hdfs.has_kerberos_enabled() { - self.add(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + self.add("hadoop.security.authentication", "kerberos"); self.add_wire_encryption_settings(); } self From 9e484cd75dc2dc820cb6fd2fb8532c6ef1d38b09 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 15:55:40 +0200 Subject: [PATCH 085/101] Simplify --- rust/crd/src/lib.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 11327a7f..f4f315dc 100644 --- 
a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -620,12 +620,7 @@ impl HdfsCluster { } pub fn has_kerberos_enabled(&self) -> bool { - self.spec - .cluster_config - .authentication - .as_ref() - .map(|auth| &auth.kerberos) - .is_some() + self.kerberos_config().is_some() } pub fn kerberos_config(&self) -> Option<&KerberosConfig> { From 1ffa879506863b25f514b2923874e6e206dad851 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 16:00:20 +0200 Subject: [PATCH 086/101] Minor improvements --- rust/crd/src/security.rs | 4 ++-- rust/operator/src/config.rs | 8 ++++---- rust/operator/src/kerberos.rs | 14 +++++++------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/rust/crd/src/security.rs b/rust/crd/src/security.rs index fe869ff1..456e08a9 100644 --- a/rust/crd/src/security.rs +++ b/rust/crd/src/security.rs @@ -5,13 +5,13 @@ use stackable_operator::schemars::{self, JsonSchema}; #[serde(rename_all = "camelCase")] pub struct AuthenticationConfig { /// Name of the SecretClass providing the tls certificates for the WebUIs. 
- #[serde(default = "default_kerberos_tls_secret_class")] + #[serde(default = "default_tls_secret_class")] pub tls_secret_class: String, /// Kerberos configuration pub kerberos: KerberosConfig, } -fn default_kerberos_tls_secret_class() -> String { +fn default_tls_secret_class() -> String { "tls".to_string() } diff --git a/rust/operator/src/config.rs b/rust/operator/src/config.rs index eeee77f2..ec5f1997 100644 --- a/rust/operator/src/config.rs +++ b/rust/operator/src/config.rs @@ -24,8 +24,8 @@ impl HdfsSiteConfigBuilder { } } - pub fn add(&mut self, property: &str, value: &str) -> &mut Self { - self.config.insert(property.to_string(), value.to_string()); + pub fn add(&mut self, property: impl Into, value: impl Into) -> &mut Self { + self.config.insert(property.into(), value.into()); self } @@ -206,8 +206,8 @@ impl CoreSiteConfigBuilder { } } - pub fn add(&mut self, property: &str, value: &str) -> &mut Self { - self.config.insert(property.to_string(), value.to_string()); + pub fn add(&mut self, property: impl Into, value: impl Into) -> &mut Self { + self.config.insert(property.into(), value.into()); self } diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index b7a7a36d..f8625384 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -73,34 +73,34 @@ impl CoreSiteConfigBuilder { .add("hadoop.registry.kerberos.realm", "${env.KERBEROS_REALM}") .add( "dfs.journalnode.kerberos.principal", - &format!("jn/{principal_host_part}"), + format!("jn/{principal_host_part}"), ) .add( "dfs.journalnode.kerberos.internal.spnego.principal", - &format!("jn/{principal_host_part}"), + format!("jn/{principal_host_part}"), ) .add( "dfs.namenode.kerberos.principal", - &format!("nn/{principal_host_part}"), + format!("nn/{principal_host_part}"), ) .add( "dfs.datanode.kerberos.principal", - &format!("dn/{principal_host_part}"), + format!("dn/{principal_host_part}"), ) .add( "dfs.web.authentication.kerberos.principal", - 
&format!("HTTP/{principal_host_part}"), + format!("HTTP/{principal_host_part}"), ) .add("dfs.journalnode.keytab.file", "/stackable/kerberos/keytab") .add("dfs.namenode.keytab.file", "/stackable/kerberos/keytab") .add("dfs.datanode.keytab.file", "/stackable/kerberos/keytab") .add( "dfs.journalnode.kerberos.principal.pattern", - &format!("jn/{principal_host_part}"), + format!("jn/{principal_host_part}"), ) .add( "dfs.namenode.kerberos.principal.pattern", - &format!("nn/{principal_host_part}"), + format!("nn/{principal_host_part}"), ) // Otherwise we fail with `java.io.IOException: No groups found for user nn` // Default value is `dr.who=`, so we include that here From 4a2ff34d40b8d28c222f1500faf408f19541f162 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 16:03:23 +0200 Subject: [PATCH 087/101] Fix kerberos hdfs version check --- rust/operator/src/hdfs_controller.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 1f5ee225..ce99aa51 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -179,7 +179,9 @@ pub async fn reconcile_hdfs(hdfs: Arc, ctx: Arc) -> HdfsOperat let client = &ctx.client; let resolved_product_image = hdfs.spec.image.resolve(DOCKER_IMAGE_BASE_NAME); - kerberos::check_if_supported(&resolved_product_image)?; + if hdfs.has_kerberos_enabled() { + kerberos::check_if_supported(&resolved_product_image)?; + } let vector_aggregator_address = resolve_vector_aggregator_address(&hdfs, client) .await From 7817bc69559c6edcdb4d42e1f72db883dea2c7d3 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 16:06:12 +0200 Subject: [PATCH 088/101] cleanup --- rust/operator/src/container.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index a0eb7f4a..03781133 100644 --- a/rust/operator/src/container.rs +++ 
b/rust/operator/src/container.rs @@ -96,8 +96,6 @@ pub enum ContainerConfig { volume_mounts: ContainerVolumeDirs, }, FormatNameNodes { - /// HDFS role (name-, data-, journal-node) which will be the container_name. - role: HdfsRole, /// The provided custom container name. container_name: String, /// Volume mounts for config and logging. @@ -110,8 +108,6 @@ pub enum ContainerConfig { volume_mounts: ContainerVolumeDirs, }, WaitForNameNodes { - /// HDFS role (name-, data-, journal-node) which will be the container_name. - role: HdfsRole, /// The provided custom container name. container_name: String, /// Volume mounts for config and logging. @@ -1109,7 +1105,6 @@ impl TryFrom for ContainerConfig { // namenode init containers name if name == NameNodeContainer::FormatNameNodes.to_string() => { Ok(Self::FormatNameNodes { - role: HdfsRole::NameNode, volume_mounts: ContainerVolumeDirs::try_from(name.as_str())?, container_name: name, }) @@ -1123,7 +1118,6 @@ impl TryFrom for ContainerConfig { // datanode init containers name if name == DataNodeContainer::WaitForNameNodes.to_string() => { Ok(Self::WaitForNameNodes { - role: HdfsRole::DataNode, volume_mounts: ContainerVolumeDirs::try_from(name.as_str())?, container_name: name, }) From 0e425bc079791ffcfcc5ad85d7b94d7b4cdbb951 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 16:12:05 +0200 Subject: [PATCH 089/101] intendation --- rust/operator/src/container.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 03781133..351ecd08 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -571,7 +571,7 @@ impl ContainerConfig { for namenode_id in {pod_names} do echo -n "Checking pod $namenode_id... 
" - {get_service_state_command} + {get_service_state_command} if [ "$SERVICE_STATE" = "active" ] || [ "$SERVICE_STATE" = "standby" ] then echo "$SERVICE_STATE" From 701d97ffc7a6e34c14b90726eedda4c270d43e4f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 16:18:34 +0200 Subject: [PATCH 090/101] Sett hadoop.rpc.protection to privacy --- rust/operator/src/kerberos.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index f8625384..d8e83349 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -123,7 +123,7 @@ impl CoreSiteConfigBuilder { } fn add_wire_encryption_settings(&mut self) -> &mut Self { - self.add("hadoop.rpc.protection", "authentication"); + self.add("hadoop.rpc.protection", "privacy"); self } } From 3b6d52d7b01b96dd262d8e20787532dd659a8f59 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 31 May 2023 22:18:39 +0200 Subject: [PATCH 091/101] Check exit code of zk format --- rust/operator/src/container.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 351ecd08..0f0ea3cb 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -546,7 +546,19 @@ impl ContainerConfig { r###" echo "Attempt to format ZooKeeper..." if [[ "0" -eq "$(echo $POD_NAME | sed -e 's/.*-//')" ]] ; then - {hadoop_home}/bin/hdfs zkfc -formatZK -nonInteractive || true + set +e + {hadoop_home}/bin/hdfs zkfc -formatZK -nonInteractive + EXITCODE=$? + set -e + if [[ $EXITCODE -eq 0 ]]; then + echo "Successfully formatted" + elif [[ $EXITCODE -eq 2 ]]; then + echo "ZNode already existed, did nothing" + else + echo "Zookeeper format failed with exit code $EXITCODE" + exit $EXITCODE + fi + else echo "ZooKeeper already formatted!" 
fi From 2b5405a2ed30c68d72351177047106cc4a1f1a70 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 1 Jun 2023 10:08:56 +0200 Subject: [PATCH 092/101] Reduce test duration by restricting chaosmonkey in smoke test --- .../templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 | 5 +++++ tests/test-definition.yaml | 1 + 2 files changed, 6 insertions(+) diff --git a/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 b/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 index f7e1a869..3be9a350 100644 --- a/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 +++ b/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 @@ -1,4 +1,8 @@ # Tribute to https://github.com/Netflix/chaosmonkey + +# We need to reduce the number of monkeys, otherwise the tests literally take days +# We only run them on some hand-picked test cases +{% if test_scenario['values']['number-of-datanodes'] == '2' and test_scenario['values']['datanode-pvcs'] == '2hdd-1ssd' and test_scenario['values']['listener-class'] == 'cluster-internal' and test_scenario['values']['zookeeper'] == test_scenario['values']['zookeeper-latest'] %} --- apiVersion: kuttl.dev/v1beta1 kind: TestStep @@ -45,3 +49,4 @@ commands: - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m timeout: 600 {% endfor %} +{% endif %} diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 821a79ac..5a1483d1 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -41,6 +41,7 @@ tests: dimensions: - hadoop - zookeeper + - zookeeper-latest # Needed for smoke test to detect if zk version is the latest we support - number-of-datanodes - datanode-pvcs - listener-class From d15620b151b0e072392a23a7ec3b85118588cf67 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 1 Jun 2023 10:59:13 +0200 Subject: [PATCH 093/101] Force delete pods --- .../kerberos/21-unleash-the-chaosmonkey.yaml.j2 | 16 +++++++++------- 
.../smoke/05-unleash-the-chaosmonkey.yaml.j2 | 16 +++++++++------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 index aed2beb4..e8b3880a 100644 --- a/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 +++ b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 @@ -1,4 +1,6 @@ # Tribute to https://github.com/Netflix/chaosmonkey + +# We need to force-delete the Pods, because IONOS is sometimes unable to delete the pod (it's stuck in Terminating for > 20 minutes) --- apiVersion: kuttl.dev/v1beta1 kind: TestStep @@ -6,7 +8,7 @@ timeout: 3600 commands: # First, let's delete the first pod of every HDFS service # Should trigger failover of the namenode to 1 - - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-0 hdfs-namenode-default-0 hdfs-datanode-default-0 + - script: kubectl -n $NAMESPACE delete pod --force hdfs-journalnode-default-0 hdfs-namenode-default-0 hdfs-datanode-default-0 timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m @@ -14,14 +16,14 @@ commands: # Also delete the last pod of every HDFS service # Should trigger failover of the namenode back to 0 - - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-2 hdfs-namenode-default-1 hdfs-datanode-default-1 + - script: kubectl -n $NAMESPACE delete pod --force hdfs-journalnode-default-2 hdfs-namenode-default-1 hdfs-datanode-default-1 timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m timeout: 600 # Also delete the Zookeeper - - script: kubectl -n $NAMESPACE delete pod hdfs-zk-server-default-0 + - script: kubectl -n $NAMESPACE delete pod --force hdfs-zk-server-default-0 timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available 
zookeepercluster hdfs-zk --timeout 10m @@ -29,15 +31,15 @@ commands: # And now everything {% for n in range(3) %} - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=hdfs timeout: 600 - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=zookeeper timeout: 600 - script: sleep 10 # Delete just after they have started up again, just to make things worse - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=hdfs timeout: 600 - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=zookeeper timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available zookeepercluster hdfs-zk --timeout 10m diff --git a/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 b/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 index 3be9a350..fde4a865 100644 --- a/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 +++ b/tests/templates/kuttl/smoke/05-unleash-the-chaosmonkey.yaml.j2 @@ -3,6 +3,8 @@ # We need to reduce the number of monkeys, otherwise the tests literally take days # We only run them on some hand-picked test cases {% if test_scenario['values']['number-of-datanodes'] == '2' and test_scenario['values']['datanode-pvcs'] == '2hdd-1ssd' and test_scenario['values']['listener-class'] == 'cluster-internal' and test_scenario['values']['zookeeper'] == test_scenario['values']['zookeeper-latest'] %} + +# We need to force-delete the Pods, because IONOS is sometimes unable to delete the pod (it's stuck in Terminating for > 20 minutes) --- apiVersion: kuttl.dev/v1beta1 kind: TestStep @@ -10,7 +12,7 @@ timeout: 3600 
commands: # First, let's delete the first pod of every HDFS service # Should trigger failover of the namenode to 1 - - script: kubectl -n $NAMESPACE delete pod hdfs-journalnode-default-0 hdfs-namenode-default-0 hdfs-datanode-default-0 + - script: kubectl -n $NAMESPACE delete pod --force hdfs-journalnode-default-0 hdfs-namenode-default-0 hdfs-datanode-default-0 timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m @@ -18,14 +20,14 @@ commands: # Also delete the last pod of every HDFS service # Should trigger failover of the namenode back to 0 - - script: kubectl -n $NAMESPACE delete pod hdfs-namenode-default-1 hdfs-datanode-default-1 + - script: kubectl -n $NAMESPACE delete pod --force hdfs-namenode-default-1 hdfs-datanode-default-1 timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available hdfs hdfs --timeout 10m timeout: 600 # Also delete the Zookeeper - - script: kubectl -n $NAMESPACE delete pod hdfs-zk-server-default-0 + - script: kubectl -n $NAMESPACE delete pod --force hdfs-zk-server-default-0 timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available zookeepercluster hdfs-zk --timeout 10m @@ -33,15 +35,15 @@ commands: # And now everything {% for n in range(3) %} - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=hdfs timeout: 600 - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=zookeeper + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=zookeeper timeout: 600 - script: sleep 10 # Delete just after they have started up again, just to make things worse - - script: kubectl -n $NAMESPACE delete pod -l app.kubernetes.io/name=hdfs + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=hdfs timeout: 600 - - script: kubectl -n $NAMESPACE delete pod -l 
app.kubernetes.io/name=zookeeper + - script: kubectl -n $NAMESPACE delete pod --force -l app.kubernetes.io/name=zookeeper timeout: 600 - script: sleep 10 - script: kubectl -n $NAMESPACE wait --for=condition=Available zookeepercluster hdfs-zk --timeout 10m From bfaafa4ffe28c96d9eb69c302b799f621bff64df Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 5 Jun 2023 16:27:12 +0200 Subject: [PATCH 094/101] Link to journalnodes bug ticket --- .../templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 index e8b3880a..ce18d41a 100644 --- a/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 +++ b/tests/templates/kuttl/kerberos/21-unleash-the-chaosmonkey.yaml.j2 @@ -1,4 +1,5 @@ # Tribute to https://github.com/Netflix/chaosmonkey +# We added this test case after running into problems reported in https://github.com/stackabletech/hdfs-operator/issues/338 # We need to force-delete the Pods, because IONOS is sometimes unable to delete the pod (it's stuck in Terminating for > 20 minutes) --- From 01d4a5a895bf8bddb4f18be66968e13af3e9e570 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 7 Jun 2023 16:18:37 +0200 Subject: [PATCH 095/101] Don't share /tmp ticket cache between containers --- rust/operator/src/container.rs | 60 ++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 0f0ea3cb..75422945 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -160,19 +160,13 @@ impl ContainerConfig { pb.add_volumes(main_container_config.volumes(merged_config, object_name)); pb.add_container(main_container_config.main_container( hdfs, + role, resolved_product_image, zk_config_map_name, env_overrides, merged_config, )?); - // We need 
to share `/tmp` between all containers, e.g. for Kerberos ticket cache - pb.add_volume( - VolumeBuilder::new("tmp") - .with_empty_dir(Option::::None, None) - .build(), - ); - // Vector side container if merged_config.vector_logging_enabled() { pb.add_container(product_logging::framework::vector_container( @@ -217,12 +211,6 @@ impl ContainerConfig { .build(), ); - let principal = format!( - "{service_name}/{hdfs_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", - service_name = role.kerberos_service_name(), - hdfs_name = hdfs.name_any(), - namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, - ); let create_tls_cert_bundle_init_container = ContainerBuilder::new("create-tls-cert-bundle") .unwrap() @@ -230,13 +218,6 @@ impl ContainerConfig { .command(Self::command()) .args(vec![formatdoc!( r###" - # We need get the kerberos ticket first, as other containers wait for the truststore to be available. - # This way it is guaranteed that the ticket is already there when the truststore is there. 
- export KRB5_CONFIG=/stackable/kerberos/krb5.conf - {export_kerberos_real_env_var_command} - echo "Getting ticket for {principal} from /stackable/kerberos/keytab" - kinit "{principal}" -kt /stackable/kerberos/keytab - echo "Cleaning up truststore - just in case" rm -f {KEYSTORE_DIR_NAME}/truststore.p12 echo "Creating truststore" @@ -246,14 +227,11 @@ impl ContainerConfig { echo "Cleaning up keystore - just in case" rm -f {KEYSTORE_DIR_NAME}/keystore.p12 echo "Creating keystore" - openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"###, - export_kerberos_real_env_var_command = Self::export_kerberos_real_env_var_command(), + openssl pkcs12 -export -in {KEYSTORE_DIR_NAME}/chain.crt -inkey /stackable/tls/tls.key -out {KEYSTORE_DIR_NAME}/keystore.p12 --passout pass:changeit"### )]) // Only this init container needs the actual cert (from tls volume) to create the truststore + keystore from - .add_volume_mount("tmp", "/tmp") .add_volume_mount("tls", "/stackable/tls") .add_volume_mount("keystore", KEYSTORE_DIR_NAME) - .add_volume_mount("kerberos", "/stackable/kerberos") .build(); pb.add_init_container(create_tls_cert_bundle_init_container); } @@ -266,6 +244,7 @@ impl ContainerConfig { pb.add_volumes(zkfc_container_config.volumes(merged_config, object_name)); pb.add_container(zkfc_container_config.main_container( hdfs, + role, resolved_product_image, zk_config_map_name, env_overrides, @@ -280,6 +259,7 @@ impl ContainerConfig { ); pb.add_init_container(format_namenodes_container_config.init_container( hdfs, + role, resolved_product_image, zk_config_map_name, env_overrides, @@ -295,6 +275,7 @@ impl ContainerConfig { ); pb.add_init_container(format_zookeeper_container_config.init_container( hdfs, + role, resolved_product_image, zk_config_map_name, env_overrides, @@ -311,6 +292,7 @@ impl ContainerConfig { ); pb.add_init_container(wait_for_namenodes_container_config.init_container( 
hdfs, + role, resolved_product_image, zk_config_map_name, env_overrides, @@ -350,6 +332,7 @@ impl ContainerConfig { fn main_container( &self, hdfs: &HdfsCluster, + role: &HdfsRole, resolved_product_image: &ResolvedProductImage, zookeeper_config_map_name: &str, env_overrides: Option<&BTreeMap>, @@ -364,7 +347,7 @@ impl ContainerConfig { cb.image_from_product_image(resolved_product_image) .command(Self::command()) - .args(self.args(hdfs, merged_config, &[])?) + .args(self.args(hdfs, role, merged_config, &[])?) .add_env_vars(self.env( hdfs, zookeeper_config_map_name, @@ -389,9 +372,11 @@ impl ContainerConfig { /// Creates respective init containers for: /// - Namenode (format-namenodes, format-zookeeper) /// - Datanode (wait-for-namenodes) + #[allow(clippy::too_many_arguments)] fn init_container( &self, hdfs: &HdfsCluster, + role: &HdfsRole, resolved_product_image: &ResolvedProductImage, zookeeper_config_map_name: &str, env_overrides: Option<&BTreeMap>, @@ -402,7 +387,7 @@ impl ContainerConfig { .with_context(|_| InvalidContainerNameSnafu { name: self.name() })? .image_from_product_image(resolved_product_image) .command(Self::command()) - .args(self.args(hdfs, merged_config, namenode_podrefs)?) + .args(self.args(hdfs, role, merged_config, namenode_podrefs)?) .add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None)) .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) .build()) @@ -445,6 +430,7 @@ impl ContainerConfig { fn args( &self, hdfs: &HdfsCluster, + role: &HdfsRole, merged_config: &(dyn MergedConfig + Send + 'static), namenode_podrefs: &[HdfsPodRef], ) -> Result, Error> { @@ -455,7 +441,7 @@ impl ContainerConfig { // We can't influence the order of the init containers. 
// Some init containers - such as format-namenodes - need the tls certs or kerberos tickets, so let's wait for them to be properly set up if hdfs.authentication_config().is_some() { - args.push_str(&Self::export_kerberos_real_env_var_command()); + args.push_str(&Self::get_kerberos_ticket(hdfs, role)?); args.push_str(&Self::wait_for_trust_and_keystore_command()); } @@ -635,6 +621,25 @@ impl ContainerConfig { .to_string() } + /// Command to `kinit` a ticket using the principal created for the specified hdfs role + /// Needs the POD_NAME env var to be present, which will be provided by the PodSpec + fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole) -> Result { + let principal = format!( + "{service_name}/{hdfs_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}", + service_name = role.kerberos_service_name(), + hdfs_name = hdfs.name_any(), + namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, + ); + Ok(formatdoc!( + r###" + {export_kerberos_real_env_var_command} + echo "Getting ticket for {principal}" from /stackable/kerberos/keytab + kinit "{principal}" -kt /stackable/kerberos/keytab + "###, + export_kerberos_real_env_var_command = Self::export_kerberos_real_env_var_command(), + )) + } + fn get_service_state_command(hdfs: &HdfsCluster) -> Result { Ok(if hdfs.has_kerberos_enabled() { formatdoc!( r###" @@ -797,7 +802,6 @@ impl ContainerConfig { merged_config: &(dyn MergedConfig + Send + 'static), ) -> Vec { let mut volume_mounts = vec![ - VolumeMountBuilder::new("tmp", "/tmp").build(), VolumeMountBuilder::new(Self::STACKABLE_LOG_VOLUME_MOUNT_NAME, STACKABLE_LOG_DIR) .build(), VolumeMountBuilder::new( From 8aadcde7341c9ffadbc6ec4d70f66eb1c525a4c2 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 7 Jun 2023 16:26:43 +0200 Subject: [PATCH 096/101] Removed unneeded -D dfs.namenode.kerberos.principal=$PRINCIPAL This is not needed any more after changing the principal names to not contain _HOST --- rust/operator/src/container.rs | 25
+++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 75422945..5e6b5d0a 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -512,7 +512,7 @@ impl ContainerConfig { echo "Pod $POD_NAME already formatted. Skipping..." fi "###, - get_service_state_command = Self::get_service_state_command(hdfs)?, + get_service_state_command = Self::get_namenode_service_state_command(), hadoop_home = Self::HADOOP_HOME, pod_names = namenode_podrefs .iter() @@ -588,7 +588,7 @@ impl ContainerConfig { sleep 5 done "###, - get_service_state_command = Self::get_service_state_command(hdfs)?, + get_service_state_command = Self::get_namenode_service_state_command(), pod_names = namenode_podrefs .iter() .map(|pod_ref| pod_ref.pod_name.as_ref()) @@ -640,23 +640,12 @@ impl ContainerConfig { )) } - fn get_service_state_command(hdfs: &HdfsCluster) -> Result { - Ok(if hdfs.has_kerberos_enabled() { - formatdoc!( - r###" - PRINCIPAL=$(echo "nn/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{KERBEROS_REALM}}") - SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -D dfs.namenode.kerberos.principal=$PRINCIPAL -getServiceState $namenode_id | tail -n1 || true)"###, - hadoop_home = Self::HADOOP_HOME, - hdfs_name = hdfs.name_any(), - hdfs_namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?, - ) - } else { - formatdoc!( - r###" + fn get_namenode_service_state_command() -> String { + formatdoc!( + r###" SERVICE_STATE=$({hadoop_home}/bin/hdfs haadmin -getServiceState $namenode_id | tail -n1 || true)"###, - hadoop_home = Self::HADOOP_HOME - ) - }) + hadoop_home = Self::HADOOP_HOME, + ) } /// Returns the container env variables. 
From 277c5503ffae6962acd3e55020dff9b0edc8194a Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 12 Jun 2023 09:23:47 +0200 Subject: [PATCH 097/101] Move stacktrace into dedicated file --- rust/operator/src/kerberos.rs | 83 +------------------ .../src/kerberos_hdfs_3.2_stacktrace.txt | 75 +++++++++++++++++ 2 files changed, 79 insertions(+), 79 deletions(-) create mode 100644 rust/operator/src/kerberos_hdfs_3.2_stacktrace.txt diff --git a/rust/operator/src/kerberos.rs b/rust/operator/src/kerberos.rs index d8e83349..fbf11bc2 100644 --- a/rust/operator/src/kerberos.rs +++ b/rust/operator/src/kerberos.rs @@ -10,6 +10,10 @@ use crate::{ }; pub fn check_if_supported(resolved_product_image: &ResolvedProductImage) -> Result<(), Error> { + // We only support Kerberos for HDFS >= 3.3.x + // With HDFS 3.2.2 we got weird errors, which *might* be caused by DNS lookup issues + // The Stacktrace is documented in rust/operator/src/kerberos_hdfs_3.2_stacktrace.txt + if resolved_product_image.product_version.starts_with("3.2.") { Err(Error::KerberosNotSupported {}) } else { @@ -127,82 +131,3 @@ impl CoreSiteConfigBuilder { self } } - -// IMPORTANT: We only support Kerberos for HDFS >= 3.3.x -// With HDFS 3.2.2 we got weird errors, which *might* be caused by DNS lookup issues -// -// 2023-05-31 12:34:18,319 ERROR namenode.EditLogInputStream (EditLogFileInputStream.java:nextOpImpl(220)) - caught exception initializing https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true -// java.io.IOException: org.apache.hadoop.security.authentication.client.AuthenticationException: Error while authenticating with endpoint: 
https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true -// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:482) -// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:474) -// at java.base/java.security.AccessController.doPrivileged(Native Method) -// at java.base/javax.security.auth.Subject.doAs(Subject.java:423) -// at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) -// at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:535) -// at org.apache.hadoop.security.SecurityUtil.doAsCurrentUser(SecurityUtil.java:529) -// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog.getInputStream(EditLogFileInputStream.java:473) -// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.init(EditLogFileInputStream.java:157) -// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.nextOpImpl(EditLogFileInputStream.java:218) -// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.nextOp(EditLogFileInputStream.java:276) -// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) -// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151) -// at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:190) -// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) -// at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151) -// at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:190) -// at 
org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) -// at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadEditRecords(FSEditLogLoader.java:243) -// at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadFSEdits(FSEditLogLoader.java:182) -// at org.apache.hadoop.hdfs.server.namenode.FSImage.loadEdits(FSImage.java:914) -// at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:761) -// at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:338) -// at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:1135) -// at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:750) -// at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:658) -// at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:734) -// at org.apache.hadoop.hdfs.server.namenode.NameNode.(NameNode.java:977) -// at org.apache.hadoop.hdfs.server.namenode.NameNode.(NameNode.java:950) -// at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1716) -// at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1783) -// Caused by: org.apache.hadoop.security.authentication.client.AuthenticationException: Error while authenticating with endpoint: https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true -// at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) -// at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) -// at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) -// at 
java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490) -// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.wrapExceptionWithMessage(KerberosAuthenticator.java:232) -// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.authenticate(KerberosAuthenticator.java:219) -// at org.apache.hadoop.security.authentication.client.AuthenticatedURL.openConnection(AuthenticatedURL.java:348) -// at org.apache.hadoop.hdfs.web.URLConnectionFactory.openConnection(URLConnectionFactory.java:186) -// at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:480) -// ... 30 more -// Caused by: org.apache.hadoop.security.authentication.client.AuthenticationException: GSSException: No valid credentials provided (Mechanism level: Server not found in Kerberos database (7) - LOOKING_UP_SERVER) -// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.doSpnegoSequence(KerberosAuthenticator.java:360) -// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.authenticate(KerberosAuthenticator.java:204) -// ... 
33 more -// Caused by: GSSException: No valid credentials provided (Mechanism level: Server not found in Kerberos database (7) - LOOKING_UP_SERVER) -// at java.security.jgss/sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:773) -// at java.security.jgss/sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:266) -// at java.security.jgss/sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:196) -// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator$1.run(KerberosAuthenticator.java:336) -// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator$1.run(KerberosAuthenticator.java:310) -// at java.base/java.security.AccessController.doPrivileged(Native Method) -// at java.base/javax.security.auth.Subject.doAs(Subject.java:423) -// at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.doSpnegoSequence(KerberosAuthenticator.java:310) -// ... 34 more -// Caused by: KrbException: Server not found in Kerberos database (7) - LOOKING_UP_SERVER -// at java.security.jgss/sun.security.krb5.KrbTgsRep.(KrbTgsRep.java:73) -// at java.security.jgss/sun.security.krb5.KrbTgsReq.getReply(KrbTgsReq.java:226) -// at java.security.jgss/sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:237) -// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCredsSingle(CredentialsUtil.java:477) -// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:340) -// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:314) -// at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:169) -// at java.security.jgss/sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:490) -// at java.security.jgss/sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:697) -// ... 
41 more -// Caused by: KrbException: Identifier doesn't match expected value (906) -// at java.security.jgss/sun.security.krb5.internal.KDCRep.init(KDCRep.java:140) -// at java.security.jgss/sun.security.krb5.internal.TGSRep.init(TGSRep.java:65) -// at java.security.jgss/sun.security.krb5.internal.TGSRep.(TGSRep.java:60) -// at java.security.jgss/sun.security.krb5.KrbTgsRep.(KrbTgsRep.java:55) -// ... 49 more diff --git a/rust/operator/src/kerberos_hdfs_3.2_stacktrace.txt b/rust/operator/src/kerberos_hdfs_3.2_stacktrace.txt new file mode 100644 index 00000000..0d583dac --- /dev/null +++ b/rust/operator/src/kerberos_hdfs_3.2_stacktrace.txt @@ -0,0 +1,75 @@ +2023-05-31 12:34:18,319 ERROR namenode.EditLogInputStream (EditLogFileInputStream.java:nextOpImpl(220)) - caught exception initializing https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true +java.io.IOException: org.apache.hadoop.security.authentication.client.AuthenticationException: Error while authenticating with endpoint: https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true + at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:482) + at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:474) + at java.base/java.security.AccessController.doPrivileged(Native Method) + at java.base/javax.security.auth.Subject.doAs(Subject.java:423) + at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) + at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:535) + at 
org.apache.hadoop.security.SecurityUtil.doAsCurrentUser(SecurityUtil.java:529) + at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog.getInputStream(EditLogFileInputStream.java:473) + at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.init(EditLogFileInputStream.java:157) + at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.nextOpImpl(EditLogFileInputStream.java:218) + at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.nextOp(EditLogFileInputStream.java:276) + at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) + at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151) + at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:190) + at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) + at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151) + at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:190) + at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85) + at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadEditRecords(FSEditLogLoader.java:243) + at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadFSEdits(FSEditLogLoader.java:182) + at org.apache.hadoop.hdfs.server.namenode.FSImage.loadEdits(FSImage.java:914) + at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:761) + at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:338) + at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:1135) + at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:750) + at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:658) + at 
org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:734) + at org.apache.hadoop.hdfs.server.namenode.NameNode.(NameNode.java:977) + at org.apache.hadoop.hdfs.server.namenode.NameNode.(NameNode.java:950) + at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1716) + at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1783) +Caused by: org.apache.hadoop.security.authentication.client.AuthenticationException: Error while authenticating with endpoint: https://hdfs-journalnode-default-2.hdfs-journalnode-default.kuttl-test-nice-eft.svc.cluster.local:8481/getJournal?jid=hdfs&segmentTxId=1&storageInfo=-65%3A1740831343%3A1685535647411%3ACID-5bb822a0-549e-41ce-9997-ee657b6fc23f&inProgressOk=true + at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) + at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) + at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) + at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490) + at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.wrapExceptionWithMessage(KerberosAuthenticator.java:232) + at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.authenticate(KerberosAuthenticator.java:219) + at org.apache.hadoop.security.authentication.client.AuthenticatedURL.openConnection(AuthenticatedURL.java:348) + at org.apache.hadoop.hdfs.web.URLConnectionFactory.openConnection(URLConnectionFactory.java:186) + at org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream$URLLog$1.run(EditLogFileInputStream.java:480) + ... 
30 more +Caused by: org.apache.hadoop.security.authentication.client.AuthenticationException: GSSException: No valid credentials provided (Mechanism level: Server not found in Kerberos database (7) - LOOKING_UP_SERVER) + at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.doSpnegoSequence(KerberosAuthenticator.java:360) + at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.authenticate(KerberosAuthenticator.java:204) + ... 33 more +Caused by: GSSException: No valid credentials provided (Mechanism level: Server not found in Kerberos database (7) - LOOKING_UP_SERVER) + at java.security.jgss/sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:773) + at java.security.jgss/sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:266) + at java.security.jgss/sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:196) + at org.apache.hadoop.security.authentication.client.KerberosAuthenticator$1.run(KerberosAuthenticator.java:336) + at org.apache.hadoop.security.authentication.client.KerberosAuthenticator$1.run(KerberosAuthenticator.java:310) + at java.base/java.security.AccessController.doPrivileged(Native Method) + at java.base/javax.security.auth.Subject.doAs(Subject.java:423) + at org.apache.hadoop.security.authentication.client.KerberosAuthenticator.doSpnegoSequence(KerberosAuthenticator.java:310) + ... 
34 more +Caused by: KrbException: Server not found in Kerberos database (7) - LOOKING_UP_SERVER + at java.security.jgss/sun.security.krb5.KrbTgsRep.(KrbTgsRep.java:73) + at java.security.jgss/sun.security.krb5.KrbTgsReq.getReply(KrbTgsReq.java:226) + at java.security.jgss/sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:237) + at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCredsSingle(CredentialsUtil.java:477) + at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:340) + at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:314) + at java.security.jgss/sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:169) + at java.security.jgss/sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:490) + at java.security.jgss/sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:697) + ... 41 more +Caused by: KrbException: Identifier doesn't match expected value (906) + at java.security.jgss/sun.security.krb5.internal.KDCRep.init(KDCRep.java:140) + at java.security.jgss/sun.security.krb5.internal.TGSRep.init(TGSRep.java:65) + at java.security.jgss/sun.security.krb5.internal.TGSRep.(TGSRep.java:60) + at java.security.jgss/sun.security.krb5.KrbTgsRep.(KrbTgsRep.java:55) + ... 49 more From b38c7c7b1e50058e41c118d968735a76aa16e791 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 12 Jun 2023 14:54:57 +0200 Subject: [PATCH 098/101] Only kinit in init(!) containers that need a ticket --- rust/operator/src/container.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 5e6b5d0a..82f5402d 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -440,8 +440,7 @@ impl ContainerConfig { // We can't influence the order of the init containers.
// Some init containers - such as format-namenodes - need the tls certs or kerberos tickets, so let's wait for them to be properly set up - if hdfs.authentication_config().is_some() { - args.push_str(&Self::get_kerberos_ticket(hdfs, role)?); + if hdfs.has_https_enabled() { args.push_str(&Self::wait_for_trust_and_keystore_command()); } @@ -484,6 +483,8 @@ impl ContainerConfig { args.push_str(&formatdoc!( r###" echo "Start formatting namenode $POD_NAME. Checking for active namenodes:" + {get_kerberos_ticket} + for namenode_id in {pod_names} do echo -n "Checking pod $namenode_id... " @@ -512,6 +513,7 @@ impl ContainerConfig { echo "Pod $POD_NAME already formatted. Skipping..." fi "###, + get_kerberos_ticket = Self::get_kerberos_ticket(hdfs, role)?, get_service_state_command = Self::get_namenode_service_state_command(), hadoop_home = Self::HADOOP_HOME, pod_names = namenode_podrefs @@ -562,6 +564,8 @@ impl ContainerConfig { args.push_str(&formatdoc!( r###" echo "Waiting for namenodes to get ready:" + {get_kerberos_ticket} + n=0 while [ ${{n}} -lt 12 ]; do @@ -588,6 +592,7 @@ impl ContainerConfig { sleep 5 done "###, + get_kerberos_ticket = Self::get_kerberos_ticket(hdfs, role)?, get_service_state_command = Self::get_namenode_service_state_command(), pod_names = namenode_podrefs .iter() @@ -696,6 +701,11 @@ impl ContainerConfig { value: Some("/stackable/kerberos/krb5.conf".to_string()), ..EnvVar::default() }); + env.push(EnvVar { + name: "KRB5_CLIENT_KTNAME".to_string(), + value: Some("/stackable/kerberos/keytab".to_string()), + ..EnvVar::default() + }); } // Overrides need to come last From ae2fec4992fa05176288d5bdbaedb804e3176e3f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 12 Jun 2023 15:11:11 +0200 Subject: [PATCH 099/101] fix: Set KERBEROS_REALM in every container, not only the ones with kinit --- rust/operator/src/container.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/rust/operator/src/container.rs 
b/rust/operator/src/container.rs index 82f5402d..11b76a2e 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -443,6 +443,9 @@ impl ContainerConfig { if hdfs.has_https_enabled() { args.push_str(&Self::wait_for_trust_and_keystore_command()); } + if hdfs.has_kerberos_enabled() { + args.push_str(&Self::export_kerberos_real_env_var_command()); + } match self { ContainerConfig::Hdfs { role, .. } => { @@ -627,6 +630,7 @@ impl ContainerConfig { } /// Command to `kinit` a ticket using the principal created for the specified hdfs role + /// Needs the KERBEROS_REALM env var, which will be written with `export_kerberos_real_env_var_command` /// Needs the POD_NAME env var to be present, which will be provided by the PodSpec fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole) -> Result { let principal = format!( @@ -637,11 +641,9 @@ impl ContainerConfig { ); Ok(formatdoc!( r###" - {export_kerberos_real_env_var_command} echo "Getting ticket for {principal}" from /stackable/kerberos/keytab kinit "{principal}" -kt /stackable/kerberos/keytab "###, - export_kerberos_real_env_var_command = Self::export_kerberos_real_env_var_command(), )) } From 3368d1f58a30600e297f7d18afeb5a74fe431ab2 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 12 Jun 2023 16:19:26 +0200 Subject: [PATCH 100/101] fix: Only kinit when Kerberos is enabled ;) --- rust/operator/src/container.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/rust/operator/src/container.rs b/rust/operator/src/container.rs index 11b76a2e..9174dd49 100644 --- a/rust/operator/src/container.rs +++ b/rust/operator/src/container.rs @@ -483,11 +483,12 @@ impl ContainerConfig { // $NAMENODE_DIR/current/VERSION. Then we don't do anything. // If there is no active namenode, the current pod is not formatted we format as // active namenode. Otherwise as standby node. 
+ if hdfs.has_kerberos_enabled() { + args.push_str(&Self::get_kerberos_ticket(hdfs, role)?); + } args.push_str(&formatdoc!( r###" echo "Start formatting namenode $POD_NAME. Checking for active namenodes:" - {get_kerberos_ticket} - for namenode_id in {pod_names} do echo -n "Checking pod $namenode_id... " @@ -516,7 +517,6 @@ impl ContainerConfig { echo "Pod $POD_NAME already formatted. Skipping..." fi "###, - get_kerberos_ticket = Self::get_kerberos_ticket(hdfs, role)?, get_service_state_command = Self::get_namenode_service_state_command(), hadoop_home = Self::HADOOP_HOME, pod_names = namenode_podrefs @@ -564,11 +564,12 @@ impl ContainerConfig { container_config, )); } + if hdfs.has_kerberos_enabled() { + args.push_str(&Self::get_kerberos_ticket(hdfs, role)?); + } args.push_str(&formatdoc!( r###" echo "Waiting for namenodes to get ready:" - {get_kerberos_ticket} - n=0 while [ ${{n}} -lt 12 ]; do @@ -595,7 +596,6 @@ impl ContainerConfig { sleep 5 done "###, - get_kerberos_ticket = Self::get_kerberos_ticket(hdfs, role)?, get_service_state_command = Self::get_namenode_service_state_command(), pod_names = namenode_podrefs .iter() From 9291fa364fa2f73b970a5938bdfd65613355e8b3 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 13 Jun 2023 15:00:01 +0200 Subject: [PATCH 101/101] changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca394090..4d837202 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ All notable changes to this project will be documented in this file. ### Added +- Add support for enabling secure mode with Kerberos ([#334]). - Generate OLM bundle for Release 23.4.0 ([#350]). - Missing CRD defaults for `status.conditions` field ([#354]). @@ -16,6 +17,7 @@ All notable changes to this project will be documented in this file. - Use testing-tools 0.2.0 ([#351]) - Run as root group ([#353]). 
+[#334]: https://github.com/stackabletech/hdfs-operator/pull/334 [#349]: https://github.com/stackabletech/hdfs-operator/pull/349 [#350]: https://github.com/stackabletech/hdfs-operator/pull/350 [#351]: https://github.com/stackabletech/hdfs-operator/pull/351