From b3bcd9e27de003db4825f23af6a9d7836a4b279e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luis=20P=C3=A9rez?=
Date: Tue, 1 Aug 2023 15:23:37 -0400
Subject: [PATCH] Add support for eks cluster not matching --cluster spark arg
 (#3636)

We may make a different cluster for Spark in dev, but we don't really want to
deal with a soaconfigs migration in case we decide to roll back.

This PR transparently sets the right EKS cluster params based on whether or
not EKS usage is toggled on or off.
---
 paasta_tools/cli/cmds/spark_run.py | 39 ++++++++++++++++++++++++++----
 paasta_tools/utils.py              |  4 +++
 2 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/paasta_tools/cli/cmds/spark_run.py b/paasta_tools/cli/cmds/spark_run.py
index 9638cd0467..c7f53c0933 100644
--- a/paasta_tools/cli/cmds/spark_run.py
+++ b/paasta_tools/cli/cmds/spark_run.py
@@ -1101,8 +1101,28 @@ def _validate_pool(args, system_paasta_config):
 
 
 def _get_k8s_url_for_cluster(cluster: str) -> Optional[str]:
+    """
+    Annoyingly, there are two layers of aliases: one to figure out what
+    k8s server url to use (this one) and another to figure out what
+    soaconfigs filename to use ;_;
+
+    This exists so that we can map something like `--cluster pnw-devc`
+    into spark-pnw-devc's k8s apiserver url without needing to update
+    any soaconfigs/alter folk's muscle memory.
+
+    Ideally we can get rid of this entirely once spark-run reads soaconfigs
+    in a manner more closely aligned to what we do with other paasta workloads
+    (i.e., have it automatically determine where to run based on soaconfigs
+    filenames - and not rely on explicit config)
+    """
+    realized_cluster = (
+        load_system_paasta_config().get_eks_cluster_aliases().get(cluster, cluster)
+    )
     return (
-        load_system_paasta_config().get_kube_clusters().get(cluster, {}).get("server")
+        load_system_paasta_config()
+        .get_kube_clusters()
+        .get(realized_cluster, {})
+        .get("server")
     )
 
 
@@ -1137,14 +1157,16 @@ def paasta_spark_run(args):
     if not _validate_pool(args, system_paasta_config):
         return 1
 
+    # annoyingly, there are two layers of aliases: one for the soaconfigs to read from
+    # (that's this alias lookup) - and then another layer later when figuring out what
+    # k8s server url to use ;_;
+    cluster = system_paasta_config.get_cluster_aliases().get(args.cluster, args.cluster)
     # Use the default spark:client instance configs if not provided
     try:
         instance_config = get_instance_config(
             service=args.service,
             instance=args.instance,
-            cluster=system_paasta_config.get_cluster_aliases().get(
-                args.cluster, args.cluster
-            ),
+            cluster=cluster,
             load_deployments=args.build is False and args.image is None,
             soa_dir=args.yelpsoa_config_root,
         )
@@ -1231,12 +1253,19 @@ def paasta_spark_run(args):
 
     use_eks = decide_final_eks_toggle_state(args.use_eks_override)
     k8s_server_address = _get_k8s_url_for_cluster(args.cluster) if use_eks else None
+    paasta_cluster = (
+        args.cluster
+        if not use_eks
+        else load_system_paasta_config()
+        .get_eks_cluster_aliases()
+        .get(args.cluster, args.cluster)
+    )
     spark_conf = get_spark_conf(
         cluster_manager=args.cluster_manager,
         spark_app_base_name=app_base_name,
         docker_img=docker_image_digest,
         user_spark_opts=user_spark_opts,
-        paasta_cluster=args.cluster,
+        paasta_cluster=paasta_cluster,
         paasta_pool=args.pool,
         paasta_service=args.service,
         paasta_instance=paasta_instance,
diff --git a/paasta_tools/utils.py b/paasta_tools/utils.py
index 29cc57e59b..f080ed0fad 100644
--- a/paasta_tools/utils.py
+++ b/paasta_tools/utils.py
@@ -2017,6 +2017,7 @@ class SystemPaastaConfigDict(TypedDict, total=False):
     spark_kubeconfig: str
     kube_clusters: Dict
     spark_use_eks_default: bool
+    eks_cluster_aliases: Dict[str, str]
 
 
 def load_system_paasta_config(
@@ -2737,6 +2738,9 @@ def get_skip_cpu_burst_validation_services(self) -> List[str]:
     def get_cluster_aliases(self) -> Dict[str, str]:
         return self.config_dict.get("cluster_aliases", {})
 
+    def get_eks_cluster_aliases(self) -> Dict[str, str]:
+        return self.config_dict.get("eks_cluster_aliases", {})
+
     def get_cluster_pools(self) -> Dict[str, List[str]]:
         return self.config_dict.get("allowed_pools", {})
 
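Note: the net effect is that wiring a dev-only EKS Spark cluster needs only a system
PaaSTA config entry; no soaconfigs migration or change to the --cluster people already
type. Below is a minimal sketch of the lookup this patch introduces, using a hypothetical
"pnw-devc" -> "spark-pnw-devc" alias (taken from the docstring's example) and an
illustrative apiserver URL; neither value ships with this patch, and the dicts stand in
for what get_eks_cluster_aliases() and get_kube_clusters() would return from config.

    # Hypothetical system PaaSTA config values (illustrative only, not part of this patch)
    eks_cluster_aliases = {"pnw-devc": "spark-pnw-devc"}
    kube_clusters = {"spark-pnw-devc": {"server": "https://spark-pnw-devc.example.invalid"}}

    # Mirrors what _get_k8s_url_for_cluster("pnw-devc") resolves when EKS usage is toggled on:
    realized_cluster = eks_cluster_aliases.get("pnw-devc", "pnw-devc")  # -> "spark-pnw-devc"
    server = kube_clusters.get(realized_cluster, {}).get("server")      # -> apiserver URL above
    print(realized_cluster, server)

With EKS toggled off, the alias lookup is skipped entirely and paasta_cluster stays as
args.cluster, which is what makes the change safe to roll back.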