diff --git a/config/prod.yaml b/config/prod.yaml
index 441938caa..b7d03cd43 100644
--- a/config/prod.yaml
+++ b/config/prod.yaml
@@ -234,10 +234,16 @@ federationRedirect:
       weight: 70
       health: https://2i2c.mybinder.org/health
       versions: https://2i2c.mybinder.org/versions
+    hetzner-2i2c-bare:
+      prime: false
+      url: https://2i2c-bare.mybinder.org
+      weight: 5
+      health: https://2i2c-bare.mybinder.org/health
+      versions: https://2i2c-bare.mybinder.org/versions
     gesis:
       prime: false
       url: https://notebooks.gesis.org/binder
-      weight: 30
+      weight: 25
       health: https://notebooks.gesis.org/binder/health
       versions: https://notebooks.gesis.org/binder/versions
     ovh2:
diff --git a/deploy.py b/deploy.py
index 5bbf46bf4..833f57c9f 100755
--- a/deploy.py
+++ b/deploy.py
@@ -31,7 +31,7 @@
 }
 
 # Projects using raw KUBECONFIG files
-KUBECONFIG_CLUSTERS = {"ovh2", "hetzner-2i2c"}
+KUBECONFIG_CLUSTERS = {"ovh2", "hetzner-2i2c", "hetzner-2i2c-bare"}
 
 # Mapping of config name to cluster name for AWS EKS deployments
 AWS_DEPLOYMENTS = {"curvenote": "binderhub"}
@@ -437,7 +437,7 @@ def main():
     argparser.add_argument(
         "release",
         help="Release to deploy",
-        choices=["staging", "prod", "ovh", "ovh2", "curvenote", "hetzner-2i2c"],
+        choices=list(KUBECONFIG_CLUSTERS) + list(GCP_PROJECTS.keys()) + list(AWS_DEPLOYMENTS.keys()) + list(AZURE_RGs.keys())
     )
     argparser.add_argument(
         "--name",
diff --git a/docs/source/deployment/k3s.md b/docs/source/deployment/k3s.md
index b0a5f3015..31d3d51f7 100644
--- a/docs/source/deployment/k3s.md
+++ b/docs/source/deployment/k3s.md
@@ -55,7 +55,30 @@ do not need traefik.
 
 ## Extracting authentication information via a `KUBECONFIG` file
 
-Follow https://docs.k3s.io/cluster-access#accessing-the-cluster-from-outside-with-kubectl
+Next, we extract the `KUBECONFIG` file that the `mybinder.org-deploy` repo and team members can use to access
+this cluster externally, following the [upstream documentation](https://docs.k3s.io/cluster-access#accessing-the-cluster-from-outside-with-kubectl).
+The short version is:
+
+1. Copy `/etc/rancher/k3s/k3s.yaml` into the `secrets/` directory in this repo:
+
+   ```bash
+   scp root@<server-ip>:/etc/rancher/k3s/k3s.yaml secrets/<cluster-name>-kubeconfig.yml
+   ```
+
+   Pick a `<cluster-name>` that describes what cluster this is - we will use it consistently for other files too.
+
+   Note the `.yml` extension here - everything else is `.yaml`!
+
+2. Change the `server` field under `clusters.0.cluster` from `https://127.0.0.1:6443` to `https://<server-ip>:6443`.
+
+## Create a new ssh key for mybinder team members
+
+For easy access to this node for mybinder team members, we create and check in an ssh key as
+a secret.
+
+1. Run `ssh-keygen -t ed25519 -f secrets/<cluster-name>.key` to create the ssh key. Leave the passphrase blank.
+2. Set appropriate permissions with `chmod 0400 secrets/<cluster-name>.key`.
+3. Copy `secrets/<cluster-name>.key.pub` (**NOTE THE `.pub`**) and paste it as a **new line** in `/root/.ssh/authorized_keys` on your server. Do not replace any existing lines in this file.
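+
+To sanity-check that both the kubeconfig and the ssh key work before moving on, you can try something like the following (a rough sketch - `<server-ip>` and `<cluster-name>` are the same placeholders used above):
+
+```bash
+# Talk to the k3s API server from outside the node using the extracted kubeconfig
+KUBECONFIG=secrets/<cluster-name>-kubeconfig.yml kubectl get nodes
+
+# Log in to the node with the newly created key
+ssh -i secrets/<cluster-name>.key root@<server-ip> uptime
+```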
 
 ## Setup DNS entries
 
@@ -70,16 +93,30 @@ Add the following entries:
 
 Give this a few minutes because it may take a while to propagate.
 
-## Make a config copy for this new member
+## Make a config + secret copy for this new member
 
-TODO
+Now we need to create a config file and a secret config file for this new member. We can start by copying an existing one!
 
-## Make a secret config for this new member
+Let's copy `config/hetzner-2i2c.yaml` to `config/<cluster-name>.yaml` and make changes!
 
-TODO
+1. Find all hostnames, and change them to point to the DNS entries you made in the previous step.
+2. Change `ingress-nginx.controller.service.loadBalancerIP` to the external public IP of your cluster.
+3. Adjust the following parameters based on the size of the server:
+   a. `binderhub.config.LaunchQuota.total_quota`
+   b. `dind.resources`
+   c. `imageCleaner`
+4. TODO: Something about the registry.
+
+We also need a secrets file, so let's copy `secrets/config/hetzner-2i2c.yaml` to `secrets/config/<cluster-name>.yaml` and make changes!
+
+1. Find all hostnames, and change them to point to the DNS entries you made in the previous step.
+2. TODO: Something about the registry.
 
 ## Deploy binder!
 
+Let's tell the `deploy.py` script that we have a new cluster by adding `<cluster-name>` to the `KUBECONFIG_CLUSTERS` variable in `deploy.py`.
+
+Once that is done, you can do a deployment with `./deploy.py <cluster-name>`! If it errors out, tweak and debug until it works.
 
 ## Test and validate
 
 ## Add to the redirector
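+
+Once the new member is listed in `config/prod.yaml` (the `hetzner-2i2c-bare` entry at the top of this diff shows the shape of an entry), it is worth checking that the `health` and `versions` endpoints you configured respond. A rough sketch - the hostnames below are the ones used for `hetzner-2i2c-bare` and would differ for another member:
+
+```bash
+# These are the health/versions URLs listed for the new member in config/prod.yaml;
+# the redirector relies on them, so they should respond before traffic is sent here
+curl -fsS https://2i2c-bare.mybinder.org/health
+curl -fsS https://2i2c-bare.mybinder.org/versions
+```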