version: '3'
# NOTE: Task doesn't allow overriding environment variables. When an
# environment variable is referenced in this taskfile, we expect it to be
# defined by the environment where Task is invoked, by an `.env` file, or by a
# task's `env:` attribute.
dotenv: [".env", '{{.ENV}}/.env.']
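
# Illustrative only: credentials and other expected environment variables can
# live in a local `.env` file, e.g. (hypothetical values):
#
#   GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
#   AZURE_CLIENT_ID=<service-principal-app-id>
#   AZURE_CLIENT_SECRET=<service-principal-password>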
vars:
  BINDIR: .local/bin

  # Configuration Defaults
  NAMESPACE: rn
  RELEASE: rp
  KIND_CLUSTERNAME: rp-helm
  CLUSTER_NAME: '{{.CLUSTER_NAME | default "capi"}}'
  CLUSTER_NAMESPACE: '{{.CLUSTER_NAMESPACE | default "default"}}'

  # Overridable task vars
  HELM_OPTIONS: ""
  KIND_FLAGS: "--config .github/kind.yaml"
  SRC_DIR:
    sh: realpath {{default "." .SRC_DIR}}
includes:
  tool:
    taskfile: tasks/ToolTasks.yaml
    vars:
      BINDIR: "{{.BINDIR}}"

# if a task is referenced multiple times, only run it once
run: once
tasks:
  lint:
    cmds:
      - task: lint:chart:redpanda

  lint:chart:redpanda:
    cmds:
      - ct lint --config .github/ct-redpanda.yaml --github-groups
      - .github/check-ci-files.sh charts/redpanda/ci
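
  # The lint tasks assume the chart-testing CLI (`ct`) is available on PATH and
  # reuse the configuration in .github/ct-redpanda.yaml; a typical local
  # invocation is simply `task lint`.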
  create-test-rack-awareness:
    cmds:
      - .github/annotate_kind_nodes.sh {{.KIND_CLUSTERNAME}}

  create-test-metallb-resources:
    cmds:
      - kubectl -n metallb-system apply -f .github/metallb-config.yaml

  create-test-tls-template:
    cmds:
      - .github/create_tls.sh "random-domain"

  create-test-sasl-secret-template:
    cmds:
      - .github/create-sasl-secret.sh "some-users"

  create-test-mv-files:
    cmds:
      - mv external-tls-secret.yaml charts/redpanda/templates/
      - cp .github/external-service.yaml charts/redpanda/templates/
      - mv some-users-updated.yaml charts/redpanda/templates/

  setup-test-files:
    cmds:
      - task: create-test-rack-awareness
      - task: create-test-tls-template
      - task: create-test-sasl-secret-template
      - task: create-test-mv-files
      - task: create-test-metallb-resources
  up:
    cmds:
      - task: kind-create
      - task: setup-test-files
      - task: install-redpanda-chart
  add-jetstack-repo:
    cmds:
      - helm repo add jetstack https://charts.jetstack.io
      - helm repo update
    status:
      - helm search repo -r '\vjetstack/cert-manager\v' | grep cert-manager

  add-metallb-repo:
    cmds:
      - helm repo add metallb https://metallb.github.io/metallb
      - helm repo update
    status:
      - helm search repo -r '\vmetallb/metallb\v' | grep metallb

  add-prometheus-community-repo:
    cmds:
      - helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
      - helm repo update
    status:
      - helm search repo -r '\vprometheus-community/kube-prometheus-stack\v' | grep kube-prometheus-stack
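
  # For the three repo tasks above, the `status:` commands make them idempotent:
  # Task treats a task as up to date (and skips its cmds) when every status
  # command exits 0, so the repos are only added and refreshed when missing.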
  install-metallb:
    deps:
      - add-metallb-repo
    cmds:
      - helm install metallb metallb/metallb -n metallb-system --create-namespace --version 0.13.10 --wait --wait-for-jobs

  install-cert-manager:
    deps:
      - add-jetstack-repo
    cmds:
      - helm install cert-manager jetstack/cert-manager --set installCRDs=true --namespace cert-manager --create-namespace

  install-kube-prometheus-stack:
    deps:
      - add-prometheus-community-repo
    cmds:
      - helm install prometheus prometheus-community/kube-prometheus-stack --namespace prometheus --create-namespace --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false --set nodeExporter.enabled=false --set grafana.enabled=false --set kubeStateMetrics.enabled=false --set alertmanager.enabled=false --wait --wait-for-jobs
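
  # Note: serviceMonitorSelectorNilUsesHelmValues=false makes Prometheus select
  # ServiceMonitors from all releases rather than only those labelled for this
  # release, presumably so monitors created by the Redpanda chart are scraped.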
  aws-check-login:
    internal: true
    cmds:
      - aws sts get-caller-identity > /dev/null

  kind-create:
    cmds:
      - kind create cluster --name {{.KIND_CLUSTERNAME}} {{.KIND_FLAGS}}
      - task: install-cert-manager
      - task: install-metallb
      - task: install-kube-prometheus-stack
    status:
      - "kind get clusters | grep {{.KIND_CLUSTERNAME}}"
  kind-delete:
    cmds:
      - kind delete cluster --name {{.KIND_CLUSTERNAME}}

  install-redpanda-chart:
    aliases:
      - upgrade-redpanda-chart
    cmds:
      - helm upgrade --install {{ .RELEASE }} ./charts/redpanda --namespace {{ .NAMESPACE }} --create-namespace --wait --debug {{ .HELM_OPTIONS }}
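
  # Illustrative only: extra chart values can be passed to install-redpanda-chart
  # through HELM_OPTIONS at invocation time, e.g.
  #
  #   task install-redpanda-chart HELM_OPTIONS="--set image.tag=v23.2.1"
  #
  # (the tag shown is hypothetical).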
  uninstall-redpanda-chart:
    cmds:
      - helm uninstall {{ .RELEASE }} -n {{ .NAMESPACE }} --wait || true
      - kubectl -n {{ .NAMESPACE }} delete pods --all --grace-period=0 --force --wait=false || true
      - kubectl -n {{ .NAMESPACE }} delete pvc --all --force --grace-period=0 --wait=false || true
      - kubectl delete ns {{ .NAMESPACE }}
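
  # The forced pod/PVC deletion above (--force --grace-period=0) skips graceful
  # shutdown, which is fine for a disposable test namespace but should not be
  # pointed at a production cluster.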
  minikube-start:
    cmds:
      - minikube start --nodes=4
      - kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.23/deploy/local-path-storage.yaml
      - ./scripts/change-default-sc.sh
      - task: install-cert-manager

  minikube-delete:
    cmds:
      - minikube delete
  capi-bootstrap-aws:
    deps:
      - tool:clusterctl
    env:
      EXP_MACHINE_POOL: true
      CAPA_EKS_ADD_ROLES: "{{.CAPA_EKS_ADD_ROLES}}"
      CAPA_EKS_IAM: "{{.CAPA_EKS_IAM}}"
      AWS_B64ENCODED_CREDENTIALS: "{{.AWS_B64ENCODED_CREDENTIALS}}"
    vars:
      CAPI_INFRASTRUCTURE: '{{ default "unknown" .CAPI_INFRASTRUCTURE }}'
    cmds:
      - task: kind-create
        vars:
          KIND_CLUSTERNAME: bootstrap
          KIND_FLAGS: ""
      - "{{.BINDIR}}/clusterctl init -i {{ .CAPI_INFRASTRUCTURE }} -v7 --wait-providers"
  capi-bootstrap-gke:
    deps:
      - tool:clusterctl
      - tool:auth-gcp
    env:
      GCP_B64ENCODED_CREDENTIALS: "{{.GCP_B64ENCODED_CREDENTIALS}}"
      EXP_CAPG_GKE: true
      EXP_MACHINE_POOL: true
    vars:
      CAPI_INFRASTRUCTURE: gcp
      GCP_PROVIDER_VERSION: v1.3.0
    cmds:
      - task: kind-create
        vars:
          KIND_CLUSTERNAME: bootstrap
          KIND_FLAGS: ""
      - "{{.BINDIR}}/clusterctl init -i {{ .CAPI_INFRASTRUCTURE }}:{{ .GCP_PROVIDER_VERSION }} -v7 --wait-providers"
  capi-bootstrap-aks:
    deps:
      - tool:clusterctl
    env:
      EXP_MACHINE_POOL: true
    vars:
      CAPI_INFRASTRUCTURE: '{{ default "unknown" .CAPI_INFRASTRUCTURE }}'
      AKS_PROVIDER_VERSION: v1.10.0
    cmds:
      - echo ~~~ Create kind cluster
      - task: kind-create
        vars:
          KIND_CLUSTERNAME: bootstrap
          KIND_FLAGS: ""
      - echo ~~~ Initialize cluster API core controllers
      - "{{.BINDIR}}/clusterctl init -i {{ .CAPI_INFRASTRUCTURE }}:{{ .AKS_PROVIDER_VERSION }} -v7 --wait-providers"
  capi-create-eks:
    deps:
      - aws-check-login
      - tool:clusterawsadm
    env:
      CAPA_EKS_IAM: true
      CAPA_EKS_ADD_ROLES: true
    cmds:
      - task: capi-bootstrap-aws
        vars:
          CAPI_INFRASTRUCTURE: aws
          CAPA_EKS_IAM: true
          CAPA_EKS_ADD_ROLES: true
          AWS_B64ENCODED_CREDENTIALS:
            sh: "{{.BINDIR}}/clusterawsadm bootstrap credentials encode-as-profile 2>/dev/null"
      - "{{.BINDIR}}/clusterawsadm bootstrap iam create-cloudformation-stack --config=.buildkite/capi/eks-bootstrap.yaml"
      - helm install -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} .buildkite/capi/eks-cluster --create-namespace --debug
      - kubectl wait --for=condition=ready cluster {{.CLUSTER_NAME}}-eks-cluster --timeout=40m
      - kubectl wait --for=condition=Ready machinepool {{.CLUSTER_NAME}}-eks-cluster-pool-0 --timeout=20m
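
  # Illustrative only: CLUSTER_NAME and CLUSTER_NAMESPACE default to "capi" and
  # "default" (see the vars at the top of this file) and can be overridden per
  # invocation, e.g.
  #
  #   task capi-create-eks CLUSTER_NAME=ci-eks CLUSTER_NAMESPACE=capi-test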
  capi-create-gke:
    env:
      # GCP_B64ENCODED_CREDENTIALS is produced by running the following:
      # export GCP_B64ENCODED_CREDENTIALS=$(base64 < "${GOOGLE_APPLICATION_CREDENTIALS}" | tr -d '\n')
      GCP_B64ENCODED_CREDENTIALS: "{{.GCP_B64ENCODED_CREDENTIALS}}"
      PROJECT_ID: "{{.PROJECT_ID}}"
    cmds:
      - task: capi-bootstrap-gke
        vars:
          CAPI_INFRASTRUCTURE: gcp
      - helm install -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} .buildkite/capi/gke-cluster --create-namespace --set projectID={{.PROJECT_ID}} --debug
      - kubectl wait --for=condition=ready cluster {{.CLUSTER_NAME}}-gke-cluster --timeout=40m
      - kubectl wait --for=condition=Ready machinepool {{.CLUSTER_NAME}}-gke-cluster-mp-0 --timeout=20m
  capi-create-aks:
    cmds:
      - task: capi-bootstrap-aks
        vars:
          CAPI_INFRASTRUCTURE: azure
      # Create a secret to include the password of the Service Principal identity created in Azure
      # This secret will be referenced by the AzureClusterIdentity used by the AzureCluster
      - echo ~~~ Install Azure cluster API controllers
      - kubectl create ns "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}}" || true
      - kubectl delete secret "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAME}}" --namespace "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}}" || true
      - kubectl create secret generic "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAME}}" --from-literal=clientSecret="{{.AZURE_CLIENT_SECRET}}" --namespace "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}}"
      - |
        helm install -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} .buildkite/capi/aks-cluster \
          --create-namespace \
          --set clientID={{.AZURE_CLIENT_ID}} \
          --set tenantID={{.AZURE_TENANT_ID}} \
          --set resourceGroup={{.TEST_RESOURCE_GROUP}} \
          --set subscriptionID={{.AZURE_SUBSCRIPTION_ID}} \
          --set workerReplicas=3 \
          --set clientSecret.name={{.AZURE_CLUSTER_IDENTITY_SECRET_NAME}} \
          --set clientSecret.namespace={{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}} \
          --debug
      - echo ~~~ Wait for Azure infra to be provisioned
      - kubectl wait --for=condition=ready cluster {{.CLUSTER_NAME}}-aks-cluster --timeout=40m -n {{.CLUSTER_NAMESPACE}}
      - kubectl wait --for=condition=Ready azuremachines --all --timeout=40m -n {{.CLUSTER_NAMESPACE}}
      - "{{.BINDIR}}/clusterctl get kubeconfig --namespace {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}}-aks-cluster > {{.CLUSTER_KUBECONFIG_PATH}}"
      - echo ~~~ Azure K8S cluster
      - echo ~~~ Install Azure Cloud Controller Manager
      - |
        helm install \
          --kubeconfig={{.CLUSTER_KUBECONFIG_PATH}} \
          --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure \
          --generate-name \
          --set infra.clusterName={{.CLUSTER_NAME}} \
          --set cloudControllerManager.clusterCIDR="192.168.0.0/16" \
          --debug
      - echo "~~~ Install project calico - CNI"
      - |
        helm repo add projectcalico https://docs.tigera.io/calico/charts \
          --kubeconfig={{.CLUSTER_KUBECONFIG_PATH}}
      - |
        helm install calico projectcalico/tigera-operator \
          --kubeconfig={{.CLUSTER_KUBECONFIG_PATH}} \
          -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/values.yaml \
          --namespace tigera-operator --create-namespace
      - echo ~~~ Installing azure disk csi driver
      - curl -skSLo install-driver.sh https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.28.1/deploy/install-driver.sh
      - chmod +x ./install-driver.sh
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} ./install-driver.sh v1.28.1 snapshot
      - rm ./install-driver.sh
      - echo ~~~ Installing azure storage class
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl create -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/storageclass-azuredisk-csi.yaml
      - echo ~~~ Wait for all nodes to become ready
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl get pod -A
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl get nodes
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl wait --for=condition=ready nodes --all --timeout=10m
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl get nodes
      - echo ~~~ Azure K8S cluster created
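
  # Per the NOTE at the top of this file, capi-create-aks expects Azure settings
  # such as AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID,
  # AZURE_CLIENT_SECRET, TEST_RESOURCE_GROUP, the AZURE_CLUSTER_IDENTITY_SECRET_*
  # values, and CLUSTER_KUBECONFIG_PATH to come from the environment or `.env`.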
  capi-delete:
    internal: true
    cmds:
      - kubectl delete -f {{.CLUSTERFILE}} --wait

  capi-delete-eks:
    env:
      AWS_B64ENCODED_CREDENTIALS:
        sh: "{{.BINDIR}}/clusterawsadm bootstrap credentials encode-as-profile 2> /dev/null"
    cmds:
      - "{{.BINDIR}}/clusterawsadm controller update-credentials"
      - helm uninstall --wait -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} --timeout 60m
  capi-delete-gke:
    deps:
      - tool:kubectl
      - tool:helm
    env:
      GCP_B64ENCODED_CREDENTIALS: "{{.GCP_B64ENCODED_CREDENTIALS}}"
    cmds:
      - kubectl delete cluster {{.CLUSTER_NAME}}-gke-cluster --timeout=20m
      - helm uninstall --wait -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} --timeout 10m --debug

  capi-delete-aks:
    deps:
      - tool:helm
    cmds:
      - helm uninstall --wait -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} --timeout 60m --debug
  # Use this task when running locally rather than in CI.
  gke-auth-login:
    deps:
      - tool:gcloud
    env:
      PROJECT_ID: "{{.PROJECT_ID}}"
      GOOGLE_APPLICATION_CREDENTIALS: "{{.GOOGLE_APPLICATION_CREDENTIALS}}"
    cmds:
      - gcloud auth activate-service-account --key-file={{.GOOGLE_APPLICATION_CREDENTIALS}}
      - gcloud config set project {{.PROJECT_ID}} --quiet
  gke-get-kubeconfig:
    deps:
      - tool:gcloud
      - tool:auth-gcp
      - tool:gcloud-auth-plugin
    env:
      GOOGLE_APPLICATION_CREDENTIALS: "{{.GOOGLE_APPLICATION_CREDENTIALS}}"
      PROJECT_ID: "{{.PROJECT_ID}}"
      USE_GKE_GCLOUD_AUTH_PLUGIN: True
      KUBECONFIG: '{{ default "capi-gke-cluster.conf" .KUBECONFIG}}'
    cmds:
      - gcloud container clusters get-credentials {{.CLUSTER_NAME}}-gke-cluster --region=us-west1 --project {{.PROJECT_ID}}
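
  # Illustrative only: after gke-get-kubeconfig, the cluster can be reached with
  # the generated kubeconfig (default path shown above), e.g.
  #
  #   KUBECONFIG=capi-gke-cluster.conf kubectl get nodes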