#!/bin/bash
# set -o errexit
# set -o pipefail
# set -o nounset
source ../scripts/check-available-commands.sh
checkCommandsAvailable helm jq vault sed grep docker cat aws curl eksctl kubectl
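# For reference, a minimal sketch of what such a helper typically looks like
# (an assumption -- the real implementation lives in ../scripts/check-available-commands.sh):
# checkCommandsAvailable() {
#   for cmd in "$@"; do
#     command -v "$cmd" >/dev/null 2>&1 || { echo "Required command '$cmd' not found" >&2; exit 1; }
#   done
# }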
if test -n "${AWS_REGION-}"; then
echo "AWS_REGION is set to <$AWS_REGION>"
else
AWS_REGION=eu-west-1
echo "AWS_REGION is not set or empty, defaulting to ${AWS_REGION}"
fi
if test -n "${CLUSTERNAME-}"; then
echo "CLUSTERNAME is set to <$CLUSTERNAME>"
else
CLUSTERNAME=wrongsecrets-exercise-cluster
echo "CLUSTERNAME is not set or empty, defaulting to ${CLUSTERNAME}"
fi
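# The two default blocks above could equally be written with bash default
# expansion, e.g. (a sketch, behaviourally equivalent apart from the log lines):
# AWS_REGION="${AWS_REGION:-eu-west-1}"
# CLUSTERNAME="${CLUSTERNAME:-wrongsecrets-exercise-cluster}"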
ACCOUNT_ID=$(aws sts get-caller-identity | jq '.Account' -r)
echo "ACCOUNT_ID=${ACCOUNT_ID}"
LBC_VERSION="v2.7.1"
echo "LBC_VERSION=$LBC_VERSION"
# echo "executing eksctl utils associate-iam-oidc-provider"
# eksctl utils associate-iam-oidc-provider \
# --region ${AWS_REGION} \
# --cluster ${CLUSTERNAME} \
# --approve
echo "creating iam policy"
# --fail so a bad download (e.g. a 404) does not silently leave an HTML error page in iam_policy.json
curl --fail -sSL -o iam_policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/"${LBC_VERSION}"/docs/install/iam_policy.json
aws iam create-policy \
--policy-name AWSLoadBalancerControllerIAMPolicy \
--policy-document file://iam_policy.json
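# Note: create-policy fails if AWSLoadBalancerControllerIAMPolicy already exists.
# A hedged idempotency sketch (assumes AWS CLI v2; reuses an existing policy ARN):
# POLICY_ARN=$(aws iam list-policies --scope Local \
#   --query "Policies[?PolicyName=='AWSLoadBalancerControllerIAMPolicy'].Arn" --output text)
# if [ -z "$POLICY_ARN" ]; then
#   POLICY_ARN=$(aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy \
#     --policy-document file://iam_policy.json --query Policy.Arn --output text)
# fi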
echo "creating iam service account for cluster ${CLUSTERNAME}"
eksctl create iamserviceaccount \
--cluster $CLUSTERNAME \
--namespace kube-system \
--name aws-load-balancer-controller \
--attach-policy-arn arn:aws:iam::${ACCOUNT_ID}:policy/AWSLoadBalancerControllerIAMPolicy \
--override-existing-serviceaccounts \
--region $AWS_REGION \
--approve
echo "setting up kubectl"
aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTERNAME --kubeconfig ~/.kube/wrongsecrets
export KUBECONFIG=~/.kube/wrongsecrets
echo "applying aws-lbc with kubectl"
kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master"
kubectl get crd
echo "do helm eks application"
helm repo add eks https://aws.github.io/eks-charts
helm repo update eks
echo "upgrade alb controller with helm"
helm upgrade -i aws-load-balancer-controller \
eks/aws-load-balancer-controller \
-n kube-system \
--set clusterName=${CLUSTERNAME} \
--set serviceAccount.create=false \
--set serviceAccount.name=aws-load-balancer-controller \
--set image.tag="${LBC_VERSION}" \
--set region=${AWS_REGION} \
--set image.repository=602401143452.dkr.ecr.${AWS_REGION}.amazonaws.com/amazon/aws-load-balancer-controller
# You may need to modify the account ID above if you're operating in af-south-1, ap-east-1, ap-southeast-3, cn-north-1, cn-northwest-1, eu-south-1, me-south-1, or AWS GovCloud (US).
# See the full list of accounts per regions here: https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
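# A hedged sketch for parameterizing the registry account (the default below is
# the account used in most commercial regions; look up the opt-in/GovCloud
# account on the docs page linked above before overriding):
# ECR_ACCOUNT_ID="${ECR_ACCOUNT_ID:-602401143452}"
# ...then pass --set image.repository=${ECR_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/amazon/aws-load-balancer-controller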
echo "wait with rollout for 10 s"
sleep 10
echo "rollout status deployment"
kubectl -n kube-system rollout status deployment aws-load-balancer-controller
echo "wait after rollout for 10 s"
sleep 10
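# Instead of fixed sleeps, a hedged alternative is to block until the
# deployment reports Available (kubectl wait is standard; the timeout is arbitrary):
# kubectl -n kube-system wait --for=condition=Available \
#   deployment/aws-load-balancer-controller --timeout=120s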
# Recorded for reference; EKS_CLUSTER_VERSION is not used further in this script.
EKS_CLUSTER_VERSION=$(aws eks describe-cluster --name $CLUSTERNAME --region $AWS_REGION --query cluster.version --output text)
echo "apply -f k8s/secret-challenge-vault-service.yml in 10 s"
sleep 10
kubectl apply -f k8s/secret-challenge-vault-service.yml
echo "apply -f k8s/secret-challenge-vault-ingress.yml in 1 s"
sleep 1
kubectl apply -f k8s/secret-challenge-vault-ingress.yml
echo "waiting 10 s for loadBalancer"
sleep 10
echo "http://$(kubectl get ingress wrongsecrets -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')"
echo "Do not forget to cleanup afterwards! Run k8s-aws-alb-script-cleanup.sh"