-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathztm-hub-agents.sh
executable file
·101 lines (89 loc) · 3.02 KB
/
ztm-hub-agents.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
#!/bin/bash
# Deploy a ztm-hub (Docker container on the host) and ztm-agents (pods in two
# k3d clusters) that join the hub's mesh via a shared permit file.
#
# Requires: docker, kubectl, and a config.sh in the CWD defining:
#   HUB_CONTAINER_NAME, HUB_IMAGE, HUB_PORT, HUB_DATA_DIR, HOST_IP,
#   AGENT_IMAGE, AGENT_PORT, AGENT_JOIN_MESH, AGENT_CONFIGMAP_NAME,
#   CLUSTER1, CLUSTER2
set -euo pipefail

# Load deployment variables (see list above).
source config.sh

# Ensure the local directory exists for the hub's permit data.
mkdir -p -- "$HUB_DATA_DIR"
# Start ztm-hub on the host
# Start the ztm-hub container on the host, unless one with the expected name
# is already running. The hub writes its permit file into $HUB_DATA_DIR,
# which is bind-mounted at /permit inside the container.
# Globals: HUB_CONTAINER_NAME, HUB_IMAGE, HUB_PORT, HUB_DATA_DIR, HOST_IP (read)
start_hub() {
  # Anchor the grep so a partial name match doesn't count as "running".
  if docker ps --filter "name=$HUB_CONTAINER_NAME" --format "{{.Names}}" | grep -q "^$HUB_CONTAINER_NAME$"; then
    echo "ztm-hub is already running."
  else
    echo "Starting ztm-hub on the host..."
    docker run -d --name "$HUB_CONTAINER_NAME" \
      -p "$HUB_PORT:$HUB_PORT" \
      -v "$HUB_DATA_DIR:/permit" \
      -e "ZTM_PORT=$HUB_PORT" \
      -e "ZTM_NAMES=$HOST_IP" \
      "$HUB_IMAGE"
    echo "ztm-hub started on port $HUB_PORT with permit stored in $HUB_DATA_DIR."
  fi
}
# Create a ConfigMap for the agent permit file
# Create (or update) a ConfigMap holding the hub's permit file in one cluster.
# Globals:   HUB_DATA_DIR, AGENT_CONFIGMAP_NAME (read)
# Arguments: $1 - k3d cluster name (kubectl context is "k3d-<name>")
create_configmap() {
  local cluster_name=$1

  # Block until the hub has generated its permit file.
  # NOTE(review): this waits forever if the hub never produces root.json;
  # consider adding a timeout.
  while [ ! -f "$HUB_DATA_DIR/root.json" ]; do
    echo "Waiting for root.json to be generated by the hub..."
    sleep 2
  done

  # dry-run + apply makes this idempotent: re-running updates the ConfigMap
  # instead of failing with "already exists".
  echo "Creating ConfigMap for ztm-permit.json in $cluster_name..."
  kubectl --context "k3d-$cluster_name" create configmap "$AGENT_CONFIGMAP_NAME" \
    --from-file="ztm-permit.json=$HUB_DATA_DIR/root.json" --dry-run=client -o yaml \
    | kubectl --context "k3d-$cluster_name" apply -f -

  # Wait for the ConfigMap to be visible before deploying agents that mount it.
  until kubectl --context "k3d-$cluster_name" get configmap "$AGENT_CONFIGMAP_NAME" &>/dev/null; do
    echo "Waiting for ConfigMap $AGENT_CONFIGMAP_NAME to be available in $cluster_name..."
    sleep 2
  done
  echo "ConfigMap $AGENT_CONFIGMAP_NAME is available in $cluster_name."
}
# Deploy ztm-agent to a cluster
# Deploy a ztm-agent pod to one cluster and expose it as a ClusterIP service.
# The agent mounts the permit ConfigMap at /etc/ztm and joins the hub's mesh.
# Globals:   AGENT_IMAGE, AGENT_PORT, AGENT_JOIN_MESH, AGENT_CONFIGMAP_NAME (read)
# Arguments: $1 - k3d cluster name (kubectl context is "k3d-<name>")
deploy_agent_to_cluster() {
  local cluster_name=$1
  echo "Deploying ztm-agent to $cluster_name..."
  # Unquoted EOF: shell variables ($AGENT_IMAGE, $AGENT_PORT, ...) are
  # intentionally expanded into the manifest before it is applied.
  kubectl --context "k3d-$cluster_name" apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
name: ztm-agent
labels:
app: ztm-agent
spec:
containers:
- name: ztm-agent
image: $AGENT_IMAGE
ports:
- containerPort: $AGENT_PORT
env:
- name: ZTM_PORT
value: "$AGENT_PORT"
- name: ZTM_JOIN_MESH
value: "$AGENT_JOIN_MESH"
- name: ZTM_PERMIT
value: "/etc/ztm"
- name: ZTM_ENDPOINT
value: "${cluster_name}-ep"
volumeMounts:
- mountPath: /etc/ztm
name: permit-config-volume
readOnly: true
volumes:
- name: permit-config-volume
configMap:
name: $AGENT_CONFIGMAP_NAME
EOF
  # Fail fast if the pod never becomes Ready (image pull error, bad permit, ...).
  kubectl --context "k3d-$cluster_name" wait --for=condition=Ready pod/ztm-agent --timeout=30s
  kubectl --context "k3d-$cluster_name" expose pod ztm-agent --name=ztm-agent --type=ClusterIP
  echo "ztm-agent deployed to $cluster_name."
}
# --- Main flow ---

# Start ztm-hub on the host (no-op if already running).
start_hub

# Create the permit ConfigMap and deploy an agent in each cluster.
for cluster in "$CLUSTER1" "$CLUSTER2"; do
  create_configmap "$cluster"
  deploy_agent_to_cluster "$cluster"
done

echo "ztm-hub and ztm-agents are deployed with ConfigMap. Verify the deployments and connectivity."

# Verification: list mesh endpoints from inside each agent.
# No -it here: -t requires a TTY and fails when the script runs
# non-interactively (CI, cron).
for cluster in "$CLUSTER1" "$CLUSTER2"; do
  echo "listing endpoints on $cluster"
  kubectl --context "k3d-$cluster" exec ztm-agent -- ztm get ep
done