Skip to content

Commit

Permalink
Merge pull request #1754 from janavenkat/jana/add-image-pull-secrets
Browse files Browse the repository at this point in the history
feat(deployment): Enhance Docker ImagePull Secrets Configuration
  • Loading branch information
daemon1024 authored Jan 7, 2025
2 parents 73468cd + 771c253 commit a09baaf
Show file tree
Hide file tree
Showing 18 changed files with 1,386 additions and 77 deletions.
1 change: 1 addition & 0 deletions .github/workflows/ci-latest-release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ jobs:

- name: Compile libbpf
run: ./.github/workflows/install-libbpf.sh

- name: Login to Docker Hub
uses: docker/login-action@v2
with:
Expand Down
32 changes: 8 additions & 24 deletions .github/workflows/ci-test-controllers.yml
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ jobs:
make docker-build
- name: Install KubeArmor Latest and KubeArmorController using Helm
timeout-minutes: 7
run: |
# save images
docker save kubearmor/kubearmor-controller:latest | sudo k3s ctr images import -
Expand All @@ -86,32 +87,15 @@ jobs:
jq '.spec.kubearmorControllerImage.imagePullPolicy = "Never" | .spec.kubearmorImage.imagePullPolicy = "Always" | .spec.kubearmorInitImage.imagePullPolicy = "Always"' | \
kubectl apply -f -
fi
kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
timeout 7m bash -c -- '
while true; do
all_running=true
echo "Checking pod status..."
for pod_status in $(kubectl get pod -n kubearmor -l kubearmor-app,kubearmor-app!=kubearmor-snitch --output=jsonpath="{.items[*].status.phase}" 2>/dev/null); do
if [ "$pod_status" != "Running" ]; then
all_running=false
echo "Waiting for pods to be Running..."
break
fi
done
if $all_running; then
echo "All pods are Running."
break
fi
if kubectl get pod -n kubearmor -l kubearmor-app,kubearmor-app!=kubearmor-snitch | grep CrashLoopBackOff; then
echo "Error: Pod in CrashLoopBackOff state"
exit 1
fi
sleep 1
done
'
while [ ! "$(kubectl wait --timeout=5s --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kubearmor >/dev/null 2>&1; echo $?)" -eq 0 ]; do
kubectl rollout status --timeout=5m deployment -n kubearmor -l kubearmor-app,kubearmor-app!=kubearmor-controller -n kubearmor
kubectl rollout status --timeout=5m daemonset -l kubearmor-app=kubearmor -n kubearmor
kubectl rollout status --timeout=5m deployment -n kubearmor -l kubearmor-app=kubearmor-controller -n kubearmor
kubectl get pods -A
done
- name: Test KubeArmor using Ginkgo
run: |
Expand Down
11 changes: 8 additions & 3 deletions .github/workflows/ci-test-ginkgo.yml
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ jobs:
kubectl get pods -A
- name: Run KubeArmor
timeout-minutes: 7
run: |
if [[ ${{ matrix.runtime }} == "containerd" ]]; then
docker save kubearmor/kubearmor-test-init:latest | sudo k3s ctr images import -
Expand Down Expand Up @@ -123,9 +124,13 @@ jobs:
fi
kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
kubectl wait --timeout=7m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch,kubearmor-app!=kubearmor-controller -n kubearmor
kubectl wait --timeout=1m --for=condition=ready pod -l kubearmor-app=kubearmor-controller -n kubearmor
kubectl get pods -A
while [ ! "$(kubectl wait --timeout=5s --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kubearmor >/dev/null 2>&1; echo $?)" -eq 0 ]; do
kubectl rollout status --timeout=5m deployment -n kubearmor -l kubearmor-app,kubearmor-app!=kubearmor-controller -n kubearmor
kubectl rollout status --timeout=5m daemonset -l kubearmor-app=kubearmor -n kubearmor
kubectl rollout status --timeout=5m deployment -n kubearmor -l kubearmor-app=kubearmor-controller -n kubearmor
kubectl get pods -A
done
sleep 10
DAEMONSET_NAME=$(kubectl get daemonset -n kubearmor -o jsonpath='{.items[0].metadata.name}')
Expand Down
11 changes: 8 additions & 3 deletions .github/workflows/ci-test-ubi-image.yml
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ jobs:
run: make -C pkg/KubeArmorController/ docker-build TAG=latest

- name: Run KubeArmor
timeout-minutes: 7
run: |
docker save kubearmor/kubearmor-init:latest | sudo podman load
docker save kubearmor/kubearmor-ubi:latest | sudo podman load
Expand All @@ -101,9 +102,13 @@ jobs:
fi
kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
kubectl wait --timeout=7m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch,kubearmor-app!=kubearmor-controller -n kubearmor
kubectl wait --timeout=1m --for=condition=ready pod -l kubearmor-app=kubearmor-controller -n kubearmor
kubectl get pods -A
while [ ! "$(kubectl wait --timeout=5s --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kubearmor >/dev/null 2>&1; echo $?)" -eq 0 ]; do
kubectl rollout status --timeout=5m deployment -n kubearmor -l kubearmor-app,kubearmor-app!=kubearmor-controller -n kubearmor
kubectl rollout status --timeout=5m daemonset -l kubearmor-app=kubearmor -n kubearmor
kubectl rollout status --timeout=5m deployment -n kubearmor -l kubearmor-app=kubearmor-controller -n kubearmor
kubectl get pods -A
done
- name: Operator may take up to 10 sec to enable TLS, sleep for 15 sec
run: |
Expand Down
1 change: 0 additions & 1 deletion KubeArmor/go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,6 @@ require (
github.com/emicklei/go-restful/v3 v3.11.2 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
Expand Down
8 changes: 8 additions & 0 deletions deployments/helm/KubeArmor/templates/daemonset.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,14 @@ spec:
labels:
kubearmor-app: kubearmor
spec:
{{- if .Values.kubearmor.image.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.kubearmor.image.imagePullSecrets | indent 6 }}
{{- end }}
{{- if .Values.kubearmor.tolerations }}
tolerations:
{{ toYaml .Values.kubearmor.tolerations | indent 6 }}
{{- end }}
containers:
- args:
- -gRPC=32767
Expand Down
16 changes: 16 additions & 0 deletions deployments/helm/KubeArmor/templates/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,14 @@ spec:
labels:
kubearmor-app: kubearmor-relay
spec:
{{- if .Values.kubearmorRelay.image.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.kubearmorRelay.image.imagePullSecrets | indent 6 }}
{{- end }}
{{- if .Values.kubearmorRelay.tolerations }}
tolerations:
{{ toYaml .Values.kubearmorRelay.tolerations | indent 6 }}
{{- end }}
containers:
- args:
{{printf "- -tlsEnabled=%t" .Values.tls.enabled}}
Expand Down Expand Up @@ -78,6 +86,14 @@ spec:
- /manager
image: {{printf "%s:%s" .Values.kubearmorController.image.repository .Values.kubearmorController.image.tag}}
imagePullPolicy: {{ .Values.kubearmorController.imagePullPolicy }}
{{- if .Values.kubearmorController.image.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.kubearmorController.image.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.kubearmorController.tolerations }}
tolerations:
{{ toYaml .Values.kubearmorController.tolerations | indent 6 }}
{{- end }}
livenessProbe:
httpGet:
path: /healthz
Expand Down
16 changes: 15 additions & 1 deletion deployments/helm/KubeArmor/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,12 @@ kubearmorRelay:
repository: kubearmor/kubearmor-relay-server
# kubearmor-init image tag
tag: latest
# Optional, but if there are a lot of image pulls required, Docker might be rate-limited. So, it's good to add pull secrets for production.
imagePullSecrets: ""
# kubearmor-init imagePullPolicy
imagePullPolicy: Always
tolerations: ""

# Add environment variables for STDOUT logging
enableStdoutLogs: "false"
enableStdoutAlerts: "false"
Expand Down Expand Up @@ -62,6 +66,8 @@ kubearmorInit:
repository: kubearmor/kubearmor-init
# kubearmor-init image tag
tag: stable
# Optional, but if there are a lot of image pulls required, Docker might be rate-limited. So, it's good to add pull secrets for production.
imagePullSecrets: ""
# kubearmor-init imagePullPolicy
imagePullPolicy: Always

Expand All @@ -71,6 +77,8 @@ kubeRbacProxy:
repository: gcr.io/kubebuilder/kube-rbac-proxy
# kube-rbac-proxy image tag
tag: v0.15.0
# Optional, but if there are a lot of image pulls required, Docker might be rate-limited. So, it's good to add pull secrets for production.
imagePullSecrets: ""
# kube-rbac-proxy imagePullPolicy
imagePullPolicy: Always

Expand All @@ -83,6 +91,9 @@ kubearmorController:
repository: kubearmor/kubearmor-controller
# kubearmor-controller image tag
tag: latest
# Optional, but if there are a lot of image pulls required, Docker might be rate-limited. So, it's good to add pull secrets for production.
imagePullSecrets: ""
tolerations: ""
mutation:
# kubearmor-controller failure policy
failurePolicy: Ignore
Expand All @@ -100,12 +111,15 @@ kubearmorConfigMap:

#volume mounts and volumes
kubearmor:
# https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: ""
image:
# kubearmor daemonset image repo
repository: kubearmor/kubearmor
# kubearmor daemonset image tag
tag: stable

# Optional, but if there are a lot of image pulls required, Docker might be rate-limited. So, it's good to add pull secrets for production.
imagePullSecrets: ""
# kubearmor daemonset imagePullPolicy
imagePullPolicy: Always

Expand Down
Loading

0 comments on commit a09baaf

Please sign in to comment.