-
Notifications
You must be signed in to change notification settings - Fork 360
137 lines (115 loc) · 5.51 KB
/
ci-test-controllers.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
---
# CI workflow: build KubeArmorController (and, when KubeArmor/protobuf paths
# changed, KubeArmor itself), deploy everything onto a local k3s cluster via
# the operator Helm chart, and run the Ginkgo block-posture test suite.
name: ci-test-controllers

on:
  pull_request:
    branches:
      - "main"
    paths:
      - "pkg/**"
      - ".github/workflows/ci-test-controllers.yml"

# Declare default permissions as read only.
permissions: read-all

jobs:
  kubearmor-controller-test:
    name: Build and Test KubeArmorController Using Ginkgo
    # NOTE(review): ubuntu-20.04 runner images are retired by GitHub —
    # confirm and migrate to a supported image (e.g. ubuntu-22.04/24.04).
    runs-on: ubuntu-20.04
    timeout-minutes: 30
    env:
      RUNTIME: "containerd"
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: 'KubeArmor/go.mod'

      # Decide which components actually changed so the expensive KubeArmor
      # build steps can be skipped on controller-only PRs.
      - name: Check what paths were updated
        uses: dorny/paths-filter@v2
        id: filter
        with:
          filters: |
            kubearmor:
              - "KubeArmor/**"
              - "protobuf/**"
            controller:
              - 'pkg/KubeArmorController/**'

      - name: Setup a Kubernetes environment
        run: ./.github/workflows/install-k3s.sh

      - name: Install the latest LLVM toolchain
        if: steps.filter.outputs.kubearmor == 'true'
        run: ./.github/workflows/install-llvm.sh

      - name: Compile libbpf
        if: steps.filter.outputs.kubearmor == 'true'
        run: ./.github/workflows/install-libbpf.sh

      - name: Generate KubeArmor artifacts
        if: steps.filter.outputs.kubearmor == 'true'
        run: GITHUB_SHA=$GITHUB_SHA ./KubeArmor/build/build_kubearmor.sh

      - name: Build KubeArmorController
        run: make -C pkg/KubeArmorController/ docker-build TAG=latest

      - name: Build Kubearmor-Operator
        working-directory: pkg/KubeArmorOperator
        run: |
          make docker-build

      - name: Install KubeArmor Latest and KubeArmorController using Helm
        timeout-minutes: 7
        run: |
          # save images
          docker save kubearmor/kubearmor-controller:latest | sudo k3s ctr images import -
          docker save kubearmor/kubearmor-operator:latest | sudo k3s ctr images import -
          docker save kubearmor/kubearmor-snitch:latest | sudo k3s ctr images import -
          helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest
          kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
          kubectl get pods -A
          # create kubearmorconfig
          # Quote the templated expansion: an empty filter output would
          # otherwise leave a malformed [[ ]] expression.
          if [[ "${{ steps.filter.outputs.kubearmor }}" == 'true' ]]; then
            docker save kubearmor/kubearmor:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-init:latest | sudo k3s ctr images import -
            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml --dry-run=client -o json | \
              jq '.spec.kubearmorControllerImage.imagePullPolicy = "Never"' | \
              kubectl apply -f -
          else
            # use latest kubearmor if no changes in kubearmor or protobuf dir
            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml --dry-run=client -o json | \
              jq '.spec.kubearmorControllerImage.imagePullPolicy = "Never" | .spec.kubearmorImage.imagePullPolicy = "Always" | .spec.kubearmorInitImage.imagePullPolicy = "Always"' | \
              kubectl apply -f -
          fi
          kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
          # Poll until every non-snitch kubearmor pod is ready; the rollout
          # status calls surface progress while we wait.
          while [ ! "$(kubectl wait --timeout=5s --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kubearmor >/dev/null 2>&1; echo $?)" -eq 0 ]; do
            kubectl rollout status --timeout=5m deployment -l kubearmor-app,kubearmor-app!=kubearmor-controller -n kubearmor
            kubectl rollout status --timeout=5m daemonset -l kubearmor-app=kubearmor -n kubearmor
            kubectl rollout status --timeout=5m deployment -l kubearmor-app=kubearmor-controller -n kubearmor
            kubectl get pods -A
          done

      # Discover the KubeArmor daemonset pod via its own selector labels and
      # export its name for later steps.
      - name: Get KubeArmor POD info
        run: |
          DAEMONSET_NAME=$(kubectl get daemonset -n kubearmor -o jsonpath='{.items[0].metadata.name}')
          LABEL_SELECTOR=$(kubectl get daemonset $DAEMONSET_NAME -n kubearmor -o jsonpath='{.spec.selector.matchLabels}' | jq -r 'to_entries[] | "\(.key)=\(.value)"' | paste -sd, -)
          POD_NAME=$(kubectl get pods -n kubearmor -l "$LABEL_SELECTOR" -o jsonpath='{.items[*].metadata.name}')
          echo "Pod: $POD_NAME"
          echo "POD_NAME=$POD_NAME" >> $GITHUB_ENV
          sleep 15
          kubectl get pods -A
          kubectl logs -n kubearmor "$POD_NAME"

      - name: Test KubeArmor using Ginkgo
        run: |
          go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
          ginkgo --vv --flake-attempts=3 --timeout=10m blockposture/
        working-directory: ./tests/k8s_env
        timeout-minutes: 30

      # Failure-only diagnostics below.
      - name: Controller logs
        if: ${{ failure() }}
        run: |
          kubectl logs -n kubearmor deployments/kubearmor-controller

      - name: Get karmor sysdump
        if: ${{ failure() }}
        run: |
          kubectl describe pod -n kubearmor -l kubearmor-app=kubearmor
          curl -sfL http://get.kubearmor.io/ | sudo sh -s -- -b /usr/local/bin
          mkdir -p /tmp/kubearmor/ && cd /tmp/kubearmor && karmor sysdump

      - name: Archive log artifacts
        if: ${{ failure() }}
        uses: actions/upload-artifact@v4
        with:
          name: kubearmor.logs
          path: |
            /tmp/kubearmor/
            /tmp/kubearmor.*