diff --git a/.github/workflows/ci-test-controllers.yml b/.github/workflows/ci-test-controllers.yml
index 44c0642ac5..cd56b03a06 100644
--- a/.github/workflows/ci-test-controllers.yml
+++ b/.github/workflows/ci-test-controllers.yml
@@ -95,14 +95,30 @@ jobs:
           kubectl rollout status --timeout=5m daemonset -l kubearmor-app=kubearmor -n kubearmor
           kubectl rollout status --timeout=5m deployment -n kubearmor -l kubearmor-app=kubearmor-controller -n kubearmor
           kubectl get pods -A
-          done
+          done
+
+      - name: Get KubeArmor POD info
+        run: |
+          DAEMONSET_NAME=$(kubectl get daemonset -n kubearmor -o jsonpath='{.items[0].metadata.name}')
+          LABEL_SELECTOR=$(kubectl get daemonset $DAEMONSET_NAME -n kubearmor -o jsonpath='{.spec.selector.matchLabels}' | jq -r 'to_entries[] | "\(.key)=\(.value)"' | paste -sd, -)
+          POD_NAME=$(kubectl get pods -n kubearmor -l "$LABEL_SELECTOR" -o jsonpath='{.items[*].metadata.name}')
+          echo "Pod: $POD_NAME"
+          echo "POD_NAME=$POD_NAME" >> $GITHUB_ENV
+          sleep 15
+          kubectl get pods -A
+          kubectl logs -n kubearmor "$POD_NAME"
 
       - name: Test KubeArmor using Ginkgo
         run: |
           go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
-          ginkgo --vv --flake-attempts=10 --timeout=10m smoke/
+          ginkgo --vv --flake-attempts=3 --timeout=10m blockposture/
         working-directory: ./tests/k8s_env
         timeout-minutes: 30
+
+      - name: Controller logs
+        if: ${{ failure() }}
+        run: |
+          kubectl logs -n kubearmor deployments/kubearmor-controller
 
       - name: Get karmor sysdump
         if: ${{ failure() }}
diff --git a/.github/workflows/ci-test-ginkgo.yml b/.github/workflows/ci-test-ginkgo.yml
index 53bf273d71..db936b5619 100644
--- a/.github/workflows/ci-test-ginkgo.yml
+++ b/.github/workflows/ci-test-ginkgo.yml
@@ -172,6 +172,10 @@ jobs:
           POD_NAME=$(kubectl get pods -n kubearmor -l "$LABEL_SELECTOR" -o jsonpath='{.items[*].metadata.name}')
           echo "Pod: $POD_NAME"
           echo "POD_NAME=$POD_NAME" >> $GITHUB_ENV
+          sleep 15
+          kubectl get pods -A
+          kubectl logs -n kubearmor "$POD_NAME"
+          kubectl logs -l app=kubearmor-controller -n kubearmor --all-containers=true
 
       - name: Test KubeArmor using Ginkgo
         run: |
@@ -179,7 +183,10 @@
           make
         working-directory: ./tests/k8s_env
         timeout-minutes: 30
-
+      - name: Controller logs
+        if: ${{ failure() }}
+        run: |
+          kubectl logs -n kubearmor deployments/kubearmor-controller
       - name: Kill KubeArmor prcoess in the pod
         run: |
           KUBEARMOR_PID=$(kubectl exec ${{ env.POD_NAME }} -n kubearmor -c kubearmor -- sh -c "ps aux | grep '[K]ubeArmor/kubearmor-test' | awk '{print \$1}'")
diff --git a/pkg/KubeArmorController/handlers/pod_mutation.go b/pkg/KubeArmorController/handlers/pod_mutation.go
index f73c289827..53b9ca2332 100644
--- a/pkg/KubeArmorController/handlers/pod_mutation.go
+++ b/pkg/KubeArmorController/handlers/pod_mutation.go
@@ -6,6 +6,7 @@ package handlers
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"net/http"
 
 	"github.com/go-logr/logr"
@@ -64,8 +65,11 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss
 	}
 	a.Cluster.ClusterLock.RUnlock()
 	if annotate {
+		fmt.Println("updating pod annotation")
 		common.AppArmorAnnotatorBinding(binding, pod)
 	}
+	fmt.Println("annotation binidng", binding)
+
 	// == //
 	// send the mutation response
 	marshaledPod, err := json.Marshal(binding)
@@ -100,8 +104,10 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss
 	}
 	a.Cluster.ClusterLock.RUnlock()
 	if annotate {
+		fmt.Println("updating pod annotation")
 		common.AppArmorAnnotator(pod)
 	}
+	fmt.Println("annotation pod", pod)
 }
 
 // == //
diff --git a/pkg/KubeArmorController/informer/nodewatcher.go b/pkg/KubeArmorController/informer/nodewatcher.go
index b027f2975b..53e2a38002 100644
--- a/pkg/KubeArmorController/informer/nodewatcher.go
+++ b/pkg/KubeArmorController/informer/nodewatcher.go
@@ -94,19 +94,19 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge
 			if enforcer != cluster.Nodes[node.Name].Enforcer {
 				delete(cluster.Nodes, node.Name)
 			}
-		} else {
-			if enforcer == "apparmor" {
-				cluster.Nodes[node.Name].Enforcer = enforcer
-				var err error
-				kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c)
-				if err != nil {
-					log.Error(err, fmt.Sprintf("unable to get kubearmor status on node %s", node.Name))
-				}
-				cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus
+		}
 
-				if !cluster.Nodes[node.Name].KubeArmorActive {
-					log.Info(fmt.Sprintf("kubearmor not found on node %s", node.Name))
-				}
+		if enforcer == "apparmor" {
+			cluster.Nodes[node.Name].Enforcer = enforcer
+			var err error
+			kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c)
+			if err != nil {
+				log.Error(err, fmt.Sprintf("unable to get kubearmor status on node %s", node.Name))
+			}
+			cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus
+
+			if !cluster.Nodes[node.Name].KubeArmorActive {
+				log.Info(fmt.Sprintf("kubearmor not found on node %s", node.Name))
 			}
 		}
 		// re-compute homogeneous status
diff --git a/tests/k8s_env/Makefile b/tests/k8s_env/Makefile
index 8e8a090053..68cfbd0d24 100644
--- a/tests/k8s_env/Makefile
+++ b/tests/k8s_env/Makefile
@@ -6,8 +6,8 @@ build:
 	@go mod tidy
 # run in two steps as syscall suite fails if run at the very end
 # see - https://github.com/kubearmor/KubeArmor/issues/1269
-	@ginkgo --vv --flake-attempts=10 --timeout=15m syscalls/
-	@ginkgo -r --vv --flake-attempts=10 --timeout=30m --skip-package "syscalls"
+	# @ginkgo --vv --flake-attempts=10 --timeout=15m syscalls/
+	@ginkgo -r --vv --flake-attempts=3 --timeout=30m --skip-package "syscalls"
 .PHONY: test
 test:
 	@ginkgo -r -v
\ No newline at end of file
diff --git a/tests/util/kartutil.go b/tests/util/kartutil.go
index 39433f599c..7d481953f7 100644
--- a/tests/util/kartutil.go
+++ b/tests/util/kartutil.go
@@ -273,6 +273,7 @@ func K8sGetPods(podstr string, ns string, ants []string, timeout int) ([]string,
 	}
 	pods = []string{}
 	for _, p := range podList.Items {
+		fmt.Printf("pod name := %s , pod annotation:= %s", p.Name, p.Annotations)
 		if p.Status.Phase != corev1.PodRunning || p.DeletionTimestamp != nil {
 			continue
 		}