diff --git a/KubeArmor/core/kubeUpdate.go b/KubeArmor/core/kubeUpdate.go
old mode 100644
new mode 100755
index 87e4dbd457..18def91059
--- a/KubeArmor/core/kubeUpdate.go
+++ b/KubeArmor/core/kubeUpdate.go
@@ -2761,6 +2761,10 @@ func (dm *KubeArmorDaemon) WatchConfigMap() cache.InformerSynced {
 			if cm, ok := obj.(*corev1.ConfigMap); ok && cm.Namespace == cmNS {
 				cfg.GlobalCfg.HostVisibility = cm.Data[cfg.ConfigHostVisibility]
 				cfg.GlobalCfg.Visibility = cm.Data[cfg.ConfigVisibility]
+				cfg.GlobalCfg.Cluster = cm.Data[cfg.ConfigCluster]
+				dm.NodeLock.Lock()
+				dm.Node.ClusterName = cm.Data[cfg.ConfigCluster]
+				dm.NodeLock.Unlock()
 				if _, ok := cm.Data[cfg.ConfigDefaultPostureLogs]; ok {
 					cfg.GlobalCfg.DefaultPostureLogs = (cm.Data[cfg.ConfigDefaultPostureLogs] == "true")
 				}
@@ -2806,6 +2810,10 @@ func (dm *KubeArmorDaemon) WatchConfigMap() cache.InformerSynced {
 			if cm, ok := new.(*corev1.ConfigMap); ok && cm.Namespace == cmNS {
 				cfg.GlobalCfg.HostVisibility = cm.Data[cfg.ConfigHostVisibility]
 				cfg.GlobalCfg.Visibility = cm.Data[cfg.ConfigVisibility]
+				cfg.GlobalCfg.Cluster = cm.Data[cfg.ConfigCluster]
+				dm.NodeLock.Lock()
+				dm.Node.ClusterName = cm.Data[cfg.ConfigCluster]
+				dm.NodeLock.Unlock()
 				if _, ok := cm.Data[cfg.ConfigDefaultPostureLogs]; ok {
 					cfg.GlobalCfg.DefaultPostureLogs = (cm.Data[cfg.ConfigDefaultPostureLogs] == "true")
 				}
diff --git a/KubeArmor/feeder/feeder.go b/KubeArmor/feeder/feeder.go
index c1c4203c1b..60201056fe 100644
--- a/KubeArmor/feeder/feeder.go
+++ b/KubeArmor/feeder/feeder.go
@@ -498,7 +498,7 @@ func (fd *Feeder) PushMessage(level, message string) {
 	pbMsg.UpdatedTime = updatedTime
 
 	//pbMsg.ClusterName = cfg.GlobalCfg.Cluster
-	pbMsg.ClusterName = fd.Node.ClusterName
+	pbMsg.ClusterName = cfg.GlobalCfg.Cluster
 	pbMsg.HostName = cfg.GlobalCfg.Host
 	pbMsg.HostIP = fd.Node.NodeIP
 
@@ -598,7 +598,7 @@ func (fd *Feeder) PushLog(log tp.Log) {
 	pbAlert.Timestamp = log.Timestamp
 	pbAlert.UpdatedTime = log.UpdatedTime
 
-	pbAlert.ClusterName = fd.Node.ClusterName
+	pbAlert.ClusterName = cfg.GlobalCfg.Cluster
pbAlert.HostName = fd.Node.NodeName pbAlert.NamespaceName = log.NamespaceName @@ -696,7 +696,7 @@ func (fd *Feeder) PushLog(log tp.Log) { pbLog.Timestamp = log.Timestamp pbLog.UpdatedTime = log.UpdatedTime - pbLog.ClusterName = fd.Node.ClusterName + pbLog.ClusterName = cfg.GlobalCfg.Cluster pbLog.HostName = fd.Node.NodeName pbLog.NamespaceName = log.NamespaceName diff --git a/deployments/helm/KubeArmorOperator/templates/deployment.yaml b/deployments/helm/KubeArmorOperator/templates/deployment.yaml index 24f397ba8d..9a117a1b76 100644 --- a/deployments/helm/KubeArmorOperator/templates/deployment.yaml +++ b/deployments/helm/KubeArmorOperator/templates/deployment.yaml @@ -31,7 +31,9 @@ spec: {{- if or (eq $tag "latest") (and (hasPrefix "v" $tag) (semverCompare "^1.4.0" $tag)) }} # initDeploy flag is only supported from v1.4.0 args: - - --initDeploy={{.Values.kubearmorOperator.initDeploy }} + {{- if .Values.kubearmorOperator.args -}} + {{- toYaml .Values.kubearmorOperator.args | trim | nindent 8 }} + {{- end }} {{- end }} serviceAccountName: {{ .Values.kubearmorOperator.name }} diff --git a/deployments/helm/KubeArmorOperator/values.yaml b/deployments/helm/KubeArmorOperator/values.yaml index 33629fc5ea..b54582d81b 100644 --- a/deployments/helm/KubeArmorOperator/values.yaml +++ b/deployments/helm/KubeArmorOperator/values.yaml @@ -33,7 +33,8 @@ kubearmorOperator: repository: kubearmor/kubearmor-operator tag: "" imagePullPolicy: IfNotPresent - initDeploy: true + args: + - "--initDeploy=true" kubearmorConfig: defaultCapabilitiesPosture: audit diff --git a/pkg/KubeArmorOperator/cmd/operator/main.go b/pkg/KubeArmorOperator/cmd/operator/main.go old mode 100644 new mode 100755 index 8041557fab..de1001cd08 --- a/pkg/KubeArmorOperator/cmd/operator/main.go +++ b/pkg/KubeArmorOperator/cmd/operator/main.go @@ -32,6 +32,7 @@ var Opv1Client *opv1client.Clientset var Secv1Client *secv1client.Clientset var InitDeploy bool var LogLevel string +var ProviderHostname, ProviderEndpoint string 
// Cmd represents the base command when called without any subcommands var Cmd = &cobra.Command{ @@ -55,7 +56,7 @@ var Cmd = &cobra.Command{ return nil }, Run: func(cmd *cobra.Command, args []string) { - nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, Secv1Client, PathPrefix, DeploymentName, InitDeploy) + nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, Secv1Client, PathPrefix, DeploymentName, ProviderHostname, ProviderEndpoint, InitDeploy) go nodeWatcher.WatchConfigCrd() nodeWatcher.WatchNodes() @@ -81,6 +82,8 @@ func init() { Cmd.PersistentFlags().StringVar(&LsmOrder, "lsm", "bpf,apparmor,selinux", "lsm preference order to use") Cmd.PersistentFlags().StringVar(&PathPrefix, "pathprefix", "/rootfs/", "path prefix for runtime search") Cmd.PersistentFlags().StringVar(&DeploymentName, "deploymentName", "kubearmor-operator", "operator deployment name") + Cmd.PersistentFlags().StringVar(&ProviderHostname, "providerHostname", "", "IMDS URL hostname for retrieving cluster name") + Cmd.PersistentFlags().StringVar(&ProviderEndpoint, "providerEndpoint", "", "IMDS URL endpoint for retrieving cluster name") // TODO:- set initDeploy to false by default once this change is added to stable Cmd.PersistentFlags().BoolVar(&InitDeploy, "initDeploy", true, "Init container deployment") Cmd.PersistentFlags().StringVar(&LogLevel, "loglevel", "info", "log level, e.g., debug, info, warn, error") diff --git a/pkg/KubeArmorOperator/internal/controller/cluster.go b/pkg/KubeArmorOperator/internal/controller/cluster.go old mode 100644 new mode 100755 index d627b208ff..e54b4fa91a --- a/pkg/KubeArmorOperator/internal/controller/cluster.go +++ b/pkg/KubeArmorOperator/internal/controller/cluster.go @@ -45,6 +45,7 @@ var deployment_uuid types.UID var deployment_name string = "kubearmor-operator" var PathPrefix string var initDeploy bool +var ProviderHostname, ProviderEndpoint string type ClusterWatcher struct { Nodes []Node 
@@ -68,7 +69,7 @@ type Node struct {
 	Seccomp string
 }
 
-func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, secv1Client *secv1client.Clientset, pathPrefix, deploy_name string, initdeploy bool) *ClusterWatcher {
+func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, secv1Client *secv1client.Clientset, pathPrefix, deploy_name, providerHostname, providerEndpoint string, initdeploy bool) *ClusterWatcher {
 	if informer == nil {
 		informer = informers.NewSharedInformerFactory(client, 0)
 	}
@@ -85,6 +86,9 @@ func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, secv1Client *secv1client.Clientset, pathPrefix, deploy_name, providerHostname, providerEndpoint string, initdeploy bool) *ClusterWatcher {
 	PathPrefix = pathPrefix
 	deployment_name = deploy_name
 	initDeploy = initdeploy
+	ProviderHostname = providerHostname
+	ProviderEndpoint = providerEndpoint
+
 	return &ClusterWatcher{
 		Nodes:      []Node{},
 		Daemonsets: make(map[string]int),
diff --git a/pkg/KubeArmorOperator/internal/controller/resources.go b/pkg/KubeArmorOperator/internal/controller/resources.go
old mode 100644
new mode 100755
index 6facc45811..6ef0b326fd
--- a/pkg/KubeArmorOperator/internal/controller/resources.go
+++ b/pkg/KubeArmorOperator/internal/controller/resources.go
@@ -7,6 +7,9 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"io"
+	"net/http"
+	"regexp"
 	"strconv"
 	"strings"
 	"time"
@@ -470,6 +473,161 @@ func (clusterWatcher *ClusterWatcher) deployControllerDeployment(deployment *appsv1.Deployment) error {
 	return nil
 }
 
+func (clusterWatcher *ClusterWatcher) getProvider(providerHostname, providerEndpoint string) (string, string, string) {
+	nodes, err := clusterWatcher.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		clusterWatcher.Log.Warnf("Error listing nodes: %s\n", err.Error())
+		return "default", "", ""
+	}
+
+	for _, node := range nodes.Items {
+		for key, label := range node.Labels {
+			if strings.Contains(key, "gke") || strings.Contains(label, "gke") {
+				if providerHostname != "" && providerEndpoint == "" {
+					providerEndpoint = "/computeMetadata/v1/instance/attributes/cluster-name"
+				} else if providerHostname == "" && providerEndpoint != "" {
+					providerHostname = "http://metadata.google.internal"
+				} else if providerHostname == "" && providerEndpoint == "" {
+					providerHostname = "http://metadata.google.internal"
+					providerEndpoint = "/computeMetadata/v1/instance/attributes/cluster-name"
+				}
+				return "gke", providerHostname, providerEndpoint
+			} else if strings.Contains(key, "eks") || strings.Contains(label, "eks") {
+				if providerHostname != "" && providerEndpoint == "" {
+					providerEndpoint = "/latest/user-data"
+				} else if providerHostname == "" && providerEndpoint != "" {
+					providerHostname = "http://169.254.169.254"
+				} else if providerHostname == "" && providerEndpoint == "" {
+					providerHostname = "http://169.254.169.254"
+					providerEndpoint = "/latest/user-data"
+				}
+				return "eks", providerHostname, providerEndpoint
+			}
+		}
+	}
+	return "default", "", ""
+}
+
+func (clusterWatcher *ClusterWatcher) fetchClusterNameFromGKE(providerHostname, providerEndpoint string) (string, error) {
+	url := providerHostname + providerEndpoint
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to create request: %v, check provider host name and endpoint", err)
+		return "", err
+	}
+
+	// Set the required header
+	req.Header.Set("Metadata-Flavor", "Google")
+
+	// Create an HTTP client and make the request
+	client := &http.Client{Timeout: 2 * time.Second}
+	resp, err := client.Do(req)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error making request: %v, check provider host name and endpoint", err)
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	// Check for a successful response
+	if resp.StatusCode != http.StatusOK {
+		clusterWatcher.Log.Warnf("failed to fetch from metadata, status code: %d", resp.StatusCode)
+		return "", fmt.Errorf("failed to fetch cluster name from GKE metadata, status code: %d", resp.StatusCode)
+	}
+
+	// Read the response body
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error reading response body: %v", err)
+		return "", err
+	}
+
+	return string(body), nil
+}
+
+func (clusterWatcher *ClusterWatcher) fetchClusterNameFromAWS(providerHostname, providerEndpoint string) (string, error) {
+	var token []byte
+	client := &http.Client{Timeout: 2 * time.Second}
+	req, err := http.NewRequest("PUT", providerHostname+"/latest/api/token", nil)
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to create request for fetching token: %v, check provider host name", err)
+		return "", err
+	}
+	req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "21600")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error making request: %v", err)
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusOK {
+		token, err = io.ReadAll(resp.Body)
+		if err != nil {
+			clusterWatcher.Log.Warnf("failed to read token: %v", err)
+			return "", err
+		}
+	}
+
+	// Fetch the EKS cluster name from user data
+	url := providerHostname + providerEndpoint
+	req, err = http.NewRequest("GET", url, nil)
+	client = &http.Client{Timeout: 2 * time.Second}
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to create request for fetching metadata: %v, check provider host name and endpoint", err)
+		return "", err
+	}
+	req.Header.Set("X-aws-ec2-metadata-token", string(token))
+
+	resp, err = client.Do(req)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error making request: %v", err)
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		clusterWatcher.Log.Warnf("failed to fetch from metadata, status code: %d", resp.StatusCode)
+		return "", fmt.Errorf("failed to fetch user data from EKS metadata, status code: %d", resp.StatusCode)
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to read metadata: %v", err)
+		return "", err
+	}
+
+	// Extract EKS cluster name
+	re := regexp.MustCompile(`/etc/eks/bootstrap\.sh (\S+)`)
+	match := re.FindStringSubmatch(string(body))
+	if len(match) > 0 {
+		return match[1], nil
+	}
+
+	return "", fmt.Errorf("cluster name not found in EKS user data")
+}
+
+func (clusterWatcher *ClusterWatcher) GetClusterName(providerHostname, providerEndpoint string) string {
+	provider, pHostname, pEndpoint := clusterWatcher.getProvider(providerHostname, providerEndpoint)
+	if provider == "gke" {
+		clusterWatcher.Log.Infof("Provider is GKE")
+		if clusterName, err := clusterWatcher.fetchClusterNameFromGKE(pHostname, pEndpoint); err != nil {
+			clusterWatcher.Log.Warnf("Cannot fetch cluster name for GKE %s", err.Error())
+		} else {
+			return clusterName
+		}
+	} else if provider == "eks" {
+		clusterWatcher.Log.Infof("Provider is EKS")
+		if clusterName, err := clusterWatcher.fetchClusterNameFromAWS(pHostname, pEndpoint); err != nil {
+			clusterWatcher.Log.Warnf("Cannot fetch cluster name for EKS %s", err.Error())
+		} else {
+			return clusterName
+		}
+	}
+
+	return "default"
+}
+
 func (clusterWatcher *ClusterWatcher) WatchRequiredResources() {
 	var caCert, tlsCrt, tlsKey *bytes.Buffer
 	var kGenErr, err, installErr error
@@ -647,6 +805,7 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() {
 	// kubearmor configmap
 	configmap := addOwnership(deployments.GetKubearmorConfigMap(common.Namespace, deployments.KubeArmorConfigMapName)).(*corev1.ConfigMap)
 	configmap.Data = common.ConfigMapData
+	configmap.Data["cluster"] = clusterWatcher.GetClusterName(ProviderHostname, ProviderEndpoint)
 
 	for {
 		caCert, tlsCrt, tlsKey, kGenErr = common.GeneratePki(common.Namespace, deployments.KubeArmorControllerWebhookServiceName)