
Commit

Fixed PR comments
Signed-off-by: Jones Jefferson <[email protected]>
Jones Jefferson committed Aug 14, 2024
1 parent 844868e commit c67b00c
Showing 9 changed files with 70 additions and 43 deletions.
2 changes: 1 addition & 1 deletion deployments/nimbus-k8tls/templates/configmap.yaml
@@ -13,7 +13,7 @@ data:
[INPUT]
Name tail
Path /tmp/compact_report.json
Parser JSON
Parser json
Tag json.data
DB /tmp/compact_report.db
Read_from_Head true
6 changes: 2 additions & 4 deletions deployments/nimbus-k8tls/templates/daemonset.yaml
@@ -22,10 +22,8 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: K8TLS_NAMESPACE
value: {{ include "nimbus-k8tls.fullname" . }}-env
{{- if .Values.output.elasticsearch.enabled }}
- name: OUTPUT
value: "ELASTICSEARCH"
- name: TTLSECONDSAFTERFINISHED
value: "{{ .Values.output.elasticsearch.ttlsecondsafterfinished }}"
{{- end }}
terminationGracePeriodSeconds: 10
4 changes: 4 additions & 0 deletions deployments/nimbus-k8tls/templates/namespace.yaml
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: {{ include "nimbus-k8tls.fullname" . }}-env
6 changes: 6 additions & 0 deletions deployments/nimbus-k8tls/templates/role.yaml
@@ -59,3 +59,9 @@ rules:
- delete
- get
- update
{{- if .Values.output.elasticsearch.enabled }}
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["elasticsearch-password"]
verbs: ["get"]
{{- end }}
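
For reference only (not part of this commit): a minimal Go sketch of the Secret lookup that this name-scoped rule permits, assuming a controller-runtime client and the chart-created namespace; the "password" data key is a hypothetical example.

// Sketch only: the single Get that the rule above allows, scoped by
// resourceNames to the "elasticsearch-password" Secret.
package sketch

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func fetchElasticsearchPassword(ctx context.Context, c client.Client, namespace string) ([]byte, error) {
	var secret corev1.Secret
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: "elasticsearch-password"}, &secret); err != nil {
		return nil, fmt.Errorf("reading elasticsearch-password secret: %w", err)
	}
	// "password" is a hypothetical data key; use whatever key the Secret actually carries.
	return secret.Data["password"], nil
}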
4 changes: 3 additions & 1 deletion deployments/nimbus-k8tls/values.yaml
@@ -24,4 +24,6 @@ output:
user: elastic
port: 9200
index: "findings"
password: "" # Password in base64 encoded format
password: "" # Password in base64 encoded format
ttlsecondsafterfinished: "10" # Time in seconds to keep the Job's pod after the job completes
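
Per the comment, the chart expects the password value already base64-encoded. A small sketch, assuming standard base64 of the plaintext string:

// Sketch: produce the base64 value expected by output.elasticsearch.password.
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	raw := "changeme" // hypothetical plaintext, for illustration only
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(raw))) // prints Y2hhbmdlbWU=
}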

7 changes: 7 additions & 0 deletions pkg/adapter/common/common.go
@@ -7,3 +7,10 @@ type Request struct {
Name string
Namespace string
}

type ContextKey string

const (
K8sClientKey ContextKey = "k8sClient"
NamespaceNameKey ContextKey = "NamespaceName"
)
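
A short sketch of how these typed keys are meant to be used; a dedicated ContextKey type keeps the values from colliding with plain string keys set elsewhere on the same context:

// Sketch: storing and retrieving a value under a typed context key.
package main

import (
	"context"
	"fmt"

	"github.com/5GSEC/nimbus/pkg/adapter/common"
)

func main() {
	ctx := context.WithValue(context.Background(), common.NamespaceNameKey, "nimbus-k8tls-env")
	// A lookup with the plain string "NamespaceName" would miss this value,
	// because the key type differs.
	if ns, ok := ctx.Value(common.NamespaceNameKey).(string); ok {
		fmt.Println("namespace:", ns)
	}
}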
55 changes: 34 additions & 21 deletions pkg/adapter/nimbus-k8tls/builder/builder.go
@@ -6,30 +6,31 @@ package builder
import (
"context"
"fmt"
"strings"
"os"
"strconv"
"strings"

"github.com/5GSEC/nimbus/api/v1alpha1"
"github.com/5GSEC/nimbus/pkg/adapter/common"
"github.com/5GSEC/nimbus/pkg/adapter/idpool"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"

"github.com/5GSEC/nimbus/api/v1alpha1"
"github.com/5GSEC/nimbus/pkg/adapter/idpool"
)

var (
DefaultSchedule = "@weekly"
backOffLimit = int32(5)
ttlSecondsAfterFinished = int32(3600)
hostPathDirectoryOrCreate = corev1.HostPathDirectoryOrCreate
DefaultSchedule = "@weekly"
backOffLimit = int32(5)
)

func BuildCronJob(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy) (*batchv1.CronJob, *corev1.ConfigMap) {
logger := log.FromContext(ctx)
for _, nimbusRule := range cwnp.Spec.NimbusRules {
id := nimbusRule.ID
if idpool.IsIdSupportedBy(id, "k8tls") {
cronJob, configMap := cronJobFor(id, nimbusRule)
cronJob, configMap := cronJobFor(ctx, id, nimbusRule)
cronJob.SetName(cwnp.Name + "-" + strings.ToLower(id))
cronJob.SetAnnotations(map[string]string{
"app.kubernetes.io/managed-by": "nimbus-k8tls",
@@ -42,32 +43,32 @@ func BuildCronJob(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy) (*batc
return nil, nil
}

func cronJobFor(id string, rule v1alpha1.NimbusRules) (*batchv1.CronJob, *corev1.ConfigMap) {
func cronJobFor(ctx context.Context, id string, rule v1alpha1.NimbusRules) (*batchv1.CronJob, *corev1.ConfigMap) {
switch id {
case idpool.EnsureTLS:
return ensureTlsCronJob(rule)
return ensureTlsCronJob(ctx, rule)
default:
return nil, nil
}
}

func ensureTlsCronJob(rule v1alpha1.NimbusRules) (*batchv1.CronJob, *corev1.ConfigMap) {
func ensureTlsCronJob(ctx context.Context, rule v1alpha1.NimbusRules) (*batchv1.CronJob, *corev1.ConfigMap) {
schedule, scheduleKeyExists := rule.Rule.Params["schedule"]
externalAddresses, addrKeyExists := rule.Rule.Params["external_addresses"]
if scheduleKeyExists && addrKeyExists {
return cronJobForEnsureTls(schedule[0], externalAddresses...)
return cronJobForEnsureTls(ctx, schedule[0], externalAddresses...)
}
if scheduleKeyExists {
return cronJobForEnsureTls(schedule[0])
return cronJobForEnsureTls(ctx, schedule[0])
}
if addrKeyExists {
return cronJobForEnsureTls(DefaultSchedule, externalAddresses...)
return cronJobForEnsureTls(ctx, DefaultSchedule, externalAddresses...)
}
return cronJobForEnsureTls(DefaultSchedule)
return cronJobForEnsureTls(ctx, DefaultSchedule)
}

func cronJobForEnsureTls(schedule string, externalAddresses ...string) (*batchv1.CronJob, *corev1.ConfigMap) {
output := os.Getenv("OUTPUT")
func cronJobForEnsureTls(ctx context.Context, schedule string, externalAddresses ...string) (*batchv1.CronJob, *corev1.ConfigMap) {
logger := log.FromContext(ctx)
cj := &batchv1.CronJob{
Spec: batchv1.CronJobSpec{
Schedule: schedule,
@@ -105,7 +106,8 @@ func cronJobForEnsureTls(schedule string, externalAddresses ...string) (*batchv1
{
Name: "fluent-bit-config",
MountPath: "/fluent-bit/etc/fluent-bit.conf",
SubPath: "fluent-bit.conf",
SubPath: "fluent-bit.conf",
ReadOnly: true,
},
{
Name: "k8tls-report",
@@ -149,8 +151,19 @@ func cronJobForEnsureTls(schedule string, externalAddresses ...string) (*batchv1
},
}

if output == "ELASTICSEARCH" {
// If we are sending the report to elasticsearch, then we delete the pod spawned by job after 1 hour. Else we keep the pod
// Fetch the elasticsearch password secret. If the secret is present, set TTLSecondsAfterFinished and reference the secret in the cronjob template.
var elasticsearchPasswordSecret corev1.Secret
err := ctx.Value(common.K8sClientKey).(client.Client).Get(ctx, client.ObjectKey{Namespace: ctx.Value(common.NamespaceNameKey).(string), Name: "elasticsearch-password"}, &elasticsearchPasswordSecret)
if err == nil {
// Convert string to int
i, err := strconv.ParseInt(os.Getenv("TTLSECONDSAFTERFINISHED"), 10, 32)
if err != nil {
logger.Error(err, "Error converting string to int", "TTLSECONDSAFTERFINISHED: ", os.Getenv("TTLSECONDSAFTERFINISHED"))
return nil, nil
}
// Convert int to int32
ttlSecondsAfterFinished := int32(i)
// If we are sending the report to Elasticsearch, delete the pod spawned by the job once the configured TTL has elapsed. Else we keep the pod
cj.Spec.JobTemplate.Spec.TTLSecondsAfterFinished = &ttlSecondsAfterFinished
cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{
{
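
The new code pulls the client and namespace off the context with unchecked type assertions, which will panic if the manager forgets to set them. A hedged alternative (not in the commit) with checked assertions, using the keys from common.go:

// Sketch only: defensive retrieval of the client and namespace placed on the
// context by the manager; returns an error instead of panicking.
package builder

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/5GSEC/nimbus/pkg/adapter/common"
)

func clientAndNamespaceFrom(ctx context.Context) (client.Client, string, error) {
	k8sClient, ok := ctx.Value(common.K8sClientKey).(client.Client)
	if !ok {
		return nil, "", fmt.Errorf("context has no Kubernetes client under key %q", string(common.K8sClientKey))
	}
	namespace, ok := ctx.Value(common.NamespaceNameKey).(string)
	if !ok {
		return nil, "", fmt.Errorf("context has no namespace name under key %q", string(common.NamespaceNameKey))
	}
	return k8sClient, namespace, nil
}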
20 changes: 8 additions & 12 deletions pkg/adapter/nimbus-k8tls/manager/k8tls.go
@@ -27,18 +27,14 @@ import (
func setupK8tlsEnv(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy, scheme *runtime.Scheme, k8sClient client.Client) error {
logger := log.FromContext(ctx)

ns := &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: NamespaceName,
Labels: cwnp.Labels,
Annotations: map[string]string{
"app.kubernetes.io/managed-by": "nimbus-k8tls",
},
},
// Retrieve the namespace
ns := &corev1.Namespace{}
err := k8sClient.Get(ctx, client.ObjectKey{Name: NamespaceName}, ns)
if err != nil {
if errors.IsNotFound(err) {
logger.Error(err, "failed to fetch Namespace", "Namespace.Name", NamespaceName)
}
return err
}

cm := &corev1.ConfigMap{
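
The adapter no longer creates the namespace itself; it only verifies the one created by the chart's new namespace.yaml. A sketch (not in the commit) of the same lookup with a more explicit error for the not-found case:

// Sketch only: namespace lookup with a hint that the namespace is Helm-managed.
package manager

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func ensureNamespaceExists(ctx context.Context, k8sClient client.Client, name string) error {
	ns := &corev1.Namespace{}
	if err := k8sClient.Get(ctx, client.ObjectKey{Name: name}, ns); err != nil {
		if errors.IsNotFound(err) {
			// templates/namespace.yaml now creates this namespace at install time.
			return fmt.Errorf("namespace %q not found; it should be created by the nimbus-k8tls Helm chart: %w", name, err)
		}
		return err
	}
	return nil
}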
9 changes: 5 additions & 4 deletions pkg/adapter/nimbus-k8tls/manager/manager.go
@@ -5,8 +5,8 @@ package manager

import (
"context"
"os"
"strings"

"github.com/go-logr/logr"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
@@ -31,7 +31,7 @@ import (
var (
scheme = runtime.NewScheme()
k8sClient client.Client
NamespaceName string
NamespaceName = "nimbus-k8tls-env"
)

func init() {
@@ -56,7 +56,6 @@ func Run(ctx context.Context) {
go watcher.WatchCronJobs(ctx, updateCronJobCh, deletedCronJobCh)

// Get the namespace name within which the k8tls environment needs to be set
NamespaceName = os.Getenv("K8TLS_NAMESPACE")
for {
select {
case <-ctx.Done():
@@ -108,7 +107,9 @@ func createOrUpdateCronJob(ctx context.Context, cwnpName string) {
}

deleteDanglingCj(ctx, logger, cwnp)
cronJob, configMap := builder.BuildCronJob(ctx, cwnp)
newCtx := context.WithValue(ctx, common.K8sClientKey, k8sClient)
newCtx = context.WithValue(newCtx, common.NamespaceNameKey, NamespaceName)
cronJob, configMap := builder.BuildCronJob(newCtx, cwnp)

if cronJob != nil {
if err := setupK8tlsEnv(ctx, cwnp, scheme, k8sClient); err != nil {
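
One benefit of passing the client through the context is that the Elasticsearch branch in the builder can be exercised with a fake client. A hedged test sketch (not part of the commit), mirroring the secret lookup the builder performs:

// Sketch only: wiring a fake client and the namespace name onto the context,
// then performing the same lookup as builder.cronJobForEnsureTls.
package manager_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/5GSEC/nimbus/pkg/adapter/common"
)

func TestContextCarriesClientAndNamespace(t *testing.T) {
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		t.Fatal(err)
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "elasticsearch-password", Namespace: "nimbus-k8tls-env"},
	}
	fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(secret).Build()

	ctx := context.WithValue(context.Background(), common.K8sClientKey, client.Client(fakeClient))
	ctx = context.WithValue(ctx, common.NamespaceNameKey, "nimbus-k8tls-env")

	// Mirrors the lookup added to the builder in this commit.
	var got corev1.Secret
	c := ctx.Value(common.K8sClientKey).(client.Client)
	ns := ctx.Value(common.NamespaceNameKey).(string)
	if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: "elasticsearch-password"}, &got); err != nil {
		t.Fatalf("expected secret to be readable through the context-supplied client: %v", err)
	}
}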
