From fbcb5b1c1e1dbe0a15bd2dfd019ef0ab3bf8ebea Mon Sep 17 00:00:00 2001 From: Stanislav Khalash Date: Mon, 9 Dec 2024 17:19:58 +0100 Subject: [PATCH] chore: Refactor OTel resources (#1659) --- .testcoverage.yml | 2 +- .../telemetry/logpipeline_controller.go | 54 +- .../telemetry/metricpipeline_controller.go | 112 +-- .../telemetry/tracepipeline_controller.go | 57 +- internal/labels/labels.go | 53 -- internal/labels/labels_test.go | 42 -- .../reconciler/logpipeline/otel/reconciler.go | 15 +- .../logpipeline/otel/reconciler_test.go | 11 +- .../reconciler/logpipeline/otel/status.go | 3 +- .../reconciler/metricpipeline/reconciler.go | 36 +- .../metricpipeline/reconciler_test.go | 52 +- internal/reconciler/metricpipeline/status.go | 5 +- internal/reconciler/telemetry/reconciler.go | 6 +- internal/reconciler/telemetry/status.go | 5 +- internal/reconciler/telemetry/status_test.go | 86 ++- .../reconciler/tracepipeline/reconciler.go | 15 +- .../tracepipeline/reconciler_test.go | 29 +- internal/reconciler/tracepipeline/status.go | 3 +- internal/resources/common/labels.go | 11 + internal/resources/otelcollector/agent.go | 119 ++- .../resources/otelcollector/agent_test.go | 378 ++-------- internal/resources/otelcollector/config.go | 45 -- internal/resources/otelcollector/core.go | 13 +- internal/resources/otelcollector/gateway.go | 219 +++++- .../resources/otelcollector/gateway_test.go | 711 ++++-------------- internal/resources/otelcollector/rbac.go | 42 +- internal/resources/otelcollector/rbac_test.go | 304 -------- .../otelcollector/testdata/log-gateway.yaml | 263 +++++++ .../otelcollector/testdata/metric-agent.yaml | 334 ++++++++ .../testdata/metric-gateway-istio.yaml | 353 +++++++++ .../testdata/metric-gateway.yaml | 335 +++++++++ .../otelcollector/testdata/trace-gateway.yaml | 264 +++++++ internal/resources/selfmonitor/resources.go | 19 +- internal/utils/test/marshal.go | 85 +++ main.go | 12 +- 35 files changed, 2270 insertions(+), 1823 deletions(-) delete mode 100644 internal/labels/labels.go delete mode 100644 internal/labels/labels_test.go create mode 100644 internal/resources/common/labels.go delete mode 100644 internal/resources/otelcollector/config.go delete mode 100644 internal/resources/otelcollector/rbac_test.go create mode 100644 internal/resources/otelcollector/testdata/log-gateway.yaml create mode 100644 internal/resources/otelcollector/testdata/metric-agent.yaml create mode 100644 internal/resources/otelcollector/testdata/metric-gateway-istio.yaml create mode 100644 internal/resources/otelcollector/testdata/metric-gateway.yaml create mode 100644 internal/resources/otelcollector/testdata/trace-gateway.yaml create mode 100644 internal/utils/test/marshal.go diff --git a/.testcoverage.yml b/.testcoverage.yml index 6b1e98b4d..8d2cc4a12 100644 --- a/.testcoverage.yml +++ b/.testcoverage.yml @@ -35,7 +35,7 @@ override: path: ^internal/reconciler/telemetry$ - threshold: 74 path: ^internal/reconciler/tracepipeline$ - - threshold: 82 + - threshold: 83 path: ^internal/resources/otelcollector$ - threshold: 78 path: ^internal/resources/selfmonitor$ diff --git a/controllers/telemetry/logpipeline_controller.go b/controllers/telemetry/logpipeline_controller.go index f0579d33d..9d672877a 100644 --- a/controllers/telemetry/logpipeline_controller.go +++ b/controllers/telemetry/logpipeline_controller.go @@ -52,7 +52,6 @@ import ( ) const ( - // FluentBit fbBaseName = "telemetry-fluent-bit" fbSectionsConfigMapName = fbBaseName + "-sections" fbFilesConfigMapName = fbBaseName + "-files" @@ -61,9 +60,6 @@ 
const ( fbEnvConfigSecretName = fbBaseName + "-env" fbTLSFileConfigSecretName = fbBaseName + "-output-tls-config" fbDaemonSetName = fbBaseName - - // OTel - otelLogGatewayName = "telemetry-log-gateway" ) var ( @@ -72,17 +68,6 @@ var ( fbMemoryLimit = resource.MustParse("1Gi") fbCPURequest = resource.MustParse("100m") fbMemoryRequest = resource.MustParse("50Mi") - - // OTel - // TODO: Check if these values need to be adjusted - logGatewayBaseCPULimit = resource.MustParse("700m") - logGatewayDynamicCPULimit = resource.MustParse("500m") - logGatewayBaseMemoryLimit = resource.MustParse("500Mi") - logGatewayDynamicMemoryLimit = resource.MustParse("1500Mi") - logGatewayBaseCPURequest = resource.MustParse("100m") - logGatewayDynamicCPURequest = resource.MustParse("100m") - logGatewayBaseMemoryRequest = resource.MustParse("32Mi") - logGatewayDynamicMemoryRequest = resource.MustParse("0") ) // LogPipelineController reconciles a LogPipeline object @@ -99,7 +84,6 @@ type LogPipelineControllerConfig struct { OTelCollectorImage string FluentBitPriorityClassName string LogGatewayPriorityClassName string - LogGatewayServiceName string RestConfig *rest.Config SelfMonitorName string TelemetryNamespace string @@ -216,48 +200,14 @@ func configureFluentBitReconciler(client client.Client, config LogPipelineContro //nolint:unparam // error is always nil: An error could be returned after implementing the IstioStatusChecker (TODO) func configureOtelReconciler(client client.Client, config LogPipelineControllerConfig, _ *prober.LogPipelineProber) (*logpipelineotel.Reconciler, error) { - otelConfig := logpipelineotel.Config{ - LogGatewayName: otelLogGatewayName, - TelemetryNamespace: config.TelemetryNamespace, - } - - gatewayConfig := otelcollector.GatewayConfig{ - Config: otelcollector.Config{ - BaseName: otelLogGatewayName, - Namespace: config.TelemetryNamespace, - }, - Deployment: otelcollector.DeploymentConfig{ - Image: config.OTelCollectorImage, - PriorityClassName: config.LogGatewayPriorityClassName, - BaseCPULimit: logGatewayBaseCPULimit, - DynamicCPULimit: logGatewayDynamicCPULimit, - BaseMemoryLimit: logGatewayBaseMemoryLimit, - DynamicMemoryLimit: logGatewayDynamicMemoryLimit, - BaseCPURequest: logGatewayBaseCPURequest, - DynamicCPURequest: logGatewayDynamicCPURequest, - BaseMemoryRequest: logGatewayBaseMemoryRequest, - DynamicMemoryRequest: logGatewayDynamicMemoryRequest, - }, - OTLPServiceName: config.LogGatewayServiceName, - } - pipelineValidator := &logpipelineotel.Validator{ // TODO: Add validators } - rbac := otelcollector.MakeLogGatewayRBAC( - types.NamespacedName{ - Name: otelLogGatewayName, - Namespace: config.TelemetryNamespace, - }) - otelReconciler := logpipelineotel.New( client, - otelConfig, - &otelcollector.GatewayApplierDeleter{ - Config: gatewayConfig, - RBAC: rbac, - }, + config.TelemetryNamespace, + otelcollector.NewLogGatewayApplierDeleter(config.OTelCollectorImage, config.TelemetryNamespace, config.LogGatewayPriorityClassName), &gateway.Builder{Reader: client}, &workloadstatus.DeploymentProber{Client: client}, pipelineValidator, diff --git a/controllers/telemetry/metricpipeline_controller.go b/controllers/telemetry/metricpipeline_controller.go index 3ac303c2d..115aeccf6 100644 --- a/controllers/telemetry/metricpipeline_controller.go +++ b/controllers/telemetry/metricpipeline_controller.go @@ -24,7 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" 
"k8s.io/client-go/discovery" "k8s.io/client-go/rest" @@ -56,25 +55,7 @@ import ( ) const ( - maxMetricPipelines = 3 - metricGatewayBaseName = "telemetry-metric-gateway" - metricAgentBaseName = "telemetry-metric-agent" -) - -var ( - metricAgentCPULimit = resource.MustParse("1") - metricAgentMemoryLimit = resource.MustParse("1200Mi") - metricAgentCPURequest = resource.MustParse("15m") - metricAgentMemoryRequest = resource.MustParse("50Mi") - - metricGatewayBaseCPULimit = resource.MustParse("900m") - metricGatewayDynamicCPULimit = resource.MustParse("100m") - metricGatewayBaseMemoryLimit = resource.MustParse("512Mi") - metricGatewayDynamicMemoryLimit = resource.MustParse("512Mi") - metricGatewayBaseCPURequest = resource.MustParse("25m") - metricGatewayDynamicCPURequest = resource.MustParse("0") - metricGatewayBaseMemoryRequest = resource.MustParse("32Mi") - metricGatewayDynamicMemoryRequest = resource.MustParse("0") + maxMetricPipelines = 3 ) // MetricPipelineController reconciles a MetricPipeline object @@ -88,7 +69,6 @@ type MetricPipelineController struct { type MetricPipelineControllerConfig struct { MetricAgentPriorityClassName string MetricGatewayPriorityClassName string - MetricGatewayServiceName string ModuleVersion string OTelCollectorImage string RestConfig *rest.Config @@ -123,25 +103,24 @@ func NewMetricPipelineController(client client.Client, reconcileTriggerChan <-ch return nil, err } - reconcilerConfig := metricpipeline.Config{ - AgentName: metricAgentBaseName, - GatewayName: metricGatewayBaseName, - ModuleVersion: config.ModuleVersion, - TelemetryNamespace: config.TelemetryNamespace, + agentConfigBuilder := &agent.Builder{ + Config: agent.BuilderConfig{ + GatewayOTLPServiceName: types.NamespacedName{Namespace: config.TelemetryNamespace, Name: otelcollector.MetricOTLPServiceName}, + }, } + + gatewayConfigBuilder := &gateway.Builder{Reader: client} + reconciler := metricpipeline.New( client, - reconcilerConfig, - newMetricAgentApplierDeleter(config), - &agent.Builder{ - Config: agent.BuilderConfig{ - GatewayOTLPServiceName: types.NamespacedName{Namespace: config.TelemetryNamespace, Name: config.MetricGatewayServiceName}, - }, - }, + config.TelemetryNamespace, + config.ModuleVersion, + otelcollector.NewMetricAgentApplierDeleter(config.OTelCollectorImage, config.TelemetryNamespace, config.MetricAgentPriorityClassName), + agentConfigBuilder, &workloadstatus.DaemonSetProber{Client: client}, flowHealthProber, - newMetricGatewayApplierDeleter(config), - &gateway.Builder{Reader: client}, + otelcollector.NewMetricGatewayApplierDeleter(config.OTelCollectorImage, config.TelemetryNamespace, config.MetricGatewayPriorityClassName), + gatewayConfigBuilder, &workloadstatus.DeploymentProber{Client: client}, istiostatus.NewChecker(discoveryClient), overrides.New(client, overrides.HandlerConfig{SystemNamespace: config.TelemetryNamespace}), @@ -157,69 +136,6 @@ func NewMetricPipelineController(client client.Client, reconcileTriggerChan <-ch }, nil } -func newMetricAgentApplierDeleter(config MetricPipelineControllerConfig) *otelcollector.AgentApplierDeleter { - rbac := otelcollector.MakeMetricAgentRBAC( - types.NamespacedName{ - Name: metricAgentBaseName, - Namespace: config.TelemetryNamespace, - }, - ) - - agentConfig := otelcollector.AgentConfig{ - Config: otelcollector.Config{ - BaseName: metricAgentBaseName, - Namespace: config.TelemetryNamespace, - }, - DaemonSet: otelcollector.DaemonSetConfig{ - Image: config.OTelCollectorImage, - PriorityClassName: config.MetricAgentPriorityClassName, - 
CPULimit: metricAgentCPULimit, - MemoryLimit: metricAgentMemoryLimit, - CPURequest: metricAgentCPURequest, - MemoryRequest: metricAgentMemoryRequest, - }, - } - - return &otelcollector.AgentApplierDeleter{ - Config: agentConfig, - RBAC: rbac, - } -} - -func newMetricGatewayApplierDeleter(config MetricPipelineControllerConfig) *otelcollector.GatewayApplierDeleter { - rbac := otelcollector.MakeMetricGatewayRBAC( - types.NamespacedName{ - Name: metricGatewayBaseName, - Namespace: config.TelemetryNamespace, - }, - ) - - gatewayConfig := otelcollector.GatewayConfig{ - Config: otelcollector.Config{ - BaseName: metricGatewayBaseName, - Namespace: config.TelemetryNamespace, - }, - Deployment: otelcollector.DeploymentConfig{ - Image: config.OTelCollectorImage, - PriorityClassName: config.MetricGatewayPriorityClassName, - BaseCPULimit: metricGatewayBaseCPULimit, - DynamicCPULimit: metricGatewayDynamicCPULimit, - BaseMemoryLimit: metricGatewayBaseMemoryLimit, - DynamicMemoryLimit: metricGatewayDynamicMemoryLimit, - BaseCPURequest: metricGatewayBaseCPURequest, - DynamicCPURequest: metricGatewayDynamicCPURequest, - BaseMemoryRequest: metricGatewayBaseMemoryRequest, - DynamicMemoryRequest: metricGatewayDynamicMemoryRequest, - }, - OTLPServiceName: config.MetricGatewayServiceName, - } - - return &otelcollector.GatewayApplierDeleter{ - Config: gatewayConfig, - RBAC: rbac, - } -} - func (r *MetricPipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { return r.reconciler.Reconcile(ctx, req) } diff --git a/controllers/telemetry/tracepipeline_controller.go b/controllers/telemetry/tracepipeline_controller.go index 10a582b00..ca141a832 100644 --- a/controllers/telemetry/tracepipeline_controller.go +++ b/controllers/telemetry/tracepipeline_controller.go @@ -24,7 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" @@ -55,19 +54,7 @@ import ( ) const ( - maxTracePipelines = 3 - traceGatewayBaseName = "telemetry-trace-gateway" -) - -var ( - traceGatewayBaseCPULimit = resource.MustParse("700m") - traceGatewayDynamicCPULimit = resource.MustParse("500m") - traceGatewayBaseMemoryLimit = resource.MustParse("500Mi") - traceGatewayDynamicMemoryLimit = resource.MustParse("1500Mi") - traceGatewayBaseCPURequest = resource.MustParse("100m") - traceGatewayDynamicCPURequest = resource.MustParse("100m") - traceGatewayBaseMemoryRequest = resource.MustParse("32Mi") - traceGatewayDynamicMemoryRequest = resource.MustParse("0") + maxTracePipelines = 3 ) // TracePipelineController reconciles a TracePipeline object @@ -83,7 +70,6 @@ type TracePipelineControllerConfig struct { TelemetryNamespace string OTelCollectorImage string TraceGatewayPriorityClassName string - TraceGatewayServiceName string } func NewTracePipelineController(client client.Client, reconcileTriggerChan <-chan event.GenericEvent, config TracePipelineControllerConfig) (*TracePipelineController, error) { @@ -113,15 +99,11 @@ func NewTracePipelineController(client client.Client, reconcileTriggerChan <-cha return nil, err } - reconcilerConfig := tracepipeline.Config{ - TraceGatewayName: traceGatewayBaseName, - TelemetryNamespace: config.TelemetryNamespace, - } reconciler := tracepipeline.New( client, - reconcilerConfig, + config.TelemetryNamespace, flowHealthProber, - newTraceGatewayApplierDeleter(config), + 
otelcollector.NewTraceGatewayApplierDeleter(config.OTelCollectorImage, config.TelemetryNamespace, config.TraceGatewayPriorityClassName), &gateway.Builder{Reader: client}, &workloadstatus.DeploymentProber{Client: client}, istiostatus.NewChecker(discoveryClient), @@ -137,39 +119,6 @@ func NewTracePipelineController(client client.Client, reconcileTriggerChan <-cha }, nil } -func newTraceGatewayApplierDeleter(config TracePipelineControllerConfig) *otelcollector.GatewayApplierDeleter { - rbac := otelcollector.MakeTraceGatewayRBAC( - types.NamespacedName{ - Name: traceGatewayBaseName, - Namespace: config.TelemetryNamespace, - }) - - gatewayConfig := otelcollector.GatewayConfig{ - Config: otelcollector.Config{ - BaseName: traceGatewayBaseName, - Namespace: config.TelemetryNamespace, - }, - Deployment: otelcollector.DeploymentConfig{ - Image: config.OTelCollectorImage, - PriorityClassName: config.TraceGatewayPriorityClassName, - BaseCPULimit: traceGatewayBaseCPULimit, - DynamicCPULimit: traceGatewayDynamicCPULimit, - BaseMemoryLimit: traceGatewayBaseMemoryLimit, - DynamicMemoryLimit: traceGatewayDynamicMemoryLimit, - BaseCPURequest: traceGatewayBaseCPURequest, - DynamicCPURequest: traceGatewayDynamicCPURequest, - BaseMemoryRequest: traceGatewayBaseMemoryRequest, - DynamicMemoryRequest: traceGatewayDynamicMemoryRequest, - }, - OTLPServiceName: config.TraceGatewayServiceName, - } - - return &otelcollector.GatewayApplierDeleter{ - Config: gatewayConfig, - RBAC: rbac, - } -} - func (r *TracePipelineController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { return r.reconciler.Reconcile(ctx, req) } diff --git a/internal/labels/labels.go b/internal/labels/labels.go deleted file mode 100644 index bc024cee1..000000000 --- a/internal/labels/labels.go +++ /dev/null @@ -1,53 +0,0 @@ -package labels - -const ( - selectorLabelKey = "app.kubernetes.io/name" - traceGatewayIngestSelector = "telemetry.kyma-project.io/trace-ingest" - traceGatewayExportSelector = "telemetry.kyma-project.io/trace-export" - metricAgentScrapeSelector = "telemetry.kyma-project.io/metric-scrape" - metricGatewayIngestSelector = "telemetry.kyma-project.io/metric-ingest" - metricGatewayExportSelector = "telemetry.kyma-project.io/metric-export" - logGatewayIngestSelector = "telemetry.kyma-project.io/log-ingest" - logGatewayExportSelector = "telemetry.kyma-project.io/log-export" - istioSidecarInjectLabel = "sidecar.istio.io/inject" -) - -func MakeDefaultLabel(baseName string) map[string]string { - return map[string]string{ - selectorLabelKey: baseName, - } -} - -func MakeMetricAgentSelectorLabel(baseName string) map[string]string { - return map[string]string{ - selectorLabelKey: baseName, - metricAgentScrapeSelector: "true", - istioSidecarInjectLabel: "true", - } -} - -func MakeMetricGatewaySelectorLabel(baseName string) map[string]string { - return map[string]string{ - selectorLabelKey: baseName, - metricGatewayIngestSelector: "true", - metricGatewayExportSelector: "true", - istioSidecarInjectLabel: "true", - } -} - -func MakeTraceGatewaySelectorLabel(baseName string) map[string]string { - return map[string]string{ - selectorLabelKey: baseName, - traceGatewayIngestSelector: "true", - traceGatewayExportSelector: "true", - istioSidecarInjectLabel: "true", - } -} - -func MakeLogGatewaySelectorLabel(baseName string) map[string]string { - return map[string]string{ - selectorLabelKey: baseName, - logGatewayIngestSelector: "true", - logGatewayExportSelector: "true", - } -} diff --git a/internal/labels/labels_test.go 
b/internal/labels/labels_test.go deleted file mode 100644 index 89fad5390..000000000 --- a/internal/labels/labels_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package labels - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMakeDefaultLabel(t *testing.T) { - podLabel := MakeDefaultLabel("my-pod") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": "my-pod", - }, podLabel) -} - -func TestMakeMetricAgentSelectorLabel(t *testing.T) { - metricAgentSelectorLabel := MakeMetricAgentSelectorLabel("metric-agent") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": "metric-agent", - "telemetry.kyma-project.io/metric-scrape": "true", - "sidecar.istio.io/inject": "true", - }, metricAgentSelectorLabel) -} - -func TestMakeMetricGatewaySelectorLabel(t *testing.T) { - metricGatewaySelectorLabel := MakeMetricGatewaySelectorLabel("metric-gateway") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": "metric-gateway", - "telemetry.kyma-project.io/metric-ingest": "true", - "telemetry.kyma-project.io/metric-export": "true", - "sidecar.istio.io/inject": "true", - }, metricGatewaySelectorLabel) -} -func TestMakeTraceGatewaySelectorLabel(t *testing.T) { - traceGatewaySelectorLabel := MakeTraceGatewaySelectorLabel("trace-gateway") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": "trace-gateway", - "telemetry.kyma-project.io/trace-ingest": "true", - "telemetry.kyma-project.io/trace-export": "true", - "sidecar.istio.io/inject": "true", - }, traceGatewaySelectorLabel) -} diff --git a/internal/reconciler/logpipeline/otel/reconciler.go b/internal/reconciler/logpipeline/otel/reconciler.go index fcbb6554a..c744b76ad 100644 --- a/internal/reconciler/logpipeline/otel/reconciler.go +++ b/internal/reconciler/logpipeline/otel/reconciler.go @@ -12,7 +12,6 @@ import ( operatorv1alpha1 "github.com/kyma-project/telemetry-manager/apis/operator/v1alpha1" telemetryv1alpha1 "github.com/kyma-project/telemetry-manager/apis/telemetry/v1alpha1" "github.com/kyma-project/telemetry-manager/internal/errortypes" - "github.com/kyma-project/telemetry-manager/internal/labels" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config/log/gateway" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config/otlpexporter" "github.com/kyma-project/telemetry-manager/internal/reconciler/commonstatus" @@ -26,11 +25,6 @@ import ( const defaultReplicaCount int32 = 2 -type Config struct { - LogGatewayName string - TelemetryNamespace string -} - type GatewayConfigBuilder interface { Build(ctx context.Context, pipelines []telemetryv1alpha1.LogPipeline) (*gateway.Config, otlpexporter.EnvVars, error) } @@ -49,7 +43,7 @@ var _ logpipeline.LogPipelineReconciler = &Reconciler{} type Reconciler struct { client.Client - config Config + telemetryNamespace string // Dependencies gatewayApplierDeleter GatewayApplierDeleter @@ -61,7 +55,7 @@ type Reconciler struct { func New( client client.Client, - config Config, + telemetryNamespace string, gatewayApplierDeleter GatewayApplierDeleter, gatewayConfigBuilder GatewayConfigBuilder, gatewayProber commonstatus.Prober, @@ -70,7 +64,7 @@ func New( ) *Reconciler { return &Reconciler{ Client: client, - config: config, + telemetryNamespace: telemetryNamespace, gatewayApplierDeleter: gatewayApplierDeleter, gatewayConfigBuilder: gatewayConfigBuilder, gatewayProber: gatewayProber, @@ -180,12 +174,9 @@ func (r *Reconciler) reconcileLogGateway(ctx context.Context, pipeline *telemetr return fmt.Errorf("failed to marshal 
collector config: %w", err) } - logGatewaySelectorLabels := labels.MakeLogGatewaySelectorLabel(r.config.LogGatewayName) - opts := otelcollector.GatewayApplyOptions{ CollectorConfigYAML: string(collectorConfigYAML), CollectorEnvVars: collectorEnvVars, - ComponentSelectorLabels: logGatewaySelectorLabels, Replicas: r.getReplicaCountFromTelemetry(ctx), ResourceRequirementsMultiplier: len(allPipelines), } diff --git a/internal/reconciler/logpipeline/otel/reconciler_test.go b/internal/reconciler/logpipeline/otel/reconciler_test.go index 90fbdc52d..1759c49f2 100644 --- a/internal/reconciler/logpipeline/otel/reconciler_test.go +++ b/internal/reconciler/logpipeline/otel/reconciler_test.go @@ -32,10 +32,7 @@ func TestReconcile(t *testing.T) { overridesHandlerStub := &logpipelinemocks.OverridesHandler{} overridesHandlerStub.On("LoadOverrides", context.Background()).Return(&overrides.Config{}, nil) - testConfig := Config{ - LogGatewayName: "gateway", - TelemetryNamespace: "default", - } + telemetryNamespace := "default" t.Run("log gateway probing failed", func(t *testing.T) { pipeline := testutils.NewLogPipelineBuilder().WithName("pipeline").WithOTLPOutput().Build() @@ -62,7 +59,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, gatewayApplierDeleterMock, gatewayConfigBuilderMock, gatewayProberStub, @@ -110,7 +107,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, gatewayApplierDeleterMock, gatewayConfigBuilderMock, gatewayProberStub, @@ -158,7 +155,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, gatewayApplierDeleterMock, gatewayConfigBuilderMock, gatewayProberStub, diff --git a/internal/reconciler/logpipeline/otel/status.go b/internal/reconciler/logpipeline/otel/status.go index 23bd95e09..7a1449d33 100644 --- a/internal/reconciler/logpipeline/otel/status.go +++ b/internal/reconciler/logpipeline/otel/status.go @@ -16,6 +16,7 @@ import ( "github.com/kyma-project/telemetry-manager/internal/errortypes" "github.com/kyma-project/telemetry-manager/internal/reconciler/commonstatus" "github.com/kyma-project/telemetry-manager/internal/resourcelock" + "github.com/kyma-project/telemetry-manager/internal/resources/otelcollector" "github.com/kyma-project/telemetry-manager/internal/validators/endpoint" "github.com/kyma-project/telemetry-manager/internal/validators/secretref" ) @@ -49,7 +50,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, pipelineName string) erro func (r *Reconciler) setGatewayHealthyCondition(ctx context.Context, pipeline *telemetryv1alpha1.LogPipeline) { condition := commonstatus.GetGatewayHealthyCondition(ctx, - r.gatewayProber, types.NamespacedName{Name: r.config.LogGatewayName, Namespace: r.config.TelemetryNamespace}, + r.gatewayProber, types.NamespacedName{Name: otelcollector.LogGatewayName, Namespace: r.telemetryNamespace}, r.errToMessageConverter, commonstatus.SignalTypeLogs) condition.ObservedGeneration = pipeline.Generation diff --git a/internal/reconciler/metricpipeline/reconciler.go b/internal/reconciler/metricpipeline/reconciler.go index 7ed9ebe32..204758e1c 100644 --- a/internal/reconciler/metricpipeline/reconciler.go +++ b/internal/reconciler/metricpipeline/reconciler.go @@ -14,7 +14,6 @@ import ( operatorv1alpha1 "github.com/kyma-project/telemetry-manager/apis/operator/v1alpha1" telemetryv1alpha1 "github.com/kyma-project/telemetry-manager/apis/telemetry/v1alpha1" 
"github.com/kyma-project/telemetry-manager/internal/errortypes" - "github.com/kyma-project/telemetry-manager/internal/labels" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config/metric/agent" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config/metric/gateway" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config/otlpexporter" @@ -29,13 +28,6 @@ import ( const defaultReplicaCount int32 = 2 -type Config struct { - AgentName string - GatewayName string - ModuleVersion string - TelemetryNamespace string -} - type AgentConfigBuilder interface { Build(pipelines []telemetryv1alpha1.MetricPipeline, options agent.BuildOptions) *agent.Config } @@ -74,7 +66,9 @@ type IstioStatusChecker interface { type Reconciler struct { client.Client - config Config + telemetryNamespace string + // TODO(skhalash): introduce an embed pkg exposing the module version set by go build + moduleVersion string agentApplierDeleter AgentApplierDeleter agentConfigBuilder AgentConfigBuilder @@ -92,7 +86,8 @@ type Reconciler struct { func New( client client.Client, - config Config, + telemetryNamespace string, + moduleVersion string, agentApplierDeleter AgentApplierDeleter, agentConfigBuilder AgentConfigBuilder, agentProber commonstatus.Prober, @@ -108,7 +103,8 @@ func New( ) *Reconciler { return &Reconciler{ Client: client, - config: config, + telemetryNamespace: telemetryNamespace, + moduleVersion: moduleVersion, agentApplierDeleter: agentApplierDeleter, agentConfigBuilder: agentConfigBuilder, agentProber: agentProber, @@ -247,8 +243,8 @@ func isMetricAgentRequired(pipeline *telemetryv1alpha1.MetricPipeline) bool { func (r *Reconciler) reconcileMetricGateway(ctx context.Context, pipeline *telemetryv1alpha1.MetricPipeline, allPipelines []telemetryv1alpha1.MetricPipeline) error { collectorConfig, collectorEnvVars, err := r.gatewayConfigBuilder.Build(ctx, allPipelines, gateway.BuildOptions{ - GatewayNamespace: r.config.TelemetryNamespace, - InstrumentationScopeVersion: r.config.ModuleVersion, + GatewayNamespace: r.telemetryNamespace, + InstrumentationScopeVersion: r.moduleVersion, }) if err != nil { @@ -267,13 +263,10 @@ func (r *Reconciler) reconcileMetricGateway(ctx context.Context, pipeline *telem allowedPorts = append(allowedPorts, ports.IstioEnvoy) } - metricGatewaySelectorLabels := labels.MakeMetricGatewaySelectorLabel(r.config.GatewayName) - opts := otelcollector.GatewayApplyOptions{ AllowedPorts: allowedPorts, CollectorConfigYAML: string(collectorConfigYAML), CollectorEnvVars: collectorEnvVars, - ComponentSelectorLabels: metricGatewaySelectorLabels, IstioEnabled: isIstioActive, IstioExcludePorts: []int32{ports.Metrics}, Replicas: r.getReplicaCountFromTelemetry(ctx), @@ -296,8 +289,8 @@ func (r *Reconciler) reconcileMetricAgents(ctx context.Context, pipeline *teleme agentConfig := r.agentConfigBuilder.Build(allPipelines, agent.BuildOptions{ IstioEnabled: isIstioActive, IstioCertPath: otelcollector.IstioCertPath, - InstrumentationScopeVersion: r.config.ModuleVersion, - AgentNamespace: r.config.TelemetryNamespace, + InstrumentationScopeVersion: r.moduleVersion, + AgentNamespace: r.telemetryNamespace, }) agentConfigYAML, err := yaml.Marshal(agentConfig) @@ -310,15 +303,12 @@ func (r *Reconciler) reconcileMetricAgents(ctx context.Context, pipeline *teleme allowedPorts = append(allowedPorts, ports.IstioEnvoy) } - metricAgentSelectorLabels := labels.MakeMetricAgentSelectorLabel(r.config.AgentName) - if err := r.agentApplierDeleter.ApplyResources( ctx, 
k8sutils.NewOwnerReferenceSetter(r.Client, pipeline), otelcollector.AgentApplyOptions{ - AllowedPorts: allowedPorts, - CollectorConfigYAML: string(agentConfigYAML), - ComponentSelectorLabels: metricAgentSelectorLabels, + AllowedPorts: allowedPorts, + CollectorConfigYAML: string(agentConfigYAML), }, ); err != nil { return fmt.Errorf("failed to apply agent resources: %w", err) diff --git a/internal/reconciler/metricpipeline/reconciler_test.go b/internal/reconciler/metricpipeline/reconciler_test.go index 4bbedb59c..6f4ed3bee 100644 --- a/internal/reconciler/metricpipeline/reconciler_test.go +++ b/internal/reconciler/metricpipeline/reconciler_test.go @@ -46,11 +46,8 @@ func TestReconcile(t *testing.T) { istioStatusCheckerStub := &stubs.IstioStatusChecker{IsActive: false} - testConfig := Config{ - AgentName: "agent", - GatewayName: "gateway", - TelemetryNamespace: "default", - } + telemetryNamespace := "default" + moduleVersion := "1.0.0" t.Run("metric gateway deployment is not ready", func(t *testing.T) { pipeline := testutils.NewMetricPipelineBuilder().Build() @@ -83,7 +80,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -143,7 +141,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -203,7 +202,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -269,7 +269,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, agentApplierDeleterMock, agentConfigBuilderMock, agentProberStub, @@ -337,7 +338,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, agentApplierDeleterMock, agentConfigBuilderMock, agentProberStub, @@ -405,7 +407,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, agentApplierDeleterMock, agentConfigBuilderMock, agentProberStub, @@ -474,7 +477,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -537,7 +541,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, agentApplierDeleterMock, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -601,7 +606,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -763,7 +769,8 @@ func TestReconcile(t *testing.T) { errToMsg := &conditions.ErrorToMessageConverter{} sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -907,7 +914,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, agentApplierDeleterMock, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -992,7 +1000,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, 
&mocks.AgentConfigBuilder{}, agentProberStub, @@ -1062,7 +1071,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, &mocks.AgentApplierDeleter{}, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -1136,7 +1146,8 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, + moduleVersion, agentApplierDeleterMock, &mocks.AgentConfigBuilder{}, agentProberStub, @@ -1261,7 +1272,8 @@ func TestReconcile(t *testing.T) { sut := Reconciler{ Client: fakeClient, - config: testConfig, + telemetryNamespace: telemetryNamespace, + moduleVersion: moduleVersion, agentConfigBuilder: agentConfigBuilderMock, gatewayConfigBuilder: gatewayConfigBuilderMock, agentApplierDeleter: agentApplierDeleterMock, diff --git a/internal/reconciler/metricpipeline/status.go b/internal/reconciler/metricpipeline/status.go index 1cafb7b02..d57de2f17 100644 --- a/internal/reconciler/metricpipeline/status.go +++ b/internal/reconciler/metricpipeline/status.go @@ -16,6 +16,7 @@ import ( "github.com/kyma-project/telemetry-manager/internal/errortypes" "github.com/kyma-project/telemetry-manager/internal/reconciler/commonstatus" "github.com/kyma-project/telemetry-manager/internal/resourcelock" + "github.com/kyma-project/telemetry-manager/internal/resources/otelcollector" "github.com/kyma-project/telemetry-manager/internal/selfmonitor/prober" "github.com/kyma-project/telemetry-manager/internal/validators/endpoint" "github.com/kyma-project/telemetry-manager/internal/validators/secretref" @@ -60,7 +61,7 @@ func (r *Reconciler) setAgentHealthyCondition(ctx context.Context, pipeline *tel if isMetricAgentRequired(pipeline) { condition = commonstatus.GetAgentHealthyCondition(ctx, r.agentProber, - types.NamespacedName{Name: r.config.AgentName, Namespace: r.config.TelemetryNamespace}, + types.NamespacedName{Name: otelcollector.MetricAgentName, Namespace: r.telemetryNamespace}, r.errToMsgConverter, commonstatus.SignalTypeMetrics) } @@ -71,7 +72,7 @@ func (r *Reconciler) setAgentHealthyCondition(ctx context.Context, pipeline *tel func (r *Reconciler) setGatewayHealthyCondition(ctx context.Context, pipeline *telemetryv1alpha1.MetricPipeline) { condition := commonstatus.GetGatewayHealthyCondition(ctx, - r.gatewayProber, types.NamespacedName{Name: r.config.GatewayName, Namespace: r.config.TelemetryNamespace}, + r.gatewayProber, types.NamespacedName{Name: otelcollector.MetricGatewayName, Namespace: r.telemetryNamespace}, r.errToMsgConverter, commonstatus.SignalTypeMetrics) condition.ObservedGeneration = pipeline.Generation diff --git a/internal/reconciler/telemetry/reconciler.go b/internal/reconciler/telemetry/reconciler.go index 8b75f92f6..bc168d725 100644 --- a/internal/reconciler/telemetry/reconciler.go +++ b/internal/reconciler/telemetry/reconciler.go @@ -39,13 +39,11 @@ type Config struct { } type TracesConfig struct { - OTLPServiceName string - Namespace string + Namespace string } type MetricsConfig struct { - OTLPServiceName string - Namespace string + Namespace string } type WebhookConfig struct { diff --git a/internal/reconciler/telemetry/status.go b/internal/reconciler/telemetry/status.go index 3107df17b..61114f952 100644 --- a/internal/reconciler/telemetry/status.go +++ b/internal/reconciler/telemetry/status.go @@ -14,6 +14,7 @@ import ( telemetryv1alpha1 "github.com/kyma-project/telemetry-manager/apis/telemetry/v1alpha1" "github.com/kyma-project/telemetry-manager/internal/conditions" 
"github.com/kyma-project/telemetry-manager/internal/otelcollector/ports" + "github.com/kyma-project/telemetry-manager/internal/resources/otelcollector" ) type ComponentHealthChecker interface { @@ -112,7 +113,7 @@ func (r *Reconciler) traceEndpoints(ctx context.Context, config Config, telemetr return nil, nil //nolint:nilnil //it is ok in this context, even if it is not go idiomatic } - return makeOTLPEndpoints(config.Traces.OTLPServiceName, config.Traces.Namespace), nil + return makeOTLPEndpoints(otelcollector.TraceOTLPServiceName, config.Traces.Namespace), nil } func (r *Reconciler) metricEndpoints(ctx context.Context, config Config, telemetryInDeletion bool) (*operatorv1alpha1.OTLPEndpoints, error) { @@ -125,7 +126,7 @@ func (r *Reconciler) metricEndpoints(ctx context.Context, config Config, telemet return nil, nil //nolint:nilnil //it is ok in this context, even if it is not go idiomatic } - return makeOTLPEndpoints(config.Metrics.OTLPServiceName, config.Metrics.Namespace), nil + return makeOTLPEndpoints(otelcollector.MetricOTLPServiceName, config.Metrics.Namespace), nil } func makeOTLPEndpoints(serviceName, namespace string) *operatorv1alpha1.OTLPEndpoints { diff --git a/internal/reconciler/telemetry/status_test.go b/internal/reconciler/telemetry/status_test.go index b5ae04dc9..3c568f80d 100644 --- a/internal/reconciler/telemetry/status_test.go +++ b/internal/reconciler/telemetry/status_test.go @@ -40,8 +40,8 @@ func TestUpdateStatus(t *testing.T) { { name: "all components are healthy", config: &Config{ - Traces: TracesConfig{OTLPServiceName: "traces", Namespace: "telemetry-system"}, - Metrics: MetricsConfig{OTLPServiceName: "metrics", Namespace: "telemetry-system"}, + Traces: TracesConfig{Namespace: "telemetry-system"}, + Metrics: MetricsConfig{Namespace: "telemetry-system"}, }, telemetry: &operatorv1alpha1.Telemetry{ObjectMeta: metav1.ObjectMeta{Name: "default"}}, logsCheckerReturn: &metav1.Condition{Type: conditions.TypeLogComponentsHealthy, Status: metav1.ConditionTrue, Reason: conditions.ReasonComponentsRunning}, @@ -55,19 +55,20 @@ func TestUpdateStatus(t *testing.T) { }, expectedEndpoints: operatorv1alpha1.GatewayEndpoints{ Traces: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://traces.telemetry-system:4317", - HTTP: "http://traces.telemetry-system:4318", + GRPC: "http://telemetry-otlp-traces.telemetry-system:4317", + HTTP: "http://telemetry-otlp-traces.telemetry-system:4318", }, Metrics: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://metrics.telemetry-system:4317", - HTTP: "http://metrics.telemetry-system:4318", - }}, + GRPC: "http://telemetry-otlp-metrics.telemetry-system:4317", + HTTP: "http://telemetry-otlp-metrics.telemetry-system:4318", + }, + }, }, { name: "log components are unhealthy", config: &Config{ - Traces: TracesConfig{OTLPServiceName: "traces", Namespace: "telemetry-system"}, - Metrics: MetricsConfig{OTLPServiceName: "metrics", Namespace: "telemetry-system"}, + Traces: TracesConfig{Namespace: "telemetry-system"}, + Metrics: MetricsConfig{Namespace: "telemetry-system"}, }, telemetry: &operatorv1alpha1.Telemetry{ObjectMeta: metav1.ObjectMeta{Name: "default"}}, logsCheckerReturn: &metav1.Condition{Type: conditions.TypeLogComponentsHealthy, Status: metav1.ConditionFalse, Reason: conditions.ReasonAgentNotReady}, @@ -81,18 +82,19 @@ func TestUpdateStatus(t *testing.T) { }, expectedEndpoints: operatorv1alpha1.GatewayEndpoints{ Traces: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://traces.telemetry-system:4317", - HTTP: "http://traces.telemetry-system:4318", + 
GRPC: "http://telemetry-otlp-traces.telemetry-system:4317", + HTTP: "http://telemetry-otlp-traces.telemetry-system:4318", }, Metrics: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://metrics.telemetry-system:4317", - HTTP: "http://metrics.telemetry-system:4318", - }}, + GRPC: "http://telemetry-otlp-metrics.telemetry-system:4317", + HTTP: "http://telemetry-otlp-metrics.telemetry-system:4318", + }, + }, }, { name: "trace components are unhealthy", config: &Config{ - Metrics: MetricsConfig{OTLPServiceName: "metrics", Namespace: "telemetry-system"}, + Metrics: MetricsConfig{Namespace: "telemetry-system"}, }, telemetry: &operatorv1alpha1.Telemetry{ObjectMeta: metav1.ObjectMeta{Name: "default"}}, logsCheckerReturn: &metav1.Condition{Type: conditions.TypeLogComponentsHealthy, Status: metav1.ConditionTrue, Reason: conditions.ReasonComponentsRunning}, @@ -104,15 +106,17 @@ func TestUpdateStatus(t *testing.T) { {Type: conditions.TypeMetricComponentsHealthy, Status: metav1.ConditionTrue, Reason: conditions.ReasonComponentsRunning}, {Type: conditions.TypeTraceComponentsHealthy, Status: metav1.ConditionFalse, Reason: conditions.ReasonGatewayNotReady}, }, - expectedEndpoints: operatorv1alpha1.GatewayEndpoints{Metrics: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://metrics.telemetry-system:4317", - HTTP: "http://metrics.telemetry-system:4318", - }}, + expectedEndpoints: operatorv1alpha1.GatewayEndpoints{ + Metrics: &operatorv1alpha1.OTLPEndpoints{ + GRPC: "http://telemetry-otlp-metrics.telemetry-system:4317", + HTTP: "http://telemetry-otlp-metrics.telemetry-system:4318", + }, + }, }, { name: "metric components are unhealthy", config: &Config{ - Traces: TracesConfig{OTLPServiceName: "traces", Namespace: "telemetry-system"}, + Traces: TracesConfig{Namespace: "telemetry-system"}, }, telemetry: &operatorv1alpha1.Telemetry{ObjectMeta: metav1.ObjectMeta{Name: "default"}}, logsCheckerReturn: &metav1.Condition{Type: conditions.TypeLogComponentsHealthy, Status: metav1.ConditionTrue, Reason: conditions.ReasonComponentsRunning}, @@ -124,10 +128,12 @@ func TestUpdateStatus(t *testing.T) { {Type: conditions.TypeMetricComponentsHealthy, Status: metav1.ConditionFalse, Reason: conditions.ReasonGatewayNotReady}, {Type: conditions.TypeTraceComponentsHealthy, Status: metav1.ConditionTrue, Reason: conditions.ReasonComponentsRunning}, }, - expectedEndpoints: operatorv1alpha1.GatewayEndpoints{Traces: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://traces.telemetry-system:4317", - HTTP: "http://traces.telemetry-system:4318", - }}, + expectedEndpoints: operatorv1alpha1.GatewayEndpoints{ + Traces: &operatorv1alpha1.OTLPEndpoints{ + GRPC: "http://telemetry-otlp-traces.telemetry-system:4317", + HTTP: "http://telemetry-otlp-traces.telemetry-system:4318", + }, + }, }, { name: "log components check error", @@ -163,8 +169,8 @@ func TestUpdateStatus(t *testing.T) { { name: "deleting with no dependent resources", config: &Config{ - Traces: TracesConfig{OTLPServiceName: "traces", Namespace: "telemetry-system"}, - Metrics: MetricsConfig{OTLPServiceName: "metrics", Namespace: "telemetry-system"}, + Traces: TracesConfig{Namespace: "telemetry-system"}, + Metrics: MetricsConfig{Namespace: "telemetry-system"}, }, telemetry: &operatorv1alpha1.Telemetry{ ObjectMeta: metav1.ObjectMeta{ @@ -184,18 +190,20 @@ func TestUpdateStatus(t *testing.T) { }, expectedEndpoints: operatorv1alpha1.GatewayEndpoints{ Traces: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://traces.telemetry-system:4317", - HTTP: "http://traces.telemetry-system:4318", - }, 
Metrics: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://metrics.telemetry-system:4317", - HTTP: "http://metrics.telemetry-system:4318", - }}, + GRPC: "http://telemetry-otlp-traces.telemetry-system:4317", + HTTP: "http://telemetry-otlp-traces.telemetry-system:4318", + }, + Metrics: &operatorv1alpha1.OTLPEndpoints{ + GRPC: "http://telemetry-otlp-metrics.telemetry-system:4317", + HTTP: "http://telemetry-otlp-metrics.telemetry-system:4318", + }, + }, }, { name: "deleting with dependent resources", config: &Config{ - Traces: TracesConfig{OTLPServiceName: "traces", Namespace: "telemetry-system"}, - Metrics: MetricsConfig{OTLPServiceName: "metrics", Namespace: "telemetry-system"}, + Traces: TracesConfig{Namespace: "telemetry-system"}, + Metrics: MetricsConfig{Namespace: "telemetry-system"}, }, telemetry: &operatorv1alpha1.Telemetry{ ObjectMeta: metav1.ObjectMeta{ @@ -217,14 +225,14 @@ func TestUpdateStatus(t *testing.T) { {Type: conditions.TypeTraceComponentsHealthy, Status: metav1.ConditionFalse, Reason: conditions.ReasonComponentsRunning}, }, expectedEndpoints: operatorv1alpha1.GatewayEndpoints{Metrics: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://metrics.telemetry-system:4317", - HTTP: "http://metrics.telemetry-system:4318", + GRPC: "http://telemetry-otlp-metrics.telemetry-system:4317", + HTTP: "http://telemetry-otlp-metrics.telemetry-system:4318", }}, }, { name: "metric agent is unhealthy", config: &Config{ - Traces: TracesConfig{OTLPServiceName: "traces", Namespace: "telemetry-system"}, + Traces: TracesConfig{Namespace: "telemetry-system"}, }, telemetry: &operatorv1alpha1.Telemetry{ObjectMeta: metav1.ObjectMeta{Name: "default"}}, logsCheckerReturn: &metav1.Condition{Type: conditions.TypeLogComponentsHealthy, Status: metav1.ConditionTrue, Reason: conditions.ReasonComponentsRunning}, @@ -240,8 +248,8 @@ func TestUpdateStatus(t *testing.T) { {Type: conditions.TypeTraceComponentsHealthy, Status: metav1.ConditionTrue, Reason: conditions.ReasonComponentsRunning}, }, expectedEndpoints: operatorv1alpha1.GatewayEndpoints{Traces: &operatorv1alpha1.OTLPEndpoints{ - GRPC: "http://traces.telemetry-system:4317", - HTTP: "http://traces.telemetry-system:4318", + GRPC: "http://telemetry-otlp-traces.telemetry-system:4317", + HTTP: "http://telemetry-otlp-traces.telemetry-system:4318", }}, }, } diff --git a/internal/reconciler/tracepipeline/reconciler.go b/internal/reconciler/tracepipeline/reconciler.go index ebb6f5116..60686541e 100644 --- a/internal/reconciler/tracepipeline/reconciler.go +++ b/internal/reconciler/tracepipeline/reconciler.go @@ -30,7 +30,6 @@ import ( operatorv1alpha1 "github.com/kyma-project/telemetry-manager/apis/operator/v1alpha1" telemetryv1alpha1 "github.com/kyma-project/telemetry-manager/apis/telemetry/v1alpha1" "github.com/kyma-project/telemetry-manager/internal/errortypes" - "github.com/kyma-project/telemetry-manager/internal/labels" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config/otlpexporter" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config/trace/gateway" "github.com/kyma-project/telemetry-manager/internal/otelcollector/ports" @@ -44,11 +43,6 @@ import ( const defaultReplicaCount int32 = 2 -type Config struct { - TraceGatewayName string - TelemetryNamespace string -} - type GatewayConfigBuilder interface { Build(ctx context.Context, pipelines []telemetryv1alpha1.TracePipeline) (*gateway.Config, otlpexporter.EnvVars, error) } @@ -78,7 +72,7 @@ type IstioStatusChecker interface { type Reconciler struct { client.Client - config 
Config + telemetryNamespace string // Dependencies flowHealthProber FlowHealthProber @@ -94,7 +88,7 @@ type Reconciler struct { func New( client client.Client, - config Config, + telemetryNamespace string, flowHealthProber FlowHealthProber, gatewayApplierDeleter GatewayApplierDeleter, gatewayConfigBuilder GatewayConfigBuilder, @@ -107,7 +101,7 @@ func New( ) *Reconciler { return &Reconciler{ Client: client, - config: config, + telemetryNamespace: telemetryNamespace, flowHealthProber: flowHealthProber, gatewayApplierDeleter: gatewayApplierDeleter, gatewayConfigBuilder: gatewayConfigBuilder, @@ -246,13 +240,10 @@ func (r *Reconciler) reconcileTraceGateway(ctx context.Context, pipeline *teleme allowedPorts = append(allowedPorts, ports.IstioEnvoy) } - traceGatewaySelectorLabels := labels.MakeTraceGatewaySelectorLabel(r.config.TraceGatewayName) - opts := otelcollector.GatewayApplyOptions{ AllowedPorts: allowedPorts, CollectorConfigYAML: string(collectorConfigYAML), CollectorEnvVars: collectorEnvVars, - ComponentSelectorLabels: traceGatewaySelectorLabels, IstioEnabled: isIstioActive, IstioExcludePorts: []int32{ports.Metrics}, Replicas: r.getReplicaCountFromTelemetry(ctx), diff --git a/internal/reconciler/tracepipeline/reconciler_test.go b/internal/reconciler/tracepipeline/reconciler_test.go index 237b3a2e2..4cd1a7ee6 100644 --- a/internal/reconciler/tracepipeline/reconciler_test.go +++ b/internal/reconciler/tracepipeline/reconciler_test.go @@ -45,10 +45,7 @@ func TestReconcile(t *testing.T) { istioStatusCheckerStub := &stubs.IstioStatusChecker{IsActive: false} - testConfig := Config{ - TraceGatewayName: "gateway", - TelemetryNamespace: "default", - } + telemetryNamespace := "default" t.Run("trace gateway probing failed", func(t *testing.T) { pipeline := testutils.NewTracePipelineBuilder().WithName("pipeline").Build() @@ -80,7 +77,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -137,7 +134,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -194,7 +191,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -250,7 +247,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -324,7 +321,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -377,7 +374,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, &mocks.GatewayApplierDeleter{}, gatewayConfigBuilderMock, @@ -534,7 +531,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -669,7 +666,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -752,7 +749,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, 
gatewayConfigBuilderMock, @@ -817,7 +814,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -880,7 +877,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, @@ -971,7 +968,7 @@ func TestReconcile(t *testing.T) { sut := New( fakeClient, - testConfig, + telemetryNamespace, flowHealthProberStub, gatewayApplierDeleterMock, gatewayConfigBuilderMock, diff --git a/internal/reconciler/tracepipeline/status.go b/internal/reconciler/tracepipeline/status.go index 6c8477715..688716c30 100644 --- a/internal/reconciler/tracepipeline/status.go +++ b/internal/reconciler/tracepipeline/status.go @@ -16,6 +16,7 @@ import ( "github.com/kyma-project/telemetry-manager/internal/errortypes" "github.com/kyma-project/telemetry-manager/internal/reconciler/commonstatus" "github.com/kyma-project/telemetry-manager/internal/resourcelock" + "github.com/kyma-project/telemetry-manager/internal/resources/otelcollector" "github.com/kyma-project/telemetry-manager/internal/selfmonitor/prober" "github.com/kyma-project/telemetry-manager/internal/validators/endpoint" "github.com/kyma-project/telemetry-manager/internal/validators/secretref" @@ -50,7 +51,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, pipelineName string) erro func (r *Reconciler) setGatewayHealthyCondition(ctx context.Context, pipeline *telemetryv1alpha1.TracePipeline) { condition := commonstatus.GetGatewayHealthyCondition(ctx, - r.gatewayProber, types.NamespacedName{Name: r.config.TraceGatewayName, Namespace: r.config.TelemetryNamespace}, + r.gatewayProber, types.NamespacedName{Name: otelcollector.TraceGatewayName, Namespace: r.telemetryNamespace}, r.errToMsgConverter, commonstatus.SignalTypeTraces) condition.ObservedGeneration = pipeline.Generation diff --git a/internal/resources/common/labels.go b/internal/resources/common/labels.go new file mode 100644 index 000000000..de6632c99 --- /dev/null +++ b/internal/resources/common/labels.go @@ -0,0 +1,11 @@ +package common + +const ( + nameLabelKey = "app.kubernetes.io/name" +) + +func MakeDefaultLabels(baseName string) map[string]string { + return map[string]string{ + nameLabelKey: baseName, + } +} diff --git a/internal/resources/otelcollector/agent.go b/internal/resources/otelcollector/agent.go index c6a279784..9a1f0b2b4 100644 --- a/internal/resources/otelcollector/agent.go +++ b/internal/resources/otelcollector/agent.go @@ -15,7 +15,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/kyma-project/telemetry-manager/internal/configchecksum" - "github.com/kyma-project/telemetry-manager/internal/labels" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config" "github.com/kyma-project/telemetry-manager/internal/otelcollector/ports" commonresources "github.com/kyma-project/telemetry-manager/internal/resources/common" @@ -23,25 +22,63 @@ import ( ) const ( - istioCertVolumeName = "istio-certs" - IstioCertPath = "/etc/istio-output-certs" + IstioCertPath = "/etc/istio-output-certs" + MetricAgentName = "telemetry-metric-agent" + + istioCertVolumeName = "istio-certs" + metricAgentScrapeKey = "telemetry.kyma-project.io/metric-scrape" +) + +var ( + metricAgentCPULimit = resource.MustParse("1") + metricAgentMemoryLimit = resource.MustParse("1200Mi") + metricAgentCPURequest = resource.MustParse("15m") + metricAgentMemoryRequest = 
resource.MustParse("50Mi") ) +func NewMetricAgentApplierDeleter(image, namespace, priorityClassName string) *AgentApplierDeleter { + extraLabels := map[string]string{ + metricAgentScrapeKey: "true", + istioSidecarInjectKey: "true", // inject Istio sidecar for SDS certificates and agent-to-gateway communication + } + + return &AgentApplierDeleter{ + baseName: MetricAgentName, + extraPodLabel: extraLabels, + image: image, + namespace: namespace, + priorityClassName: priorityClassName, + rbac: makeMetricAgentRBAC(namespace), + cpuLimit: metricAgentCPULimit, + memoryLimit: metricAgentMemoryLimit, + cpuRequest: metricAgentCPURequest, + memoryRequest: metricAgentMemoryRequest, + } +} + type AgentApplierDeleter struct { - Config AgentConfig - RBAC Rbac + baseName string + extraPodLabel map[string]string + image string + namespace string + priorityClassName string + rbac rbac + + cpuLimit resource.Quantity + memoryLimit resource.Quantity + cpuRequest resource.Quantity + memoryRequest resource.Quantity } type AgentApplyOptions struct { - AllowedPorts []int32 - CollectorConfigYAML string - ComponentSelectorLabels map[string]string + AllowedPorts []int32 + CollectorConfigYAML string } func (aad *AgentApplierDeleter) ApplyResources(ctx context.Context, c client.Client, opts AgentApplyOptions) error { - name := types.NamespacedName{Namespace: aad.Config.Namespace, Name: aad.Config.BaseName} + name := types.NamespacedName{Namespace: aad.namespace, Name: aad.baseName} - if err := applyCommonResources(ctx, c, name, aad.RBAC, opts.AllowedPorts); err != nil { + if err := applyCommonResources(ctx, c, name, aad.rbac, opts.AllowedPorts); err != nil { return fmt.Errorf("failed to create common resource: %w", err) } @@ -51,7 +88,7 @@ func (aad *AgentApplierDeleter) ApplyResources(ctx context.Context, c client.Cli } configChecksum := configchecksum.Calculate([]corev1.ConfigMap{*configMap}, []corev1.Secret{}) - if err := k8sutils.CreateOrUpdateDaemonSet(ctx, c, aad.makeAgentDaemonSet(configChecksum, opts)); err != nil { + if err := k8sutils.CreateOrUpdateDaemonSet(ctx, c, aad.makeAgentDaemonSet(configChecksum)); err != nil { return fmt.Errorf("failed to create daemonset: %w", err) } @@ -62,14 +99,14 @@ func (aad *AgentApplierDeleter) DeleteResources(ctx context.Context, c client.Cl // Attempt to clean up as many resources as possible and avoid early return when one of the deletions fails var allErrors error = nil - name := types.NamespacedName{Name: aad.Config.BaseName, Namespace: aad.Config.Namespace} + name := types.NamespacedName{Name: aad.baseName, Namespace: aad.namespace} if err := deleteCommonResources(ctx, c, name); err != nil { allErrors = errors.Join(allErrors, err) } objectMeta := metav1.ObjectMeta{ - Name: aad.Config.BaseName, - Namespace: aad.Config.Namespace, + Name: aad.baseName, + Namespace: aad.namespace, } configMap := corev1.ConfigMap{ObjectMeta: objectMeta} @@ -85,36 +122,45 @@ func (aad *AgentApplierDeleter) DeleteResources(ctx context.Context, c client.Cl return allErrors } -func (aad *AgentApplierDeleter) makeAgentDaemonSet(configChecksum string, opts AgentApplyOptions) *appsv1.DaemonSet { - selectorLabels := labels.MakeDefaultLabel(aad.Config.BaseName) +func (aad *AgentApplierDeleter) makeAgentDaemonSet(configChecksum string) *appsv1.DaemonSet { + selectorLabels := commonresources.MakeDefaultLabels(aad.baseName) annotations := map[string]string{"checksum/config": configChecksum} - maps.Copy(annotations, makeIstioTLSPodAnnotations(IstioCertPath)) + maps.Copy(annotations, 
makeIstioAnnotations(IstioCertPath)) - dsConfig := aad.Config.DaemonSet resources := aad.makeAgentResourceRequirements() - podSpec := makePodSpec( - aad.Config.BaseName, - dsConfig.Image, - commonresources.WithPriorityClass(dsConfig.PriorityClassName), + + opts := []podSpecOption{ + commonresources.WithPriorityClass(aad.priorityClassName), commonresources.WithResources(resources), withEnvVarFromSource(config.EnvVarCurrentPodIP, fieldPathPodIP), withEnvVarFromSource(config.EnvVarCurrentNodeName, fieldPathNodeName), - commonresources.WithGoMemLimitEnvVar(dsConfig.MemoryLimit), - withVolume(corev1.Volume{Name: istioCertVolumeName, VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }}), + commonresources.WithGoMemLimitEnvVar(aad.memoryLimit), + + // emptyDir volume for Istio certificates + withVolume(corev1.Volume{ + Name: istioCertVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }), withVolumeMount(corev1.VolumeMount{ Name: istioCertVolumeName, MountPath: IstioCertPath, ReadOnly: true, }), - ) + } + + podSpec := makePodSpec(aad.baseName, aad.image, opts...) + + podLabels := make(map[string]string) + maps.Copy(podLabels, selectorLabels) + maps.Copy(podLabels, aad.extraPodLabel) return &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ - Name: aad.Config.BaseName, - Namespace: aad.Config.Namespace, + Name: aad.baseName, + Namespace: aad.namespace, Labels: selectorLabels, }, Spec: appsv1.DaemonSetSpec{ @@ -123,7 +169,7 @@ func (aad *AgentApplierDeleter) makeAgentDaemonSet(configChecksum string, opts A }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: opts.ComponentSelectorLabels, + Labels: podLabels, Annotations: annotations, }, Spec: podSpec, @@ -133,21 +179,20 @@ func (aad *AgentApplierDeleter) makeAgentDaemonSet(configChecksum string, opts A } func (aad *AgentApplierDeleter) makeAgentResourceRequirements() corev1.ResourceRequirements { - dsConfig := aad.Config.DaemonSet - return corev1.ResourceRequirements{ Limits: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceCPU: dsConfig.CPULimit, - corev1.ResourceMemory: dsConfig.MemoryLimit, + corev1.ResourceCPU: aad.cpuLimit, + corev1.ResourceMemory: aad.memoryLimit, }, Requests: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceCPU: dsConfig.CPURequest, - corev1.ResourceMemory: dsConfig.MemoryRequest, + corev1.ResourceCPU: aad.cpuRequest, + corev1.ResourceMemory: aad.memoryRequest, }, } } -func makeIstioTLSPodAnnotations(istioCertPath string) map[string]string { +func makeIstioAnnotations(istioCertPath string) map[string]string { + // Provision Istio certificates for Prometheus Receiver running as a part of MetricAgent by injecting a sidecar which will rotate SDS certificates and output them to a volume. However, the sidecar should not intercept scraping requests because Prometheus’s model of direct endpoint access is incompatible with Istio’s sidecar proxy model. 
return map[string]string{ "proxy.istio.io/config": fmt.Sprintf(`# configure an env variable OUTPUT_CERTS to write certificates to the given folder proxyMetadata: diff --git a/internal/resources/otelcollector/agent_test.go b/internal/resources/otelcollector/agent_test.go index 09dc1009e..a0cde51f0 100644 --- a/internal/resources/otelcollector/agent_test.go +++ b/internal/resources/otelcollector/agent_test.go @@ -2,360 +2,84 @@ package otelcollector import ( "context" + "os" "testing" "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" + istiosecurityclientv1 "istio.io/client-go/pkg/apis/security/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" - "github.com/kyma-project/telemetry-manager/internal/labels" + testutils "github.com/kyma-project/telemetry-manager/internal/utils/test" ) -var ( - agentNamespace = "my-namespace" - agentName = "my-agent" - agentCfg = "dummy otel collector config" -) +func TestAgent_ApplyResources(t *testing.T) { + var objects []client.Object -func TestApplyAgentResources(t *testing.T) { - ctx := context.Background() - client := fake.NewClientBuilder().Build() + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(istiosecurityclientv1.AddToScheme(scheme)) - sut := AgentApplierDeleter{ - Config: AgentConfig{ - Config: Config{ - BaseName: agentName, - Namespace: agentNamespace, - }, + client := fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{ + Create: func(_ context.Context, c client.WithWatch, obj client.Object, _ ...client.CreateOption) error { + objects = append(objects, obj) + // Nothing has to be created, just add created object to the list + return nil }, - RBAC: createAgentRBAC(), - } - - err := sut.ApplyResources(ctx, client, AgentApplyOptions{ - AllowedPorts: []int32{5555, 6666}, - CollectorConfigYAML: agentCfg, - ComponentSelectorLabels: labels.MakeMetricAgentSelectorLabel(agentName), - }) - require.NoError(t, err) - - t.Run("should create service account", func(t *testing.T) { - var sas corev1.ServiceAccountList - - require.NoError(t, client.List(ctx, &sas)) - require.Len(t, sas.Items, 1) - - sa := sas.Items[0] - require.NotNil(t, sa) - require.Equal(t, agentName, sa.Name) - require.Equal(t, agentNamespace, sa.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, sa.Labels) - }) + }).Build() - t.Run("should create cluster role", func(t *testing.T) { - var crs rbacv1.ClusterRoleList + image := "opentelemetry/collector:latest" + namespace := "kyma-system" + priorityClassName := "normal" + sut := NewMetricAgentApplierDeleter(image, namespace, priorityClassName) - require.NoError(t, client.List(ctx, &crs)) - require.Len(t, crs.Items, 1) - - cr := crs.Items[0] - require.NotNil(t, cr) - require.Equal(t, agentName, cr.Name) - require.Equal(t, agentNamespace, cr.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, cr.Labels) - require.Equal(t, sut.RBAC.clusterRole.Rules, cr.Rules) - }) - - 
t.Run("should create cluster role binding", func(t *testing.T) { - var crbs rbacv1.ClusterRoleBindingList - - require.NoError(t, client.List(ctx, &crbs)) - require.Len(t, crbs.Items, 1) - - crb := crbs.Items[0] - require.NotNil(t, crb) - require.Equal(t, agentName, crb.Name) - require.Equal(t, agentNamespace, crb.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, crb.Labels) - - subject := crb.Subjects[0] - require.Equal(t, "ServiceAccount", subject.Kind) - require.Equal(t, agentName, subject.Name) - require.Equal(t, agentNamespace, subject.Namespace) - - require.Equal(t, "rbac.authorization.k8s.io", crb.RoleRef.APIGroup) - require.Equal(t, "ClusterRole", crb.RoleRef.Kind) - require.Equal(t, agentName, crb.RoleRef.Name) - }) - - t.Run("should create metrics service", func(t *testing.T) { - var svcs corev1.ServiceList - - require.NoError(t, client.List(ctx, &svcs)) - require.Len(t, svcs.Items, 1) - - svc := svcs.Items[0] - require.NotNil(t, svc) - require.Equal(t, agentName+"-metrics", svc.Name) - require.Equal(t, agentNamespace, svc.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - "telemetry.kyma-project.io/self-monitor": "enabled", - }, svc.Labels) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, svc.Spec.Selector) - require.Equal(t, map[string]string{ - "prometheus.io/port": "8888", - "prometheus.io/scheme": "http", - "prometheus.io/scrape": "true", - }, svc.Annotations) - require.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) - require.Len(t, svc.Spec.Ports, 1) - require.Equal(t, corev1.ServicePort{ - Name: "http-metrics", - Protocol: corev1.ProtocolTCP, - Port: 8888, - TargetPort: intstr.FromInt32(8888), - }, svc.Spec.Ports[0]) - }) - - t.Run("should create network policy", func(t *testing.T) { - var nps networkingv1.NetworkPolicyList - - require.NoError(t, client.List(ctx, &nps)) - require.Len(t, nps.Items, 1) - - np := nps.Items[0] - require.NotNil(t, np) - require.Equal(t, agentName, np.Name) - require.Equal(t, agentNamespace, np.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, np.Labels) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, np.Spec.PodSelector.MatchLabels) - require.Equal(t, []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, np.Spec.PolicyTypes) - require.Len(t, np.Spec.Ingress, 1) - require.Len(t, np.Spec.Ingress[0].From, 2) - require.Equal(t, "0.0.0.0/0", np.Spec.Ingress[0].From[0].IPBlock.CIDR) - require.Equal(t, "::/0", np.Spec.Ingress[0].From[1].IPBlock.CIDR) - require.Len(t, np.Spec.Ingress[0].Ports, 2) - - tcpProtocol := corev1.ProtocolTCP - port5555 := intstr.FromInt32(5555) - port6666 := intstr.FromInt32(6666) - require.Equal(t, []networkingv1.NetworkPolicyPort{ - { - Protocol: &tcpProtocol, - Port: &port5555, - }, - { - Protocol: &tcpProtocol, - Port: &port6666, - }, - }, np.Spec.Ingress[0].Ports) - require.Len(t, np.Spec.Egress, 1) - require.Len(t, np.Spec.Egress[0].To, 2) - require.Equal(t, "0.0.0.0/0", np.Spec.Egress[0].To[0].IPBlock.CIDR) - require.Equal(t, "::/0", np.Spec.Egress[0].To[1].IPBlock.CIDR) - }) - - t.Run("should create collector config configmap", func(t *testing.T) { - var cms corev1.ConfigMapList - - require.NoError(t, client.List(ctx, &cms)) - require.Len(t, cms.Items, 1) - - cm := cms.Items[0] - require.Equal(t, agentName, cm.Name) - require.Equal(t, agentNamespace, cm.Namespace) - require.Equal(t, 
map[string]string{ - "app.kubernetes.io/name": agentName, - }, cm.Labels) - require.Contains(t, cm.Data, "relay.conf") - require.Equal(t, agentCfg, cm.Data["relay.conf"]) + err := sut.ApplyResources(context.Background(), client, AgentApplyOptions{ + AllowedPorts: []int32{5555, 6666}, + CollectorConfigYAML: "dummy", }) + require.NoError(t, err) - t.Run("should create a daemonset", func(t *testing.T) { - var dss appsv1.DaemonSetList - - require.NoError(t, client.List(ctx, &dss)) - require.Len(t, dss.Items, 1) - - ds := dss.Items[0] - require.Equal(t, agentName, ds.Name) - require.Equal(t, agentNamespace, ds.Namespace) - - // labels - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, ds.Labels, "must have expected daemonset labels") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - }, ds.Spec.Selector.MatchLabels, "must have expected daemonset selector labels") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": agentName, - "sidecar.istio.io/inject": "true", - "telemetry.kyma-project.io/metric-scrape": "true", - }, ds.Spec.Template.ObjectMeta.Labels, "must have expected pod labels") - - // annotations - podAnnotations := ds.Spec.Template.ObjectMeta.Annotations - require.NotEmpty(t, podAnnotations["checksum/config"]) - require.Equal(t, "# configure an env variable OUTPUT_CERTS to write certificates to the given folder\nproxyMetadata:\n OUTPUT_CERTS: /etc/istio-output-certs\n", podAnnotations["proxy.istio.io/config"]) - require.Equal(t, "[{\"name\": \"istio-certs\", \"mountPath\": \"/etc/istio-output-certs\"}]", podAnnotations["sidecar.istio.io/userVolumeMount"]) - require.Equal(t, "", podAnnotations["traffic.sidecar.istio.io/includeInboundPorts"]) - require.Equal(t, "4317", podAnnotations["traffic.sidecar.istio.io/includeOutboundPorts"]) - require.Equal(t, "8888", podAnnotations["traffic.sidecar.istio.io/excludeInboundPorts"]) - require.Equal(t, "", podAnnotations["traffic.sidecar.istio.io/includeOutboundIPRanges"]) - - // collector container - require.Len(t, ds.Spec.Template.Spec.Containers, 1) - container := ds.Spec.Template.Spec.Containers[0] - - require.NotNil(t, container.LivenessProbe, "liveness probe must be defined") - require.NotNil(t, container.ReadinessProbe, "readiness probe must be defined") - - envVars := container.Env - require.Len(t, envVars, 3) - require.Equal(t, envVars[0].Name, "MY_POD_IP") - require.Equal(t, envVars[1].Name, "MY_NODE_NAME") - require.Equal(t, envVars[2].Name, "GOMEMLIMIT") - require.Equal(t, envVars[0].ValueFrom.FieldRef.FieldPath, "status.podIP") - require.Equal(t, envVars[1].ValueFrom.FieldRef.FieldPath, "spec.nodeName") + bytes, err := testutils.MarshalYAML(scheme, objects) + require.NoError(t, err) - // security contexts - podSecurityContext := ds.Spec.Template.Spec.SecurityContext - require.NotNil(t, podSecurityContext, "pod security context must be defined") - require.NotZero(t, podSecurityContext.RunAsUser, "must run as non-root") - require.True(t, *podSecurityContext.RunAsNonRoot, "must run as non-root") + goldenFileBytes, err := os.ReadFile("testdata/metric-agent.yaml") + require.NoError(t, err) - containerSecurityContext := container.SecurityContext - require.NotNil(t, containerSecurityContext, "container security context must be defined") - require.NotZero(t, containerSecurityContext.RunAsUser, "must run as non-root") - require.True(t, *containerSecurityContext.RunAsNonRoot, "must run as non-root") - require.False(t, *containerSecurityContext.Privileged, "must not be 
privileged") - require.False(t, *containerSecurityContext.AllowPrivilegeEscalation, "must not escalate to privileged") - require.True(t, *containerSecurityContext.ReadOnlyRootFilesystem, "must use readonly fs") - }) + require.Equal(t, string(goldenFileBytes), string(bytes)) } -func TestDeleteAgentResources(t *testing.T) { - ctx := context.Background() - client := fake.NewClientBuilder().Build() +func TestAgent_DeleteResources(t *testing.T) { + var created []client.Object - sut := AgentApplierDeleter{ - Config: AgentConfig{ - Config: Config{ - BaseName: agentName, - Namespace: agentNamespace, - }, + fakeClient := fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{ + Create: func(ctx context.Context, c client.WithWatch, obj client.Object, _ ...client.CreateOption) error { + created = append(created, obj) + return c.Create(ctx, obj) }, - RBAC: createAgentRBAC(), - } + }).Build() - // Create agent resources before testing deletion - err := sut.ApplyResources(ctx, client, AgentApplyOptions{ + image := "opentelemetry/collector:latest" + namespace := "kyma-system" + priorityClassName := "normal" + sut := NewMetricAgentApplierDeleter(image, namespace, priorityClassName) + + err := sut.ApplyResources(context.Background(), fakeClient, AgentApplyOptions{ AllowedPorts: []int32{5555, 6666}, - CollectorConfigYAML: agentCfg, + CollectorConfigYAML: "dummy", }) require.NoError(t, err) - // Delete agent resources - err = sut.DeleteResources(ctx, client) + err = sut.DeleteResources(context.Background(), fakeClient) require.NoError(t, err) - t.Run("should delete service account", func(t *testing.T) { - var serviceAccount corev1.ServiceAccount - err := client.Get(ctx, types.NamespacedName{Name: agentName, Namespace: agentNamespace}, &serviceAccount) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete cluster role", func(t *testing.T) { - var clusterRole rbacv1.ClusterRole - err := client.Get(ctx, types.NamespacedName{Name: agentName}, &clusterRole) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete cluster role binding", func(t *testing.T) { - var clusterRoleBinding rbacv1.ClusterRoleBinding - err := client.Get(ctx, types.NamespacedName{Name: agentName, Namespace: agentNamespace}, &clusterRoleBinding) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete metrics service", func(t *testing.T) { - var service corev1.Service - err := client.Get(ctx, types.NamespacedName{Name: agentName + "-metrics", Namespace: agentNamespace}, &service) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete network policy", func(t *testing.T) { - var networkPolicy networkingv1.NetworkPolicy - err := client.Get(ctx, types.NamespacedName{Name: agentName, Namespace: agentNamespace}, &networkPolicy) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete collector config configmap", func(t *testing.T) { - var configMap corev1.ConfigMap - err := client.Get(ctx, types.NamespacedName{Name: agentName, Namespace: agentNamespace}, &configMap) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete daemonset", func(t *testing.T) { - var daemonSet appsv1.DaemonSet - err := client.Get(ctx, types.NamespacedName{Name: agentName, Namespace: agentNamespace}, &daemonSet) - require.True(t, apierrors.IsNotFound(err)) - }) -} - -func createAgentRBAC() Rbac { - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: agentName, - Namespace: agentNamespace, - Labels: 
labels.MakeDefaultLabel(agentName),
-		},
-		Rules: []rbacv1.PolicyRule{
-			{
-				APIGroups: []string{"test"},
-				Resources: []string{"test"},
-				Verbs:     []string{"test"},
-			},
-		},
-	}
-
-	clusterRoleBinding := &rbacv1.ClusterRoleBinding{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      agentName,
-			Namespace: agentNamespace,
-			Labels:    labels.MakeDefaultLabel(agentName),
-		},
-		Subjects: []rbacv1.Subject{{Name: agentName, Namespace: agentNamespace, Kind: rbacv1.ServiceAccountKind}},
-		RoleRef: rbacv1.RoleRef{
-			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "ClusterRole",
-			Name:     agentName,
-		},
-	}
-
-	return Rbac{
-		clusterRole:        clusterRole,
-		clusterRoleBinding: clusterRoleBinding,
-		role:               nil,
-		roleBinding:        nil,
+	for i := range created {
+		// getting a deleted object should return a NotFound error
+		err = fakeClient.Get(context.Background(), client.ObjectKeyFromObject(created[i]), created[i])
+		require.True(t, apierrors.IsNotFound(err), "want not found, got %v: %#v", err, created[i])
 	}
 }
diff --git a/internal/resources/otelcollector/config.go b/internal/resources/otelcollector/config.go
deleted file mode 100644
index 292c48cff..000000000
--- a/internal/resources/otelcollector/config.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package otelcollector
-
-import (
-	"k8s.io/apimachinery/pkg/api/resource"
-)
-
-type Config struct {
-	BaseName  string
-	Namespace string
-}
-
-type GatewayConfig struct {
-	Config
-
-	Deployment      DeploymentConfig
-	OTLPServiceName string
-}
-
-type DeploymentConfig struct {
-	Image                string
-	PriorityClassName    string
-	BaseCPULimit         resource.Quantity
-	DynamicCPULimit      resource.Quantity
-	BaseMemoryLimit      resource.Quantity
-	DynamicMemoryLimit   resource.Quantity
-	BaseCPURequest       resource.Quantity
-	DynamicCPURequest    resource.Quantity
-	BaseMemoryRequest    resource.Quantity
-	DynamicMemoryRequest resource.Quantity
-}
-
-type AgentConfig struct {
-	Config
-
-	DaemonSet DaemonSetConfig
-}
-
-type DaemonSetConfig struct {
-	Image             string
-	PriorityClassName string
-	CPULimit          resource.Quantity
-	CPURequest        resource.Quantity
-	MemoryLimit       resource.Quantity
-	MemoryRequest     resource.Quantity
-}
diff --git a/internal/resources/otelcollector/core.go b/internal/resources/otelcollector/core.go
index 7a6b718bc..d962806ae 100644
--- a/internal/resources/otelcollector/core.go
+++ b/internal/resources/otelcollector/core.go
@@ -15,14 +15,13 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	"github.com/kyma-project/telemetry-manager/internal/labels"
 	"github.com/kyma-project/telemetry-manager/internal/otelcollector/ports"
 	commonresources "github.com/kyma-project/telemetry-manager/internal/resources/common"
 	k8sutils "github.com/kyma-project/telemetry-manager/internal/utils/k8s"
 )
 
 // applyCommonResources applies the resources shared by gateway and agent workloads
-func applyCommonResources(ctx context.Context, c client.Client, name types.NamespacedName, rbac Rbac, allowedPorts []int32) error {
+func applyCommonResources(ctx context.Context, c client.Client, name types.NamespacedName, rbac rbac, allowedPorts []int32) error {
 	// Create service account before RBAC resources
 	if err := k8sutils.CreateOrUpdateServiceAccount(ctx, c, makeServiceAccount(name)); err != nil {
 		return fmt.Errorf("failed to create service account: %w", err)
@@ -65,7 +64,7 @@ func applyCommonResources(ctx context.Context, c client.Client, name types.Names
 		return fmt.Errorf("failed to create metrics service: %w", err)
 	}
 
-	if err := k8sutils.CreateOrUpdateNetworkPolicy(ctx, c, 
commonresources.MakeNetworkPolicy(name, allowedPorts, labels.MakeDefaultLabel(name.Name))); err != nil { + if err := k8sutils.CreateOrUpdateNetworkPolicy(ctx, c, commonresources.MakeNetworkPolicy(name, allowedPorts, commonresources.MakeDefaultLabels(name.Name))); err != nil { return fmt.Errorf("failed to create network policy: %w", err) } @@ -124,7 +123,7 @@ func makeServiceAccount(name types.NamespacedName) *corev1.ServiceAccount { ObjectMeta: metav1.ObjectMeta{ Name: name.Name, Namespace: name.Namespace, - Labels: labels.MakeDefaultLabel(name.Name), + Labels: commonresources.MakeDefaultLabels(name.Name), }, } @@ -136,7 +135,7 @@ func makeConfigMap(name types.NamespacedName, collectorConfigYAML string) *corev ObjectMeta: metav1.ObjectMeta{ Name: name.Name, Namespace: name.Namespace, - Labels: labels.MakeDefaultLabel(name.Name), + Labels: commonresources.MakeDefaultLabels(name.Name), }, Data: map[string]string{ configMapKey: collectorConfigYAML, @@ -149,14 +148,14 @@ func makeSecret(name types.NamespacedName, secretData map[string][]byte) *corev1 ObjectMeta: metav1.ObjectMeta{ Name: name.Name, Namespace: name.Namespace, - Labels: labels.MakeDefaultLabel(name.Name), + Labels: commonresources.MakeDefaultLabels(name.Name), }, Data: secretData, } } func makeMetricsService(name types.NamespacedName) *corev1.Service { - labels := labels.MakeDefaultLabel(name.Name) + labels := commonresources.MakeDefaultLabels(name.Name) selectorLabels := make(map[string]string) maps.Copy(selectorLabels, labels) labels["telemetry.kyma-project.io/self-monitor"] = "enabled" diff --git a/internal/resources/otelcollector/gateway.go b/internal/resources/otelcollector/gateway.go index a933b5549..0d6e04294 100644 --- a/internal/resources/otelcollector/gateway.go +++ b/internal/resources/otelcollector/gateway.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "maps" "strings" istiosecurityv1 "istio.io/api/security/v1" @@ -19,25 +20,163 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/kyma-project/telemetry-manager/internal/configchecksum" - "github.com/kyma-project/telemetry-manager/internal/labels" "github.com/kyma-project/telemetry-manager/internal/otelcollector/config" "github.com/kyma-project/telemetry-manager/internal/otelcollector/ports" commonresources "github.com/kyma-project/telemetry-manager/internal/resources/common" k8sutils "github.com/kyma-project/telemetry-manager/internal/utils/k8s" ) +const ( + LogGatewayName = "telemetry-log-gateway" + MetricGatewayName = "telemetry-metric-gateway" + TraceGatewayName = "telemetry-trace-gateway" + + MetricOTLPServiceName = "telemetry-otlp-metrics" + TraceOTLPServiceName = "telemetry-otlp-traces" + LogOTLPServiceName = "telemetry-otlp-logs" + + // label keys + logGatewayIngestKey = "telemetry.kyma-project.io/log-ingest" + logGatewayExportKey = "telemetry.kyma-project.io/log-export" + traceGatewayIngestKey = "telemetry.kyma-project.io/trace-ingest" + traceGatewayExportKey = "telemetry.kyma-project.io/trace-export" + metricGatewayIngestKey = "telemetry.kyma-project.io/metric-ingest" + metricGatewayExportKey = "telemetry.kyma-project.io/metric-export" + istioSidecarInjectKey = "sidecar.istio.io/inject" +) + +var ( + // TODO(skhalash): the resource requirements are copy-pasted from the trace gateway and need to be adjusted + logGatewayBaseCPULimit = resource.MustParse("700m") + logGatewayDynamicCPULimit = resource.MustParse("500m") + logGatewayBaseMemoryLimit = resource.MustParse("500Mi") + logGatewayDynamicMemoryLimit = resource.MustParse("1500Mi") + 
logGatewayBaseCPURequest       = resource.MustParse("100m")
+	logGatewayDynamicCPURequest    = resource.MustParse("100m")
+	logGatewayBaseMemoryRequest    = resource.MustParse("32Mi")
+	logGatewayDynamicMemoryRequest = resource.MustParse("0")
+
+	metricGatewayBaseCPULimit         = resource.MustParse("900m")
+	metricGatewayDynamicCPULimit      = resource.MustParse("100m")
+	metricGatewayBaseMemoryLimit      = resource.MustParse("512Mi")
+	metricGatewayDynamicMemoryLimit   = resource.MustParse("512Mi")
+	metricGatewayBaseCPURequest       = resource.MustParse("25m")
+	metricGatewayDynamicCPURequest    = resource.MustParse("0")
+	metricGatewayBaseMemoryRequest    = resource.MustParse("32Mi")
+	metricGatewayDynamicMemoryRequest = resource.MustParse("0")
+
+	traceGatewayBaseCPULimit         = resource.MustParse("700m")
+	traceGatewayDynamicCPULimit      = resource.MustParse("500m")
+	traceGatewayBaseMemoryLimit      = resource.MustParse("500Mi")
+	traceGatewayDynamicMemoryLimit   = resource.MustParse("1500Mi")
+	traceGatewayBaseCPURequest       = resource.MustParse("100m")
+	traceGatewayDynamicCPURequest    = resource.MustParse("100m")
+	traceGatewayBaseMemoryRequest    = resource.MustParse("32Mi")
+	traceGatewayDynamicMemoryRequest = resource.MustParse("0")
+)
+
+func NewLogGatewayApplierDeleter(image, namespace, priorityClassName string) *GatewayApplierDeleter {
+	extraLabels := map[string]string{
+		logGatewayIngestKey: "true",
+		logGatewayExportKey: "true",
+	}
+
+	return &GatewayApplierDeleter{
+		baseName:             LogGatewayName,
+		extraPodLabels:       extraLabels,
+		image:                image,
+		namespace:            namespace,
+		otlpServiceName:      LogOTLPServiceName,
+		priorityClassName:    priorityClassName,
+		rbac:                 makeLogGatewayRBAC(namespace),
+		baseCPULimit:         logGatewayBaseCPULimit,
+		dynamicCPULimit:      logGatewayDynamicCPULimit,
+		baseMemoryLimit:      logGatewayBaseMemoryLimit,
+		dynamicMemoryLimit:   logGatewayDynamicMemoryLimit,
+		baseCPURequest:       logGatewayBaseCPURequest,
+		dynamicCPURequest:    logGatewayDynamicCPURequest,
+		baseMemoryRequest:    logGatewayBaseMemoryRequest,
+		dynamicMemoryRequest: logGatewayDynamicMemoryRequest,
+	}
+}
+
+func NewMetricGatewayApplierDeleter(image, namespace, priorityClassName string) *GatewayApplierDeleter {
+	extraLabels := map[string]string{
+		metricGatewayIngestKey: "true",
+		metricGatewayExportKey: "true",
+		istioSidecarInjectKey:  "true", // inject Istio sidecar
+	}
+
+	return &GatewayApplierDeleter{
+		baseName:             MetricGatewayName,
+		extraPodLabels:       extraLabels,
+		image:                image,
+		namespace:            namespace,
+		otlpServiceName:      MetricOTLPServiceName,
+		priorityClassName:    priorityClassName,
+		rbac:                 makeMetricGatewayRBAC(namespace),
+		baseCPULimit:         metricGatewayBaseCPULimit,
+		dynamicCPULimit:      metricGatewayDynamicCPULimit,
+		baseMemoryLimit:      metricGatewayBaseMemoryLimit,
+		dynamicMemoryLimit:   metricGatewayDynamicMemoryLimit,
+		baseCPURequest:       metricGatewayBaseCPURequest,
+		dynamicCPURequest:    metricGatewayDynamicCPURequest,
+		baseMemoryRequest:    metricGatewayBaseMemoryRequest,
+		dynamicMemoryRequest: metricGatewayDynamicMemoryRequest,
+	}
+}
+
+func NewTraceGatewayApplierDeleter(image, namespace, priorityClassName string) *GatewayApplierDeleter {
+	extraLabels := map[string]string{
+		traceGatewayIngestKey: "true",
+		traceGatewayExportKey: "true",
+		istioSidecarInjectKey: "true", // inject Istio sidecar
+	}
+
+	return &GatewayApplierDeleter{
+		baseName:          TraceGatewayName,
+		extraPodLabels:    extraLabels,
+		image:             image,
+		namespace:         namespace,
+		otlpServiceName:   TraceOTLPServiceName,
+		priorityClassName: priorityClassName,
+		rbac:              makeTraceGatewayRBAC(namespace),
+		baseCPULimit:      
traceGatewayBaseCPULimit, + dynamicCPULimit: traceGatewayDynamicCPULimit, + baseMemoryLimit: traceGatewayBaseMemoryLimit, + dynamicMemoryLimit: traceGatewayDynamicMemoryLimit, + baseCPURequest: traceGatewayBaseCPURequest, + dynamicCPURequest: traceGatewayDynamicCPURequest, + baseMemoryRequest: traceGatewayBaseMemoryRequest, + dynamicMemoryRequest: traceGatewayDynamicMemoryRequest, + } +} + type GatewayApplierDeleter struct { - Config GatewayConfig - RBAC Rbac + baseName string + extraPodLabels map[string]string + image string + namespace string + otlpServiceName string + priorityClassName string + rbac rbac + + baseCPULimit resource.Quantity + dynamicCPULimit resource.Quantity + baseMemoryLimit resource.Quantity + dynamicMemoryLimit resource.Quantity + baseCPURequest resource.Quantity + dynamicCPURequest resource.Quantity + baseMemoryRequest resource.Quantity + dynamicMemoryRequest resource.Quantity } type GatewayApplyOptions struct { - AllowedPorts []int32 - CollectorConfigYAML string - CollectorEnvVars map[string][]byte - ComponentSelectorLabels map[string]string - IstioEnabled bool - IstioExcludePorts []int32 + AllowedPorts []int32 + CollectorConfigYAML string + CollectorEnvVars map[string][]byte + IstioEnabled bool + IstioExcludePorts []int32 // Replicas specifies the number of gateway replicas. Replicas int32 // ResourceRequirementsMultiplier is a coefficient affecting the CPU and memory resource limits for each replica. @@ -47,9 +186,9 @@ type GatewayApplyOptions struct { } func (gad *GatewayApplierDeleter) ApplyResources(ctx context.Context, c client.Client, opts GatewayApplyOptions) error { - name := types.NamespacedName{Namespace: gad.Config.Namespace, Name: gad.Config.BaseName} + name := types.NamespacedName{Namespace: gad.namespace, Name: gad.baseName} - if err := applyCommonResources(ctx, c, name, gad.RBAC, opts.AllowedPorts); err != nil { + if err := applyCommonResources(ctx, c, name, gad.rbac, opts.AllowedPorts); err != nil { return fmt.Errorf("failed to create common resource: %w", err) } @@ -85,14 +224,14 @@ func (gad *GatewayApplierDeleter) DeleteResources(ctx context.Context, c client. // Attempt to clean up as many resources as possible and avoid early return when one of the deletions fails var allErrors error = nil - name := types.NamespacedName{Name: gad.Config.BaseName, Namespace: gad.Config.Namespace} + name := types.NamespacedName{Name: gad.baseName, Namespace: gad.namespace} if err := deleteCommonResources(ctx, c, name); err != nil { allErrors = errors.Join(allErrors, err) } objectMeta := metav1.ObjectMeta{ - Name: gad.Config.BaseName, - Namespace: gad.Config.Namespace, + Name: gad.baseName, + Namespace: gad.namespace, } secret := corev1.Secret{ObjectMeta: objectMeta} @@ -110,7 +249,7 @@ func (gad *GatewayApplierDeleter) DeleteResources(ctx context.Context, c client. allErrors = errors.Join(allErrors, fmt.Errorf("failed to delete deployment: %w", err)) } - OTLPService := corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: gad.Config.OTLPServiceName, Namespace: gad.Config.Namespace}} + OTLPService := corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: gad.otlpServiceName, Namespace: gad.namespace}} if err := k8sutils.DeleteObject(ctx, c, &OTLPService); err != nil { allErrors = errors.Join(allErrors, fmt.Errorf("failed to delete otlp service: %w", err)) } @@ -126,19 +265,17 @@ func (gad *GatewayApplierDeleter) DeleteResources(ctx context.Context, c client. 
} func (gad *GatewayApplierDeleter) makeGatewayDeployment(configChecksum string, opts GatewayApplyOptions) *appsv1.Deployment { - selectorLabels := labels.MakeDefaultLabel(gad.Config.BaseName) + selectorLabels := commonresources.MakeDefaultLabels(gad.baseName) annotations := gad.makeAnnotations(configChecksum, opts) resources := gad.makeGatewayResourceRequirements(opts) affinity := makePodAffinity(selectorLabels) - deploymentConfig := gad.Config.Deployment - podSpec := makePodSpec( - gad.Config.BaseName, - deploymentConfig.Image, - commonresources.WithPriorityClass(deploymentConfig.PriorityClassName), + gad.baseName, + gad.image, + commonresources.WithPriorityClass(gad.priorityClassName), commonresources.WithResources(resources), withAffinity(affinity), withEnvVarFromSource(config.EnvVarCurrentPodIP, fieldPathPodIP), @@ -146,10 +283,14 @@ func (gad *GatewayApplierDeleter) makeGatewayDeployment(configChecksum string, o commonresources.WithGoMemLimitEnvVar(resources.Limits[corev1.ResourceMemory]), ) + podLabels := make(map[string]string) + maps.Copy(podLabels, selectorLabels) + maps.Copy(podLabels, gad.extraPodLabels) + return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: gad.Config.BaseName, - Namespace: gad.Config.Namespace, + Name: gad.baseName, + Namespace: gad.namespace, Labels: selectorLabels, }, Spec: appsv1.DeploymentSpec{ @@ -159,7 +300,7 @@ func (gad *GatewayApplierDeleter) makeGatewayDeployment(configChecksum string, o }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: opts.ComponentSelectorLabels, + Labels: podLabels, Annotations: annotations, }, Spec: podSpec, @@ -169,18 +310,16 @@ func (gad *GatewayApplierDeleter) makeGatewayDeployment(configChecksum string, o } func (gad *GatewayApplierDeleter) makeGatewayResourceRequirements(opts GatewayApplyOptions) corev1.ResourceRequirements { - deploymentConfig := gad.Config.Deployment - - memoryRequest := deploymentConfig.BaseMemoryRequest.DeepCopy() - memoryLimit := deploymentConfig.BaseMemoryLimit.DeepCopy() - cpuRequest := deploymentConfig.BaseCPURequest.DeepCopy() - cpuLimit := deploymentConfig.BaseCPULimit.DeepCopy() + memoryRequest := gad.baseMemoryRequest.DeepCopy() + memoryLimit := gad.baseMemoryLimit.DeepCopy() + cpuRequest := gad.baseCPURequest.DeepCopy() + cpuLimit := gad.baseCPULimit.DeepCopy() for range opts.ResourceRequirementsMultiplier { - memoryRequest.Add(deploymentConfig.DynamicMemoryRequest) - memoryLimit.Add(deploymentConfig.DynamicMemoryLimit) - cpuRequest.Add(deploymentConfig.DynamicCPURequest) - cpuLimit.Add(deploymentConfig.DynamicCPULimit) + memoryRequest.Add(gad.dynamicMemoryRequest) + memoryLimit.Add(gad.dynamicMemoryLimit) + cpuRequest.Add(gad.dynamicCPURequest) + cpuLimit.Add(gad.dynamicCPULimit) } resources := corev1.ResourceRequirements{ @@ -225,12 +364,12 @@ func makePodAffinity(labels map[string]string) corev1.Affinity { } func (gad *GatewayApplierDeleter) makeOTLPService() *corev1.Service { - labels := labels.MakeDefaultLabel(gad.Config.BaseName) + labels := commonresources.MakeDefaultLabels(gad.baseName) return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: gad.Config.OTLPServiceName, - Namespace: gad.Config.Namespace, + Name: gad.otlpServiceName, + Namespace: gad.namespace, Labels: labels, }, Spec: corev1.ServiceSpec{ @@ -255,12 +394,12 @@ func (gad *GatewayApplierDeleter) makeOTLPService() *corev1.Service { } func (gad *GatewayApplierDeleter) makePeerAuthentication() *istiosecurityclientv1.PeerAuthentication { - labels := 
labels.MakeDefaultLabel(gad.Config.BaseName) + labels := commonresources.MakeDefaultLabels(gad.baseName) return &istiosecurityclientv1.PeerAuthentication{ ObjectMeta: metav1.ObjectMeta{ - Name: gad.Config.BaseName, - Namespace: gad.Config.Namespace, + Name: gad.baseName, + Namespace: gad.namespace, Labels: labels, }, Spec: istiosecurityv1.PeerAuthentication{ diff --git a/internal/resources/otelcollector/gateway_test.go b/internal/resources/otelcollector/gateway_test.go index 227d72aa5..ae42b3a4b 100644 --- a/internal/resources/otelcollector/gateway_test.go +++ b/internal/resources/otelcollector/gateway_test.go @@ -2,613 +2,158 @@ package otelcollector import ( "context" + "os" "testing" "github.com/stretchr/testify/require" - istiosecurityv1 "istio.io/api/security/v1" istiosecurityclientv1 "istio.io/client-go/pkg/apis/security/v1" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" - "github.com/kyma-project/telemetry-manager/internal/labels" + testutils "github.com/kyma-project/telemetry-manager/internal/utils/test" ) -var ( - gatewayNamespace = "my-namespace" - gatewayName = "my-gateway" - gatewayCfg = "dummy otel collector config" - baseCPURequest = resource.MustParse("150m") - dynamicCPURequest = resource.MustParse("75m") - baseCPULimit = resource.MustParse("300m") - dynamicCPULimit = resource.MustParse("150m") - baseMemoryRequest = resource.MustParse("150m") - dynamicMemoryRequest = resource.MustParse("75m") - baseMemoryLimit = resource.MustParse("300m") - dynamicMemoryLimit = resource.MustParse("150m") - envVars = map[string][]byte{ - "BASIC_AUTH_HEADER": []byte("basicAuthHeader"), - "OTLP_ENDPOINT": []byte("otlpEndpoint"), - } - otlpServiceName = "telemetry" - replicas int32 = 3 -) - -func TestApplyGatewayResources(t *testing.T) { - ctx := context.Background() - client := fake.NewClientBuilder().Build() - - sut := GatewayApplierDeleter{ - Config: createGatewayConfig(), - RBAC: createGatewayRBAC(), - } - - err := sut.ApplyResources(ctx, client, GatewayApplyOptions{ - AllowedPorts: []int32{5555, 6666}, - CollectorConfigYAML: gatewayCfg, - CollectorEnvVars: envVars, - ComponentSelectorLabels: labels.MakeTraceGatewaySelectorLabel(gatewayName), - Replicas: replicas, - ResourceRequirementsMultiplier: 1, - }) - require.NoError(t, err) - - t.Run("should create service account", func(t *testing.T) { - var sas corev1.ServiceAccountList - - require.NoError(t, client.List(ctx, &sas)) - require.Len(t, sas.Items, 1) - - sa := sas.Items[0] - require.NotNil(t, sa) - require.Equal(t, gatewayName, sa.Name) - require.Equal(t, gatewayNamespace, sa.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, sa.Labels) - }) - - t.Run("should create cluster role", func(t *testing.T) { - var crs rbacv1.ClusterRoleList - - require.NoError(t, client.List(ctx, &crs)) - require.Len(t, crs.Items, 1) - - cr := crs.Items[0] - require.NotNil(t, cr) - require.Equal(t, gatewayName, cr.Name) - require.Equal(t, 
gatewayNamespace, cr.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, cr.Labels) - require.Equal(t, sut.RBAC.clusterRole.Rules, cr.Rules) - }) - - t.Run("should create cluster role binding", func(t *testing.T) { - var crbs rbacv1.ClusterRoleBindingList - - require.NoError(t, client.List(ctx, &crbs)) - require.Len(t, crbs.Items, 1) - - crb := crbs.Items[0] - require.NotNil(t, crb) - require.Equal(t, gatewayName, crb.Name) - require.Equal(t, gatewayNamespace, crb.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, crb.Labels) - - subject := crb.Subjects[0] - require.Equal(t, "ServiceAccount", subject.Kind) - require.Equal(t, gatewayName, subject.Name) - require.Equal(t, gatewayNamespace, subject.Namespace) - - require.Equal(t, "rbac.authorization.k8s.io", crb.RoleRef.APIGroup) - require.Equal(t, "ClusterRole", crb.RoleRef.Kind) - require.Equal(t, gatewayName, crb.RoleRef.Name) - }) - - t.Run("should create role", func(t *testing.T) { - var rs rbacv1.RoleList - - require.NoError(t, client.List(ctx, &rs)) - require.Len(t, rs.Items, 1) - - r := rs.Items[0] - require.NotNil(t, r) - require.Equal(t, gatewayName, r.Name) - require.Equal(t, gatewayNamespace, r.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, r.Labels) - require.Equal(t, sut.RBAC.role.Rules, r.Rules) - }) - - t.Run("should create role binding", func(t *testing.T) { - var rbs rbacv1.RoleBindingList - - require.NoError(t, client.List(ctx, &rbs)) - require.Len(t, rbs.Items, 1) - - rb := rbs.Items[0] - require.NotNil(t, rb) - require.Equal(t, gatewayName, rb.Name) - require.Equal(t, gatewayNamespace, rb.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, rb.Labels) - - subject := rb.Subjects[0] - require.Equal(t, "ServiceAccount", subject.Kind) - require.Equal(t, gatewayName, subject.Name) - require.Equal(t, gatewayNamespace, subject.Namespace) - - require.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) - require.Equal(t, "Role", rb.RoleRef.Kind) - require.Equal(t, gatewayName, rb.RoleRef.Name) - }) - - t.Run("should create metrics service", func(t *testing.T) { - var svc corev1.Service - - require.NoError(t, client.Get(ctx, types.NamespacedName{Namespace: gatewayNamespace, Name: gatewayName + "-metrics"}, &svc)) - - require.NotNil(t, svc) - require.Equal(t, gatewayName+"-metrics", svc.Name) - require.Equal(t, gatewayNamespace, svc.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - "telemetry.kyma-project.io/self-monitor": "enabled", - }, svc.Labels) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, svc.Spec.Selector) - require.Equal(t, map[string]string{ - "prometheus.io/port": "8888", - "prometheus.io/scheme": "http", - "prometheus.io/scrape": "true", - }, svc.Annotations) - require.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) - require.Len(t, svc.Spec.Ports, 1) - require.Equal(t, corev1.ServicePort{ - Name: "http-metrics", - Protocol: corev1.ProtocolTCP, - Port: 8888, - TargetPort: intstr.FromInt32(8888), - }, svc.Spec.Ports[0]) - }) - - t.Run("should create network policy", func(t *testing.T) { - var nps networkingv1.NetworkPolicyList - - require.NoError(t, client.List(ctx, &nps)) - require.Len(t, nps.Items, 1) - - np := nps.Items[0] - require.NotNil(t, np) - require.Equal(t, gatewayName, np.Name) - require.Equal(t, gatewayNamespace, np.Namespace) - require.Equal(t, 
map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, np.Labels) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, np.Spec.PodSelector.MatchLabels) - require.Equal(t, []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, np.Spec.PolicyTypes) - require.Len(t, np.Spec.Ingress, 1) - require.Len(t, np.Spec.Ingress[0].From, 2) - require.Equal(t, "0.0.0.0/0", np.Spec.Ingress[0].From[0].IPBlock.CIDR) - require.Equal(t, "::/0", np.Spec.Ingress[0].From[1].IPBlock.CIDR) - require.Len(t, np.Spec.Ingress[0].Ports, 2) - - tcpProtocol := corev1.ProtocolTCP - port5555 := intstr.FromInt32(5555) - port6666 := intstr.FromInt32(6666) - require.Equal(t, []networkingv1.NetworkPolicyPort{ - { - Protocol: &tcpProtocol, - Port: &port5555, - }, - { - Protocol: &tcpProtocol, - Port: &port6666, - }, - }, np.Spec.Ingress[0].Ports) - require.Len(t, np.Spec.Egress, 1) - require.Len(t, np.Spec.Egress[0].To, 2) - require.Equal(t, "0.0.0.0/0", np.Spec.Egress[0].To[0].IPBlock.CIDR) - require.Equal(t, "::/0", np.Spec.Egress[0].To[1].IPBlock.CIDR) - }) - - t.Run("should create env secret", func(t *testing.T) { - var secrets corev1.SecretList - - require.NoError(t, client.List(ctx, &secrets)) - require.Len(t, secrets.Items, 1) - - secret := secrets.Items[0] - require.Equal(t, gatewayName, secret.Name) - require.Equal(t, gatewayNamespace, secret.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, secret.Labels) - - for k, v := range envVars { - require.Equal(t, v, secret.Data[k]) - } - }) - - t.Run("should create collector config configmap", func(t *testing.T) { - var cms corev1.ConfigMapList - - require.NoError(t, client.List(ctx, &cms)) - require.Len(t, cms.Items, 1) - - cm := cms.Items[0] - require.Equal(t, gatewayName, cm.Name) - require.Equal(t, gatewayNamespace, cm.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, cm.Labels) - require.Equal(t, gatewayCfg, cm.Data["relay.conf"]) - }) - - t.Run("should create a deployment", func(t *testing.T) { - var deps appsv1.DeploymentList - - require.NoError(t, client.List(ctx, &deps)) - require.Len(t, deps.Items, 1) - - dep := deps.Items[0] - require.Equal(t, gatewayName, dep.Name) - require.Equal(t, gatewayNamespace, dep.Namespace) - require.Equal(t, replicas, *dep.Spec.Replicas) - - // labels - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, dep.Labels, "must have expected deployment labels") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, dep.Spec.Selector.MatchLabels, "must have expected deployment selector labels") - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - "sidecar.istio.io/inject": "true", - "telemetry.kyma-project.io/trace-ingest": "true", - "telemetry.kyma-project.io/trace-export": "true", - }, dep.Spec.Template.ObjectMeta.Labels, "must have expected pod labels") - - // annotations - podAnnotations := dep.Spec.Template.ObjectMeta.Annotations - require.NotEmpty(t, podAnnotations["checksum/config"]) - - // collector container - require.Len(t, dep.Spec.Template.Spec.Containers, 1) - container := dep.Spec.Template.Spec.Containers[0] - - require.NotNil(t, container.LivenessProbe, "liveness probe must be defined") - require.NotNil(t, container.ReadinessProbe, "readiness probe must be defined") - resources := container.Resources - - CPURequest := baseCPURequest - CPURequest.Add(dynamicCPURequest) - 
require.Equal(t, CPURequest.String(), resources.Requests.Cpu().String(), "cpu requests should be calculated correctly") - - memoryRequest := baseMemoryRequest - memoryRequest.Add(dynamicMemoryRequest) - require.Equal(t, memoryRequest.String(), resources.Requests.Memory().String(), "memory requests should be calculated correctly") - - CPULimit := baseCPULimit - CPULimit.Add(dynamicCPULimit) - require.Equal(t, CPULimit.String(), resources.Limits.Cpu().String(), "cpu limit should be calculated correctly") - - memoryLimit := baseMemoryLimit - memoryLimit.Add(dynamicMemoryLimit) - require.Equal(t, memoryLimit.String(), resources.Limits.Memory().String(), "memory limit should be calculated correctly") - - envVars := container.Env - require.Len(t, envVars, 3) - require.Equal(t, envVars[0].Name, "MY_POD_IP") - require.Equal(t, envVars[1].Name, "MY_NODE_NAME") - require.Equal(t, envVars[2].Name, "GOMEMLIMIT") - require.Equal(t, envVars[0].ValueFrom.FieldRef.FieldPath, "status.podIP") - require.Equal(t, envVars[1].ValueFrom.FieldRef.FieldPath, "spec.nodeName") - - // security contexts - podSecurityContext := dep.Spec.Template.Spec.SecurityContext - require.NotNil(t, podSecurityContext, "pod security context must be defined") - require.NotZero(t, podSecurityContext.RunAsUser, "must run as non-root") - require.True(t, *podSecurityContext.RunAsNonRoot, "must run as non-root") - - containerSecurityContext := container.SecurityContext - require.NotNil(t, containerSecurityContext, "container security context must be defined") - require.NotZero(t, containerSecurityContext.RunAsUser, "must run as non-root") - require.True(t, *containerSecurityContext.RunAsNonRoot, "must run as non-root") - require.False(t, *containerSecurityContext.Privileged, "must not be privileged") - require.False(t, *containerSecurityContext.AllowPrivilegeEscalation, "must not escalate to privileged") - require.True(t, *containerSecurityContext.ReadOnlyRootFilesystem, "must use readonly fs") - }) - - t.Run("should create OTLP service", func(t *testing.T) { - var svc corev1.Service - - require.NoError(t, client.Get(ctx, types.NamespacedName{Namespace: gatewayNamespace, Name: otlpServiceName}, &svc)) - - require.NotNil(t, svc) - require.Equal(t, otlpServiceName, svc.Name) - require.Equal(t, gatewayNamespace, svc.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, svc.Labels) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - }, svc.Spec.Selector) - require.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) - require.Len(t, svc.Spec.Ports, 2) - require.Equal(t, corev1.ServicePort{ - Name: "grpc-collector", - Protocol: corev1.ProtocolTCP, - Port: 4317, - TargetPort: intstr.FromInt32(4317), - }, svc.Spec.Ports[0]) - require.Equal(t, corev1.ServicePort{ - Name: "http-collector", - Protocol: corev1.ProtocolTCP, - Port: 4318, - TargetPort: intstr.FromInt32(4318), - }, svc.Spec.Ports[1]) - }) -} - -func TestApplyGatewayResourcesWithIstioEnabled(t *testing.T) { - ctx := context.Background() - scheme := runtime.NewScheme() - require.NoError(t, istiosecurityclientv1.AddToScheme(scheme)) - require.NoError(t, clientgoscheme.AddToScheme(scheme)) - client := fake.NewClientBuilder().WithScheme(scheme).Build() - - sut := GatewayApplierDeleter{ - Config: createGatewayConfig(), - RBAC: createGatewayRBAC(), - } - - err := sut.ApplyResources(ctx, client, GatewayApplyOptions{ - CollectorConfigYAML: gatewayCfg, - CollectorEnvVars: envVars, - ComponentSelectorLabels: 
labels.MakeTraceGatewaySelectorLabel(gatewayName), - IstioEnabled: true, - IstioExcludePorts: []int32{1111, 2222}, - Replicas: replicas, - }) - require.NoError(t, err) - - t.Run("should have permissive peer authentication created", func(t *testing.T) { - var peerAuth istiosecurityclientv1.PeerAuthentication - - require.NoError(t, client.Get(ctx, types.NamespacedName{Namespace: gatewayNamespace, Name: gatewayName}, &peerAuth)) - - require.Equal(t, gatewayName, peerAuth.Name) - require.Equal(t, istiosecurityv1.PeerAuthentication_MutualTLS_PERMISSIVE, peerAuth.Spec.Mtls.Mode) - }) - - t.Run("should have istio enabled with ports excluded", func(t *testing.T) { - var deps appsv1.DeploymentList - - require.NoError(t, client.List(ctx, &deps)) - require.Len(t, deps.Items, 1) - dep := deps.Items[0] - require.Equal(t, gatewayName, dep.Name) - require.Equal(t, gatewayNamespace, dep.Namespace) - require.Equal(t, replicas, *dep.Spec.Replicas) - - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": gatewayName, - "telemetry.kyma-project.io/trace-ingest": "true", - "telemetry.kyma-project.io/trace-export": "true", - "sidecar.istio.io/inject": "true", - }, dep.Spec.Template.ObjectMeta.Labels, "must have expected pod labels") - - // annotations - podAnnotations := dep.Spec.Template.ObjectMeta.Annotations - require.NotEmpty(t, podAnnotations["checksum/config"]) - require.Equal(t, "TPROXY", podAnnotations["sidecar.istio.io/interceptionMode"]) - require.Equal(t, "1111, 2222", podAnnotations["traffic.sidecar.istio.io/excludeInboundPorts"]) - }) -} - -func TestDeleteGatewayResources(t *testing.T) { - ctx := context.Background() - scheme := runtime.NewScheme() - require.NoError(t, istiosecurityclientv1.AddToScheme(scheme)) - require.NoError(t, clientgoscheme.AddToScheme(scheme)) - client := fake.NewClientBuilder().WithScheme(scheme).Build() - - sut := GatewayApplierDeleter{ - Config: createGatewayConfig(), - RBAC: createGatewayRBAC(), - } - - // Create gateway resources before testing deletion - err := sut.ApplyResources(ctx, client, GatewayApplyOptions{ - CollectorConfigYAML: gatewayCfg, - CollectorEnvVars: envVars, - ComponentSelectorLabels: labels.MakeTraceGatewaySelectorLabel(gatewayName), - IstioEnabled: true, - IstioExcludePorts: []int32{1111, 2222}, - Replicas: replicas, - }) - require.NoError(t, err) - - // Delete gateway resources - err = sut.DeleteResources(ctx, client, true) - require.NoError(t, err) - - t.Run("should delete service account", func(t *testing.T) { - var serviceAccount corev1.ServiceAccount - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &serviceAccount) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete cluster role binding", func(t *testing.T) { - var clusterRoleBinding rbacv1.ClusterRoleBinding - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &clusterRoleBinding) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete cluster role", func(t *testing.T) { - var clusterRole rbacv1.ClusterRole - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &clusterRole) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete role binding", func(t *testing.T) { - var roleBinding rbacv1.RoleBinding - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &roleBinding) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete role", func(t 
*testing.T) { - var role rbacv1.Role - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &role) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete metrics service", func(t *testing.T) { - var service corev1.Service - err := client.Get(ctx, types.NamespacedName{Name: gatewayName + "-metrics", Namespace: gatewayNamespace}, &service) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete network policy", func(t *testing.T) { - var networkPolicy networkingv1.NetworkPolicy - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &networkPolicy) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete env secret", func(t *testing.T) { - var secret corev1.Secret - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &secret) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete collector config configmap", func(t *testing.T) { - var configMap corev1.ConfigMap - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &configMap) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete deployment", func(t *testing.T) { - var deployment appsv1.Deployment - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &deployment) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete OTLP service", func(t *testing.T) { - var service corev1.Service - err := client.Get(ctx, types.NamespacedName{Name: otlpServiceName, Namespace: gatewayNamespace}, &service) - require.True(t, apierrors.IsNotFound(err)) - }) - - t.Run("should delete permissive peer authentication", func(t *testing.T) { - var peerAuth istiosecurityclientv1.PeerAuthentication - err := client.Get(ctx, types.NamespacedName{Name: gatewayName, Namespace: gatewayNamespace}, &peerAuth) - require.True(t, apierrors.IsNotFound(err)) - }) -} - -func createGatewayConfig() GatewayConfig { - return GatewayConfig{ - Config: Config{ - BaseName: gatewayName, - Namespace: gatewayNamespace, +func TestGateway_ApplyResources(t *testing.T) { + image := "opentelemetry/collector:latest" + namespace := "kyma-system" + priorityClassName := "normal" + + tests := []struct { + name string + sut *GatewayApplierDeleter + istioEnabled bool + goldenFilePath string + saveGoldenFile bool + }{ + { + name: "metric gateway", + sut: NewMetricGatewayApplierDeleter(image, namespace, priorityClassName), + goldenFilePath: "testdata/metric-gateway.yaml", }, - OTLPServiceName: otlpServiceName, - - Deployment: DeploymentConfig{ - BaseCPURequest: baseCPURequest, - DynamicCPURequest: dynamicCPURequest, - BaseCPULimit: baseCPULimit, - DynamicCPULimit: dynamicCPULimit, - BaseMemoryRequest: baseMemoryRequest, - DynamicMemoryRequest: dynamicMemoryRequest, - BaseMemoryLimit: baseMemoryLimit, - DynamicMemoryLimit: dynamicMemoryLimit, + { + name: "trace gateway", + sut: NewTraceGatewayApplierDeleter(image, namespace, priorityClassName), + goldenFilePath: "testdata/trace-gateway.yaml", }, - } -} - -func createGatewayRBAC() Rbac { - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: gatewayName, - Namespace: gatewayNamespace, - Labels: labels.MakeDefaultLabel(gatewayName), + { + name: "log gateway", + sut: NewLogGatewayApplierDeleter(image, namespace, priorityClassName), + goldenFilePath: "testdata/log-gateway.yaml", }, - Rules: []rbacv1.PolicyRule{ - { - 
APIGroups: []string{"test"}, - Resources: []string{"test"}, - Verbs: []string{"test"}, - }, + { + name: "metric gateway with istio", + sut: NewMetricGatewayApplierDeleter(image, namespace, priorityClassName), + istioEnabled: true, + goldenFilePath: "testdata/metric-gateway-istio.yaml", }, } - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: gatewayName, - Namespace: gatewayNamespace, - Labels: labels.MakeDefaultLabel(gatewayName), - }, - Subjects: []rbacv1.Subject{{Name: gatewayName, Namespace: gatewayNamespace, Kind: rbacv1.ServiceAccountKind}}, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: gatewayName, - }, + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var objects []client.Object + + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(istiosecurityclientv1.AddToScheme(scheme)) + + client := fake.NewClientBuilder().WithScheme(scheme).WithInterceptorFuncs(interceptor.Funcs{ + Create: func(_ context.Context, c client.WithWatch, obj client.Object, _ ...client.CreateOption) error { + objects = append(objects, obj) + // Nothing has to be created, just add created object to the list + return nil + }, + }).Build() + + err := tt.sut.ApplyResources(context.Background(), client, GatewayApplyOptions{ + AllowedPorts: []int32{5555, 6666}, + CollectorConfigYAML: "dummy", + CollectorEnvVars: map[string][]byte{ + "DUMMY_ENV_VAR": []byte("foo"), + }, + IstioEnabled: tt.istioEnabled, + Replicas: 2, + }) + require.NoError(t, err) + + if tt.saveGoldenFile { + testutils.SaveAsYAML(t, scheme, objects, tt.goldenFilePath) + } + + bytes, err := testutils.MarshalYAML(scheme, objects) + require.NoError(t, err) + + goldenFileBytes, err := os.ReadFile(tt.goldenFilePath) + require.NoError(t, err) + + require.Equal(t, string(goldenFileBytes), string(bytes)) + }) } +} - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: gatewayName, - Namespace: gatewayNamespace, - Labels: labels.MakeDefaultLabel(gatewayName), +func TestGateway_DeleteResources(t *testing.T) { + image := "opentelemetry/collector:latest" + namespace := "kyma-system" + priorityClassName := "normal" + + tests := []struct { + name string + sut *GatewayApplierDeleter + istioEnabled bool + }{ + { + name: "metric gateway", + sut: NewMetricGatewayApplierDeleter(image, namespace, priorityClassName), }, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{"test"}, - Resources: []string{"test"}, - Verbs: []string{"test"}, - }, + { + name: "trace gateway", + sut: NewTraceGatewayApplierDeleter(image, namespace, priorityClassName), }, - } - - roleBinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: gatewayName, - Namespace: gatewayNamespace, - Labels: labels.MakeDefaultLabel(gatewayName), + { + name: "log gateway", + sut: NewLogGatewayApplierDeleter(image, namespace, priorityClassName), }, - Subjects: []rbacv1.Subject{{Name: gatewayName, Namespace: gatewayNamespace, Kind: rbacv1.ServiceAccountKind}}, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: gatewayName, + { + name: "metric gateway with istio", + sut: NewMetricGatewayApplierDeleter(image, namespace, priorityClassName), + istioEnabled: true, }, } - return Rbac{ - clusterRole: clusterRole, - clusterRoleBinding: clusterRoleBinding, - role: role, - roleBinding: roleBinding, + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var created []client.Object + 
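+			// The Create interceptor below records every object that ApplyResources creates,
+			// so that, after DeleteResources runs, the test can verify each of them is gone.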
+			scheme := runtime.NewScheme()
+			utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+			utilruntime.Must(istiosecurityclientv1.AddToScheme(scheme))
+
+			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithInterceptorFuncs(interceptor.Funcs{
+				Create: func(ctx context.Context, c client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
+					created = append(created, obj)
+					return c.Create(ctx, obj)
+				},
+			}).Build()
+
+			err := tt.sut.ApplyResources(context.Background(), fakeClient, GatewayApplyOptions{
+				AllowedPorts:        []int32{5555, 6666},
+				CollectorConfigYAML: "dummy",
+				IstioEnabled:        tt.istioEnabled,
+			})
+			require.NoError(t, err)
+
+			err = tt.sut.DeleteResources(context.Background(), fakeClient, tt.istioEnabled)
+			require.NoError(t, err)
+
+			for i := range created {
+				// getting a deleted object should return a NotFound error
+				err = fakeClient.Get(context.Background(), client.ObjectKeyFromObject(created[i]), created[i])
+				require.True(t, apierrors.IsNotFound(err), "want not found, got %v: %#v", err, created[i])
+			}
+		})
 	}
 }
diff --git a/internal/resources/otelcollector/rbac.go b/internal/resources/otelcollector/rbac.go
index 2a2730268..890456916 100644
--- a/internal/resources/otelcollector/rbac.go
+++ b/internal/resources/otelcollector/rbac.go
@@ -5,20 +5,20 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 
-	"github.com/kyma-project/telemetry-manager/internal/labels"
+	commonresources "github.com/kyma-project/telemetry-manager/internal/resources/common"
 )
 
-type Rbac struct {
+type rbac struct {
 	clusterRole        *rbacv1.ClusterRole
 	clusterRoleBinding *rbacv1.ClusterRoleBinding
 	role               *rbacv1.Role
 	roleBinding        *rbacv1.RoleBinding
 }
 
-type RBACOption func(*Rbac, types.NamespacedName)
+type RBACOption func(*rbac, types.NamespacedName)
 
-func newRBAC(name types.NamespacedName, options ...RBACOption) *Rbac {
-	rbac := &Rbac{}
+func newRBAC(name types.NamespacedName, options ...RBACOption) *rbac {
+	rbac := &rbac{}
 
 	for _, o := range options {
 		o(rbac, name)
@@ -28,12 +28,12 @@ func newRBAC(name types.NamespacedName, options ...RBACOption) *Rbac {
 }
 
 func withClusterRole(options ...ClusterRoleOption) RBACOption {
-	return func(r *Rbac, name types.NamespacedName) {
+	return func(r *rbac, name types.NamespacedName) {
 		clusterRole := &rbacv1.ClusterRole{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name.Name,
 				Namespace: name.Namespace,
-				Labels:    labels.MakeDefaultLabel(name.Name),
+				Labels:    commonresources.MakeDefaultLabels(name.Name),
 			},
 			Rules: []rbacv1.PolicyRule{},
 		}
@@ -46,12 +46,12 @@ func withClusterRole(options ...ClusterRoleOption) RBACOption {
 }
 
 func withClusterRoleBinding() RBACOption {
-	return func(r *Rbac, name types.NamespacedName) {
+	return func(r *rbac, name types.NamespacedName) {
 		r.clusterRoleBinding = &rbacv1.ClusterRoleBinding{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name.Name,
 				Namespace: name.Namespace,
-				Labels:    labels.MakeDefaultLabel(name.Name),
+				Labels:    commonresources.MakeDefaultLabels(name.Name),
 			},
 			Subjects: []rbacv1.Subject{{Name: name.Name, Namespace: name.Namespace, Kind: rbacv1.ServiceAccountKind}},
 			RoleRef: rbacv1.RoleRef{
@@ -64,12 +64,12 @@ func withClusterRoleBinding() RBACOption {
 }
 
 func withRole(options ...RoleOption) RBACOption {
-	return func(r *Rbac, name types.NamespacedName) {
+	return func(r *rbac, name types.NamespacedName) {
 		role := &rbacv1.Role{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name.Name,
 				Namespace: name.Namespace,
-				Labels:    labels.MakeDefaultLabel(name.Name),
+				Labels:    
commonresources.MakeDefaultLabels(name.Name), }, Rules: []rbacv1.PolicyRule{}, } @@ -83,12 +83,12 @@ func withRole(options ...RoleOption) RBACOption { } func withRoleBinding() RBACOption { - return func(r *Rbac, name types.NamespacedName) { + return func(r *rbac, name types.NamespacedName) { r.roleBinding = &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: name.Name, Namespace: name.Namespace, - Labels: labels.MakeDefaultLabel(name.Name), + Labels: commonresources.MakeDefaultLabels(name.Name), }, Subjects: []rbacv1.Subject{ { @@ -106,17 +106,17 @@ func withRoleBinding() RBACOption { } } -func MakeTraceGatewayRBAC(name types.NamespacedName) Rbac { +func makeTraceGatewayRBAC(namespace string) rbac { return *newRBAC( - name, + types.NamespacedName{Name: TraceGatewayName, Namespace: namespace}, withClusterRole(withK8sAttributeRules()), withClusterRoleBinding(), ) } -func MakeMetricAgentRBAC(name types.NamespacedName) Rbac { +func makeMetricAgentRBAC(namespace string) rbac { return *newRBAC( - name, + types.NamespacedName{Name: MetricAgentName, Namespace: namespace}, withClusterRole(withKubeletStatsRules(), withPrometheusRules(), withK8sClusterRules()), withClusterRoleBinding(), withRole(withSingletonCreatorRules()), @@ -124,9 +124,9 @@ func MakeMetricAgentRBAC(name types.NamespacedName) Rbac { ) } -func MakeMetricGatewayRBAC(name types.NamespacedName) Rbac { +func makeMetricGatewayRBAC(namespace string) rbac { return *newRBAC( - name, + types.NamespacedName{Name: MetricGatewayName, Namespace: namespace}, withClusterRole(withK8sAttributeRules(), withKymaStatsRules()), withClusterRoleBinding(), withRole(withSingletonCreatorRules()), @@ -134,9 +134,9 @@ func MakeMetricGatewayRBAC(name types.NamespacedName) Rbac { ) } -func MakeLogGatewayRBAC(name types.NamespacedName) Rbac { +func makeLogGatewayRBAC(namespace string) rbac { return *newRBAC( - name, + types.NamespacedName{Name: LogGatewayName, Namespace: namespace}, withClusterRole(withK8sAttributeRules()), withClusterRoleBinding(), ) diff --git a/internal/resources/otelcollector/rbac_test.go b/internal/resources/otelcollector/rbac_test.go deleted file mode 100644 index c3a79b12b..000000000 --- a/internal/resources/otelcollector/rbac_test.go +++ /dev/null @@ -1,304 +0,0 @@ -package otelcollector - -import ( - "testing" - - "github.com/stretchr/testify/require" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/types" -) - -func TestMakeTraceGatewayRBAC(t *testing.T) { - name := "test-gateway" - namespace := "test-namespace" - - rbac := MakeTraceGatewayRBAC(types.NamespacedName{Name: name, Namespace: namespace}) - - t.Run("should have a cluster role", func(t *testing.T) { - cr := rbac.clusterRole - expectedRules := []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"namespaces", "pods"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{"apps"}, - Resources: []string{"replicasets"}, - Verbs: []string{"get", "list", "watch"}, - }, - } - - require.NotNil(t, cr) - require.Equal(t, name, cr.Name) - require.Equal(t, namespace, cr.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, cr.Labels) - require.Equal(t, expectedRules, cr.Rules) - }) - - t.Run("should have a cluster role binding", func(t *testing.T) { - crb := rbac.clusterRoleBinding - checkClusterRoleBinding(t, crb, name, namespace) - }) - - t.Run("should not have a role", func(t *testing.T) { - r := rbac.role - require.Nil(t, r) - }) - - t.Run("should not have a role binding", func(t *testing.T) { - 
rb := rbac.roleBinding - require.Nil(t, rb) - }) -} - -func TestMakeMetricAgentRBAC(t *testing.T) { - name := "test-agent" - namespace := "test-namespace" - - rbac := MakeMetricAgentRBAC(types.NamespacedName{Name: name, Namespace: namespace}) - - t.Run("should have a cluster role", func(t *testing.T) { - cr := rbac.clusterRole - expectedRules := []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"nodes", "nodes/stats", "nodes/proxy"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{""}, - Resources: []string{"nodes", "nodes/metrics", "services", "endpoints", "pods"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - NonResourceURLs: []string{"/metrics", "/metrics/cadvisor"}, - Verbs: []string{"get"}, - }, - { - APIGroups: []string{""}, - Resources: []string{"events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services"}, - Verbs: []string{"get", "list", "watch"}, - }, { - APIGroups: []string{"apps"}, - Resources: []string{"daemonsets", "deployments", "replicasets", "statefulsets"}, - Verbs: []string{"get", "list", "watch"}, - }, { - APIGroups: []string{"extensions"}, - Resources: []string{"daemonsets", "deployments", "replicasets"}, - Verbs: []string{"get", "list", "watch"}, - }, { - APIGroups: []string{"batch"}, - Resources: []string{"jobs", "cronjobs"}, - Verbs: []string{"get", "list", "watch"}, - }, { - APIGroups: []string{"autoscaling"}, - Resources: []string{"horizontalpodautoscalers"}, - Verbs: []string{"get", "list", "watch"}, - }, - } - - require.NotNil(t, cr) - require.Equal(t, cr.Name, name) - require.Equal(t, cr.Namespace, namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, cr.Labels) - require.Equal(t, cr.Rules, expectedRules) - }) - - t.Run("should have a cluster role binding", func(t *testing.T) { - crb := rbac.clusterRoleBinding - checkClusterRoleBinding(t, crb, name, namespace) - }) - - t.Run("should have a role", func(t *testing.T) { - r := rbac.role - expectedRules := []rbacv1.PolicyRule{ - { - APIGroups: []string{"coordination.k8s.io"}, - Resources: []string{"leases"}, - Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, - }, - } - - require.NotNil(t, r) - require.Equal(t, name, r.Name) - require.Equal(t, namespace, r.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, r.Labels) - require.Equal(t, expectedRules, r.Rules) - }) - - t.Run("should have a role binding", func(t *testing.T) { - rb := rbac.roleBinding - require.NotNil(t, rb) - - checkRoleBinding(t, rb, name, namespace) - }) -} - -func TestMakeMetricGatewayRBAC(t *testing.T) { - name := "test-gateway" - namespace := "test-namespace" - - rbac := MakeMetricGatewayRBAC(types.NamespacedName{Name: name, Namespace: namespace}) - - t.Run("should have a cluster role", func(t *testing.T) { - cr := rbac.clusterRole - expectedRules := []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"namespaces", "pods"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{"apps"}, - Resources: []string{"replicasets"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{"operator.kyma-project.io"}, - Resources: []string{"telemetries"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{"telemetry.kyma-project.io"}, - Resources: []string{"metricpipelines"}, - Verbs: []string{"get", 
"list", "watch"}, - }, - { - APIGroups: []string{"telemetry.kyma-project.io"}, - Resources: []string{"tracepipelines"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{"telemetry.kyma-project.io"}, - Resources: []string{"logpipelines"}, - Verbs: []string{"get", "list", "watch"}, - }} - - require.NotNil(t, cr) - require.Equal(t, name, cr.Name) - require.Equal(t, namespace, cr.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, cr.Labels) - require.Equal(t, expectedRules, cr.Rules) - }) - - t.Run("should have a cluster role binding", func(t *testing.T) { - crb := rbac.clusterRoleBinding - checkClusterRoleBinding(t, crb, name, namespace) - }) - - t.Run("should have a role", func(t *testing.T) { - r := rbac.role - expectedRules := []rbacv1.PolicyRule{ - { - APIGroups: []string{"coordination.k8s.io"}, - Resources: []string{"leases"}, - Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, - }, - } - - require.NotNil(t, r) - require.Equal(t, name, r.Name) - require.Equal(t, namespace, r.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, r.Labels) - require.Equal(t, expectedRules, r.Rules) - }) - - t.Run("should have a role binding", func(t *testing.T) { - rb := rbac.roleBinding - require.NotNil(t, rb) - - checkRoleBinding(t, rb, name, namespace) - }) -} - -func TestMakeLogGatewayRBAC(t *testing.T) { - name := "test-gateway" - namespace := "test-namespace" - - rbac := MakeLogGatewayRBAC(types.NamespacedName{Name: name, Namespace: namespace}) - - t.Run("should have a cluster role", func(t *testing.T) { - cr := rbac.clusterRole - expectedRules := []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"namespaces", "pods"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{"apps"}, - Resources: []string{"replicasets"}, - Verbs: []string{"get", "list", "watch"}, - }, - } - - require.NotNil(t, cr) - require.Equal(t, name, cr.Name) - require.Equal(t, namespace, cr.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, cr.Labels) - require.Equal(t, expectedRules, cr.Rules) - }) - - t.Run("should have a cluster role binding", func(t *testing.T) { - crb := rbac.clusterRoleBinding - checkClusterRoleBinding(t, crb, name, namespace) - }) - - t.Run("should not have a role", func(t *testing.T) { - r := rbac.role - require.Nil(t, r) - }) - - t.Run("should not have a role binding", func(t *testing.T) { - rb := rbac.roleBinding - require.Nil(t, rb) - }) -} - -func checkClusterRoleBinding(t *testing.T, crb *rbacv1.ClusterRoleBinding, name, namespace string) { - require.NotNil(t, crb) - require.Equal(t, name, crb.Name) - require.Equal(t, namespace, crb.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, crb.Labels) - - subject := crb.Subjects[0] - require.Equal(t, "ServiceAccount", subject.Kind) - require.Equal(t, name, subject.Name) - require.Equal(t, namespace, subject.Namespace) - - require.Equal(t, "rbac.authorization.k8s.io", crb.RoleRef.APIGroup) - require.Equal(t, "ClusterRole", crb.RoleRef.Kind) - require.Equal(t, name, crb.RoleRef.Name) -} - -func checkRoleBinding(t *testing.T, rb *rbacv1.RoleBinding, name, namespace string) { - require.Equal(t, name, rb.Name) - require.Equal(t, namespace, rb.Namespace) - require.Equal(t, map[string]string{ - "app.kubernetes.io/name": name, - }, rb.Labels) - - subject := rb.Subjects[0] - require.Equal(t, "ServiceAccount", subject.Kind) - 
require.Equal(t, name, subject.Name) - require.Equal(t, namespace, subject.Namespace) - - require.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) - require.Equal(t, "Role", rb.RoleRef.Kind) - require.Equal(t, name, rb.RoleRef.Name) -} diff --git a/internal/resources/otelcollector/testdata/log-gateway.yaml b/internal/resources/otelcollector/testdata/log-gateway.yaml new file mode 100644 index 000000000..f4b78b905 --- /dev/null +++ b/internal/resources/otelcollector/testdata/log-gateway.yaml @@ -0,0 +1,263 @@ +apiVersion: v1 +data: + relay.conf: dummy +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-log-gateway + namespace: kyma-system +--- +apiVersion: v1 +data: + DUMMY_ENV_VAR: Zm9v +kind: Secret +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-log-gateway + namespace: kyma-system +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8888" + prometheus.io/scheme: http + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + telemetry.kyma-project.io/self-monitor: enabled + name: telemetry-log-gateway-metrics + namespace: kyma-system +spec: + ports: + - name: http-metrics + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app.kubernetes.io/name: telemetry-log-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-otlp-logs + namespace: kyma-system +spec: + ports: + - name: grpc-collector + port: 4317 + protocol: TCP + targetPort: 4317 + - name: http-collector + port: 4318 + protocol: TCP + targetPort: 4318 + selector: + app.kubernetes.io/name: telemetry-log-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-log-gateway + namespace: kyma-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-log-gateway + namespace: kyma-system +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: telemetry-log-gateway + strategy: {} + template: + metadata: + annotations: + checksum/config: 1d8e9f768e6b24485bbdd6b9aa417d37fec897a7dafc8321355abc0d45259c9e + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + telemetry.kyma-project.io/log-export: "true" + telemetry.kyma-project.io/log-ingest: "true" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-log-gateway + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-log-gateway + topologyKey: topology.kubernetes.io/zone + weight: 100 + containers: + - args: + - --config=/conf/relay.conf + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: MY_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: GOMEMLIMIT + value: "419430400" + envFrom: + - secretRef: + name: telemetry-log-gateway + optional: true + image: opentelemetry/collector:latest + livenessProbe: 
+ httpGet: + path: / + port: 13133 + name: collector + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 700m + memory: 500Mi + requests: + cpu: 100m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /conf + name: config + priorityClassName: normal + securityContext: + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + serviceAccountName: telemetry-log-gateway + volumes: + - configMap: + items: + - key: relay.conf + path: relay.conf + name: telemetry-log-gateway + name: config +status: {} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-log-gateway + namespace: kyma-system +spec: + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ports: + - port: 5555 + protocol: TCP + - port: 6666 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: telemetry-log-gateway + policyTypes: + - Ingress + - Egress +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-log-gateway + namespace: kyma-system +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-log-gateway + name: telemetry-log-gateway + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: telemetry-log-gateway +subjects: +- kind: ServiceAccount + name: telemetry-log-gateway + namespace: kyma-system +--- diff --git a/internal/resources/otelcollector/testdata/metric-agent.yaml b/internal/resources/otelcollector/testdata/metric-agent.yaml new file mode 100644 index 000000000..dbd43e013 --- /dev/null +++ b/internal/resources/otelcollector/testdata/metric-agent.yaml @@ -0,0 +1,334 @@ +apiVersion: v1 +data: + relay.conf: dummy +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + namespace: kyma-system +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8888" + prometheus.io/scheme: http + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + telemetry.kyma-project.io/self-monitor: enabled + name: telemetry-metric-agent-metrics + namespace: kyma-system +spec: + ports: + - name: http-metrics + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app.kubernetes.io/name: telemetry-metric-agent + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + namespace: kyma-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + 
namespace: kyma-system +spec: + selector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-agent + template: + metadata: + annotations: + checksum/config: 6a334c19c8f1698c843d1c40ef9c228c222b0c04f9945a359a3e932c2aa11ac7 + proxy.istio.io/config: | + # configure an env variable OUTPUT_CERTS to write certificates to the given folder + proxyMetadata: + OUTPUT_CERTS: /etc/istio-output-certs + sidecar.istio.io/userVolumeMount: '[{"name": "istio-certs", "mountPath": "/etc/istio-output-certs"}]' + traffic.sidecar.istio.io/excludeInboundPorts: "8888" + traffic.sidecar.istio.io/includeOutboundIPRanges: "" + traffic.sidecar.istio.io/includeOutboundPorts: "4317" + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + sidecar.istio.io/inject: "true" + telemetry.kyma-project.io/metric-scrape: "true" + spec: + containers: + - args: + - --config=/conf/relay.conf + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: MY_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: GOMEMLIMIT + value: "1006632960" + envFrom: + - secretRef: + name: telemetry-metric-agent + optional: true + image: opentelemetry/collector:latest + livenessProbe: + httpGet: + path: / + port: 13133 + name: collector + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: "1" + memory: 1200Mi + requests: + cpu: 15m + memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /conf + name: config + - mountPath: /etc/istio-output-certs + name: istio-certs + readOnly: true + priorityClassName: normal + securityContext: + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + serviceAccountName: telemetry-metric-agent + volumes: + - configMap: + items: + - key: relay.conf + path: relay.conf + name: telemetry-metric-agent + name: config + - emptyDir: {} + name: istio-certs + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + namespace: kyma-system +spec: + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ports: + - port: 5555 + protocol: TCP + - port: 6666 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-agent + policyTypes: + - Ingress + - Egress +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + namespace: kyma-system +rules: +- apiGroups: + - "" + resources: + - nodes + - nodes/stats + - nodes/proxy + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: + - get + - list + - watch +- nonResourceURLs: + - /metrics + - /metrics/cadvisor + verbs: + - get +- apiGroups: + - "" + resources: + - events + - namespaces + - namespaces/status + - nodes + - nodes/spec + - pods + - pods/status + - replicationcontrollers + - replicationcontrollers/status 
+ - resourcequotas + - services + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - extensions + resources: + - daemonsets + - deployments + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - list + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: telemetry-metric-agent +subjects: +- kind: ServiceAccount + name: telemetry-metric-agent + namespace: kyma-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + namespace: kyma-system +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-agent + name: telemetry-metric-agent + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: telemetry-metric-agent +subjects: +- kind: ServiceAccount + name: telemetry-metric-agent + namespace: kyma-system +--- diff --git a/internal/resources/otelcollector/testdata/metric-gateway-istio.yaml b/internal/resources/otelcollector/testdata/metric-gateway-istio.yaml new file mode 100644 index 000000000..2ed5ab74d --- /dev/null +++ b/internal/resources/otelcollector/testdata/metric-gateway-istio.yaml @@ -0,0 +1,353 @@ +apiVersion: v1 +data: + relay.conf: dummy +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: v1 +data: + DUMMY_ENV_VAR: Zm9v +kind: Secret +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8888" + prometheus.io/scheme: http + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + telemetry.kyma-project.io/self-monitor: enabled + name: telemetry-metric-gateway-metrics + namespace: kyma-system +spec: + ports: + - name: http-metrics + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app.kubernetes.io/name: telemetry-metric-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-otlp-metrics + namespace: kyma-system +spec: + ports: + - name: grpc-collector + port: 4317 + protocol: TCP + targetPort: 4317 + - name: http-collector + port: 4318 + protocol: TCP + targetPort: 4318 + selector: + app.kubernetes.io/name: telemetry-metric-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: 
+ app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + strategy: {} + template: + metadata: + annotations: + checksum/config: 1d8e9f768e6b24485bbdd6b9aa417d37fec897a7dafc8321355abc0d45259c9e + sidecar.istio.io/interceptionMode: TPROXY + traffic.sidecar.istio.io/excludeInboundPorts: "" + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + sidecar.istio.io/inject: "true" + telemetry.kyma-project.io/metric-export: "true" + telemetry.kyma-project.io/metric-ingest: "true" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + topologyKey: topology.kubernetes.io/zone + weight: 100 + containers: + - args: + - --config=/conf/relay.conf + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: MY_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: GOMEMLIMIT + value: "429496720" + envFrom: + - secretRef: + name: telemetry-metric-gateway + optional: true + image: opentelemetry/collector:latest + livenessProbe: + httpGet: + path: / + port: 13133 + name: collector + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 900m + memory: 512Mi + requests: + cpu: 25m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /conf + name: config + priorityClassName: normal + securityContext: + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + serviceAccountName: telemetry-metric-gateway + volumes: + - configMap: + items: + - key: relay.conf + path: relay.conf + name: telemetry-metric-gateway + name: config +status: {} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +spec: + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ports: + - port: 5555 + protocol: TCP + - port: 6666 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + policyTypes: + - Ingress + - Egress +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - telemetries + verbs: + - get + - list + - watch +- 
apiGroups: + - telemetry.kyma-project.io + resources: + - metricpipelines + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.kyma-project.io + resources: + - tracepipelines + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.kyma-project.io + resources: + - logpipelines + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: telemetry-metric-gateway +subjects: +- kind: ServiceAccount + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: telemetry-metric-gateway +subjects: +- kind: ServiceAccount + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: security.istio.io/v1 +kind: PeerAuthentication +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +spec: + mtls: + mode: PERMISSIVE + selector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway +status: {} +--- diff --git a/internal/resources/otelcollector/testdata/metric-gateway.yaml b/internal/resources/otelcollector/testdata/metric-gateway.yaml new file mode 100644 index 000000000..9b51a946c --- /dev/null +++ b/internal/resources/otelcollector/testdata/metric-gateway.yaml @@ -0,0 +1,335 @@ +apiVersion: v1 +data: + relay.conf: dummy +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: v1 +data: + DUMMY_ENV_VAR: Zm9v +kind: Secret +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8888" + prometheus.io/scheme: http + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + telemetry.kyma-project.io/self-monitor: enabled + name: telemetry-metric-gateway-metrics + namespace: kyma-system +spec: + ports: + - name: http-metrics + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app.kubernetes.io/name: telemetry-metric-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-otlp-metrics + namespace: kyma-system +spec: + ports: + - name: grpc-collector + port: 4317 + protocol: TCP + targetPort: 4317 + - name: http-collector + port: 4318 + protocol: TCP + targetPort: 4318 + selector: + app.kubernetes.io/name: 
telemetry-metric-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + strategy: {} + template: + metadata: + annotations: + checksum/config: 1d8e9f768e6b24485bbdd6b9aa417d37fec897a7dafc8321355abc0d45259c9e + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + sidecar.istio.io/inject: "true" + telemetry.kyma-project.io/metric-export: "true" + telemetry.kyma-project.io/metric-ingest: "true" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + topologyKey: topology.kubernetes.io/zone + weight: 100 + containers: + - args: + - --config=/conf/relay.conf + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: MY_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: GOMEMLIMIT + value: "429496720" + envFrom: + - secretRef: + name: telemetry-metric-gateway + optional: true + image: opentelemetry/collector:latest + livenessProbe: + httpGet: + path: / + port: 13133 + name: collector + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 900m + memory: 512Mi + requests: + cpu: 25m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /conf + name: config + priorityClassName: normal + securityContext: + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + serviceAccountName: telemetry-metric-gateway + volumes: + - configMap: + items: + - key: relay.conf + path: relay.conf + name: telemetry-metric-gateway + name: config +status: {} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +spec: + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ports: + - port: 5555 + protocol: TCP + - port: 6666 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: telemetry-metric-gateway + policyTypes: + - Ingress + - Egress +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - operator.kyma-project.io + 
resources: + - telemetries + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.kyma-project.io + resources: + - metricpipelines + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.kyma-project.io + resources: + - tracepipelines + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.kyma-project.io + resources: + - logpipelines + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: telemetry-metric-gateway +subjects: +- kind: ServiceAccount + name: telemetry-metric-gateway + namespace: kyma-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-metric-gateway + name: telemetry-metric-gateway + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: telemetry-metric-gateway +subjects: +- kind: ServiceAccount + name: telemetry-metric-gateway + namespace: kyma-system +--- diff --git a/internal/resources/otelcollector/testdata/trace-gateway.yaml b/internal/resources/otelcollector/testdata/trace-gateway.yaml new file mode 100644 index 000000000..08d7ac72b --- /dev/null +++ b/internal/resources/otelcollector/testdata/trace-gateway.yaml @@ -0,0 +1,264 @@ +apiVersion: v1 +data: + relay.conf: dummy +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-trace-gateway + namespace: kyma-system +--- +apiVersion: v1 +data: + DUMMY_ENV_VAR: Zm9v +kind: Secret +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-trace-gateway + namespace: kyma-system +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-otlp-traces + namespace: kyma-system +spec: + ports: + - name: grpc-collector + port: 4317 + protocol: TCP + targetPort: 4317 + - name: http-collector + port: 4318 + protocol: TCP + targetPort: 4318 + selector: + app.kubernetes.io/name: telemetry-trace-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8888" + prometheus.io/scheme: http + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + telemetry.kyma-project.io/self-monitor: enabled + name: telemetry-trace-gateway-metrics + namespace: kyma-system +spec: + ports: + - name: http-metrics + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app.kubernetes.io/name: telemetry-trace-gateway + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-trace-gateway + namespace: kyma-system +--- +apiVersion: apps/v1 +kind: 
Deployment +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-trace-gateway + namespace: kyma-system +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: telemetry-trace-gateway + strategy: {} + template: + metadata: + annotations: + checksum/config: 1d8e9f768e6b24485bbdd6b9aa417d37fec897a7dafc8321355abc0d45259c9e + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + sidecar.istio.io/inject: "true" + telemetry.kyma-project.io/trace-export: "true" + telemetry.kyma-project.io/trace-ingest: "true" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-trace-gateway + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: telemetry-trace-gateway + topologyKey: topology.kubernetes.io/zone + weight: 100 + containers: + - args: + - --config=/conf/relay.conf + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: MY_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: GOMEMLIMIT + value: "419430400" + envFrom: + - secretRef: + name: telemetry-trace-gateway + optional: true + image: opentelemetry/collector:latest + livenessProbe: + httpGet: + path: / + port: 13133 + name: collector + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 700m + memory: 500Mi + requests: + cpu: 100m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /conf + name: config + priorityClassName: normal + securityContext: + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + serviceAccountName: telemetry-trace-gateway + volumes: + - configMap: + items: + - key: relay.conf + path: relay.conf + name: telemetry-trace-gateway + name: config +status: {} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-trace-gateway + namespace: kyma-system +spec: + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + - ipBlock: + cidr: ::/0 + ports: + - port: 5555 + protocol: TCP + - port: 6666 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: telemetry-trace-gateway + policyTypes: + - Ingress + - Egress +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-trace-gateway + namespace: kyma-system +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: telemetry-trace-gateway + name: telemetry-trace-gateway + namespace: kyma-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: telemetry-trace-gateway +subjects: +- kind: ServiceAccount + name: 
telemetry-trace-gateway + namespace: kyma-system +--- diff --git a/internal/resources/selfmonitor/resources.go b/internal/resources/selfmonitor/resources.go index cded53430..c9eb0ed1a 100644 --- a/internal/resources/selfmonitor/resources.go +++ b/internal/resources/selfmonitor/resources.go @@ -16,7 +16,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/kyma-project/telemetry-manager/internal/configchecksum" - "github.com/kyma-project/telemetry-manager/internal/labels" commonresources "github.com/kyma-project/telemetry-manager/internal/resources/common" "github.com/kyma-project/telemetry-manager/internal/selfmonitor/ports" k8sutils "github.com/kyma-project/telemetry-manager/internal/utils/k8s" @@ -128,7 +127,7 @@ func (ad *ApplierDeleter) makeServiceAccount() *corev1.ServiceAccount { ObjectMeta: metav1.ObjectMeta{ Name: ad.Config.BaseName, Namespace: ad.Config.Namespace, - Labels: labels.MakeDefaultLabel(ad.Config.BaseName), + Labels: commonresources.MakeDefaultLabels(ad.Config.BaseName), }, } @@ -140,7 +139,7 @@ func (ad *ApplierDeleter) makeRole() *rbacv1.Role { ObjectMeta: metav1.ObjectMeta{ Name: ad.Config.BaseName, Namespace: ad.Config.Namespace, - Labels: labels.MakeDefaultLabel(ad.Config.BaseName), + Labels: commonresources.MakeDefaultLabels(ad.Config.BaseName), }, Rules: []rbacv1.PolicyRule{ { @@ -159,7 +158,7 @@ func (ad *ApplierDeleter) makeRoleBinding() *rbacv1.RoleBinding { ObjectMeta: metav1.ObjectMeta{ Name: ad.Config.BaseName, Namespace: ad.Config.Namespace, - Labels: labels.MakeDefaultLabel(ad.Config.BaseName), + Labels: commonresources.MakeDefaultLabels(ad.Config.BaseName), }, Subjects: []rbacv1.Subject{{Name: ad.Config.BaseName, Namespace: ad.Config.Namespace, Kind: rbacv1.ServiceAccountKind}}, RoleRef: rbacv1.RoleRef{ @@ -179,11 +178,11 @@ func (ad *ApplierDeleter) makeNetworkPolicy() *networkingv1.NetworkPolicy { ObjectMeta: metav1.ObjectMeta{ Name: ad.Config.BaseName, Namespace: ad.Config.Namespace, - Labels: labels.MakeDefaultLabel(ad.Config.BaseName), + Labels: commonresources.MakeDefaultLabels(ad.Config.BaseName), }, Spec: networkingv1.NetworkPolicySpec{ PodSelector: metav1.LabelSelector{ - MatchLabels: labels.MakeDefaultLabel(ad.Config.BaseName), + MatchLabels: commonresources.MakeDefaultLabels(ad.Config.BaseName), }, PolicyTypes: []networkingv1.PolicyType{ networkingv1.PolicyTypeIngress, @@ -239,7 +238,7 @@ func (ad *ApplierDeleter) makeConfigMap(prometheusConfigFileName, prometheusConf ObjectMeta: metav1.ObjectMeta{ Name: ad.Config.BaseName, Namespace: ad.Config.Namespace, - Labels: labels.MakeDefaultLabel(ad.Config.BaseName), + Labels: commonresources.MakeDefaultLabels(ad.Config.BaseName), }, Data: map[string]string{ prometheusConfigFileName: prometheusConfigYAML, @@ -251,7 +250,7 @@ func (ad *ApplierDeleter) makeConfigMap(prometheusConfigFileName, prometheusConf func (ad *ApplierDeleter) makeDeployment(configChecksum, configPath, configFile string) *appsv1.Deployment { var replicas int32 = 1 - selectorLabels := labels.MakeDefaultLabel(ad.Config.BaseName) + selectorLabels := commonresources.MakeDefaultLabels(ad.Config.BaseName) podLabels := maps.Clone(selectorLabels) podLabels["sidecar.istio.io/inject"] = "false" @@ -400,7 +399,7 @@ func (ad *ApplierDeleter) makeService(port int32) *corev1.Service { ObjectMeta: metav1.ObjectMeta{ Name: ad.Config.BaseName, Namespace: ad.Config.Namespace, - Labels: labels.MakeDefaultLabel(ad.Config.BaseName), + Labels: commonresources.MakeDefaultLabels(ad.Config.BaseName), }, Spec: corev1.ServiceSpec{ Ports: 
[]corev1.ServicePort{
@@ -411,7 +410,7 @@ func (ad *ApplierDeleter) makeService(port int32) *corev1.Service {
 					TargetPort: intstr.FromInt32(port),
 				},
 			},
-			Selector: labels.MakeDefaultLabel(ad.Config.BaseName),
+			Selector: commonresources.MakeDefaultLabels(ad.Config.BaseName),
 			Type:     corev1.ServiceTypeClusterIP,
 		},
 	}
 }
diff --git a/internal/utils/test/marshal.go b/internal/utils/test/marshal.go
new file mode 100644
index 000000000..47dab4d22
--- /dev/null
+++ b/internal/utils/test/marshal.go
@@ -0,0 +1,85 @@
+package test
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"slices"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// MarshalYAML marshals a list of objects into a YAML byte slice. It is used to compare the expected objects with the actual ones in golden file tests.
+func MarshalYAML(scheme *runtime.Scheme, objects []client.Object) ([]byte, error) {
+	// TypeMeta is not set by default, so we need to set it manually
+	for _, obj := range objects {
+		gvk, err := apiutil.GVKForObject(obj, scheme)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get GVK for object %T: %w", obj, err)
+		}
+
+		obj.GetObjectKind().SetGroupVersionKind(gvk)
+	}
+
+	// Always sort to have a deterministic output
+	slices.SortFunc(objects, compareObjects)
+
+	e := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme)
+
+	var buffer bytes.Buffer
+
+	for _, obj := range objects {
+		if err := e.Encode(obj, &buffer); err != nil {
+			return nil, fmt.Errorf("failed to encode object %T: %w", obj, err)
+		}
+
+		buffer.WriteString("---\n") // YAML document separator
+	}
+
+	return buffer.Bytes(), nil
+}
+
+// SaveAsYAML dumps the list of objects to a YAML file. It is used to regenerate golden files; a call to it must never be committed.
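+// It deliberately fails the test via t.Fatalf after writing the file, so a forgotten call cannot slip through CI unnoticed.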
+func SaveAsYAML(t *testing.T, scheme *runtime.Scheme, objects []client.Object, path string) { + objectsYAML, err := MarshalYAML(scheme, objects) + require.NoError(t, err) + + err = os.WriteFile(path, objectsYAML, 0600) + require.NoError(t, err) + + t.Fatalf("Golden file %s has been saved, please verify it and remove this line", path) +} + +func compareObjects(a, b client.Object) int { + gvkA := a.GetObjectKind().GroupVersionKind() + gvkB := b.GetObjectKind().GroupVersionKind() + + if cmp := compareGVKs(gvkA, gvkB); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.GetNamespace(), b.GetNamespace()); cmp != 0 { + return cmp + } + + return strings.Compare(a.GetName(), b.GetName()) +} + +func compareGVKs(a, b schema.GroupVersionKind) int { + if cmp := strings.Compare(a.Group, b.Group); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Version, b.Version); cmp != 0 { + return cmp + } + + return strings.Compare(a.Kind, b.Kind) +} diff --git a/main.go b/main.go index e1d402886..e75c20766 100644 --- a/main.go +++ b/main.go @@ -99,10 +99,7 @@ const ( cacheSyncPeriod = 1 * time.Minute telemetryNamespaceEnvVar = "MANAGER_NAMESPACE" telemetryNamespaceDefault = "default" - metricOTLPServiceName = "telemetry-otlp-metrics" selfMonitorName = "telemetry-self-monitor" - traceOTLPServiceName = "telemetry-otlp-traces" - logOTLPServiceName = "telemetry-otlp-logs" webhookServiceName = "telemetry-manager-webhook" healthProbePort = 8081 @@ -309,12 +306,10 @@ func enableTelemetryModuleController(mgr manager.Manager, webhookConfig telemetr operator.TelemetryControllerConfig{ Config: telemetry.Config{ Traces: telemetry.TracesConfig{ - OTLPServiceName: traceOTLPServiceName, - Namespace: telemetryNamespace, + Namespace: telemetryNamespace, }, Metrics: telemetry.MetricsConfig{ - OTLPServiceName: metricOTLPServiceName, - Namespace: telemetryNamespace, + Namespace: telemetryNamespace, }, Webhook: webhookConfig, SelfMonitor: selfMonitorConfig, @@ -360,7 +355,6 @@ func setupLogPipelineController(mgr manager.Manager, reconcileTriggerChan <-chan OTelCollectorImage: otelCollectorImage, FluentBitPriorityClassName: highPriorityClassName, LogGatewayPriorityClassName: normalPriorityClassName, - LogGatewayServiceName: logOTLPServiceName, RestConfig: mgr.GetConfig(), SelfMonitorName: selfMonitorName, TelemetryNamespace: telemetryNamespace, @@ -402,7 +396,6 @@ func setupTracePipelineController(mgr manager.Manager, reconcileTriggerChan <-ch SelfMonitorName: selfMonitorName, TelemetryNamespace: telemetryNamespace, TraceGatewayPriorityClassName: normalPriorityClassName, - TraceGatewayServiceName: traceOTLPServiceName, }, ) if err != nil { @@ -425,7 +418,6 @@ func setupMetricPipelineController(mgr manager.Manager, reconcileTriggerChan <-c telemetrycontrollers.MetricPipelineControllerConfig{ MetricAgentPriorityClassName: highPriorityClassName, MetricGatewayPriorityClassName: normalPriorityClassName, - MetricGatewayServiceName: metricOTLPServiceName, ModuleVersion: version, OTelCollectorImage: otelCollectorImage, RestConfig: mgr.GetConfig(),