From 1ca137a228433f3deaa2947be34dcb6d239c38db Mon Sep 17 00:00:00 2001 From: Alejandro Pedraza Date: Tue, 5 Dec 2023 10:06:57 -0500 Subject: [PATCH 1/6] Add reinitialize-pods controller to linkerd-cni DaemonSet Followup to linkerd/linkerd2-proxy-init#306 Fixes linkerd/linkerd2#11073 This adds the `reinitialize-pods` container to the `linkerd-cni` DaemonSet, along with its config in `values.yaml`. Also the `linkerd-cni`'s version is bumped, to contain the new binary for this controller. ## TO-DOs - Integration test --- charts/linkerd2-cni/templates/cni-plugin.yaml | 51 +++++++++++++++++++ charts/linkerd2-cni/values.yaml | 40 ++++++++++++++- .../install-cni-plugin_default.golden | 49 ++++++++++++++++++ ...install-cni-plugin_fully_configured.golden | 47 +++++++++++++++++ ...-plugin_fully_configured_equal_dsts.golden | 47 +++++++++++++++++ ...lugin_fully_configured_no_namespace.golden | 47 +++++++++++++++++ .../install-cni-plugin_skip_ports.golden | 47 +++++++++++++++++ .../install_cni_helm_default_output.golden | 49 +++++++++++++++++- .../install_cni_helm_override_output.golden | 47 +++++++++++++++++ pkg/charts/cni/values.go | 10 ++++ 10 files changed, 431 insertions(+), 3 deletions(-) diff --git a/charts/linkerd2-cni/templates/cni-plugin.yaml b/charts/linkerd2-cni/templates/cni-plugin.yaml index 160449ee36620..62e0833d30763 100644 --- a/charts/linkerd2-cni/templates/cni-plugin.yaml +++ b/charts/linkerd2-cni/templates/cni-plugin.yaml @@ -112,6 +112,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -274,6 +277,54 @@ spec: {{- if .Values.resources }} {{- include "partials.resources" .Values.resources | nindent 8 }} {{- end }} + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: {{ .Values.reinitializePods.image.name -}}:{{- .Values.reinitializePods.image.version }} + imagePullPolicy: {{ .Values.reinitializePods.image.pullPolicy }} + {{- if .Values.reinitializePods.enableSecurityContext }} + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - {{ .Values.reinitializePods.logFormat }} + - --log-level + - {{ .Values.reinitializePods.logLevel }} + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + {{- end }} + {{- if .Values.resources }} + {{- include "partials.resources" .Values.resources | nindent 8 }} + {{- end }} + priorityClassName: system-cluster-critical volumes: {{- if ne .Values.destCNIBinDir .Values.destCNINetDir }} - name: cni-bin-dir diff --git a/charts/linkerd2-cni/values.yaml b/charts/linkerd2-cni/values.yaml index 977763edbb9a1..87340938945b3 100644 --- a/charts/linkerd2-cni/values.yaml +++ b/charts/linkerd2-cni/values.yaml @@ -53,7 
+53,7 @@ image: # -- Docker image for the CNI plugin name: "cr.l5d.io/linkerd/cni-plugin" # -- Tag for the CNI container Docker image - version: "v1.2.2" + version: "v1.3.0" # -- Pull policy for the linkerd-cni container pullPolicy: IfNotPresent @@ -86,7 +86,7 @@ extraInitContainers: [] # - mountPath: /host/etc/cni/net.d # name: cni-net-dir -# -- Resource requests and limits for linkerd-cni daemonset containers +# -- Resource requests and limits for linkerd-cni daemonset container resources: cpu: # -- Maximum amount of CPU units that the cni container can use @@ -103,3 +103,39 @@ resources: limit: "" # -- Amount of ephemeral storage that the cni container requests request: "" + +reinitializePods: + image: + # -- Docker image for the reinitialize-pods container + name: "cr.l5d.io/linkerd/cni-plugin" + # -- Tag for the reinitialize-pods container Docker image + version: "v1.3.0" + # -- Pull policy for the reinitialize-pods container + pullPolicy: IfNotPresent + + # -- Log level for the reinitialize-pods container + # @default -- info + logLevel: info + # -- Log format (`plain` or `json`) for the reinitialize-pods container + # @default -- plain + logFormat: plain + + # -- Include a securityContext in the reinitialize-pods container + enableSecurityContext: true + + resources: + cpu: + # -- Maximum amount of CPU units that the reinitialize-pods container can use + limit: "" + # -- Amount of CPU units that the reinitialize-pods container requests + request: "" + memory: + # -- Maximum amount of memory that the reinitialize-pods container can use + limit: "" + # -- Amount of memory that the reinitialize-pods container requests + request: "" + ephemeral-storage: + # -- Maximum amount of ephemeral storage that the reinitialize-pods container can use + limit: "" + # -- Amount of ephemeral storage that the reinitialize-pods container requests + request: "" diff --git a/cli/cmd/testdata/install-cni-plugin_default.golden b/cli/cmd/testdata/install-cni-plugin_default.golden index 1fb551be483ed..2f52c30758d26 100644 --- a/cli/cmd/testdata/install-cni-plugin_default.golden +++ b/cli/cmd/testdata/install-cni-plugin_default.golden @@ -25,6 +25,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -160,6 +163,52 @@ spec: resources: limits: cpu: "1m" + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 + imagePullPolicy: IfNotPresent + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - plain + - --log-level + - info + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + resources: + limits: + cpu: "1m" + priorityClassName: 
system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden index b31c00286a65d..20bedd8388423 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden @@ -25,6 +25,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -159,6 +162,50 @@ spec: readOnlyRootFilesystem: true privileged: false resources: + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 + imagePullPolicy: IfNotPresent + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - plain + - --log-level + - info + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + resources: + priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden index 2b547098b2f3b..54328acb1c5e0 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden @@ -25,6 +25,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -157,6 +160,50 @@ spec: readOnlyRootFilesystem: true privileged: false resources: + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 + imagePullPolicy: IfNotPresent + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - plain + - --log-level + - info + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + seccompProfile: + type: 
RuntimeDefault + resources: + priorityClassName: system-cluster-critical volumes: - name: cni-net-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden index b31c00286a65d..20bedd8388423 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden @@ -25,6 +25,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -159,6 +162,50 @@ spec: readOnlyRootFilesystem: true privileged: false resources: + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 + imagePullPolicy: IfNotPresent + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - plain + - --log-level + - info + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + resources: + priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden index 196296afc5c48..25d57f54ffe9d 100644 --- a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden +++ b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden @@ -25,6 +25,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -159,6 +162,50 @@ spec: readOnlyRootFilesystem: true privileged: false resources: + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 + imagePullPolicy: IfNotPresent + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - plain + - --log-level + - info + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: 
true + seccompProfile: + type: RuntimeDefault + resources: + priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install_cni_helm_default_output.golden b/cli/cmd/testdata/install_cni_helm_default_output.golden index 6bd305f0668af..f07cddd234eed 100644 --- a/cli/cmd/testdata/install_cni_helm_default_output.golden +++ b/cli/cmd/testdata/install_cni_helm_default_output.golden @@ -18,6 +18,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -111,7 +114,7 @@ spec: # script copies the files into place and then sleeps so # that Kubernetes doesn't keep trying to restart it. - name: install-cni - image: cr.l5d.io/linkerd/cni-plugin:v1.2.2 + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - name: DEST_CNI_NET_DIR @@ -151,6 +154,50 @@ spec: readOnlyRootFilesystem: true privileged: false resources: + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 + imagePullPolicy: IfNotPresent + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - plain + - --log-level + - info + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + resources: + priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install_cni_helm_override_output.golden b/cli/cmd/testdata/install_cni_helm_override_output.golden index 7d99a13d1bbd5..730b02d2cf8a4 100644 --- a/cli/cmd/testdata/install_cni_helm_override_output.golden +++ b/cli/cmd/testdata/install_cni_helm_override_output.golden @@ -18,6 +18,9 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -152,6 +155,50 @@ spec: readOnlyRootFilesystem: true privileged: false resources: + # This container watches over pods whose linkerd-network-validator + # container failed, probably because of a race condition while setting up + # the CNI plugin chain, and evicts those pods so they can try acquiring a + # proper network config again + - name: reinitialize-pods + image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 + imagePullPolicy: IfNotPresent + env: + - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - /usr/lib/linkerd/linkerd-reinitialize-pods + args: + - --admin-addr=0.0.0.0:9990 + - --log-format + - plain + - --log-level + - info + livenessProbe: + httpGet: + path: /live + port: admin-http + readinessProbe: 
+ failureThreshold: 7 + httpGet: + path: /ready + port: admin-http + initialDelaySeconds: 10 + ports: + - containerPort: 9990 + name: admin-http + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + resources: + priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/pkg/charts/cni/values.go b/pkg/charts/cni/values.go index 6c04d776404e3..fe3a9203202fc 100644 --- a/pkg/charts/cni/values.go +++ b/pkg/charts/cni/values.go @@ -35,6 +35,15 @@ type Resources struct { EphemeralStorage Constraints `json:"ephemeral-storage"` } +// ReinitializePods contains the config for the reinitialize-pods container +type ReinitializePods struct { + Image Image `json:"image"` + LogLevel string `json:"logLevel"` + LogFormat string `json:"logFormat"` + EnableSecurityContext bool `json:"enableSecurityContext"` + Resources Resources `json:"resources"` +} + // Values contains the top-level elements in the cni Helm chart type Values struct { InboundProxyPort uint `json:"inboundProxyPort"` @@ -60,6 +69,7 @@ type Values struct { EnablePSP bool `json:"enablePSP"` Privileged bool `json:"privileged"` Resources Resources `json:"resources"` + ReinitializePods ReinitializePods `json:"reinitializePods"` } // NewValues returns a new instance of the Values type. From 0e943f41a19789dbecbb25a19d54158de9911370 Mon Sep 17 00:00:00 2001 From: Alejandro Pedraza Date: Wed, 6 Dec 2023 11:25:00 -0500 Subject: [PATCH 2/6] Add RBAC for publishing events, and env var for pod name --- charts/linkerd2-cni/templates/cni-plugin.yaml | 9 ++++++++- cli/cmd/testdata/install-cni-plugin_default.golden | 9 ++++++++- .../testdata/install-cni-plugin_fully_configured.golden | 9 ++++++++- ...install-cni-plugin_fully_configured_equal_dsts.golden | 9 ++++++++- ...stall-cni-plugin_fully_configured_no_namespace.golden | 9 ++++++++- cli/cmd/testdata/install-cni-plugin_skip_ports.golden | 9 ++++++++- cli/cmd/testdata/install_cni_helm_default_output.golden | 9 ++++++++- cli/cmd/testdata/install_cni_helm_override_output.golden | 9 ++++++++- 8 files changed, 64 insertions(+), 8 deletions(-) diff --git a/charts/linkerd2-cni/templates/cni-plugin.yaml b/charts/linkerd2-cni/templates/cni-plugin.yaml index 62e0833d30763..09d10b5214644 100644 --- a/charts/linkerd2-cni/templates/cni-plugin.yaml +++ b/charts/linkerd2-cni/templates/cni-plugin.yaml @@ -115,6 +115,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -286,10 +289,14 @@ spec: imagePullPolicy: {{ .Values.reinitializePods.image.pullPolicy }} {{- if .Values.reinitializePods.enableSecurityContext }} env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: diff --git a/cli/cmd/testdata/install-cni-plugin_default.golden b/cli/cmd/testdata/install-cni-plugin_default.golden index 2f52c30758d26..c6b5547fcf28e 100644 --- a/cli/cmd/testdata/install-cni-plugin_default.golden +++ b/cli/cmd/testdata/install-cni-plugin_default.golden @@ -28,6 +28,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: 
["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -171,10 +174,14 @@ spec: image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden index 20bedd8388423..6b295968fb02c 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden @@ -28,6 +28,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -170,10 +173,14 @@ spec: image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden index 54328acb1c5e0..9841b9a6e1ff8 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden @@ -28,6 +28,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -168,10 +171,14 @@ spec: image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden index 20bedd8388423..6b295968fb02c 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden @@ -28,6 +28,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -170,10 +173,14 @@ spec: image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: diff --git a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden 
b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden index 25d57f54ffe9d..e147c729a42ac 100644 --- a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden +++ b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden @@ -28,6 +28,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -170,10 +173,14 @@ spec: image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: diff --git a/cli/cmd/testdata/install_cni_helm_default_output.golden b/cli/cmd/testdata/install_cni_helm_default_output.golden index f07cddd234eed..3cf24d0218373 100644 --- a/cli/cmd/testdata/install_cni_helm_default_output.golden +++ b/cli/cmd/testdata/install_cni_helm_default_output.golden @@ -21,6 +21,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -162,10 +165,14 @@ spec: image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: diff --git a/cli/cmd/testdata/install_cni_helm_override_output.golden b/cli/cmd/testdata/install_cni_helm_override_output.golden index 730b02d2cf8a4..a4f3c81b99f03 100644 --- a/cli/cmd/testdata/install_cni_helm_override_output.golden +++ b/cli/cmd/testdata/install_cni_helm_override_output.golden @@ -21,6 +21,9 @@ rules: - apiGroups: [""] resources: ["pods/eviction"] verbs: ["create"] +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -163,10 +166,14 @@ spec: image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 imagePullPolicy: IfNotPresent env: - - name: LINKERD_REINITIALIZE_PODS_POD_NODE_NAME + - name: LINKERD_REINITIALIZE_PODS_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: LINKERD_REINITIALIZE_PODS_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name command: - /usr/lib/linkerd/linkerd-reinitialize-pods args: From 113e3cf0ce570bf57024e048806feecfdf04f0ca Mon Sep 17 00:00:00 2001 From: Alejandro Pedraza Date: Wed, 6 Dec 2023 17:04:13 -0500 Subject: [PATCH 3/6] Remove duped priorityClassName --- charts/linkerd2-cni/README.md | 18 +++++++++++++++--- charts/linkerd2-cni/templates/cni-plugin.yaml | 1 - charts/linkerd2-cni/values.yaml | 7 +++++-- .../install-cni-plugin_fully_configured.golden | 1 - ...i-plugin_fully_configured_equal_dsts.golden | 1 - ...plugin_fully_configured_no_namespace.golden | 1 - .../install_cni_helm_override_output.golden | 1 - 7 files changed, 20 insertions(+), 10 deletions(-) diff --git a/charts/linkerd2-cni/README.md b/charts/linkerd2-cni/README.md index 30c6e4187a4e2..1f78fa2fd46bf 100644 --- a/charts/linkerd2-cni/README.md +++ b/charts/linkerd2-cni/README.md @@ -31,19 +31,31 @@ Kubernetes: 
`>=1.21.0-0` | ignoreOutboundPorts | string | `""` | Default set of outbound ports to skip via iptables | | image.name | string | `"cr.l5d.io/linkerd/cni-plugin"` | Docker image for the CNI plugin | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the linkerd-cni container | -| image.version | string | `"v1.2.2"` | Tag for the CNI container Docker image | +| image.version | string | `"v1.3.0"` | Tag for the CNI container Docker image | | imagePullSecrets | list | `[]` | | | inboundProxyPort | int | `4143` | Inbound port for the proxy container | | logLevel | string | `"info"` | Log level for the CNI plugin | | outboundProxyPort | int | `4140` | Outbound port for the proxy container | | podLabels | object | `{}` | Additional labels to add to all pods | | portsToRedirect | string | `""` | Ports to redirect to proxy | -| priorityClassName | string | `""` | Kubernetes priorityClassName for the CNI plugin's Pods | +| priorityClassName | string | `"system-cluster-critical"` | Kubernetes priorityClassName for the CNI plugin's Pods. Defaults to system-cluster-critical so it signals the scheduler to start before application pods, but after CNI plugins (whose priorityClassName is system-node-critical). This isn't strictly enforced. | | privileged | bool | `false` | Run the install-cni container in privileged mode | | proxyAdminPort | int | `4191` | Admin port for the proxy container | | proxyControlPort | int | `4190` | Control port for the proxy container | | proxyUID | int | `2102` | User id under which the proxy shall be ran | -| resources | object | `{"cpu":{"limit":"","request":""},"ephemeral-storage":{"limit":"","request":""},"memory":{"limit":"","request":""}}` | Resource requests and limits for linkerd-cni daemonset containers | +| reinitializePods.enableSecurityContext | bool | `true` | Include a securityContext in the reinitialize-pods container | +| reinitializePods.image.name | string | `"cr.l5d.io/linkerd/cni-plugin"` | Docker image for the reinitialize-pods container | +| reinitializePods.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the reinitialize-pods container | +| reinitializePods.image.version | string | `"v1.3.0"` | Tag for the reinitialize-pods container Docker image | +| reinitializePods.logFormat | string | plain | Log format (`plain` or `json`) for the reinitialize-pods container | +| reinitializePods.logLevel | string | info | Log level for the reinitialize-pods container | +| reinitializePods.resources.cpu.limit | string | `""` | Maximum amount of CPU units that the reinitialize-pods container can use | +| reinitializePods.resources.cpu.request | string | `""` | Amount of CPU units that the reinitialize-pods container requests | +| reinitializePods.resources.ephemeral-storage.limit | string | `""` | Maximum amount of ephemeral storage that the reinitialize-pods container can use | +| reinitializePods.resources.ephemeral-storage.request | string | `""` | Amount of ephemeral storage that the reinitialize-pods container requests | +| reinitializePods.resources.memory.limit | string | `""` | Maximum amount of memory that the reinitialize-pods container can use | +| reinitializePods.resources.memory.request | string | `""` | Amount of memory that the reinitialize-pods container requests | +| resources | object | `{"cpu":{"limit":"","request":""},"ephemeral-storage":{"limit":"","request":""},"memory":{"limit":"","request":""}}` | Resource requests and limits for linkerd-cni daemonset container | | resources.cpu.limit | string | `""` | Maximum 
amount of CPU units that the cni container can use | | resources.cpu.request | string | `""` | Amount of CPU units that the cni container requests | | resources.ephemeral-storage.limit | string | `""` | Maximum amount of ephemeral storage that the cni container can use | diff --git a/charts/linkerd2-cni/templates/cni-plugin.yaml b/charts/linkerd2-cni/templates/cni-plugin.yaml index 09d10b5214644..0c699f6ffaad1 100644 --- a/charts/linkerd2-cni/templates/cni-plugin.yaml +++ b/charts/linkerd2-cni/templates/cni-plugin.yaml @@ -331,7 +331,6 @@ spec: {{- if .Values.resources }} {{- include "partials.resources" .Values.resources | nindent 8 }} {{- end }} - priorityClassName: system-cluster-critical volumes: {{- if ne .Values.destCNIBinDir .Values.destCNINetDir }} - name: cni-bin-dir diff --git a/charts/linkerd2-cni/values.yaml b/charts/linkerd2-cni/values.yaml index 87340938945b3..f193665ba357d 100644 --- a/charts/linkerd2-cni/values.yaml +++ b/charts/linkerd2-cni/values.yaml @@ -26,8 +26,11 @@ destCNINetDir: "/etc/cni/net.d" destCNIBinDir: "/opt/cni/bin" # -- Configures the CNI plugin to use the -w flag for the iptables command useWaitFlag: false -# -- Kubernetes priorityClassName for the CNI plugin's Pods -priorityClassName: "" +# -- Kubernetes priorityClassName for the CNI plugin's Pods. +# Defaults to system-cluster-critical so it signals the scheduler to start +# before application pods, but after CNI plugins (whose priorityClassName is +# system-node-critical). This isn't strictly enforced. +priorityClassName: "system-cluster-critical" # -- Add a PSP resource and bind it to the linkerd-cni ServiceAccounts. # Note PSP has been deprecated since k8s v1.21 diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden index 6b295968fb02c..8f37895d2ecef 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden @@ -212,7 +212,6 @@ spec: seccompProfile: type: RuntimeDefault resources: - priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden index 9841b9a6e1ff8..824a1306c75d9 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden @@ -210,7 +210,6 @@ spec: seccompProfile: type: RuntimeDefault resources: - priorityClassName: system-cluster-critical volumes: - name: cni-net-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden index 6b295968fb02c..8f37895d2ecef 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden @@ -212,7 +212,6 @@ spec: seccompProfile: type: RuntimeDefault resources: - priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install_cni_helm_override_output.golden b/cli/cmd/testdata/install_cni_helm_override_output.golden index a4f3c81b99f03..c3e24ade8e23e 100644 --- a/cli/cmd/testdata/install_cni_helm_override_output.golden +++ b/cli/cmd/testdata/install_cni_helm_override_output.golden @@ -205,7 +205,6 @@ spec: seccompProfile: type: RuntimeDefault resources: - 
priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: From b014077b3e857be00783e39cfa8d807b649851c5 Mon Sep 17 00:00:00 2001 From: Alejandro Pedraza Date: Wed, 6 Dec 2023 17:27:58 -0500 Subject: [PATCH 4/6] Remove commented example no longer required --- charts/linkerd2-cni/values.yaml | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/charts/linkerd2-cni/values.yaml b/charts/linkerd2-cni/values.yaml index f193665ba357d..64096c0639c1c 100644 --- a/charts/linkerd2-cni/values.yaml +++ b/charts/linkerd2-cni/values.yaml @@ -74,20 +74,6 @@ imagePullSecrets: [] # -- Add additional initContainers to the daemonset extraInitContainers: [] -# - name: wait-for-other-cni -# image: busybox:1.33 -# command: -# - /bin/sh -# - -xc -# - | -# for i in $(seq 1 180); do -# test -f /host/etc/cni/net.d/10-aws.conflist && exit 0 -# sleep 1 -# done -# exit 1 -# volumeMounts: -# - mountPath: /host/etc/cni/net.d -# name: cni-net-dir # -- Resource requests and limits for linkerd-cni daemonset container resources: From 127a6f4ad2a4e58c7274824a92d6d3e348db3281 Mon Sep 17 00:00:00 2001 From: Alejandro Pedraza Date: Tue, 2 Jan 2024 16:46:00 -0500 Subject: [PATCH 5/6] Sync with latest changes in the linkerd2-proxy-init repo --- charts/linkerd2-cni/README.md | 22 +++--- charts/linkerd2-cni/templates/cni-plugin.yaml | 26 +++---- charts/linkerd2-cni/values.yaml | 69 +++++++++---------- .../install-cni-plugin_default.golden | 55 +-------------- ...install-cni-plugin_fully_configured.golden | 51 +------------- ...-plugin_fully_configured_equal_dsts.golden | 51 +------------- ...lugin_fully_configured_no_namespace.golden | 51 +------------- .../install-cni-plugin_skip_ports.golden | 53 +------------- .../install_cni_helm_default_output.golden | 53 +------------- .../install_cni_helm_override_output.golden | 51 +------------- pkg/charts/cni/values.go | 6 +- 11 files changed, 78 insertions(+), 410 deletions(-) diff --git a/charts/linkerd2-cni/README.md b/charts/linkerd2-cni/README.md index 1f78fa2fd46bf..1daa9a94f1c03 100644 --- a/charts/linkerd2-cni/README.md +++ b/charts/linkerd2-cni/README.md @@ -43,18 +43,16 @@ Kubernetes: `>=1.21.0-0` | proxyAdminPort | int | `4191` | Admin port for the proxy container | | proxyControlPort | int | `4190` | Control port for the proxy container | | proxyUID | int | `2102` | User id under which the proxy shall be ran | -| reinitializePods.enableSecurityContext | bool | `true` | Include a securityContext in the reinitialize-pods container | -| reinitializePods.image.name | string | `"cr.l5d.io/linkerd/cni-plugin"` | Docker image for the reinitialize-pods container | -| reinitializePods.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the reinitialize-pods container | -| reinitializePods.image.version | string | `"v1.3.0"` | Tag for the reinitialize-pods container Docker image | -| reinitializePods.logFormat | string | plain | Log format (`plain` or `json`) for the reinitialize-pods container | -| reinitializePods.logLevel | string | info | Log level for the reinitialize-pods container | -| reinitializePods.resources.cpu.limit | string | `""` | Maximum amount of CPU units that the reinitialize-pods container can use | -| reinitializePods.resources.cpu.request | string | `""` | Amount of CPU units that the reinitialize-pods container requests | -| reinitializePods.resources.ephemeral-storage.limit | string | `""` | Maximum amount of ephemeral storage that the reinitialize-pods container can use | -| 
reinitializePods.resources.ephemeral-storage.request | string | `""` | Amount of ephemeral storage that the reinitialize-pods container requests | -| reinitializePods.resources.memory.limit | string | `""` | Maximum amount of memory that the reinitialize-pods container can use | -| reinitializePods.resources.memory.request | string | `""` | Amount of memory that the reinitialize-pods container requests | +| repairController | object | `{"enableSecurityContext":true,"enabled":false,"logFormat":"plain","logLevel":"info","resources":{"cpu":{"limit":"","request":""},"ephemeral-storage":{"limit":"","request":""},"memory":{"limit":"","request":""}}}` | The cni-repair-controller scans pods in each node to find those that have been injected by linkerd, and whose linkerd-network-validator container has failed. This is usually caused by a race between linkerd-cni and the CNI plugin used in the cluster. This controller deletes those failed pods so they can restart and rety re-acquiring a proper network config. | +| repairController.enableSecurityContext | bool | `true` | Include a securityContext in the repair-controller container | +| repairController.logFormat | string | plain | Log format (`plain` or `json`) for the repair-controller container | +| repairController.logLevel | string | info | Log level for the repair-controller container | +| repairController.resources.cpu.limit | string | `""` | Maximum amount of CPU units that the repair-controller container can use | +| repairController.resources.cpu.request | string | `""` | Amount of CPU units that the repair-controller container requests | +| repairController.resources.ephemeral-storage.limit | string | `""` | Maximum amount of ephemeral storage that the repair-controller container can use | +| repairController.resources.ephemeral-storage.request | string | `""` | Amount of ephemeral storage that the repair-controller container requests | +| repairController.resources.memory.limit | string | `""` | Maximum amount of memory that the repair-controller container can use | +| repairController.resources.memory.request | string | `""` | Amount of memory that the repair-controller container requests | | resources | object | `{"cpu":{"limit":"","request":""},"ephemeral-storage":{"limit":"","request":""},"memory":{"limit":"","request":""}}` | Resource requests and limits for linkerd-cni daemonset container | | resources.cpu.limit | string | `""` | Maximum amount of CPU units that the cni container can use | | resources.cpu.request | string | `""` | Amount of CPU units that the cni container requests | diff --git a/charts/linkerd2-cni/templates/cni-plugin.yaml b/charts/linkerd2-cni/templates/cni-plugin.yaml index 0c699f6ffaad1..a0a78fc62a4b3 100644 --- a/charts/linkerd2-cni/templates/cni-plugin.yaml +++ b/charts/linkerd2-cni/templates/cni-plugin.yaml @@ -113,8 +113,8 @@ rules: resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -280,31 +280,32 @@ spec: {{- if .Values.resources }} {{- include "partials.resources" .Values.resources | nindent 8 }} {{- end }} + {{- if .Values.repairController.enabled }} # This container watches over pods whose linkerd-network-validator # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a + # the CNI plugin 
chain, and deletes those pods so they can try acquiring a # proper network config again - - name: reinitialize-pods - image: {{ .Values.reinitializePods.image.name -}}:{{- .Values.reinitializePods.image.version }} - imagePullPolicy: {{ .Values.reinitializePods.image.pullPolicy }} - {{- if .Values.reinitializePods.enableSecurityContext }} + - name: repair-controller + image: {{ .Values.image.name -}}:{{- .Values.image.version }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.repairController.enableSecurityContext }} env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME + - name: LINKERD_CNI_REPAIR_CONTROLLER_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME + - name: LINKERD_CNI_REPAIR_CONTROLLER_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name command: - - /usr/lib/linkerd/linkerd-reinitialize-pods + - /usr/lib/linkerd/linkerd-cni-repair-controller args: - --admin-addr=0.0.0.0:9990 - --log-format - - {{ .Values.reinitializePods.logFormat }} + - {{ .Values.repairController.logFormat }} - --log-level - - {{ .Values.reinitializePods.logLevel }} + - {{ .Values.repairController.logLevel }} livenessProbe: httpGet: path: /live @@ -331,6 +332,7 @@ spec: {{- if .Values.resources }} {{- include "partials.resources" .Values.resources | nindent 8 }} {{- end }} + {{- end }} volumes: {{- if ne .Values.destCNIBinDir .Values.destCNINetDir }} - name: cni-bin-dir diff --git a/charts/linkerd2-cni/values.yaml b/charts/linkerd2-cni/values.yaml index 64096c0639c1c..9981f48b5cb07 100644 --- a/charts/linkerd2-cni/values.yaml +++ b/charts/linkerd2-cni/values.yaml @@ -75,56 +75,55 @@ imagePullSecrets: [] # -- Add additional initContainers to the daemonset extraInitContainers: [] -# -- Resource requests and limits for linkerd-cni daemonset container -resources: - cpu: - # -- Maximum amount of CPU units that the cni container can use - limit: "" - # -- Amount of CPU units that the cni container requests - request: "" - memory: - # -- Maximum amount of memory that the cni container can use - limit: "" - # -- Amount of memory that the cni container requests - request: "" - ephemeral-storage: - # -- Maximum amount of ephemeral storage that the cni container can use - limit: "" - # -- Amount of ephemeral storage that the cni container requests - request: "" - -reinitializePods: - image: - # -- Docker image for the reinitialize-pods container - name: "cr.l5d.io/linkerd/cni-plugin" - # -- Tag for the reinitialize-pods container Docker image - version: "v1.3.0" - # -- Pull policy for the reinitialize-pods container - pullPolicy: IfNotPresent +# -- The cni-repair-controller scans pods in each node to find those that have +# been injected by linkerd, and whose linkerd-network-validator container has +# failed. This is usually caused by a race between linkerd-cni and the CNI +# plugin used in the cluster. This controller deletes those failed pods so they +# can restart and rety re-acquiring a proper network config. 
+repairController: + enabled: false - # -- Log level for the reinitialize-pods container + # -- Log level for the repair-controller container # @default -- info logLevel: info - # -- Log format (`plain` or `json`) for the reinitialize-pods container + # -- Log format (`plain` or `json`) for the repair-controller container # @default -- plain logFormat: plain - # -- Include a securityContext in the reinitialize-pods container + # -- Include a securityContext in the repair-controller container enableSecurityContext: true resources: cpu: - # -- Maximum amount of CPU units that the reinitialize-pods container can use + # -- Maximum amount of CPU units that the repair-controller container can use limit: "" - # -- Amount of CPU units that the reinitialize-pods container requests + # -- Amount of CPU units that the repair-controller container requests request: "" memory: - # -- Maximum amount of memory that the reinitialize-pods container can use + # -- Maximum amount of memory that the repair-controller container can use limit: "" - # -- Amount of memory that the reinitialize-pods container requests + # -- Amount of memory that the repair-controller container requests request: "" ephemeral-storage: - # -- Maximum amount of ephemeral storage that the reinitialize-pods container can use + # -- Maximum amount of ephemeral storage that the repair-controller container can use limit: "" - # -- Amount of ephemeral storage that the reinitialize-pods container requests + # -- Amount of ephemeral storage that the repair-controller container requests request: "" + +# -- Resource requests and limits for linkerd-cni daemonset container +resources: + cpu: + # -- Maximum amount of CPU units that the cni container can use + limit: "" + # -- Amount of CPU units that the cni container requests + request: "" + memory: + # -- Maximum amount of memory that the cni container can use + limit: "" + # -- Amount of memory that the cni container requests + request: "" + ephemeral-storage: + # -- Maximum amount of ephemeral storage that the cni container can use + limit: "" + # -- Amount of ephemeral storage that the cni container requests + request: "" diff --git a/cli/cmd/testdata/install-cni-plugin_default.golden b/cli/cmd/testdata/install-cni-plugin_default.golden index c6b5547fcf28e..ad985b2d23087 100644 --- a/cli/cmd/testdata/install-cni-plugin_default.golden +++ b/cli/cmd/testdata/install-cni-plugin_default.golden @@ -26,8 +26,8 @@ rules: resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -118,6 +118,7 @@ spec: seccompProfile: type: RuntimeDefault serviceAccountName: linkerd-cni + priorityClassName: system-cluster-critical containers: # This container installs the linkerd CNI binaries # and CNI network config file on each node. 
The install @@ -166,56 +167,6 @@ spec: resources: limits: cpu: "1m" - # This container watches over pods whose linkerd-network-validator - # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a - # proper network config again - - name: reinitialize-pods - image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 - imagePullPolicy: IfNotPresent - env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /usr/lib/linkerd/linkerd-reinitialize-pods - args: - - --admin-addr=0.0.0.0:9990 - - --log-format - - plain - - --log-level - - info - livenessProbe: - httpGet: - path: /live - port: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: admin-http - initialDelaySeconds: 10 - ports: - - containerPort: 9990 - name: admin-http - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - resources: - limits: - cpu: "1m" - priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden index 8f37895d2ecef..0e7cfd3d3ba0f 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden @@ -26,8 +26,8 @@ rules: resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -165,53 +165,6 @@ spec: readOnlyRootFilesystem: true privileged: false resources: - # This container watches over pods whose linkerd-network-validator - # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a - # proper network config again - - name: reinitialize-pods - image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 - imagePullPolicy: IfNotPresent - env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /usr/lib/linkerd/linkerd-reinitialize-pods - args: - - --admin-addr=0.0.0.0:9990 - - --log-format - - plain - - --log-level - - info - livenessProbe: - httpGet: - path: /live - port: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: admin-http - initialDelaySeconds: 10 - ports: - - containerPort: 9990 - name: admin-http - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - resources: volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden index 824a1306c75d9..4f518d4cd18bb 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden @@ -26,8 +26,8 @@ rules: resources: ["pods", "nodes", 
"namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -163,53 +163,6 @@ spec: readOnlyRootFilesystem: true privileged: false resources: - # This container watches over pods whose linkerd-network-validator - # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a - # proper network config again - - name: reinitialize-pods - image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 - imagePullPolicy: IfNotPresent - env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /usr/lib/linkerd/linkerd-reinitialize-pods - args: - - --admin-addr=0.0.0.0:9990 - - --log-format - - plain - - --log-level - - info - livenessProbe: - httpGet: - path: /live - port: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: admin-http - initialDelaySeconds: 10 - ports: - - containerPort: 9990 - name: admin-http - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - resources: volumes: - name: cni-net-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden index 8f37895d2ecef..0e7cfd3d3ba0f 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden @@ -26,8 +26,8 @@ rules: resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -165,53 +165,6 @@ spec: readOnlyRootFilesystem: true privileged: false resources: - # This container watches over pods whose linkerd-network-validator - # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a - # proper network config again - - name: reinitialize-pods - image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 - imagePullPolicy: IfNotPresent - env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /usr/lib/linkerd/linkerd-reinitialize-pods - args: - - --admin-addr=0.0.0.0:9990 - - --log-format - - plain - - --log-level - - info - livenessProbe: - httpGet: - path: /live - port: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: admin-http - initialDelaySeconds: 10 - ports: - - containerPort: 9990 - name: admin-http - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - resources: volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden index e147c729a42ac..33d42384dddae 100644 
--- a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden +++ b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden @@ -26,8 +26,8 @@ rules: resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -119,6 +119,7 @@ spec: seccompProfile: type: RuntimeDefault serviceAccountName: linkerd-cni + priorityClassName: system-cluster-critical containers: # This container installs the linkerd CNI binaries # and CNI network config file on each node. The install @@ -165,54 +166,6 @@ spec: readOnlyRootFilesystem: true privileged: false resources: - # This container watches over pods whose linkerd-network-validator - # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a - # proper network config again - - name: reinitialize-pods - image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 - imagePullPolicy: IfNotPresent - env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /usr/lib/linkerd/linkerd-reinitialize-pods - args: - - --admin-addr=0.0.0.0:9990 - - --log-format - - plain - - --log-level - - info - livenessProbe: - httpGet: - path: /live - port: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: admin-http - initialDelaySeconds: 10 - ports: - - containerPort: 9990 - name: admin-http - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - resources: - priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install_cni_helm_default_output.golden b/cli/cmd/testdata/install_cni_helm_default_output.golden index 3cf24d0218373..dab4c0646df09 100644 --- a/cli/cmd/testdata/install_cni_helm_default_output.golden +++ b/cli/cmd/testdata/install_cni_helm_default_output.golden @@ -19,8 +19,8 @@ rules: resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -111,6 +111,7 @@ spec: seccompProfile: type: RuntimeDefault serviceAccountName: linkerd-cni + priorityClassName: system-cluster-critical containers: # This container installs the linkerd CNI binaries # and CNI network config file on each node. 
The install @@ -157,54 +158,6 @@ spec: readOnlyRootFilesystem: true privileged: false resources: - # This container watches over pods whose linkerd-network-validator - # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a - # proper network config again - - name: reinitialize-pods - image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 - imagePullPolicy: IfNotPresent - env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /usr/lib/linkerd/linkerd-reinitialize-pods - args: - - --admin-addr=0.0.0.0:9990 - - --log-format - - plain - - --log-level - - info - livenessProbe: - httpGet: - path: /live - port: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: admin-http - initialDelaySeconds: 10 - ports: - - containerPort: 9990 - name: admin-http - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - resources: - priorityClassName: system-cluster-critical volumes: - name: cni-bin-dir hostPath: diff --git a/cli/cmd/testdata/install_cni_helm_override_output.golden b/cli/cmd/testdata/install_cni_helm_override_output.golden index c3e24ade8e23e..e94d77dff278c 100644 --- a/cli/cmd/testdata/install_cni_helm_override_output.golden +++ b/cli/cmd/testdata/install_cni_helm_override_output.golden @@ -19,8 +19,8 @@ rules: resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] + resources: ["pods"] + verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] @@ -158,53 +158,6 @@ spec: readOnlyRootFilesystem: true privileged: false resources: - # This container watches over pods whose linkerd-network-validator - # container failed, probably because of a race condition while setting up - # the CNI plugin chain, and evicts those pods so they can try acquiring a - # proper network config again - - name: reinitialize-pods - image: cr.l5d.io/linkerd/cni-plugin:v1.3.0 - imagePullPolicy: IfNotPresent - env: - - name: LINKERD_REINITIALIZE_PODS_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: LINKERD_REINITIALIZE_PODS_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /usr/lib/linkerd/linkerd-reinitialize-pods - args: - - --admin-addr=0.0.0.0:9990 - - --log-format - - plain - - --log-level - - info - livenessProbe: - httpGet: - path: /live - port: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: admin-http - initialDelaySeconds: 10 - ports: - - containerPort: 9990 - name: admin-http - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - resources: volumes: - name: cni-bin-dir hostPath: diff --git a/pkg/charts/cni/values.go b/pkg/charts/cni/values.go index fe3a9203202fc..46e29d23d567e 100644 --- a/pkg/charts/cni/values.go +++ b/pkg/charts/cni/values.go @@ -35,8 +35,8 @@ type Resources struct { EphemeralStorage Constraints `json:"ephemeral-storage"` } -// ReinitializePods contains the config for the reinitialize-pods container -type ReinitializePods struct { +// RepairController contains the 
config for the repair-controller container +type RepairController struct { Image Image `json:"image"` LogLevel string `json:"logLevel"` LogFormat string `json:"logFormat"` @@ -69,7 +69,7 @@ type Values struct { EnablePSP bool `json:"enablePSP"` Privileged bool `json:"privileged"` Resources Resources `json:"resources"` - ReinitializePods ReinitializePods `json:"reinitializePods"` + RepairController RepairController `json:"repairController"` } // NewValues returns a new instance of the Values type. From 5a70742099320267119d09854ef9003b162de174 Mon Sep 17 00:00:00 2001 From: Alejandro Pedraza Date: Fri, 5 Jan 2024 09:21:31 -0500 Subject: [PATCH 6/6] @olix0r's feedback --- charts/linkerd2-cni/README.md | 4 ++-- charts/linkerd2-cni/templates/cni-plugin.yaml | 2 ++ charts/linkerd2-cni/values.yaml | 12 +++++------- cli/cmd/testdata/install-cni-plugin_default.golden | 7 ------- .../install-cni-plugin_fully_configured.golden | 6 ------ ...all-cni-plugin_fully_configured_equal_dsts.golden | 6 ------ ...l-cni-plugin_fully_configured_no_namespace.golden | 6 ------ .../testdata/install-cni-plugin_skip_ports.golden | 7 ------- .../testdata/install_cni_helm_default_output.golden | 7 ------- .../testdata/install_cni_helm_override_output.golden | 6 ------ 10 files changed, 9 insertions(+), 54 deletions(-) diff --git a/charts/linkerd2-cni/README.md b/charts/linkerd2-cni/README.md index 1daa9a94f1c03..d4a44b2bda79e 100644 --- a/charts/linkerd2-cni/README.md +++ b/charts/linkerd2-cni/README.md @@ -38,13 +38,13 @@ Kubernetes: `>=1.21.0-0` | outboundProxyPort | int | `4140` | Outbound port for the proxy container | | podLabels | object | `{}` | Additional labels to add to all pods | | portsToRedirect | string | `""` | Ports to redirect to proxy | -| priorityClassName | string | `"system-cluster-critical"` | Kubernetes priorityClassName for the CNI plugin's Pods. Defaults to system-cluster-critical so it signals the scheduler to start before application pods, but after CNI plugins (whose priorityClassName is system-node-critical). This isn't strictly enforced. | +| priorityClassName | string | `""` | Kubernetes priorityClassName for the CNI plugin's Pods | | privileged | bool | `false` | Run the install-cni container in privileged mode | | proxyAdminPort | int | `4191` | Admin port for the proxy container | | proxyControlPort | int | `4190` | Control port for the proxy container | | proxyUID | int | `2102` | User id under which the proxy shall be ran | -| repairController | object | `{"enableSecurityContext":true,"enabled":false,"logFormat":"plain","logLevel":"info","resources":{"cpu":{"limit":"","request":""},"ephemeral-storage":{"limit":"","request":""},"memory":{"limit":"","request":""}}}` | The cni-repair-controller scans pods in each node to find those that have been injected by linkerd, and whose linkerd-network-validator container has failed. This is usually caused by a race between linkerd-cni and the CNI plugin used in the cluster. This controller deletes those failed pods so they can restart and rety re-acquiring a proper network config. 
| | repairController.enableSecurityContext | bool | `true` | Include a securityContext in the repair-controller container | +| repairController.enabled | bool | `false` | Enables the repair-controller container | | repairController.logFormat | string | plain | Log format (`plain` or `json`) for the repair-controller container | | repairController.logLevel | string | info | Log level for the repair-controller container | | repairController.resources.cpu.limit | string | `""` | Maximum amount of CPU units that the repair-controller container can use | diff --git a/charts/linkerd2-cni/templates/cni-plugin.yaml b/charts/linkerd2-cni/templates/cni-plugin.yaml index a0a78fc62a4b3..69d3e0e641574 100644 --- a/charts/linkerd2-cni/templates/cni-plugin.yaml +++ b/charts/linkerd2-cni/templates/cni-plugin.yaml @@ -112,12 +112,14 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] +{{- if .Values.repairController.enabled }} - apiGroups: [""] resources: ["pods"] verbs: ["delete"] - apiGroups: ["events.k8s.io"] resources: ["events"] verbs: ["create"] +{{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/charts/linkerd2-cni/values.yaml b/charts/linkerd2-cni/values.yaml index 9981f48b5cb07..a9f9e8fd48781 100644 --- a/charts/linkerd2-cni/values.yaml +++ b/charts/linkerd2-cni/values.yaml @@ -26,11 +26,8 @@ destCNINetDir: "/etc/cni/net.d" destCNIBinDir: "/opt/cni/bin" # -- Configures the CNI plugin to use the -w flag for the iptables command useWaitFlag: false -# -- Kubernetes priorityClassName for the CNI plugin's Pods. -# Defaults to system-cluster-critical so it signals the scheduler to start -# before application pods, but after CNI plugins (whose priorityClassName is -# system-node-critical). This isn't strictly enforced. -priorityClassName: "system-cluster-critical" +# -- Kubernetes priorityClassName for the CNI plugin's Pods +priorityClassName: "" # -- Add a PSP resource and bind it to the linkerd-cni ServiceAccounts. # Note PSP has been deprecated since k8s v1.21 @@ -75,12 +72,13 @@ imagePullSecrets: [] # -- Add additional initContainers to the daemonset extraInitContainers: [] -# -- The cni-repair-controller scans pods in each node to find those that have +# The cni-repair-controller scans pods in each node to find those that have # been injected by linkerd, and whose linkerd-network-validator container has -# failed. This is usually caused by a race between linkerd-cni and the CNI +# failed. This is usually caused by a race between linkerd-cni and the CNI # plugin used in the cluster. This controller deletes those failed pods so they # can restart and rety re-acquiring a proper network config. 
repairController: + # -- Enables the repair-controller container enabled: false # -- Log level for the repair-controller container diff --git a/cli/cmd/testdata/install-cni-plugin_default.golden b/cli/cmd/testdata/install-cni-plugin_default.golden index ad985b2d23087..1fb551be483ed 100644 --- a/cli/cmd/testdata/install-cni-plugin_default.golden +++ b/cli/cmd/testdata/install-cni-plugin_default.golden @@ -25,12 +25,6 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["delete"] -- apiGroups: ["events.k8s.io"] - resources: ["events"] - verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -118,7 +112,6 @@ spec: seccompProfile: type: RuntimeDefault serviceAccountName: linkerd-cni - priorityClassName: system-cluster-critical containers: # This container installs the linkerd CNI binaries # and CNI network config file on each node. The install diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden index 0e7cfd3d3ba0f..b31c00286a65d 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured.golden @@ -25,12 +25,6 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["delete"] -- apiGroups: ["events.k8s.io"] - resources: ["events"] - verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden index 4f518d4cd18bb..2b547098b2f3b 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_equal_dsts.golden @@ -25,12 +25,6 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["delete"] -- apiGroups: ["events.k8s.io"] - resources: ["events"] - verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden index 0e7cfd3d3ba0f..b31c00286a65d 100644 --- a/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden +++ b/cli/cmd/testdata/install-cni-plugin_fully_configured_no_namespace.golden @@ -25,12 +25,6 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["delete"] -- apiGroups: ["events.k8s.io"] - resources: ["events"] - verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden index 33d42384dddae..196296afc5c48 100644 --- a/cli/cmd/testdata/install-cni-plugin_skip_ports.golden +++ b/cli/cmd/testdata/install-cni-plugin_skip_ports.golden @@ -25,12 +25,6 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["delete"] -- apiGroups: ["events.k8s.io"] - 
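The `repairController` values section above pairs with the `ReinitializePods` → `RepairController` rename in pkg/charts/cni/values.go earlier in this series: the struct's json tag is what ties the Helm values key to the Go field. A trimmed sketch of that mapping, assuming sigs.k8s.io/yaml-style decoding; the struct below is abbreviated and local to the example, the real type lives in pkg/charts/cni/values.go:

    package main

    import (
        "fmt"

        "sigs.k8s.io/yaml"
    )

    // Abbreviated stand-in for the RepairController type; the json tags are
    // what map repairController.* keys from values.yaml onto the Go fields.
    type repairControllerValues struct {
        Enabled  bool   `json:"enabled"`
        LogLevel string `json:"logLevel"`
    }

    type chartValues struct {
        RepairController repairControllerValues `json:"repairController"`
    }

    func main() {
        // The same override a user would pass with --set repairController.enabled=true.
        raw := []byte("repairController:\n  enabled: true\n  logLevel: debug\n")

        var v chartValues
        if err := yaml.Unmarshal(raw, &v); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", v) // {RepairController:{Enabled:true LogLevel:debug}}
    }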
resources: ["events"] - verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -119,7 +113,6 @@ spec: seccompProfile: type: RuntimeDefault serviceAccountName: linkerd-cni - priorityClassName: system-cluster-critical containers: # This container installs the linkerd CNI binaries # and CNI network config file on each node. The install diff --git a/cli/cmd/testdata/install_cni_helm_default_output.golden b/cli/cmd/testdata/install_cni_helm_default_output.golden index dab4c0646df09..566534f1efea5 100644 --- a/cli/cmd/testdata/install_cni_helm_default_output.golden +++ b/cli/cmd/testdata/install_cni_helm_default_output.golden @@ -18,12 +18,6 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["delete"] -- apiGroups: ["events.k8s.io"] - resources: ["events"] - verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -111,7 +105,6 @@ spec: seccompProfile: type: RuntimeDefault serviceAccountName: linkerd-cni - priorityClassName: system-cluster-critical containers: # This container installs the linkerd CNI binaries # and CNI network config file on each node. The install diff --git a/cli/cmd/testdata/install_cni_helm_override_output.golden b/cli/cmd/testdata/install_cni_helm_override_output.golden index e94d77dff278c..7d99a13d1bbd5 100644 --- a/cli/cmd/testdata/install_cni_helm_override_output.golden +++ b/cli/cmd/testdata/install_cni_helm_override_output.golden @@ -18,12 +18,6 @@ rules: - apiGroups: [""] resources: ["pods", "nodes", "namespaces", "services"] verbs: ["list", "get", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["delete"] -- apiGroups: ["events.k8s.io"] - resources: ["events"] - verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding