From bd743c5ab68957ab9cba0d49512ab663b77c4543 Mon Sep 17 00:00:00 2001
From: Benjamin Ruland
Date: Fri, 4 Oct 2024 10:39:11 +0200
Subject: [PATCH] Adjusted rules for BSI APP.4.4.A19 according to review

---
 .../rule.yml                                  | 26 ++--------------
 .../rule.yml                                  | 30 +++----------------
 .../rule.yml                                  |  5 ++--
 .../infra_nodes_in_two_zones_or_more/rule.yml |  5 ++--
 .../rule.yml                                  | 19 ++++++------
 .../tests/master_infra_three_nodes.pass.sh    |  0
 .../tests/ocp4/e2e.yml                        |  0
 .../tests/single_worker.fail.sh               |  0
 .../three_control_plane_nodes/rule.yml        | 29 +++++++++---------
 .../rule.yml                                  |  3 +-
 controls/bsi_app_4_4.yml                      |  2 +-
 shared/references/cce-redhat-avail.txt        |  7 -----
 12 files changed, 40 insertions(+), 86 deletions(-)
 rename applications/openshift/high-availability/{multiple_nodes_in_every_role => multiple_nodes_in_every_mcp}/rule.yml (65%)
 rename applications/openshift/high-availability/{multiple_nodes_in_every_role => multiple_nodes_in_every_mcp}/tests/master_infra_three_nodes.pass.sh (100%)
 rename applications/openshift/high-availability/{multiple_nodes_in_every_role => multiple_nodes_in_every_mcp}/tests/ocp4/e2e.yml (100%)
 rename applications/openshift/high-availability/{multiple_nodes_in_every_role => multiple_nodes_in_every_mcp}/tests/single_worker.fail.sh (100%)

diff --git a/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_deployment/rule.yml b/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_deployment/rule.yml
index 45d632c62ad..d5d04a8ea55 100644
--- a/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_deployment/rule.yml
+++ b/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_deployment/rule.yml
@@ -17,29 +17,6 @@ description: |-
     names: var_deployments_without_high_availability. This will ignore deployments matching
     those names in all namespaces.
 
-    An example allowing all deployments named uncritical-service is as follows:
-
-    <pre>
-    apiVersion: compliance.openshift.io/v1alpha1
-    kind: TailoredProfile
-    metadata:
-      name: bsi-additional-deployments
-    spec:
-      description: Allows additional deployments to not be highly available and evenly spread
-      setValues:
-      - name: upstream-ocp4-var_deployments_without_high_availability
-        rationale: Ignore our uncritical service
-        value: ^uncritical-service$
-      extends: upstream-ocp4-bsi
-      title: Modified BSI allowing non-highly-available deployments
-    </pre>
-
-    Finally, reference this TailoredProfile in a ScanSettingBinding
-    For more information on Tailoring the Compliance Operator, please consult the
-    OpenShift documentation:
-    {{{ weblink(link="https://docs.openshift.com/container-platform/latest/security/compliance_operator/co-scans/compliance-operator-tailor.html") }}}
-
 
 rationale: |-
     Distributing Kubernetes pods across nodes and availability zones using pod topology spread
     constraints and anti-affinity rules is essential for enhancing high availability, fault
@@ -47,7 +24,8 @@ rationale: |-
     This approach ensures that a single node or AZ failure does not lead to total application
     downtime, as workloads are balanced and resources are efficiently utilized.
 
-identifiers: {}
+identifiers:
+    cce@ocp4: CCE-89351-1
 
 severity: medium
 
diff --git a/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_statefulset/rule.yml b/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_statefulset/rule.yml
index f6d0914c9cb..a14358769e6 100644
--- a/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_statefulset/rule.yml
+++ b/applications/openshift/high-availability/anti_affinity_or_topology_spread_constraints_in_statefulset/rule.yml
@@ -1,6 +1,6 @@
 documentation_complete: true
 
-title: 'Ensure statefulsets have either anti-affinity rules or topology spread constraints'
+title: 'Ensure Statefulsets have either Anti-Affinity Rules or Topology Spread Constraints'
 
 description: |-
     Distributing Kubernetes pods across nodes and availability zones using pod topology spread
@@ -10,36 +10,13 @@ description: |-
     There might be statefulsets, that do not require high availability or spreading across nodes.
     To limit the number of false positives, this rule only checks statefulsets with a replica count
-    of more than one. For statefulsets with one replica neither anti-affinity rules nor topology
+    of more than one. For statefulsets with one replica, neither anti-affinity rules nor topology
     spread constraints provide any value.
 
     To exclude other statefulsets from this rule, you can create a regular expression for
     statefulset names: var_statefulsets_without_high_availability. This will ignore statefulsets
     matching those names in all namespaces.
 
-    An example allowing all statefulsets named uncritical-service is as follows:
-
-    <pre>
-    apiVersion: compliance.openshift.io/v1alpha1
-    kind: TailoredProfile
-    metadata:
-      name: bsi-additional-statefulsets
-    spec:
-      description: Allows additional statefulsets to not be highly available and evenly spread
-      setValues:
-      - name: upstream-ocp4-var_statefulsets_without_high_availability
-        rationale: Ignore our uncritical service
-        value: ^uncritical-service$
-      extends: upstream-ocp4-bsi
-      title: Modified BSI allowing non-highly-available statefulsets
-    </pre>
-
-    Finally, reference this TailoredProfile in a ScanSettingBinding
-    For more information on Tailoring the Compliance Operator, please consult the
-    OpenShift documentation:
-    {{{ weblink(link="https://docs.openshift.com/container-platform/4.16/security/compliance_operator/co-scans/compliance-operator-tailor.html") }}}
-
 
 rationale: |-
     Distributing Kubernetes pods across nodes and availability zones using pod topology spread
     constraints and anti-affinity rules is essential for enhancing high availability, fault
@@ -47,7 +24,8 @@ rationale: |-
     This approach ensures that a single node or AZ failure does not lead to total application
     downtime, as workloads are balanced and resources are efficiently utilized.
 
-identifiers: {}
+identifiers:
+    cce@ocp4: CCE-89908-8
 
 severity: medium
 
diff --git a/applications/openshift/high-availability/control_plane_nodes_in_three_zones/rule.yml b/applications/openshift/high-availability/control_plane_nodes_in_three_zones/rule.yml
index f380d71ccf2..c53a1838a39 100644
--- a/applications/openshift/high-availability/control_plane_nodes_in_three_zones/rule.yml
+++ b/applications/openshift/high-availability/control_plane_nodes_in_three_zones/rule.yml
@@ -1,6 +1,6 @@
 documentation_complete: true
 
-title: 'Ensure control plane / master nodes are distribute across three failure zones'
+title: 'Ensure Control Plane / Master Nodes are Distributed Across Three Failure Zones'
 
 description: |-
     Distributing Kubernetes control plane nodes across failure zones enhances security by mitigating
@@ -21,7 +21,8 @@ rationale: |-
     This label is automatically assigned to each node by cloud providers but might need to be managed
     manually in other environments
 
-identifiers: {}
+identifiers:
+    cce@ocp4: CCE-88713-3
 
 severity: medium
 
diff --git a/applications/openshift/high-availability/infra_nodes_in_two_zones_or_more/rule.yml b/applications/openshift/high-availability/infra_nodes_in_two_zones_or_more/rule.yml
index 638abf77a46..45d4818c65c 100644
--- a/applications/openshift/high-availability/infra_nodes_in_two_zones_or_more/rule.yml
+++ b/applications/openshift/high-availability/infra_nodes_in_two_zones_or_more/rule.yml
@@ -1,6 +1,6 @@
 documentation_complete: true
 
-title: 'Ensure infrastructure nodes are distribute across three failure zones'
+title: 'Ensure Infrastructure Nodes are Distributed Across Three Failure Zones'
 
 description: |-
     Distributing Kubernetes infrastructure nodes across failure zones enhances security by mitigating
@@ -20,7 +20,8 @@ rationale: |-
     This label is automatically assigned to each node by cloud providers but might need to be managed
     manually in other environments
 
-identifiers: {}
+identifiers:
+    cce@ocp4: CCE-87050-1
 
 severity: medium
 
diff --git a/applications/openshift/high-availability/multiple_nodes_in_every_role/rule.yml b/applications/openshift/high-availability/multiple_nodes_in_every_mcp/rule.yml
similarity index 65%
rename from applications/openshift/high-availability/multiple_nodes_in_every_role/rule.yml
rename to applications/openshift/high-availability/multiple_nodes_in_every_mcp/rule.yml
index 06e4aa1115c..51b514577ed 100644
--- a/applications/openshift/high-availability/multiple_nodes_in_every_role/rule.yml
+++ b/applications/openshift/high-availability/multiple_nodes_in_every_mcp/rule.yml
@@ -1,18 +1,18 @@
 documentation_complete: true
 
-title: 'Ensure every MachineConfigPool consists of more than one node'
+title: 'Ensure every MachineConfigPool consists of More Than One Node'
 
 description: |-
highly available, every node role should - consist of more than one node. This enables workloads to be scheduled across multiple nodes and - stay available in case one node of a role is unavailable. Different node roles may exist to isolate - control plane, infrastructure and application workload. There might be additional use cases to - create additional node roles for further isolation. + To ensure, that workloads are able to be provisioned highly available, every node MachineConfigPool + should consist of more than one node. This enables workloads to be scheduled across multiple nodes and + stay available in case one node of a MachineConfigPool is unavailable. Different MachineConfigPools + may exist to isolate control plane, infrastructure and application workload. There might be additional + use cases to create additional MachineConfigPools for further isolation. rationale: |- - To ensure, that workloads are able to be provisioned highly available, every node role should + To ensure, that workloads are able to be provisioned highly available, every MachineConfigPool should consist of more than one node. This enables workloads to be scheduled across multiple nodes and - stay available in case one node of a role is unavailable. + stay available in case one node of a MachineConfigPool is unavailable. {{% set jqfilter = '[.items[] | select(.status.machineCount == 1 or .status.machineCount == 0) | .metadata.name]' %}} @@ -23,7 +23,8 @@ ocil: |-
     <pre>$ oc get machineconfigpools -o json | jq '{{{ jqfilter }}}'</pre>
     Make sure that there is no output in the result.
 
-identifiers: {}
+identifiers:
+    cce@ocp4: CCE-90465-6
 
 severity: medium
 
diff --git a/applications/openshift/high-availability/multiple_nodes_in_every_role/tests/master_infra_three_nodes.pass.sh b/applications/openshift/high-availability/multiple_nodes_in_every_mcp/tests/master_infra_three_nodes.pass.sh
similarity index 100%
rename from applications/openshift/high-availability/multiple_nodes_in_every_role/tests/master_infra_three_nodes.pass.sh
rename to applications/openshift/high-availability/multiple_nodes_in_every_mcp/tests/master_infra_three_nodes.pass.sh
diff --git a/applications/openshift/high-availability/multiple_nodes_in_every_role/tests/ocp4/e2e.yml b/applications/openshift/high-availability/multiple_nodes_in_every_mcp/tests/ocp4/e2e.yml
similarity index 100%
rename from applications/openshift/high-availability/multiple_nodes_in_every_role/tests/ocp4/e2e.yml
rename to applications/openshift/high-availability/multiple_nodes_in_every_mcp/tests/ocp4/e2e.yml
diff --git a/applications/openshift/high-availability/multiple_nodes_in_every_role/tests/single_worker.fail.sh b/applications/openshift/high-availability/multiple_nodes_in_every_mcp/tests/single_worker.fail.sh
similarity index 100%
rename from applications/openshift/high-availability/multiple_nodes_in_every_role/tests/single_worker.fail.sh
rename to applications/openshift/high-availability/multiple_nodes_in_every_mcp/tests/single_worker.fail.sh
diff --git a/applications/openshift/high-availability/three_control_plane_nodes/rule.yml b/applications/openshift/high-availability/three_control_plane_nodes/rule.yml
index 31b70837b8b..e9e1af4faaf 100644
--- a/applications/openshift/high-availability/three_control_plane_nodes/rule.yml
+++ b/applications/openshift/high-availability/three_control_plane_nodes/rule.yml
@@ -1,16 +1,17 @@
 documentation_complete: true
 
-title: 'Ensure machine count of MachineConfigPool master is 3'
+title: 'Ensure there are Three Machines in the Master MachineConfigPool'
 
 description: |-
-    To ensure, that the OpenShift control plane stays accessible on outage of a single master node, a
-    number of 3 control plane nodes is required.
+    To ensure that the OpenShift control plane stays accessible during an outage of a single master
+    node, three control plane nodes are required.
 
 rationale: |-
-    A highly-available OpenShift control plane requires 3 control plane nodes. This allows etcd to have
-    a functional quorum state, when a single control plane node is unavailable.
+    A highly available OpenShift control plane requires three control plane nodes. This allows etcd
+    to have a functional quorum state when a single control plane node is unavailable.
 
-identifiers: {}
+identifiers:
+    cce@ocp4: CCE-87551-8
 
 severity: medium
 
@@ -26,11 +27,11 @@ warnings:
     {{{ openshift_cluster_setting("/apis/machineconfiguration.openshift.io/v1/machineconfigpools/master") | indent(4) }}}
 
 template:
-  name: yamlfile_value
-  vars:
-    ocp_data: 'true'
-    filepath: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/master
-    yamlpath: .status.machineCount
-    entity_check: at least one
-    values:
-    - value: '3'
+    name: yamlfile_value
+    vars:
+        ocp_data: 'true'
+        filepath: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/master
+        yamlpath: .status.machineCount
+        entity_check: at least one
+        values:
+            - value: '3'
diff --git a/applications/openshift/high-availability/worker_nodes_in_two_zones_or_more/rule.yml b/applications/openshift/high-availability/worker_nodes_in_two_zones_or_more/rule.yml
index e3b92dfb638..52b16950aaf 100644
--- a/applications/openshift/high-availability/worker_nodes_in_two_zones_or_more/rule.yml
+++ b/applications/openshift/high-availability/worker_nodes_in_two_zones_or_more/rule.yml
@@ -18,7 +18,8 @@ rationale: |-
     This label is automatically assigned to each node by cloud providers but might need to be managed
     manually in other environments
 
-identifiers: {}
+identifiers:
+    cce@ocp4: CCE-88863-6
 
 severity: medium
 
diff --git a/controls/bsi_app_4_4.yml b/controls/bsi_app_4_4.yml
index 0c8da13fc6e..1c62aabb9df 100644
--- a/controls/bsi_app_4_4.yml
+++ b/controls/bsi_app_4_4.yml
@@ -479,7 +479,7 @@ controls:
     rules:
     # Section 1, 3
-    - multiple_nodes_in_every_role
+    - multiple_nodes_in_every_mcp
     - control_plane_nodes_in_three_zones
     - worker_nodes_in_two_zones_or_more
     - infra_nodes_in_two_zones_or_more
diff --git a/shared/references/cce-redhat-avail.txt b/shared/references/cce-redhat-avail.txt
index 33debb230d4..b159004c0a5 100644
--- a/shared/references/cce-redhat-avail.txt
+++ b/shared/references/cce-redhat-avail.txt
@@ -295,7 +295,6 @@ CCE-87020-4
 CCE-87044-4
 CCE-87048-5
 CCE-87049-3
-CCE-87050-1
 CCE-87051-9
 CCE-87054-3
 CCE-87058-4
@@ -604,7 +603,6 @@ CCE-87547-6
 CCE-87548-4
 CCE-87549-2
 CCE-87550-0
-CCE-87551-8
 CCE-87553-4
 CCE-87554-2
 CCE-87556-7
@@ -1336,7 +1334,6 @@ CCE-88708-3
 CCE-88709-1
 CCE-88710-9
 CCE-88711-7
-CCE-88713-3
 CCE-88715-8
 CCE-88716-6
 CCE-88719-0
@@ -1419,7 +1416,6 @@ CCE-88859-4
 CCE-88860-2
 CCE-88861-0
 CCE-88862-8
-CCE-88863-6
 CCE-88864-4
 CCE-88867-7
 CCE-88869-3
@@ -1707,7 +1703,6 @@ CCE-89343-8
 CCE-89347-9
 CCE-89348-7
 CCE-89349-5
-CCE-89351-1
 CCE-89352-9
 CCE-89353-7
 CCE-89354-5
@@ -2075,7 +2070,6 @@ CCE-89899-9
 CCE-89901-3
 CCE-89905-4
 CCE-89907-0
-CCE-89908-8
 CCE-89909-6
 CCE-89910-4
 CCE-89911-2
@@ -2450,7 +2444,6 @@ CCE-90461-5
 CCE-90462-3
 CCE-90463-1
 CCE-90464-9
-CCE-90465-6
 CCE-90467-2
 CCE-90468-0
 CCE-90470-6
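
The rule descriptions above still reference the tailoring variables var_deployments_without_high_availability and var_statefulsets_without_high_availability. For reference, a minimal TailoredProfile that sets both variables might look like the sketch below. It follows the structure of the examples removed by this patch; the object name bsi-additional-exclusions and the ^uncritical-service$ pattern are illustrative placeholders, and the upstream-ocp4- prefixes assume the profile naming used in those examples.

<pre>
# Sketch only: metadata.name and the regex values are illustrative placeholders.
apiVersion: compliance.openshift.io/v1alpha1
kind: TailoredProfile
metadata:
  name: bsi-additional-exclusions
spec:
  description: Allows selected deployments and statefulsets to not be highly available
  setValues:
  # Deployments whose names match this regex are ignored in all namespaces.
  - name: upstream-ocp4-var_deployments_without_high_availability
    rationale: Ignore our uncritical service
    value: ^uncritical-service$
  # Same exclusion mechanism for statefulsets.
  - name: upstream-ocp4-var_statefulsets_without_high_availability
    rationale: Ignore our uncritical service
    value: ^uncritical-service$
  extends: upstream-ocp4-bsi
  title: Modified BSI allowing non-highly-available workloads
</pre>

As noted in the removed text, the TailoredProfile is then referenced from a ScanSettingBinding so that scans pick up the tailored values.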