From 471468ff10c6e8dd48c8933a1f76264ccda20d8e Mon Sep 17 00:00:00 2001
From: Lennart Jern
Date: Mon, 27 May 2024 10:32:16 +0300
Subject: [PATCH] Test BYO certificates

This adds an integration test for a BYO (bring-your-own) CA certificate.
It is essentially a copy of TestReconcileInitializeControlPlane, but with
a user-provided CA and an additional check verifying that the generated
kubeconfig actually uses this CA.

Signed-off-by: Lennart Jern
---
 .../internal/controllers/controller_test.go  | 242 ++++++++++++++++++
 .../clusterresourceset_controller_test.go    |   6 +
 .../extensionconfig_controller_test.go       |   4 +
 internal/test/envtest/environment.go         |  19 ++
 internal/topology/clustershim/clustershim.go |   3 +
 5 files changed, 274 insertions(+)

diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go
index 5a3c8971f632..85b89e779c22 100644
--- a/controlplane/kubeadm/internal/controllers/controller_test.go
+++ b/controlplane/kubeadm/internal/controllers/controller_test.go
@@ -24,6 +24,7 @@ import (
 	"crypto/x509/pkix"
 	"fmt"
 	"math/big"
+	"path"
 	"sync"
 	"testing"
 	"time"
@@ -36,6 +37,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -1391,6 +1393,246 @@ kubernetesVersion: metav1.16.1
 	}, 30*time.Second).Should(Succeed())
 }
 
+func TestReconcileInitializeControlPlane_withUserCA(t *testing.T) {
+	setup := func(t *testing.T, g *WithT) *corev1.Namespace {
+		t.Helper()
+
+		t.Log("Creating the namespace")
+		ns, err := env.CreateNamespace(ctx, "test-kcp-reconcile-initializecontrolplane")
+		g.Expect(err).ToNot(HaveOccurred())
+
+		return ns
+	}
+
+	teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
+		t.Helper()
+
+		t.Log("Deleting the namespace")
+		g.Expect(env.Delete(ctx, ns)).To(Succeed())
+	}
+
+	g := NewWithT(t)
+	namespace := setup(t, g)
+	defer teardown(t, g, namespace)
+
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: namespace.Name})
+	cluster.Spec = clusterv1.ClusterSpec{
+		ControlPlaneEndpoint: clusterv1.APIEndpoint{
+			Host: "test.local",
+			Port: 9999,
+		},
+	}
+
+	caCertificate := &secret.Certificate{
+		Purpose:  secret.ClusterCA,
+		CertFile: path.Join(secret.DefaultCertificatesDir, "ca.crt"),
+		KeyFile:  path.Join(secret.DefaultCertificatesDir, "ca.key"),
+	}
+	// The certificate is user provided, so no owner references should be added.
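+	// Generate creates a self-signed CA keypair in memory. The Secret created
+	// below simulates a user pre-creating it under the "<cluster-name>-ca"
+	// name that Cluster API looks up before generating its own CA.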
+	g.Expect(caCertificate.Generate()).To(Succeed())
+	certSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace.Name,
+			Name:      cluster.Name + "-ca",
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: cluster.Name,
+			},
+		},
+		Data: map[string][]byte{
+			secret.TLSKeyDataName: caCertificate.KeyPair.Key,
+			secret.TLSCrtDataName: caCertificate.KeyPair.Cert,
+		},
+		Type: clusterv1.ClusterSecretType,
+	}
+
+	g.Expect(env.Create(ctx, cluster)).To(Succeed())
+	patchHelper, err := patch.NewHelper(cluster, env)
+	g.Expect(err).ToNot(HaveOccurred())
+	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
+	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
+
+	g.Expect(env.Create(ctx, certSecret)).To(Succeed())
+
+	genericInfrastructureMachineTemplate := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"kind":       "GenericInfrastructureMachineTemplate",
+			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+			"metadata": map[string]interface{}{
+				"name":      "infra-foo",
+				"namespace": cluster.Namespace,
+			},
+			"spec": map[string]interface{}{
+				"template": map[string]interface{}{
+					"spec": map[string]interface{}{
+						"hello": "world",
+					},
+				},
+			},
+		},
+	}
+	g.Expect(env.Create(ctx, genericInfrastructureMachineTemplate)).To(Succeed())
+
+	kcp := &controlplanev1.KubeadmControlPlane{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: cluster.Namespace,
+			Name:      "foo",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					Kind:       "Cluster",
+					APIVersion: clusterv1.GroupVersion.String(),
+					Name:       cluster.Name,
+					UID:        cluster.UID,
+				},
+			},
+		},
+		Spec: controlplanev1.KubeadmControlPlaneSpec{
+			Replicas: nil,
+			Version:  "v1.16.6",
+			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+				InfrastructureRef: corev1.ObjectReference{
+					Kind:       genericInfrastructureMachineTemplate.GetKind(),
+					APIVersion: genericInfrastructureMachineTemplate.GetAPIVersion(),
+					Name:       genericInfrastructureMachineTemplate.GetName(),
+					Namespace:  cluster.Namespace,
+				},
+			},
+			KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{},
+		},
+	}
+	g.Expect(env.Create(ctx, kcp)).To(Succeed())
+
+	corednsCM := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "coredns",
+			Namespace: namespace.Name,
+		},
+		Data: map[string]string{
+			"Corefile": "original-core-file",
+		},
+	}
+	g.Expect(env.Create(ctx, corednsCM)).To(Succeed())
+
+	kubeadmCM := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "kubeadm-config",
+			Namespace: namespace.Name,
+		},
+		Data: map[string]string{
+			"ClusterConfiguration": `apiServer:
+dns:
+  type: CoreDNS
+imageRepository: registry.k8s.io
+kind: ClusterConfiguration
+kubernetesVersion: metav1.16.1`,
+		},
+	}
+	g.Expect(env.Create(ctx, kubeadmCM)).To(Succeed())
+
+	corednsDepl := &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "coredns",
+			Namespace: namespace.Name,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"coredns": "",
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "coredns",
+					Labels: map[string]string{
+						"coredns": "",
+					},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{{
+						Name:  "coredns",
+						Image: "registry.k8s.io/coredns:1.6.2",
+					}},
+				},
+			},
+		},
+	}
+	g.Expect(env.Create(ctx, corednsDepl)).To(Succeed())
+
+	r := &KubeadmControlPlaneReconciler{
+		Client:              env,
+		SecretCachingClient: secretCachingClient,
+		recorder:            record.NewFakeRecorder(32),
+		managementCluster: &fakeManagementCluster{
+			Management: &internal.Management{Client: env},
+			Workload: fakeWorkloadCluster{
+				Workload: &internal.Workload{
+					Client: env,
+				},
+				Status: internal.ClusterStatus{},
+			},
+		},
+		managementClusterUncached: &fakeManagementCluster{
+			Management: &internal.Management{Client: env},
+			Workload: fakeWorkloadCluster{
+				Workload: &internal.Workload{
+					Client: env,
+				},
+				Status: internal.ClusterStatus{},
+			},
+		},
+		ssaCache: ssa.NewCache(),
+	}
+
+	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+	g.Expect(err).ToNot(HaveOccurred())
+	// This first requeue is to add the finalizer.
+	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
+	g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
+	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
+
+	g.Eventually(func(g Gomega) {
+		_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed())
+		// Expect the referenced infrastructure template to have a Cluster owner reference.
+		g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(genericInfrastructureMachineTemplate), genericInfrastructureMachineTemplate)).To(Succeed())
+		g.Expect(genericInfrastructureMachineTemplate.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{
+			APIVersion: clusterv1.GroupVersion.String(),
+			Kind:       "Cluster",
+			Name:       cluster.Name,
+			UID:        cluster.UID,
+		}))
+
+		// Always expect that the finalizer is set on the passed-in resource.
+		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
+
+		g.Expect(kcp.Status.Selector).NotTo(BeEmpty())
+		g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1))
+		g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue())
+
+		// Verify that the kubeconfig is using the custom CA.
+		kBytes, err := kubeconfig.FromSecret(ctx, env, util.ObjectKey(cluster))
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(kBytes).NotTo(BeEmpty())
+		k, err := clientcmd.Load(kBytes)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(k).NotTo(BeNil())
+		g.Expect(k.Clusters[cluster.Name]).NotTo(BeNil())
+		g.Expect(k.Clusters[cluster.Name].CertificateAuthorityData).To(Equal(caCertificate.KeyPair.Cert))
+
+		machineList := &clusterv1.MachineList{}
+		g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
+		g.Expect(machineList.Items).To(HaveLen(1))
+
+		machine := machineList.Items[0]
+		g.Expect(machine.Name).To(HavePrefix(kcp.Name))
+		// Newly cloned infra objects should have the infraref annotation.
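+		// That is, the annotations recording the name and GroupKind of the
+		// template the object was cloned from.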
+		infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Spec.InfrastructureRef.Namespace)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName()))
+		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String()))
+	}, 30*time.Second).Should(Succeed())
+}
+
 func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
 	setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) {
 		t.Helper()
diff --git a/exp/addons/internal/controllers/clusterresourceset_controller_test.go b/exp/addons/internal/controllers/clusterresourceset_controller_test.go
index 76a7400dd977..cd7afe4c7d04 100644
--- a/exp/addons/internal/controllers/clusterresourceset_controller_test.go
+++ b/exp/addons/internal/controllers/clusterresourceset_controller_test.go
@@ -99,6 +99,9 @@ metadata:
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      secretName,
 			Namespace: ns.Name,
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: clusterName,
+			},
 		},
 		Type: "addons.cluster.x-k8s.io/resource-set",
 		StringData: map[string]string{
@@ -435,6 +438,9 @@ metadata:
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      newSecretName,
 			Namespace: ns.Name,
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: testCluster.Name,
+			},
 		},
 		Type: addonsv1.ClusterResourceSetSecretType,
 		Data: map[string][]byte{},
diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go
index 2026e1cba300..998dca26d8d7 100644
--- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go
+++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go
@@ -37,6 +37,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
@@ -408,6 +409,9 @@ func fakeCASecret(namespace, name string, caData []byte) *corev1.Secret {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: namespace,
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: "",
+			},
 		},
 		Data: map[string][]byte{},
diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go
index 6e33d91f76ff..f9a0f0349578 100644
--- a/internal/test/envtest/environment.go
+++ b/internal/test/envtest/environment.go
@@ -36,6 +36,8 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -43,6 +45,7 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -252,11 +255,27 @@ func newEnvironment(uncachedObjs ...client.Object) *Environment {
 		}
 	}
 
+	req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil)
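+	// The requirement matches any object that carries the cluster name label,
+	// regardless of the label's value.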
+	clusterSecretCacheSelector := labels.NewSelector().Add(*req)
+	syncPeriod := 10 * time.Minute
+
 	options := manager.Options{
 		Scheme: scheme.Scheme,
 		Metrics: metricsserver.Options{
 			BindAddress: "0",
 		},
+		Cache: cache.Options{
+			// Namespaces: watchNamespaces,
+			SyncPeriod: &syncPeriod,
+			ByObject: map[client.Object]cache.ByObject{
+				// Note: Only Secrets with the cluster name label are cached.
+				// The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor).
+				// The cached secrets will only be used by the secretCachingClient we create below.
+				&corev1.Secret{}: {
+					Label: clusterSecretCacheSelector,
+				},
+			},
+		},
 		Client: client.Options{
 			Cache: &client.CacheOptions{
 				DisableFor: uncachedObjs,
diff --git a/internal/topology/clustershim/clustershim.go b/internal/topology/clustershim/clustershim.go
index 630192a979f7..0971a5b66949 100644
--- a/internal/topology/clustershim/clustershim.go
+++ b/internal/topology/clustershim/clustershim.go
@@ -40,6 +40,9 @@ func New(c *clusterv1.Cluster) *corev1.Secret {
 			OwnerReferences: []metav1.OwnerReference{
 				*ownerrefs.OwnerReferenceTo(c, clusterv1.GroupVersion.WithKind("Cluster")),
 			},
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: c.Name,
+			},
 		},
 		Type: clusterv1.ClusterSecretType,
 	}
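
For reference, this is roughly the flow the test exercises from a user's point of view: pre-create the CA secret before the Cluster, and Cluster API will use it instead of generating one. The sketch below is illustrative and not part of the patch; the helper name, client, and context variables are assumptions, but the secret name, data keys, type, and label follow the conventions used in the test above. The private key is included so that the controllers can sign new certificates (such as the kubeconfig client certificate) with the user-provided CA, and deliberately omitting the owner reference is what marks the secret as user managed.

package byoca // hypothetical package, for illustration only

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/secret"
)

// createUserCASecret (hypothetical helper) creates the "<cluster-name>-ca"
// secret that Cluster API looks up before generating its own CA. caCert and
// caKey are PEM-encoded. No owner reference is set on purpose: that marks the
// certificate as user provided, so it is not deleted together with the Cluster.
func createUserCASecret(ctx context.Context, c client.Client, clusterName, namespace string, caCert, caKey []byte) error {
	caSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			// Same naming convention as the test above: "<cluster-name>-ca".
			Name:      clusterName + "-ca",
			Namespace: namespace,
			Labels: map[string]string{
				// The cluster name label keeps the secret visible to
				// label-scoped secret caches like the one configured in
				// environment.go above.
				clusterv1.ClusterNameLabel: clusterName,
			},
		},
		Data: map[string][]byte{
			secret.TLSCrtDataName: caCert,
			secret.TLSKeyDataName: caKey,
		},
		Type: clusterv1.ClusterSecretType,
	}
	return c.Create(ctx, caSecret)
}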