diff --git a/Makefile b/Makefile
index f6dcba47..eb37e771 100644
--- a/Makefile
+++ b/Makefile
@@ -141,7 +141,7 @@ SHARD_NAME ?= shard-$(shell tr -dc bcdfghjklmnpqrstvwxz2456789
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/design.md b/docs/design.md
index ea39c024..2da78021 100644
--- a/docs/design.md
+++ b/docs/design.md
@@ -23,17 +23,17 @@ Notably, no leader election is performed, and there is no designated single acti
Instead, each controller instance maintains an individual shard `Lease` labeled with the ring's name, allowing them to announce themselves to the sharder for membership and failure detection.
The sharder watches these leases to build a hash ring with the available instances.
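+
+To illustrate how such a ring lookup works, here is a minimal consistent-hashing sketch in Go. It is purely illustrative and not the sharder's actual implementation; the `ring` type and `pickShard` function are made up for this example.
+
+```go
+package ring
+
+import (
+	"hash/fnv"
+	"sort"
+)
+
+// ring is a toy consistent hash ring: each shard owns the arc of key space up to its token.
+type ring struct {
+	tokens []uint64          // sorted token positions on the ring
+	shards map[uint64]string // token position -> shard name
+}
+
+func newRing(shardNames []string) *ring {
+	r := &ring{shards: map[uint64]string{}}
+	for _, name := range shardNames {
+		t := hashString(name)
+		r.tokens = append(r.tokens, t)
+		r.shards[t] = name
+	}
+	sort.Slice(r.tokens, func(i, j int) bool { return r.tokens[i] < r.tokens[j] })
+	return r
+}
+
+// pickShard returns the first shard at or after the key's position, wrapping around the ring.
+func (r *ring) pickShard(key string) string {
+	if len(r.tokens) == 0 {
+		return ""
+	}
+	h := hashString(key)
+	i := sort.Search(len(r.tokens), func(i int) bool { return r.tokens[i] >= h })
+	if i == len(r.tokens) {
+		i = 0
+	}
+	return r.shards[r.tokens[i]]
+}
+
+func hashString(s string) uint64 {
+	f := fnv.New64a()
+	f.Write([]byte(s))
+	return f.Sum64()
+}
+```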
-### The `ClusterRing` Resource and Sharder Webhook
+### The `ControllerRing` Resource and Sharder Webhook
-Rings of controllers are configured through the use of the `ClusterRing` custom resource.
-The sharder creates a `MutatingWebhookConfiguration` for each `ClusterRing` to perform assignments for objects associated with the ring.
+Rings of controllers are configured through the use of the `ControllerRing` custom resource.
+The sharder creates a `MutatingWebhookConfiguration` for each `ControllerRing` to perform assignments for objects associated with the ring.
The sharder webhook is called on `CREATE` and `UPDATE` requests for configured resources, but only for objects that don't have the ring-specific shard label, i.e., for unassigned objects.
The sharder uses the consistent hashing ring to determine the desired shard and adds the shard label during admission accordingly.
Shards then use a label selector for the shard label with their own instance name to restrict the cache and controller to the subset of objects assigned to them.
-For the controller's "main" object (configured in `ClusterRing.spec.resources[]`), the object's `apiVersion`, `kind`, `namespace`, and `name` are concatenated to form its hash key.
-For objects controlled by other objects (configured in `ClusterRing.spec.resources[].controlledResources[]`), the sharder utilizes information about the controlling object (`ownerReference` with `controller=true`) to calculate the object's hash key.
+For the controller's "main" object (configured in `ControllerRing.spec.resources[]`), the object's `apiVersion`, `kind`, `namespace`, and `name` are concatenated to form its hash key.
+For objects controlled by other objects (configured in `ControllerRing.spec.resources[].controlledResources[]`), the sharder utilizes information about the controlling object (`ownerReference` with `controller=true`) to calculate the object's hash key.
This ensures that owned objects are consistently assigned to the same shard as their owner.
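+
+As an illustration, the hash key for the controlled-object case could be derived roughly as follows. This is only a sketch: the delimiter and the `hashKeyFor` helper are assumptions, not the sharder's exact implementation.
+
+```go
+package sharding
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// hashKeyFor builds the ring key for an object. For controlled objects, the identity of the
+// controlling ownerReference is used instead of the object's own, so that owner and owned
+// objects hash to the same position on the ring and end up on the same shard.
+func hashKeyFor(obj metav1.Object, apiVersion, kind string) string {
+	namespace, name := obj.GetNamespace(), obj.GetName()
+	if ref := metav1.GetControllerOf(obj); ref != nil {
+		apiVersion, kind, name = ref.APIVersion, ref.Kind, ref.Name
+	}
+	return apiVersion + "/" + kind + "/" + namespace + "/" + name
+}
+```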
### Object Movements and Rebalancing
@@ -88,7 +88,7 @@ The comparisons show that the sharder's resource consumption is almost constant
### Minimize Impact on the Critical Path
While the use of mutating webhooks might allow dropping watches for the sharded objects, they can have a significant impact on API requests, e.g., regarding request latency.
-To minimize the impact of the sharder's webhook on the overall request latency, the webhook is configured to only react on precisely the set of objects configured in the `ClusterRing` and only for `CREATE` and `UPDATE` requests of unassigned objects.
+To minimize the impact of the sharder's webhook on the overall request latency, the webhook is configured to react only to the precise set of objects configured in the `ControllerRing` and only to `CREATE` and `UPDATE` requests of unassigned objects.
With this, the webhook is only on the critical path during initial object creation and whenever the set of available shards requires reassignments.
Furthermore, webhooks can cause API requests to fail entirely.
diff --git a/docs/development.md b/docs/development.md
index 6de2e41f..d9b05811 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -83,7 +83,7 @@ Assuming a fresh kind cluster:
make run
```
-Now, create the `example` `ClusterRing` and run a local shard:
+Now, create the `example` `ControllerRing` and run a local shard:
```bash
make run-shard
@@ -92,13 +92,13 @@ make run-shard
You should see that the shard successfully announced itself to the sharder:
```bash
-$ kubectl get lease -L alpha.sharding.timebertt.dev/clusterring,alpha.sharding.timebertt.dev/state
-NAME HOLDER AGE CLUSTERRING STATE
-shard-h9np6f8c shard-h9np6f8c 8s example ready
+$ kubectl get lease -L alpha.sharding.timebertt.dev/controllerring,alpha.sharding.timebertt.dev/state
+NAME HOLDER AGE CONTROLLERRING STATE
+shard-fkpxhjk8 shard-fkpxhjk8 18s example ready
-$ kubectl get clusterring
+$ kubectl get controllerring
NAME READY AVAILABLE SHARDS AGE
-example True 1 1 15s
+example True 1 1 34s
```
Running the shard locally gives you the option to test non-graceful termination, i.e., a scenario where the shard fails to renew its lease in time.
@@ -113,19 +113,20 @@ make run-shard
## Testing the Sharding Setup
-Independent of the used setup (skaffold-based or running on the host machine), you should be able to create sharded `ConfigMaps` in the `default` namespace as configured in the `example` `ClusterRing`.
+Independent of the setup used (skaffold-based or running on the host machine), you should be able to create sharded `ConfigMaps` in the `default` namespace as configured in the `example` `ControllerRing`.
The `Secrets` created by the example shard controller should be assigned to the same shard as the owning `ConfigMap`:
```bash
$ kubectl create cm foo
configmap/foo created
-$ kubectl get cm,secret -L shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example
-NAME DATA AGE CLUSTERRING-50D858E0-EXAMPLE
-configmap/foo 0 3s shard-5fc87c9fb7-kfb2z
+$ kubectl get cm,secret -L shard.alpha.sharding.timebertt.dev/50d858e0-example
+NAME DATA AGE 50D858E0-EXAMPLE
+configmap/foo 0 1s shard-656d588475-5746d
-NAME TYPE DATA AGE CLUSTERRING-50D858E0-EXAMPLE
-secret/dummy-foo Opaque 0 3s shard-5fc87c9fb7-kfb2z
+NAME TYPE DATA AGE 50D858E0-EXAMPLE
+secret/dummy-foo Opaque 0 1s shard-656d588475-5746d
+secret/dummy-kube-root-ca.crt Opaque 0 2m14s
```
## Monitoring
diff --git a/docs/evaluation.md b/docs/evaluation.md
index 9e76ba4c..2834688e 100644
--- a/docs/evaluation.md
+++ b/docs/evaluation.md
@@ -25,22 +25,22 @@ To perform a quick test of the webhosting-operator, create some example `Website
$ kubectl apply -k webhosting-operator/config/samples
...
-$ kubectl -n project-foo get website,deploy,svc,ing -L shard.alpha.sharding.timebertt.dev/clusterring-ef3d63cd-webhosting-operator
-NAME THEME PHASE AGE CLUSTERRING-EF3D63CD-WEBHOSTING-OPERATOR
-website.webhosting.timebertt.dev/homepage exciting Ready 58s webhosting-operator-5d8d548cb9-qmwc7
-website.webhosting.timebertt.dev/official lame Ready 58s webhosting-operator-5d8d548cb9-qq549
-
-NAME READY UP-TO-DATE AVAILABLE AGE CLUSTERRING-EF3D63CD-WEBHOSTING-OPERATOR
-deployment.apps/homepage-c1160b 1/1 1 1 57s webhosting-operator-5d8d548cb9-qmwc7
-deployment.apps/official-97b754 1/1 1 1 57s webhosting-operator-5d8d548cb9-qq549
-
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE CLUSTERRING-EF3D63CD-WEBHOSTING-OPERATOR
-service/homepage-c1160b ClusterIP 10.96.83.180 8080/TCP 58s webhosting-operator-5d8d548cb9-qmwc7
-service/official-97b754 ClusterIP 10.96.193.214 8080/TCP 58s webhosting-operator-5d8d548cb9-qq549
-
-NAME CLASS HOSTS ADDRESS PORTS AGE CLUSTERRING-EF3D63CD-WEBHOSTING-OPERATOR
-ingress.networking.k8s.io/homepage-c1160b nginx * 80 58s webhosting-operator-5d8d548cb9-qmwc7
-ingress.networking.k8s.io/official-97b754 nginx * 80 58s webhosting-operator-5d8d548cb9-qq549
+$ kubectl -n project-foo get website,deploy,svc,ing -L shard.alpha.sharding.timebertt.dev/ef3d63cd-webhosting-operator
+NAME THEME PHASE SINCE AGE EF3D63CD-WEBHOSTING-OPERATOR
+website.webhosting.timebertt.dev/homepage exciting Ready 6s 16s webhosting-operator-98ff76b66-tdrtc
+website.webhosting.timebertt.dev/official lame Ready 5s 16s webhosting-operator-98ff76b66-tdrtc
+
+NAME READY UP-TO-DATE AVAILABLE AGE EF3D63CD-WEBHOSTING-OPERATOR
+deployment.apps/homepage-98bad4 1/1 1 1 15s webhosting-operator-98ff76b66-tdrtc
+deployment.apps/official-10ff22 1/1 1 1 15s webhosting-operator-98ff76b66-tdrtc
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE EF3D63CD-WEBHOSTING-OPERATOR
+service/homepage-98bad4 ClusterIP 100.82.128.107 8080/TCP 16s webhosting-operator-98ff76b66-tdrtc
+service/official-10ff22 ClusterIP 100.82.194.21 8080/TCP 16s webhosting-operator-98ff76b66-tdrtc
+
+NAME CLASS HOSTS ADDRESS PORTS AGE EF3D63CD-WEBHOSTING-OPERATOR
+ingress.networking.k8s.io/homepage-98bad4 nginx webhosting.timebertt.dev 80, 443 16s webhosting-operator-98ff76b66-tdrtc
+ingress.networking.k8s.io/official-10ff22 nginx webhosting.timebertt.dev 80, 443 15s webhosting-operator-98ff76b66-tdrtc
```
You can now visit the created websites at http://localhost:8088/project-foo/homepage and http://localhost:8088/project-foo/official.
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 3ad956b3..e206b7fa 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -23,39 +23,39 @@ NAME READY STATUS RESTARTS AGE
sharder-57889fcd8c-p2wxf 1/1 Running 0 44s
sharder-57889fcd8c-z6bm5 1/1 Running 0 44s
$ kubectl get po
-NAME READY STATUS RESTARTS AGE
-shard-7997b8d9b7-9c2db 1/1 Running 0 45s
-shard-7997b8d9b7-9nvr2 1/1 Running 0 45s
-shard-7997b8d9b7-f9gtd 1/1 Running 0 45s
+NAME READY STATUS RESTARTS AGE
+shard-9c6678c9f-8jc5b 1/1 Running 0 45s
+shard-9c6678c9f-v4bw2 1/1 Running 0 45s
+shard-9c6678c9f-xntqc 1/1 Running 0 45s
```
-## The `ClusterRing` and `Lease` Objects
+## The `ControllerRing` and `Lease` Objects
-We can see that the `ClusterRing` object is ready and reports 3 available shards out of 3 total shards:
+We can see that the `ControllerRing` object is ready and reports 3 available shards out of 3 total shards:
```bash
-$ kubectl get clusterring
+$ kubectl get controllerring example
NAME READY AVAILABLE SHARDS AGE
example True 3 3 64s
```
-All shards announce themselves to the sharder by maintaining an individual `Lease` object with the `alpha.sharding.timebertt.dev/clusterring` label.
+All shards announce themselves to the sharder by maintaining an individual `Lease` object with the `alpha.sharding.timebertt.dev/controllerring` label.
We can observe that the sharder recognizes all shards as available by looking at the `alpha.sharding.timebertt.dev/state` label:
```bash
-$ kubectl get lease -L alpha.sharding.timebertt.dev/clusterring,alpha.sharding.timebertt.dev/state
-NAME HOLDER AGE CLUSTERRING STATE
-shard-7997b8d9b7-9c2db shard-7997b8d9b7-9c2db 75s example ready
-shard-7997b8d9b7-9nvr2 shard-7997b8d9b7-9nvr2 75s example ready
-shard-7997b8d9b7-f9gtd shard-7997b8d9b7-f9gtd 76s example ready
+$ kubectl get lease -L alpha.sharding.timebertt.dev/controllerring,alpha.sharding.timebertt.dev/state
+NAME HOLDER AGE CONTROLLERRING STATE
+shard-9c6678c9f-8jc5b shard-9c6678c9f-8jc5b 72s example ready
+shard-9c6678c9f-v4bw2 shard-9c6678c9f-v4bw2 72s example ready
+shard-9c6678c9f-xntqc shard-9c6678c9f-xntqc 72s example ready
```
-The `ClusterRing` object specifies which API resources should be sharded.
+The `ControllerRing` object specifies which API resources should be sharded.
Optionally, it allows selecting the namespaces in which API resources are sharded:
```yaml
apiVersion: sharding.timebertt.dev/v1alpha1
-kind: ClusterRing
+kind: ControllerRing
metadata:
name: example
spec:
@@ -75,12 +75,12 @@ The created `Secrets` are controlled by the respective `ConfigMap`, i.e., there
## The Sharder Webhook
-The sharder created a `MutatingWebhookConfiguration` for the resources listed in our `ClusterRing` specification:
+The sharder created a `MutatingWebhookConfiguration` for the resources listed in our `ControllerRing` specification:
```bash
-$ kubectl get mutatingwebhookconfiguration -l app.kubernetes.io/name=controller-sharding
-NAME WEBHOOKS AGE
-sharding-clusterring-50d858e0-example 1 2m50s
+$ kubectl get mutatingwebhookconfiguration -l alpha.sharding.timebertt.dev/controllerring=example
+NAME WEBHOOKS AGE
+sharding-50d858e0-example 1 2m50s
```
Let's examine the webhook configuration in more detail.
@@ -92,13 +92,13 @@ I.e., it gets called for unassigned objects and adds the shard assignment label
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
- name: sharding-clusterring-50d858e0-example
+ name: sharding-50d858e0-example
webhooks:
- clientConfig:
service:
name: sharder
namespace: sharding-system
- path: /webhooks/sharder/clusterring/example
+ path: /webhooks/sharder/controllerring/example
port: 443
name: sharder.sharding.timebertt.dev
namespaceSelector:
@@ -106,7 +106,7 @@ webhooks:
kubernetes.io/metadata.name: default
objectSelector:
matchExpressions:
- - key: shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example
+ - key: shard.alpha.sharding.timebertt.dev/50d858e0-example
operator: DoesNotExist
rules:
- apiGroups:
@@ -144,7 +144,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
labels:
- shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example: shard-7997b8d9b7-9c2db
+ shard.alpha.sharding.timebertt.dev/50d858e0-example: shard-9c6678c9f-8jc5b
name: foo
namespace: default
```
@@ -162,7 +162,7 @@ apiVersion: v1
kind: Secret
metadata:
labels:
- shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example: shard-7997b8d9b7-9c2db
+ shard.alpha.sharding.timebertt.dev/50d858e0-example: shard-9c6678c9f-8jc5b
name: dummy-foo
namespace: default
ownerReferences:
@@ -176,32 +176,30 @@ Let's create a few more `ConfigMaps` and observe the distribution of objects acr
```bash
$ for i in $(seq 1 9); do kubectl create cm foo$i ; done
-$ kubectl get cm,secret -L shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example
-NAME DATA AGE CLUSTERRING-50D858E0-EXAMPLE
-configmap/foo 0 52s shard-7997b8d9b7-9c2db
-configmap/foo1 0 7s shard-7997b8d9b7-9nvr2
-configmap/foo10 0 6s shard-7997b8d9b7-9nvr2
-configmap/foo2 0 6s shard-7997b8d9b7-9nvr2
-configmap/foo3 0 6s shard-7997b8d9b7-f9gtd
-configmap/foo4 0 6s shard-7997b8d9b7-9c2db
-configmap/foo5 0 6s shard-7997b8d9b7-f9gtd
-configmap/foo6 0 6s shard-7997b8d9b7-f9gtd
-configmap/foo7 0 6s shard-7997b8d9b7-9c2db
-configmap/foo8 0 6s shard-7997b8d9b7-9c2db
-configmap/foo9 0 6s shard-7997b8d9b7-9nvr2
-
-NAME TYPE DATA AGE CLUSTERRING-50D858E0-EXAMPLE
-secret/dummy-foo Opaque 0 52s shard-7997b8d9b7-9c2db
-secret/dummy-foo1 Opaque 0 7s shard-7997b8d9b7-9nvr2
-secret/dummy-foo10 Opaque 0 6s shard-7997b8d9b7-9nvr2
-secret/dummy-foo2 Opaque 0 6s shard-7997b8d9b7-9nvr2
-secret/dummy-foo3 Opaque 0 6s shard-7997b8d9b7-f9gtd
-secret/dummy-foo4 Opaque 0 6s shard-7997b8d9b7-9c2db
-secret/dummy-foo5 Opaque 0 6s shard-7997b8d9b7-f9gtd
-secret/dummy-foo6 Opaque 0 6s shard-7997b8d9b7-f9gtd
-secret/dummy-foo7 Opaque 0 6s shard-7997b8d9b7-9c2db
-secret/dummy-foo8 Opaque 0 6s shard-7997b8d9b7-9c2db
-secret/dummy-foo9 Opaque 0 6s shard-7997b8d9b7-9nvr2
+$ kubectl get cm,secret -L shard.alpha.sharding.timebertt.dev/50d858e0-example
+NAME DATA AGE 50D858E0-EXAMPLE
+configmap/foo 0 52s shard-9c6678c9f-8jc5b
+configmap/foo1 0 7s shard-9c6678c9f-v4bw2
+configmap/foo2 0 6s shard-9c6678c9f-8jc5b
+configmap/foo3 0 6s shard-9c6678c9f-v4bw2
+configmap/foo4 0 6s shard-9c6678c9f-v4bw2
+configmap/foo5 0 6s shard-9c6678c9f-xntqc
+configmap/foo6 0 6s shard-9c6678c9f-xntqc
+configmap/foo7 0 6s shard-9c6678c9f-xntqc
+configmap/foo8 0 6s shard-9c6678c9f-xntqc
+configmap/foo9 0 6s shard-9c6678c9f-8jc5b
+
+NAME TYPE DATA AGE 50D858E0-EXAMPLE
+secret/dummy-foo Opaque 0 52s shard-9c6678c9f-8jc5b
+secret/dummy-foo1 Opaque 0 7s shard-9c6678c9f-v4bw2
+secret/dummy-foo2 Opaque 0 6s shard-9c6678c9f-8jc5b
+secret/dummy-foo3 Opaque 0 6s shard-9c6678c9f-v4bw2
+secret/dummy-foo4 Opaque 0 6s shard-9c6678c9f-v4bw2
+secret/dummy-foo5 Opaque 0 6s shard-9c6678c9f-xntqc
+secret/dummy-foo6 Opaque 0 6s shard-9c6678c9f-xntqc
+secret/dummy-foo7 Opaque 0 6s shard-9c6678c9f-xntqc
+secret/dummy-foo8 Opaque 0 6s shard-9c6678c9f-xntqc
+secret/dummy-foo9 Opaque 0 6s shard-9c6678c9f-8jc5b
```
## Removing Shards From the Ring
@@ -222,11 +220,11 @@ With this, the shard is no longer considered for object assignments.
The orphaned `Lease` is cleaned up after 1 minute.
```bash
-$ kubectl get lease -L alpha.sharding.timebertt.dev/clusterring,alpha.sharding.timebertt.dev/state
-NAME HOLDER AGE CLUSTERRING STATE
-shard-7997b8d9b7-9c2db 3m25s example dead
-shard-7997b8d9b7-9nvr2 shard-7997b8d9b7-9nvr2 3m25s example ready
-shard-7997b8d9b7-f9gtd shard-7997b8d9b7-f9gtd 3m26s example ready
+$ kubectl get lease -L alpha.sharding.timebertt.dev/controllerring,alpha.sharding.timebertt.dev/state
+NAME HOLDER AGE CONTROLLERRING STATE
+shard-9c6678c9f-f49zn 25s example dead
+shard-9c6678c9f-kvgft shard-9c6678c9f-kvgft 25s example ready
+shard-9c6678c9f-ppzf7 shard-9c6678c9f-ppzf7 25s example ready
```
We can observe that the sharder immediately moved objects that were assigned to the removed shard to the remaining available shards.
@@ -235,11 +233,9 @@ As the original shard is not available anymore, moving the objects doesn't need
```bash
$ kubectl get cm --show-labels -w --output-watch-events --watch-only
-EVENT NAME DATA AGE LABELS
-MODIFIED foo 0 85s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-f9gtd
-MODIFIED foo4 0 39s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-9nvr2
-MODIFIED foo7 0 39s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-9nvr2
-MODIFIED foo8 0 39s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-9nvr2
+EVENT NAME DATA AGE LABELS
+MODIFIED foo4 0 7m52s shard.alpha.sharding.timebertt.dev/50d858e0-example=shard-9c6678c9f-ppzf7
+MODIFIED foo6 0 7m52s shard.alpha.sharding.timebertt.dev/50d858e0-example=shard-9c6678c9f-ppzf7
```
## Adding Shards to the Ring
@@ -255,11 +251,11 @@ We can observe that the new `Lease` object is in state `ready`.
With this, the new shard is immediately considered for assignment of new objects.
```bash
-$ kubectl get lease -L alpha.sharding.timebertt.dev/clusterring,alpha.sharding.timebertt.dev/state
-NAME HOLDER AGE CLUSTERRING STATE
-shard-7997b8d9b7-9nvr2 shard-7997b8d9b7-9nvr2 4m52s example ready
-shard-7997b8d9b7-f9gtd shard-7997b8d9b7-f9gtd 4m53s example ready
-shard-7997b8d9b7-mkh72 shard-7997b8d9b7-mkh72 8s example ready
+$ kubectl get lease -L alpha.sharding.timebertt.dev/controllerring,alpha.sharding.timebertt.dev/state
+NAME HOLDER AGE CONTROLLERRING STATE
+shard-9c6678c9f-jkgj6 shard-9c6678c9f-jkgj6 3s example ready
+shard-9c6678c9f-kvgft shard-9c6678c9f-kvgft 96s example ready
+shard-9c6678c9f-ppzf7 shard-9c6678c9f-ppzf7 96s example ready
```
In this case, a rebalancing needs to happen and the sharder needs to move objects away from available shards to the new shard.
@@ -273,21 +269,11 @@ This triggers the sharder webhook which immediately assigns the object to the de
```bash
$ kubectl get cm --show-labels -w --output-watch-events --watch-only
-EVENT NAME DATA AGE LABELS
-MODIFIED foo 0 2m49s drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=true,shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-f9gtd
-MODIFIED foo3 0 2m3s drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=true,shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-f9gtd
-MODIFIED foo4 0 2m3s drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=true,shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-9nvr2
-MODIFIED foo 0 2m49s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-mkh72
-MODIFIED foo3 0 2m3s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-mkh72
-MODIFIED foo6 0 2m3s drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=true,shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-f9gtd
-MODIFIED foo4 0 2m3s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-mkh72
-MODIFIED foo7 0 2m3s drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=true,shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-9nvr2
-MODIFIED foo6 0 2m3s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-mkh72
-MODIFIED foo8 0 2m3s drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=true,shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-9nvr2
-MODIFIED foo7 0 2m3s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-mkh72
-MODIFIED foo9 0 2m3s drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=true,shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-9nvr2
-MODIFIED foo8 0 2m3s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-mkh72
-MODIFIED foo9 0 2m3s shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example=shard-7997b8d9b7-mkh72
+EVENT NAME DATA AGE LABELS
+MODIFIED foo4 0 9m2s drain.alpha.sharding.timebertt.dev/50d858e0-example=true,shard.alpha.sharding.timebertt.dev/50d858e0-example=shard-9c6678c9f-ppzf7
+MODIFIED foo7 0 9m2s drain.alpha.sharding.timebertt.dev/50d858e0-example=true,shard.alpha.sharding.timebertt.dev/50d858e0-example=shard-9c6678c9f-ppzf7
+MODIFIED foo4 0 9m2s shard.alpha.sharding.timebertt.dev/50d858e0-example=shard-9c6678c9f-jkgj6
+MODIFIED foo7 0 9m2s shard.alpha.sharding.timebertt.dev/50d858e0-example=shard-9c6678c9f-jkgj6
```
## Clean Up
diff --git a/docs/implement-sharding.md b/docs/implement-sharding.md
index c4c29924..58e6d63f 100644
--- a/docs/implement-sharding.md
+++ b/docs/implement-sharding.md
@@ -3,21 +3,21 @@
This guide walks you through implementing sharding for your own controller.
A prerequisite for using a sharded controller setup is installing the sharding components in the cluster; see [Install the Sharding Components](installation.md).
-## Configuring the `ClusterRing`
+## Configuring the `ControllerRing`
-After installing the sharding components, you can go ahead and configure a `ClusterRing` object for your controller.
-For all controllers that you want to shard, configure the controller's main resource and the controlled resources in `ClusterRing.spec.resources`.
+After installing the sharding components, you can go ahead and configure a `ControllerRing` object for your controller.
+For all controllers that you want to shard, configure the controller's main resource and the controlled resources in `ControllerRing.spec.resources`.
As an example, let's consider a subset of kube-controller-manager's controllers: `Deployment` and `ReplicaSet`.
- The `Deployment` controller reconciles the `deployments` resource and controls `replicasets`.
- The `ReplicaSet` controller reconciles the `replicasets` resource and controls `pods`.
-The corresponding `ClusterRing` for the `Deployment` controller would need to be configured like this:
+The corresponding `ControllerRing` for the `Deployment` controller would need to be configured like this:
```yaml
apiVersion: sharding.timebertt.dev/v1alpha1
-kind: ClusterRing
+kind: ControllerRing
metadata:
name: kube-controller-manager-deployment
spec:
@@ -30,7 +30,7 @@ spec:
```
To allow the sharder to reassign the sharded objects during rebalancing, we need to grant the corresponding permissions.
-We need to grant these permissions explicitly depending on what is configured in the `ClusterRing`.
+We need to grant these permissions explicitly depending on what is configured in the `ControllerRing`.
Otherwise, the sharder would basically require `cluster-admin` access.
For the above example, we would use these RBAC manifests:
@@ -38,7 +38,7 @@ For the above example, we would use these RBAC manifests:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: sharding:clusterring:kube-controller-manager
+ name: sharding:controllerring:kube-controller-manager
rules:
- apiGroups:
- apps
@@ -52,11 +52,11 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: sharding:clusterring:kube-controller-manager
+ name: sharding:controllerring:kube-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
- name: sharding:clusterring:kube-controller-manager
+ name: sharding:controllerring:kube-controller-manager
subjects:
- kind: ServiceAccount
name: sharder
@@ -89,7 +89,7 @@ apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
labels:
- alpha.sharding.timebertt.dev/clusterring: my-clusterring
+ alpha.sharding.timebertt.dev/controllerring: my-controllerring
name: my-operator-565df55f4b-5vwpj
namespace: operator-system
spec:
@@ -114,7 +114,7 @@ Similar to usual leader election, a shard may release its own shard `Lease` on g
This immediately triggers reassignments by the sharder to minimize the time during which no shard is acting on a subset of objects.
In essence, all the existing machinery for leader election can be reused for maintaining the shard `Lease` – that is, with two minor changes.
-First, the shard `Lease` needs to be labelled with `alpha.sharding.timebertt.dev/clusterring=` to specify which `ClusterRing` the shard belongs to.
+First, the shard `Lease` needs to be labelled with `alpha.sharding.timebertt.dev/controllerring=` to specify which `ControllerRing` the shard belongs to.
Second, the name of the shard `Lease` needs to match the `holderIdentity`.
By default, the instance's hostname is used for both values.
If the `holderIdentity` differs from the name, the sharder assumes that the shard is unavailable.
@@ -134,7 +134,7 @@ func run() error {
restConfig := config.GetConfigOrDie()
shardLease, err := shardlease.NewResourceLock(restConfig, nil, shardlease.Options{
- ClusterRingName: "my-clusterring",
+ ControllerRingName: "my-controllerring",
})
if err != nil {
return err
@@ -163,19 +163,19 @@ func run() error {
### Filtered Watch Cache
-In short: use the following label selector on watches for all sharded resources listed in the `ClusterRing`.
+In short: use the following label selector on watches for all sharded resources listed in the `ControllerRing`.
```text
-shard.alpha.sharding.timebertt.dev/clusterring-50d858e0-example: my-operator-565df55f4b-5vwpj
+shard.alpha.sharding.timebertt.dev/50d858e0-example: my-operator-565df55f4b-5vwpj
```
-The sharder assigns all sharded objects by adding a shard label that is specific to the `ClusterRing` (resources could be part of multiple `ClusterRings`).
-The shard label's key consists of the `shard.alpha.sharding.timebertt.dev/clusterring-` prefix followed by the first 8 hex characters of the SHA256 checksum of the `ClusterRing` name followed by a `-` followed by the `ClusterRing` name itself.
+The sharder assigns all sharded objects by adding a shard label that is specific to the `ControllerRing` (resources could be part of multiple `ControllerRings`).
+The shard label's key consists of the `shard.alpha.sharding.timebertt.dev/` prefix followed by the first 8 hex characters of the SHA256 checksum of the `ControllerRing` name followed by a `-` followed by the `ControllerRing` name itself.
The key part after the `/` is shortened to 63 characters so that it is a valid label key.
-The checksum is added to the label key to derive unique label keys even for `ClusterRings` with long names that would cause the pattern to exceed the 63 characters limit after the `/`.
+The checksum is added to the label key to derive unique label keys even for `ControllerRings` with long names that would cause the pattern to exceed the 63-character limit after the `/`.
The shard label's value is the name of the shard, i.e., the name of the shard lease and the shard lease's `holderIdentity`.
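+
+As a sketch, the label key described above can be derived like this (Go shards can instead use the `LabelShard` helper from this repository's `pkg/apis/sharding/v1alpha1` package):
+
+```go
+package shard
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+)
+
+// shardLabelKey derives the ring-specific shard label key: the fixed prefix, followed by the
+// first 8 hex characters of the SHA256 checksum of the ring name, a "-", and the ring name,
+// with the part after "/" cut off at 63 characters.
+func shardLabelKey(controllerRingName string) string {
+	sum := sha256.Sum256([]byte(controllerRingName))
+	suffix := hex.EncodeToString(sum[:])[:8] + "-" + controllerRingName
+	if len(suffix) > 63 {
+		suffix = suffix[:63]
+	}
+	return "shard.alpha.sharding.timebertt.dev/" + suffix
+}
+```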
-Once you have determined the shard label key for your `ClusterRing`, use it as a selector on all watches that your controller starts for any of the sharded resources.
+Once you have determined the shard label key for your `ControllerRing`, use it as a selector on all watches that your controller starts for any of the sharded resources.
With this, the shard will only cache the objects assigned to it and the controllers will only reconcile these objects.
Note that when you use a label or field selector on a watch connection and the label or field changes so that the selector doesn't match anymore, the API server will emit a `DELETE` watch event.
@@ -204,7 +204,7 @@ func run() error {
// If your shard watches sharded objects as well as non-sharded objects, use cache.Options.ByObject to configure
// the label selector on object level.
DefaultLabelSelector: labels.SelectorFromSet(labels.Set{
- shardingv1alpha1.LabelShard(shardingv1alpha1.KindClusterRing, "", "my-clusterring"): shardLease.Identity(),
+ shardingv1alpha1.LabelShard("my-controllerring"): shardLease.Identity(),
}),
},
@@ -221,12 +221,12 @@ In short: ensure your sharded controller acknowledges drain operations.
When the drain label like this is added by the sharder, the controller needs to remove both the shard and the drain label and stop reconciling the object.
```text
-drain.alpha.sharding.timebertt.dev/clusterring-50d858e0-example
+drain.alpha.sharding.timebertt.dev/50d858e0-example
```
When the sharder needs to move an object from an available shard to another shard for rebalancing, it first adds the drain label to instruct the currently responsible shard to stop reconciling the object.
The shard needs to acknowledge this operation, as the sharder must prevent concurrent reconciliations of the same object in multiple shards.
-The drain label's key is specific to the `ClusterRing` and follows the same pattern as the shard label (see above).
+The drain label's key is specific to the `ControllerRing` and follows the same pattern as the shard label (see above).
The drain label's value is irrelevant; only the presence of the label matters.
Apart from changing the controller's business logic to first check the drain label, also ensure that the watch event filtering logic (predicates) always reacts to events with the drain label set, independently of the controller's actual predicates.
@@ -244,19 +244,19 @@ import (
// AddToManager adds a controller to the manager.
// shardName must match the shard lease's name/identity.
-func (r *Reconciler) AddToManager(mgr manager.Manager, clusterRingName, shardName string) error {
+func (r *Reconciler) AddToManager(mgr manager.Manager, controllerRingName, shardName string) error {
// ACKNOWLEDGE DRAIN OPERATIONS
// Use the shardcontroller package as helpers for:
// - a predicate that triggers when the drain label is present (even if the actual predicates don't trigger)
// - wrapping the actual reconciler in a reconciler that handles the drain operation for us
return builder.ControllerManagedBy(mgr).
Named("example").
- For(&corev1.ConfigMap{}, builder.WithPredicates(shardcontroller.Predicate(clusterRingName, shardName, MyConfigMapPredicate()))).
+ For(&corev1.ConfigMap{}, builder.WithPredicates(shardcontroller.Predicate(controllerRingName, shardName, MyConfigMapPredicate()))).
Owns(&corev1.Secret{}, builder.WithPredicates(MySecretPredicate())).
Complete(
shardcontroller.NewShardedReconciler(mgr).
For(&corev1.ConfigMap{}). // must match the kind in For() above
- InClusterRing(clusterRingName).
+ InControllerRing(controllerRingName).
WithShardName(shardName).
MustBuild(r),
)
diff --git a/docs/installation.md b/docs/installation.md
index c56d4f18..703ba6e4 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -46,7 +46,7 @@ Be sure to mount your webhook server cert to `/tmp/k8s-webhook-server/serving-ce
`config/monitoring/sharder` contains a `ServiceMonitor` for configuring metrics scraping for the sharder using the [prometheus-operator](https://prometheus-operator.dev/).
-`config/monitoring/sharding-exporter` contains a small exporter for metrics on the state of `ClusterRings` and shards based on the [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) [custom resource metrics feature](https://github.com/kubernetes/kube-state-metrics/blob/main/docs/customresourcestate-metrics.md).
+`config/monitoring/sharding-exporter` contains a small exporter for metrics on the state of `ControllerRings` and shards based on the [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) [custom resource metrics feature](https://github.com/kubernetes/kube-state-metrics/blob/main/docs/customresourcestate-metrics.md).
Use the `config/monitoring` configuration to apply both configurations together.
Also see [Monitoring the Sharding Components](monitoring.md) for more information on the exposed metrics.
diff --git a/docs/monitoring.md b/docs/monitoring.md
index 5b9e16e8..a3196b52 100644
--- a/docs/monitoring.md
+++ b/docs/monitoring.md
@@ -12,20 +12,20 @@ Clients need to authenticate against the endpoint and must be authorized for `ge
#### `controller_sharding_assignments_total`
Type: counter
-Description: Total number of shard assignments by the sharder webhook per Ring and GroupResource.
+Description: Total number of shard assignments by the sharder webhook per `ControllerRing` and GroupResource.
This counter is incremented every time the mutating webhook of the sharder assigns a sharded object (excluding dry-run requests).
#### `controller_sharding_movements_total`
Type: counter
-Description: Total number of shard movements triggered by the sharder controller per Ring and GroupResource.
+Description: Total number of shard movements triggered by the sharder controller per `ControllerRing` and GroupResource.
This counter is incremented every time the sharder controller triggers a direct object assignment, i.e., when an object needs to be moved away from an unavailable shard (or when an object has missed the webhook and needs to be assigned).
This only considers the sharder controller's side, i.e., the `controller_sharding_assignments_total` counter is incremented as well when the controller successfully triggers an assignment by the webhook.
#### `controller_sharding_drains_total`
Type: counter
-Description: Total number of shard drains triggered by the sharder controller per Ring and GroupResource.
+Description: Total number of shard drains triggered by the sharder controller per `ControllerRing` and GroupResource.
This counter is incremented every time the sharder controller triggers a drain operation, i.e., when an object needs to be moved away from an available shard.
This only considers the sharder controller's side, i.e., the `controller_sharding_assignments_total` counter is incremented as well when the shard removes the drain label as expected and thereby triggers an assignment by the webhook.
This doesn't consider the action taken by the shard.
@@ -33,37 +33,37 @@ This doesn't consider the action taken by the shard.
#### `controller_sharding_ring_calculations_total`
Type: counter
-Description: Total number of shard ring calculations per ring kind.
+Description: Total number of shard ring calculations per `ControllerRing`.
This counter is incremented every time the sharder calculates a new consistent hash ring based on the shard leases.
## sharding-exporter
-The sharding-exporter is an optional component for metrics about the state of `ClusterRings` and shards, see [Install the Sharding Components](installation.md#monitoring-optional).
+The sharding-exporter is an optional component for metrics about the state of `ControllerRings` and shards, see [Install the Sharding Components](installation.md#monitoring-optional).
The `sharding-exporter` service exposes metrics via `https` on port `8443` at the `/metrics` endpoint.
Clients need to authenticate against the endpoint and must be authorized for `get` on the `nonResourceURL` `/metrics`.
-### Exposed `ClusterRing` Metrics
+### Exposed `ControllerRing` Metrics
-#### `kube_clusterring_metadata_generation`
+#### `kube_controllerring_metadata_generation`
Type: gauge
-Description: The generation of a ClusterRing.
+Description: The generation of a ControllerRing.
-#### `kube_clusterring_observed_generation`
+#### `kube_controllerring_observed_generation`
Type: gauge
-Description: The latest generation observed by the ClusterRing controller.
+Description: The latest generation observed by the ControllerRing controller.
-#### `kube_clusterring_status_shards`
+#### `kube_controllerring_status_shards`
Type: gauge
-Description: The ClusterRing's total number of shards observed by the ClusterRing controller.
+Description: The ControllerRing's total number of shards observed by the ControllerRing controller.
-#### `kube_clusterring_status_available_shards`
+#### `kube_controllerring_status_available_shards`
Type: gauge
-Description: The ClusterRing's number of available shards observed by the ClusterRing controller.
+Description: The ControllerRing's number of available shards observed by the ControllerRing controller.
### Exposed Shard Metrics
diff --git a/hack/config/shard/clusterring/clusterring.yaml b/hack/config/shard/controllerring/controllerring.yaml
similarity index 92%
rename from hack/config/shard/clusterring/clusterring.yaml
rename to hack/config/shard/controllerring/controllerring.yaml
index 85e7fa1e..d6a4d036 100644
--- a/hack/config/shard/clusterring/clusterring.yaml
+++ b/hack/config/shard/controllerring/controllerring.yaml
@@ -1,5 +1,5 @@
apiVersion: sharding.timebertt.dev/v1alpha1
-kind: ClusterRing
+kind: ControllerRing
metadata:
name: example
spec:
diff --git a/hack/config/shard/clusterring/kustomization.yaml b/hack/config/shard/controllerring/kustomization.yaml
similarity index 81%
rename from hack/config/shard/clusterring/kustomization.yaml
rename to hack/config/shard/controllerring/kustomization.yaml
index 8e22cac6..9f3a8e97 100644
--- a/hack/config/shard/clusterring/kustomization.yaml
+++ b/hack/config/shard/controllerring/kustomization.yaml
@@ -2,5 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
-- clusterring.yaml
+- controllerring.yaml
- sharder_rbac.yaml
diff --git a/hack/config/shard/clusterring/sharder_rbac.yaml b/hack/config/shard/controllerring/sharder_rbac.yaml
similarity index 77%
rename from hack/config/shard/clusterring/sharder_rbac.yaml
rename to hack/config/shard/controllerring/sharder_rbac.yaml
index 5fe3ebd8..50c5ef70 100644
--- a/hack/config/shard/clusterring/sharder_rbac.yaml
+++ b/hack/config/shard/controllerring/sharder_rbac.yaml
@@ -1,11 +1,11 @@
-# These manifests grant the sharder controller permissions to act on resources that we listed in the ClusterRing.
+# These manifests grant the sharder controller permissions to act on resources that we listed in the ControllerRing.
# We need to grant these permissions explicitly depending on what we configured. Otherwise, the sharder would require
# cluster-admin access.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: sharding:clusterring:example
+ name: sharding:controllerring:example
rules:
- apiGroups:
- ""
@@ -19,11 +19,11 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: sharding:clusterring:example
+ name: sharding:controllerring:example
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
- name: sharding:clusterring:example
+ name: sharding:controllerring:example
subjects:
- kind: ServiceAccount
name: sharder
diff --git a/hack/config/shard/shard/deployment.yaml b/hack/config/shard/shard/deployment.yaml
index ee0d0ea7..6f92016f 100644
--- a/hack/config/shard/shard/deployment.yaml
+++ b/hack/config/shard/shard/deployment.yaml
@@ -13,7 +13,7 @@ spec:
- name: shard
image: shard:latest
args:
- - --clusterring=example
+ - --controllerring=example
- --zap-log-level=debug
env:
- name: DISABLE_HTTP2
diff --git a/hack/config/shard/shard/kustomization.yaml b/hack/config/shard/shard/kustomization.yaml
index 70651732..fe2aa398 100644
--- a/hack/config/shard/shard/kustomization.yaml
+++ b/hack/config/shard/shard/kustomization.yaml
@@ -13,7 +13,7 @@ images:
newTag: latest
resources:
-- ../clusterring
+- ../controllerring
- deployment.yaml
- rbac.yaml
- serviceaccount.yaml
diff --git a/hack/config/skaffold.yaml b/hack/config/skaffold.yaml
index 305f2344..c748f3b0 100644
--- a/hack/config/skaffold.yaml
+++ b/hack/config/skaffold.yaml
@@ -208,7 +208,7 @@ build:
manifests:
kustomize:
paths:
- - hack/config/shard/clusterring
+ - hack/config/shard/controllerring
- hack/config/shard/shard
deploy:
kubectl:
@@ -333,7 +333,7 @@ build:
manifests:
kustomize:
paths:
- # default configuration: only run operator shards and use external sharding implementation via ClusterRing
+ # default configuration: only run operator shards and use external sharding implementation via ControllerRing
- webhosting-operator/config/manager/overlays/default
- webhosting-operator/config/monitoring/default
deploy:
@@ -372,7 +372,7 @@ profiles:
patches:
- op: replace
path: /manifests/kustomize/paths/0
- # default configuration: only run operator shards and use external sharding implementation via ClusterRing
+ # default configuration: only run operator shards and use external sharding implementation via ControllerRing
value: webhosting-operator/config/manager/overlays/shoot/default
- op: add
path: /manifests/kustomize/paths/-
@@ -441,7 +441,7 @@ profiles:
- bash
- -c
- |
- if kubectl get clusterring example &>/dev/null || kubectl -n default get deploy shard &>/dev/null ; then
+ if kubectl get controllerring example &>/dev/null || kubectl -n default get deploy shard &>/dev/null ; then
echo "Example shard is still running, refusing to run a load test experiment."
echo "Ensure a clean load test environment, i.e., run 'make down SKAFFOLD_MODULE=shard'."
exit 1
diff --git a/pkg/apis/sharding/v1alpha1/constants.go b/pkg/apis/sharding/v1alpha1/constants.go
index d8b5ee18..ca5b0108 100644
--- a/pkg/apis/sharding/v1alpha1/constants.go
+++ b/pkg/apis/sharding/v1alpha1/constants.go
@@ -35,8 +35,8 @@ const (
// alphaPrefix is a common prefix for all well-known annotations and labels in this API version package.
alphaPrefix = "alpha.sharding.timebertt.dev/"
- // LabelClusterRing is the label on objects that identifies the ClusterRing that the object belongs to.
- LabelClusterRing = alphaPrefix + "clusterring"
+ // LabelControllerRing is the label on objects that identifies the ControllerRing that the object belongs to.
+ LabelControllerRing = alphaPrefix + "controllerring"
// LabelState is the label on Lease objects that reflects the state of a shard for observability purposes.
// This label is maintained by the shardlease controller.
LabelState = alphaPrefix + "state"
@@ -53,31 +53,24 @@ const (
IdentityShardLeaseController = "shardlease-controller"
delimiter = "-"
- // KindClusterRing is the kind string for ClusterRings used in label keys.
- KindClusterRing = "clusterring"
)
// LabelShard returns the label on sharded objects that holds the name of the responsible shard within a ring.
-func LabelShard(kind string, namespace, name string) string {
- return LabelShardPrefix + RingSuffix(kind, namespace, name)
+func LabelShard(ringName string) string {
+ return LabelShardPrefix + RingSuffix(ringName)
}
// LabelDrain returns the label on sharded objects that instructs the responsible shard within a ring to stop reconciling
// the object and remove both the shard and drain label.
-func LabelDrain(kind string, namespace, name string) string {
- return LabelDrainPrefix + RingSuffix(kind, namespace, name)
+func LabelDrain(ringName string) string {
+ return LabelDrainPrefix + RingSuffix(ringName)
}
-// RingSuffix returns the label key for a given ring kind and key that is appended to a qualified prefix.
-func RingSuffix(kind string, namespace, name string) string {
- key := name
- if namespace != "" {
- key = namespace + "_" + name
- }
-
- keyHash := sha256.Sum256([]byte(key))
+// RingSuffix returns the label key for a given ring name that is appended to a qualified prefix.
+func RingSuffix(ringName string) string {
+ keyHash := sha256.Sum256([]byte(ringName))
hexHash := hex.EncodeToString(keyHash[:])
// the label part after the "/" must not exceed 63 characters, cut off at 63 characters
- return strings.ShortenString(kind+delimiter+hexHash[:8]+delimiter+key, 63)
+ return strings.ShortenString(hexHash[:8]+delimiter+ringName, 63)
}
diff --git a/pkg/apis/sharding/v1alpha1/register.go b/pkg/apis/sharding/v1alpha1/register.go
index c9465e8c..bb0d9980 100644
--- a/pkg/apis/sharding/v1alpha1/register.go
+++ b/pkg/apis/sharding/v1alpha1/register.go
@@ -41,8 +41,8 @@ var (
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
- &ClusterRing{},
- &ClusterRingList{},
+ &ControllerRing{},
+ &ControllerRingList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
diff --git a/pkg/apis/sharding/v1alpha1/types_clusterring.go b/pkg/apis/sharding/v1alpha1/types_controllerring.go
similarity index 70%
rename from pkg/apis/sharding/v1alpha1/types_clusterring.go
rename to pkg/apis/sharding/v1alpha1/types_controllerring.go
index c8dc5bb1..71a1e21e 100644
--- a/pkg/apis/sharding/v1alpha1/types_clusterring.go
+++ b/pkg/apis/sharding/v1alpha1/types_controllerring.go
@@ -29,36 +29,37 @@ import (
//+kubebuilder:printcolumn:name="Shards",type=string,JSONPath=`.status.shards`
//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`
-// ClusterRing declares a virtual ring of sharded controller instances. The specified objects are distributed across
-// shards of this ring on the cluster-scope (i.e., objects in all namespaces). Hence, the "Cluster" prefix.
-type ClusterRing struct {
+// ControllerRing declares a virtual ring of sharded controller instances. Objects of the specified resources are
+// distributed across shards of this ring. Objects in all namespaces are considered unless a namespaceSelector is
+// specified.
+type ControllerRing struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
- // Spec contains the specification of the desired behavior of the ClusterRing.
+ // Spec contains the specification of the desired behavior of the ControllerRing.
// +optional
- Spec ClusterRingSpec `json:"spec,omitempty"`
- // Status contains the most recently observed status of the ClusterRing.
+ Spec ControllerRingSpec `json:"spec,omitempty"`
+ // Status contains the most recently observed status of the ControllerRing.
// +optional
- Status ClusterRingStatus `json:"status,omitempty"`
+ Status ControllerRingStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
-// ClusterRingList contains a list of ClusterRings.
-type ClusterRingList struct {
+// ControllerRingList contains a list of ControllerRings.
+type ControllerRingList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
- // Items is the list of ClusterRings.
- Items []ClusterRing `json:"items"`
+ // Items is the list of ControllerRings.
+ Items []ControllerRing `json:"items"`
}
-// ClusterRingSpec defines the desired state of a ClusterRing.
-type ClusterRingSpec struct {
- // Resources specifies the list of resources that are distributed across shards in this ClusterRing.
+// ControllerRingSpec defines the desired state of a ControllerRing.
+type ControllerRingSpec struct {
+ // Resources specifies the list of resources that are distributed across shards in this ControllerRing.
// +optional
// +listType=map
// +listMapKey=group
@@ -79,7 +80,7 @@ type RingResource struct {
// This resource is the controller's main resource, i.e., the resource of which it updates the object status.
metav1.GroupResource `json:",inline"`
- // ControlledResources are additional resources that are distributed across shards in the ClusterRing.
+ // ControlledResources are additional resources that are distributed across shards in the ControllerRing.
// These resources are controlled by the controller's main resource, i.e., they have an owner reference with
// controller=true back to the GroupResource of this RingResource.
// Typically, the controller also watches objects of this resource and enqueues the owning object (of the main
@@ -92,13 +93,13 @@ type RingResource struct {
}
const (
- // ClusterRingReady is the condition type for the "Ready" condition on ClusterRings.
- ClusterRingReady = "Ready"
+ // ControllerRingReady is the condition type for the "Ready" condition on ControllerRings.
+ ControllerRingReady = "Ready"
)
-// ClusterRingStatus defines the observed state of a ClusterRing.
-type ClusterRingStatus struct {
- // The generation observed by the ClusterRing controller.
+// ControllerRingStatus defines the observed state of a ControllerRing.
+type ControllerRingStatus struct {
+ // The generation observed by the ControllerRing controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Shards is the total number of shards of this ring.
@@ -113,23 +114,23 @@ type ClusterRingStatus struct {
Conditions []metav1.Condition `json:"conditions,omitempty"`
}
-// LeaseSelector returns a label selector for selecting shard Lease objects belonging to this ClusterRing.
-func (c *ClusterRing) LeaseSelector() labels.Selector {
- return labels.SelectorFromSet(labels.Set{LabelClusterRing: c.Name})
+// LeaseSelector returns a label selector for selecting shard Lease objects belonging to this ControllerRing.
+func (c *ControllerRing) LeaseSelector() labels.Selector {
+ return labels.SelectorFromSet(labels.Set{LabelControllerRing: c.Name})
}
-// LabelShard returns the label on sharded objects that holds the name of the responsible shard within this ClusterRing.
-func (c *ClusterRing) LabelShard() string {
- return LabelShard(KindClusterRing, "", c.Name)
+// LabelShard returns the label on sharded objects that holds the name of the responsible shard within this ControllerRing.
+func (c *ControllerRing) LabelShard() string {
+ return LabelShard(c.Name)
}
-// LabelDrain returns the label on sharded objects that instructs the responsible shard within this ClusterRing to stop
+// LabelDrain returns the label on sharded objects that instructs the responsible shard within this ControllerRing to stop
// reconciling the object and remove both the shard and drain label.
-func (c *ClusterRing) LabelDrain() string {
- return LabelDrain(KindClusterRing, "", c.Name)
+func (c *ControllerRing) LabelDrain() string {
+ return LabelDrain(c.Name)
}
-// RingResources returns the the list of resources that are distributed across shards in this ClusterRing.
-func (c *ClusterRing) RingResources() []RingResource {
+// RingResources returns the list of resources that are distributed across shards in this ControllerRing.
+func (c *ControllerRing) RingResources() []RingResource {
return c.Spec.Resources
}
diff --git a/pkg/apis/sharding/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/sharding/v1alpha1/zz_generated.deepcopy.go
index 01be6653..a8d4e769 100644
--- a/pkg/apis/sharding/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/sharding/v1alpha1/zz_generated.deepcopy.go
@@ -26,7 +26,7 @@ import (
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterRing) DeepCopyInto(out *ClusterRing) {
+func (in *ControllerRing) DeepCopyInto(out *ControllerRing) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
@@ -34,18 +34,18 @@ func (in *ClusterRing) DeepCopyInto(out *ClusterRing) {
in.Status.DeepCopyInto(&out.Status)
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRing.
-func (in *ClusterRing) DeepCopy() *ClusterRing {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRing.
+func (in *ControllerRing) DeepCopy() *ControllerRing {
if in == nil {
return nil
}
- out := new(ClusterRing)
+ out := new(ControllerRing)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterRing) DeepCopyObject() runtime.Object {
+func (in *ControllerRing) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -53,31 +53,31 @@ func (in *ClusterRing) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterRingList) DeepCopyInto(out *ClusterRingList) {
+func (in *ControllerRingList) DeepCopyInto(out *ControllerRingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
- *out = make([]ClusterRing, len(*in))
+ *out = make([]ControllerRing, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRingList.
-func (in *ClusterRingList) DeepCopy() *ClusterRingList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRingList.
+func (in *ControllerRingList) DeepCopy() *ControllerRingList {
if in == nil {
return nil
}
- out := new(ClusterRingList)
+ out := new(ControllerRingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterRingList) DeepCopyObject() runtime.Object {
+func (in *ControllerRingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -85,7 +85,7 @@ func (in *ClusterRingList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterRingSpec) DeepCopyInto(out *ClusterRingSpec) {
+func (in *ControllerRingSpec) DeepCopyInto(out *ControllerRingSpec) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
@@ -101,18 +101,18 @@ func (in *ClusterRingSpec) DeepCopyInto(out *ClusterRingSpec) {
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRingSpec.
-func (in *ClusterRingSpec) DeepCopy() *ClusterRingSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRingSpec.
+func (in *ControllerRingSpec) DeepCopy() *ControllerRingSpec {
if in == nil {
return nil
}
- out := new(ClusterRingSpec)
+ out := new(ControllerRingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterRingStatus) DeepCopyInto(out *ClusterRingStatus) {
+func (in *ControllerRingStatus) DeepCopyInto(out *ControllerRingStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
@@ -123,12 +123,12 @@ func (in *ClusterRingStatus) DeepCopyInto(out *ClusterRingStatus) {
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRingStatus.
-func (in *ClusterRingStatus) DeepCopy() *ClusterRingStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRingStatus.
+func (in *ControllerRingStatus) DeepCopy() *ControllerRingStatus {
if in == nil {
return nil
}
- out := new(ClusterRingStatus)
+ out := new(ControllerRingStatus)
in.DeepCopyInto(out)
return out
}
diff --git a/pkg/controller/add.go b/pkg/controller/add.go
index 473017ff..dfa8d83c 100644
--- a/pkg/controller/add.go
+++ b/pkg/controller/add.go
@@ -23,17 +23,17 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
configv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/config/v1alpha1"
- "github.com/timebertt/kubernetes-controller-sharding/pkg/controller/clusterring"
+ "github.com/timebertt/kubernetes-controller-sharding/pkg/controller/controllerring"
"github.com/timebertt/kubernetes-controller-sharding/pkg/controller/sharder"
"github.com/timebertt/kubernetes-controller-sharding/pkg/controller/shardlease"
)
// AddToManager adds all controllers to the manager.
func AddToManager(ctx context.Context, mgr manager.Manager, config *configv1alpha1.SharderConfig) error {
- if err := (&clusterring.Reconciler{
+ if err := (&controllerring.Reconciler{
Config: config,
}).AddToManager(mgr); err != nil {
- return fmt.Errorf("failed adding clusterring controller: %w", err)
+ return fmt.Errorf("failed adding controllerring controller: %w", err)
}
if err := (&sharder.Reconciler{
diff --git a/pkg/controller/clusterring/add.go b/pkg/controller/controllerring/add.go
similarity index 83%
rename from pkg/controller/clusterring/add.go
rename to pkg/controller/controllerring/add.go
index 3c87f9cf..f4a2e836 100644
--- a/pkg/controller/clusterring/add.go
+++ b/pkg/controller/controllerring/add.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package clusterring
+package controllerring
import (
"context"
@@ -35,7 +35,7 @@ import (
)
// ControllerName is the name of this controller.
-const ControllerName = "clusterring"
+const ControllerName = "controllerring"
// AddToManager adds Reconciler to the given manager.
func (r *Reconciler) AddToManager(mgr manager.Manager) error {
@@ -51,18 +51,18 @@ func (r *Reconciler) AddToManager(mgr manager.Manager) error {
return builder.ControllerManagedBy(mgr).
Named(ControllerName).
- For(&shardingv1alpha1.ClusterRing{}, builder.WithPredicates(r.ClusterRingPredicate())).
- Watches(&coordinationv1.Lease{}, handler.EnqueueRequestsFromMapFunc(MapLeaseToClusterRing), builder.WithPredicates(r.LeasePredicate())).
+ For(&shardingv1alpha1.ControllerRing{}, builder.WithPredicates(r.ControllerRingPredicate())).
+ Watches(&coordinationv1.Lease{}, handler.EnqueueRequestsFromMapFunc(MapLeaseToControllerRing), builder.WithPredicates(r.LeasePredicate())).
WithOptions(controller.Options{
MaxConcurrentReconciles: 5,
}).
Complete(r)
}
-func (r *Reconciler) ClusterRingPredicate() predicate.Predicate {
+func (r *Reconciler) ControllerRingPredicate() predicate.Predicate {
return predicate.And(
predicate.GenerationChangedPredicate{},
- // ignore deletion of ClusterRings
+ // ignore deletion of ControllerRings
predicate.Funcs{
CreateFunc: func(_ event.CreateEvent) bool { return true },
UpdateFunc: func(_ event.UpdateEvent) bool { return true },
@@ -71,8 +71,8 @@ func (r *Reconciler) ClusterRingPredicate() predicate.Predicate {
)
}
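+// MapLeaseToControllerRing maps a shard Lease to the ControllerRing referenced by the Lease's
+// ControllerRing label.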
-func MapLeaseToClusterRing(ctx context.Context, obj client.Object) []reconcile.Request {
- ring, ok := obj.GetLabels()[shardingv1alpha1.LabelClusterRing]
+func MapLeaseToControllerRing(ctx context.Context, obj client.Object) []reconcile.Request {
+ ring, ok := obj.GetLabels()[shardingv1alpha1.LabelControllerRing]
if !ok {
return nil
}
@@ -105,5 +105,5 @@ func (r *Reconciler) LeasePredicate() predicate.Predicate {
}
func isShardLease(obj client.Object) bool {
- return obj.GetLabels()[shardingv1alpha1.LabelClusterRing] != ""
+ return obj.GetLabels()[shardingv1alpha1.LabelControllerRing] != ""
}
diff --git a/pkg/controller/clusterring/reconciler.go b/pkg/controller/controllerring/reconciler.go
similarity index 68%
rename from pkg/controller/clusterring/reconciler.go
rename to pkg/controller/controllerring/reconciler.go
index eca1d6c7..332f2954 100644
--- a/pkg/controller/clusterring/reconciler.go
+++ b/pkg/controller/controllerring/reconciler.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package clusterring
+package controllerring
import (
"context"
@@ -46,12 +46,12 @@ import (
"github.com/timebertt/kubernetes-controller-sharding/pkg/webhook/sharder"
)
-//+kubebuilder:rbac:groups=sharding.timebertt.dev,resources=clusterrings,verbs=get;list;watch
-//+kubebuilder:rbac:groups=sharding.timebertt.dev,resources=clusterrings/status,verbs=update;patch
+//+kubebuilder:rbac:groups=sharding.timebertt.dev,resources=controllerrings,verbs=get;list;watch
+//+kubebuilder:rbac:groups=sharding.timebertt.dev,resources=controllerrings/status,verbs=update;patch
//+kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations,verbs=create;patch
//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
-// Reconciler reconciles ClusterRings.
+// Reconciler reconciles ControllerRings.
type Reconciler struct {
Client client.Client
Recorder record.EventRecorder
@@ -59,12 +59,12 @@ type Reconciler struct {
Config *configv1alpha1.SharderConfig
}
-// Reconcile reconciles a ClusterRing object.
+// Reconcile reconciles a ControllerRing object.
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := logf.FromContext(ctx)
- clusterRing := &shardingv1alpha1.ClusterRing{}
- if err := r.Client.Get(ctx, req.NamespacedName, clusterRing); err != nil {
+ controllerRing := &shardingv1alpha1.ControllerRing{}
+ if err := r.Client.Get(ctx, req.NamespacedName, controllerRing); err != nil {
if apierrors.IsNotFound(err) {
log.V(1).Info("Object is gone, stop reconciling")
return reconcile.Result{}, nil
@@ -72,90 +72,90 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
return reconcile.Result{}, fmt.Errorf("error retrieving object from store: %w", err)
}
- before := clusterRing.DeepCopy()
+ before := controllerRing.DeepCopy()
// reconcile sharder webhook configs
- if err := r.reconcileWebhooks(ctx, clusterRing); err != nil {
- return reconcile.Result{}, r.updateStatusError(ctx, log, fmt.Errorf("error reconciling webhooks for ClusterRing: %w", err), clusterRing, before)
+ if err := r.reconcileWebhooks(ctx, controllerRing); err != nil {
+ return reconcile.Result{}, r.updateStatusError(ctx, log, fmt.Errorf("error reconciling webhooks for ControllerRing: %w", err), controllerRing, before)
}
// collect list of shards in the ring
leaseList := &coordinationv1.LeaseList{}
- if err := r.Client.List(ctx, leaseList, client.MatchingLabelsSelector{Selector: clusterRing.LeaseSelector()}); err != nil {
- return reconcile.Result{}, r.updateStatusError(ctx, log, fmt.Errorf("error listing Leases for ClusterRing: %w", err), clusterRing, before)
+ if err := r.Client.List(ctx, leaseList, client.MatchingLabelsSelector{Selector: controllerRing.LeaseSelector()}); err != nil {
+ return reconcile.Result{}, r.updateStatusError(ctx, log, fmt.Errorf("error listing Leases for ControllerRing: %w", err), controllerRing, before)
}
shards := leases.ToShards(leaseList.Items, r.Clock.Now())
- clusterRing.Status.Shards = int32(len(shards)) // nolint:gosec
- clusterRing.Status.AvailableShards = int32(len(shards.AvailableShards())) // nolint:gosec
+ controllerRing.Status.Shards = int32(len(shards)) // nolint:gosec
+ controllerRing.Status.AvailableShards = int32(len(shards.AvailableShards())) // nolint:gosec
// update status if necessary
- return reconcile.Result{}, r.updateStatusSuccess(ctx, clusterRing, before)
+ return reconcile.Result{}, r.updateStatusSuccess(ctx, controllerRing, before)
}
-func (r *Reconciler) updateStatusSuccess(ctx context.Context, clusterRing, before *shardingv1alpha1.ClusterRing) error {
- if err := r.optionallyUpdateStatus(ctx, clusterRing, before, func(ready *metav1.Condition) {
+func (r *Reconciler) updateStatusSuccess(ctx context.Context, controllerRing, before *shardingv1alpha1.ControllerRing) error {
+ if err := r.optionallyUpdateStatus(ctx, controllerRing, before, func(ready *metav1.Condition) {
ready.Status = metav1.ConditionTrue
ready.Reason = "ReconciliationSucceeded"
- ready.Message = "ClusterRing was successfully reconciled"
+ ready.Message = "ControllerRing was successfully reconciled"
}); err != nil {
- return fmt.Errorf("error updating ClusterRing status: %w", err)
+ return fmt.Errorf("error updating ControllerRing status: %w", err)
}
return nil
}
-func (r *Reconciler) updateStatusError(ctx context.Context, log logr.Logger, reconcileError error, clusterRing, before *shardingv1alpha1.ClusterRing) error {
+func (r *Reconciler) updateStatusError(ctx context.Context, log logr.Logger, reconcileError error, controllerRing, before *shardingv1alpha1.ControllerRing) error {
message := utils.CapitalizeFirst(reconcileError.Error())
- r.Recorder.Event(clusterRing, corev1.EventTypeWarning, "ReconciliationFailed", message)
+ r.Recorder.Event(controllerRing, corev1.EventTypeWarning, "ReconciliationFailed", message)
- if err := r.optionallyUpdateStatus(ctx, clusterRing, before, func(ready *metav1.Condition) {
+ if err := r.optionallyUpdateStatus(ctx, controllerRing, before, func(ready *metav1.Condition) {
ready.Status = metav1.ConditionFalse
ready.Reason = "ReconciliationFailed"
ready.Message = message
}); err != nil {
// We will return the underlying error to the controller. If we fail to publish it to the status, make sure to log
// it at least.
- log.Error(err, "Error updating ClusterRing status with error")
+ log.Error(err, "Error updating ControllerRing status with error")
}
return reconcileError
}
-func (r *Reconciler) optionallyUpdateStatus(ctx context.Context, clusterRing, before *shardingv1alpha1.ClusterRing, mutate func(ready *metav1.Condition)) error {
+func (r *Reconciler) optionallyUpdateStatus(ctx context.Context, controllerRing, before *shardingv1alpha1.ControllerRing, mutate func(ready *metav1.Condition)) error {
// always update status with the latest observed generation, no matter if reconciliation succeeded or not
- clusterRing.Status.ObservedGeneration = clusterRing.Generation
+ controllerRing.Status.ObservedGeneration = controllerRing.Generation
readyCondition := metav1.Condition{
- Type: shardingv1alpha1.ClusterRingReady,
- ObservedGeneration: clusterRing.Generation,
+ Type: shardingv1alpha1.ControllerRingReady,
+ ObservedGeneration: controllerRing.Generation,
}
mutate(&readyCondition)
- meta.SetStatusCondition(&clusterRing.Status.Conditions, readyCondition)
+ meta.SetStatusCondition(&controllerRing.Status.Conditions, readyCondition)
- if apiequality.Semantic.DeepEqual(clusterRing.Status, before.Status) {
+ if apiequality.Semantic.DeepEqual(controllerRing.Status, before.Status) {
return nil
}
- return r.Client.Status().Update(ctx, clusterRing)
+ return r.Client.Status().Update(ctx, controllerRing)
}
-func (r *Reconciler) reconcileWebhooks(ctx context.Context, clusterRing *shardingv1alpha1.ClusterRing) error {
+func (r *Reconciler) reconcileWebhooks(ctx context.Context, controllerRing *shardingv1alpha1.ControllerRing) error {
webhookConfig := &admissionregistrationv1.MutatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: admissionregistrationv1.SchemeGroupVersion.String(),
Kind: "MutatingWebhookConfiguration",
},
ObjectMeta: metav1.ObjectMeta{
- Name: "sharding-" + shardingv1alpha1.RingSuffix(shardingv1alpha1.KindClusterRing, "", clusterRing.Name),
+ Name: "sharding-" + shardingv1alpha1.RingSuffix(controllerRing.Name),
Labels: map[string]string{
- "app.kubernetes.io/name": shardingv1alpha1.AppControllerSharding,
- shardingv1alpha1.LabelClusterRing: clusterRing.Name,
+ "app.kubernetes.io/name": shardingv1alpha1.AppControllerSharding,
+ shardingv1alpha1.LabelControllerRing: controllerRing.Name,
},
Annotations: maps.Clone(r.Config.Webhook.Config.Annotations),
},
}
- if err := controllerutil.SetControllerReference(clusterRing, webhookConfig, r.Client.Scheme()); err != nil {
+ if err := controllerutil.SetControllerReference(controllerRing, webhookConfig, r.Client.Scheme()); err != nil {
return fmt.Errorf("error setting controller reference: %w", err)
}
@@ -167,7 +167,7 @@ func (r *Reconciler) reconcileWebhooks(ctx context.Context, clusterRing *shardin
// only process unassigned objects
ObjectSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
- Key: clusterRing.LabelShard(),
+ Key: controllerRing.LabelShard(),
Operator: metav1.LabelSelectorOpDoesNotExist,
}},
},
@@ -181,12 +181,12 @@ func (r *Reconciler) reconcileWebhooks(ctx context.Context, clusterRing *shardin
}
// overwrite namespaceSelector with ring-specific namespaceSelector if specified
- if clusterRing.Spec.NamespaceSelector != nil {
- webhook.NamespaceSelector = clusterRing.Spec.NamespaceSelector.DeepCopy()
+ if controllerRing.Spec.NamespaceSelector != nil {
+ webhook.NamespaceSelector = controllerRing.Spec.NamespaceSelector.DeepCopy()
}
// add ring-specific path to webhook client config
- webhookPath, err := sharder.WebhookPathFor(clusterRing)
+ webhookPath, err := sharder.WebhookPathFor(controllerRing)
if err != nil {
return err
}
@@ -202,7 +202,7 @@ func (r *Reconciler) reconcileWebhooks(ctx context.Context, clusterRing *shardin
}
// add rules for all ring resources
- for _, ringResource := range clusterRing.Spec.Resources {
+ for _, ringResource := range controllerRing.Spec.Resources {
webhook.Rules = append(webhook.Rules, RuleForResource(ringResource.GroupResource))
for _, controlledResource := range ringResource.ControlledResources {
diff --git a/pkg/controller/sharder/add.go b/pkg/controller/sharder/add.go
index 34c6ca64..893f0192 100644
--- a/pkg/controller/sharder/add.go
+++ b/pkg/controller/sharder/add.go
@@ -51,18 +51,18 @@ func (r *Reconciler) AddToManager(mgr manager.Manager) error {
return builder.ControllerManagedBy(mgr).
Named(ControllerName).
- For(&shardingv1alpha1.ClusterRing{}, builder.WithPredicates(r.ClusterRingPredicate())).
- Watches(&coordinationv1.Lease{}, handler.EnqueueRequestsFromMapFunc(MapLeaseToClusterRing), builder.WithPredicates(r.LeasePredicate())).
+ For(&shardingv1alpha1.ControllerRing{}, builder.WithPredicates(r.ControllerRingPredicate())).
+ Watches(&coordinationv1.Lease{}, handler.EnqueueRequestsFromMapFunc(MapLeaseToControllerRing), builder.WithPredicates(r.LeasePredicate())).
WithOptions(controller.Options{
MaxConcurrentReconciles: 5,
}).
Complete(r)
}
-func (r *Reconciler) ClusterRingPredicate() predicate.Predicate {
+func (r *Reconciler) ControllerRingPredicate() predicate.Predicate {
return predicate.And(
predicate.GenerationChangedPredicate{},
- // ignore deletion of ClusterRings
+ // ignore deletion of ControllerRings
predicate.Funcs{
CreateFunc: func(_ event.CreateEvent) bool { return true },
UpdateFunc: func(_ event.UpdateEvent) bool { return true },
@@ -71,8 +71,8 @@ func (r *Reconciler) ClusterRingPredicate() predicate.Predicate {
)
}
-func MapLeaseToClusterRing(ctx context.Context, obj client.Object) []reconcile.Request {
- ring, ok := obj.GetLabels()[shardingv1alpha1.LabelClusterRing]
+func MapLeaseToControllerRing(ctx context.Context, obj client.Object) []reconcile.Request {
+ ring, ok := obj.GetLabels()[shardingv1alpha1.LabelControllerRing]
if !ok {
return nil
}
@@ -91,7 +91,7 @@ func (r *Reconciler) LeasePredicate() predicate.Predicate {
}
// We only need to resync the ring if the new shard is available right away.
- // Note: on controller start we will enqueue anyway for the add event of ClusterRings.
+ // Note: on controller start we will enqueue anyway for the add event of ControllerRings.
return leases.ToState(lease, r.Clock.Now()).IsAvailable()
},
UpdateFunc: func(e event.UpdateEvent) bool {
@@ -131,5 +131,5 @@ func (r *Reconciler) LeasePredicate() predicate.Predicate {
}
func isShardLease(obj client.Object) bool {
- return obj.GetLabels()[shardingv1alpha1.LabelClusterRing] != ""
+ return obj.GetLabels()[shardingv1alpha1.LabelControllerRing] != ""
}
diff --git a/pkg/controller/sharder/reconciler.go b/pkg/controller/sharder/reconciler.go
index 9b774ebb..5075c48e 100644
--- a/pkg/controller/sharder/reconciler.go
+++ b/pkg/controller/sharder/reconciler.go
@@ -45,17 +45,17 @@ import (
"github.com/timebertt/kubernetes-controller-sharding/pkg/utils/pager"
)
-//+kubebuilder:rbac:groups=sharding.timebertt.dev,resources=clusterrings,verbs=get;list;watch
+//+kubebuilder:rbac:groups=sharding.timebertt.dev,resources=controllerrings,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
-// Note: The sharder requires permissions to list and patch resources listed in ClusterRings. However, the default
+// Note: The sharder requires permissions to list and patch resources listed in ControllerRings. However, the default
// sharder role doesn't include permissions for listing/mutating arbitrary resources (which would basically be
// cluster-admin access) to adhere to the least privilege principle.
-// We can't automate permission management in the clusterring controller, because you can't grant permissions you don't
+// We can't automate permission management in the controllerring controller, because you can't grant permissions you don't
// already have.
// Hence, users need to grant the sharder permissions for listing/mutating sharded resources explicitly.
-// Reconciler reconciles ClusterRings.
+// Reconciler reconciles ControllerRings.
type Reconciler struct {
Client client.Client
Reader client.Reader
@@ -63,12 +63,12 @@ type Reconciler struct {
Config *configv1alpha1.SharderConfig
}
-// Reconcile reconciles a ClusterRing object.
+// Reconcile reconciles a ControllerRing object.
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := logf.FromContext(ctx)
- clusterRing := &shardingv1alpha1.ClusterRing{}
- if err := r.Client.Get(ctx, req.NamespacedName, clusterRing); err != nil {
+ controllerRing := &shardingv1alpha1.ControllerRing{}
+ if err := r.Client.Get(ctx, req.NamespacedName, controllerRing); err != nil {
if apierrors.IsNotFound(err) {
log.V(1).Info("Object is gone, stop reconciling")
return reconcile.Result{}, nil
@@ -76,18 +76,18 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
return reconcile.Result{}, fmt.Errorf("error retrieving object from store: %w", err)
}
- log = log.WithValues("ring", client.ObjectKeyFromObject(clusterRing))
+ log = log.WithValues("ring", client.ObjectKeyFromObject(controllerRing))
// collect list of shards in the ring
leaseList := &coordinationv1.LeaseList{}
- if err := r.Client.List(ctx, leaseList, client.MatchingLabelsSelector{Selector: clusterRing.LeaseSelector()}); err != nil {
- return reconcile.Result{}, fmt.Errorf("error listing Leases for ClusterRing: %w", err)
+ if err := r.Client.List(ctx, leaseList, client.MatchingLabelsSelector{Selector: controllerRing.LeaseSelector()}); err != nil {
+ return reconcile.Result{}, fmt.Errorf("error listing Leases for ControllerRing: %w", err)
}
// get ring and shards from cache
- hashRing, shards := ring.FromLeases(clusterRing, leaseList, r.Clock.Now())
+ hashRing, shards := ring.FromLeases(controllerRing, leaseList, r.Clock.Now())
- namespaces, err := r.getSelectedNamespaces(ctx, clusterRing)
+ namespaces, err := r.getSelectedNamespaces(ctx, controllerRing)
if err != nil {
return reconcile.Result{}, err
}
@@ -97,14 +97,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
}
// resync all ring resources
- for _, ringResource := range clusterRing.Spec.Resources {
+ for _, ringResource := range controllerRing.Spec.Resources {
allErrs = multierror.Append(allErrs,
- r.resyncResource(ctx, log, ringResource.GroupResource, clusterRing, namespaces, hashRing, shards, false),
+ r.resyncResource(ctx, log, ringResource.GroupResource, controllerRing, namespaces, hashRing, shards, false),
)
for _, controlledResource := range ringResource.ControlledResources {
allErrs = multierror.Append(allErrs,
- r.resyncResource(ctx, log, controlledResource, clusterRing, namespaces, hashRing, shards, true),
+ r.resyncResource(ctx, log, controlledResource, controllerRing, namespaces, hashRing, shards, true),
)
}
}
@@ -118,10 +118,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
return reconcile.Result{RequeueAfter: r.Config.Controller.Sharder.SyncPeriod.Duration}, nil
}
-func (r *Reconciler) getSelectedNamespaces(ctx context.Context, clusterRing *shardingv1alpha1.ClusterRing) (sets.Set[string], error) {
+func (r *Reconciler) getSelectedNamespaces(ctx context.Context, controllerRing *shardingv1alpha1.ControllerRing) (sets.Set[string], error) {
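+ // The ring-specific namespaceSelector (if set) takes precedence over the sharder's default webhook namespaceSelector.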
namespaceSelector := r.Config.Webhook.Config.NamespaceSelector
- if clusterRing.Spec.NamespaceSelector != nil {
- namespaceSelector = clusterRing.Spec.NamespaceSelector
+ if controllerRing.Spec.NamespaceSelector != nil {
+ namespaceSelector = controllerRing.Spec.NamespaceSelector
}
selector, err := metav1.LabelSelectorAsSelector(namespaceSelector)
@@ -131,7 +131,7 @@ func (r *Reconciler) getSelectedNamespaces(ctx context.Context, clusterRing *sha
namespaceList := &corev1.NamespaceList{}
if err := r.Client.List(ctx, namespaceList, client.MatchingLabelsSelector{Selector: selector}); err != nil {
- return nil, fmt.Errorf("error listing selected namespaces for ClusterRing: %w", err)
+ return nil, fmt.Errorf("error listing selected namespaces for ControllerRing: %w", err)
}
namespaceSet := sets.New[string]()
@@ -243,8 +243,7 @@ func (r *Reconciler) resyncObject(
}
shardingmetrics.DrainsTotal.WithLabelValues(
- shardingv1alpha1.KindClusterRing, ring.GetNamespace(), ring.GetName(),
- gr.Group, gr.Resource,
+ ring.GetName(), gr.Group, gr.Resource,
).Inc()
// object will go through the sharder webhook when shard removes the drain label, which will perform the assignment
@@ -265,8 +264,7 @@ func (r *Reconciler) resyncObject(
}
shardingmetrics.MovementsTotal.WithLabelValues(
- shardingv1alpha1.KindClusterRing, ring.GetNamespace(), ring.GetName(),
- gr.Group, gr.Resource,
+ ring.GetName(), gr.Group, gr.Resource,
).Inc()
return nil
diff --git a/pkg/controller/shardlease/add.go b/pkg/controller/shardlease/add.go
index d800e9cf..2dd3361c 100644
--- a/pkg/controller/shardlease/add.go
+++ b/pkg/controller/shardlease/add.go
@@ -52,8 +52,8 @@ func (r *Reconciler) AddToManager(mgr manager.Manager) error {
return builder.ControllerManagedBy(mgr).
Named(ControllerName).
For(&coordinationv1.Lease{}, builder.WithPredicates(r.LeasePredicate())).
- // enqueue all Leases belonging to a ClusterRing when it is created or the spec is updated
- Watches(&shardingv1alpha1.ClusterRing{}, handler.EnqueueRequestsFromMapFunc(r.MapClusterRingToLeases), builder.WithPredicates(predicate.GenerationChangedPredicate{})).
+ // enqueue all Leases belonging to a ControllerRing when it is created or the spec is updated
+ Watches(&shardingv1alpha1.ControllerRing{}, handler.EnqueueRequestsFromMapFunc(r.MapControllerRingToLeases), builder.WithPredicates(predicate.GenerationChangedPredicate{})).
WithOptions(controller.Options{
MaxConcurrentReconciles: 5,
}).
@@ -84,12 +84,12 @@ func (r *Reconciler) LeasePredicate() predicate.Predicate {
)
}
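+// MapControllerRingToLeases enqueues all shard Leases belonging to the given ControllerRing,
+// selected via the ring's Lease label selector.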
-func (r *Reconciler) MapClusterRingToLeases(ctx context.Context, obj client.Object) []reconcile.Request {
- clusterRing := obj.(*shardingv1alpha1.ClusterRing)
+func (r *Reconciler) MapControllerRingToLeases(ctx context.Context, obj client.Object) []reconcile.Request {
+ controllerRing := obj.(*shardingv1alpha1.ControllerRing)
leaseList := &coordinationv1.LeaseList{}
- if err := r.Client.List(ctx, leaseList, client.MatchingLabelsSelector{Selector: clusterRing.LeaseSelector()}); err != nil {
- handlerLog.Error(err, "failed listing Leases for ClusterRing", "clusterRing", client.ObjectKeyFromObject(clusterRing))
+ if err := r.Client.List(ctx, leaseList, client.MatchingLabelsSelector{Selector: controllerRing.LeaseSelector()}); err != nil {
+ handlerLog.Error(err, "failed listing Leases for ControllerRing", "controllerRing", client.ObjectKeyFromObject(controllerRing))
return nil
}
@@ -103,5 +103,5 @@ func (r *Reconciler) MapClusterRingToLeases(ctx context.Context, obj client.Obje
}
func isShardLease(obj client.Object) bool {
- return obj.GetLabels()[shardingv1alpha1.LabelClusterRing] != ""
+ return obj.GetLabels()[shardingv1alpha1.LabelControllerRing] != ""
}
diff --git a/pkg/controller/shardlease/reconciler.go b/pkg/controller/shardlease/reconciler.go
index b3c1b334..e5966d48 100644
--- a/pkg/controller/shardlease/reconciler.go
+++ b/pkg/controller/shardlease/reconciler.go
@@ -55,18 +55,18 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
return reconcile.Result{}, fmt.Errorf("error retrieving object from store: %w", err)
}
- clusterRingName := lease.Labels[shardingv1alpha1.LabelClusterRing]
- if clusterRingName == "" {
+ controllerRingName := lease.Labels[shardingv1alpha1.LabelControllerRing]
+ if controllerRingName == "" {
log.V(1).Info("Ignoring non-shard lease")
return reconcile.Result{}, nil
}
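+ // Ignore shard Leases that reference a ControllerRing which doesn't exist.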
- if err := r.Client.Get(ctx, client.ObjectKey{Name: clusterRingName}, &shardingv1alpha1.ClusterRing{}); err != nil {
+ if err := r.Client.Get(ctx, client.ObjectKey{Name: controllerRingName}, &shardingv1alpha1.ControllerRing{}); err != nil {
if !apierrors.IsNotFound(err) {
- return reconcile.Result{}, fmt.Errorf("error checking for existence of ClusterRing: %w", err)
+ return reconcile.Result{}, fmt.Errorf("error checking for existence of ControllerRing: %w", err)
}
- log.V(1).Info("Ignoring shard lease without a corresponding ClusterRing")
+ log.V(1).Info("Ignoring shard lease without a corresponding ControllerRing")
return reconcile.Result{}, nil
}
diff --git a/pkg/shard/controller/builder.go b/pkg/shard/controller/builder.go
index 508bef9c..c4925b22 100644
--- a/pkg/shard/controller/builder.go
+++ b/pkg/shard/controller/builder.go
@@ -30,11 +30,11 @@ import (
// Builder can build a sharded reconciler.
// Use NewShardedReconciler to create a new Builder.
type Builder struct {
- object client.Object
- client client.Client
- clusterRingName string
- shardName string
- err error
+ object client.Object
+ client client.Client
+ controllerRingName string
+ shardName string
+ err error
}
// NewShardedReconciler returns a new Builder for building a sharded reconciler.
@@ -64,9 +64,9 @@ func (b *Builder) WithClient(c client.Client) *Builder {
return b
}
-// InClusterRing sets the name of the ClusterRing that the shard belongs to.
-func (b *Builder) InClusterRing(name string) *Builder {
- b.clusterRingName = name
+// InControllerRing sets the name of the ControllerRing that the shard belongs to.
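+// The ring name is used by Build to derive the ring-specific shard and drain label keys.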
+func (b *Builder) InControllerRing(name string) *Builder {
+ b.controllerRingName = name
return b
}
@@ -97,8 +97,8 @@ func (b *Builder) Build(r reconcile.Reconciler) (reconcile.Reconciler, error) {
if b.client == nil {
return nil, fmt.Errorf("missing client")
}
- if b.clusterRingName == "" {
- return nil, fmt.Errorf("missing ClusterRing name")
+ if b.controllerRingName == "" {
+ return nil, fmt.Errorf("missing ControllerRing name")
}
if b.shardName == "" {
return nil, fmt.Errorf("missing shard name")
@@ -108,8 +108,8 @@ func (b *Builder) Build(r reconcile.Reconciler) (reconcile.Reconciler, error) {
Object: b.object,
Client: b.client,
ShardName: b.shardName,
- LabelShard: shardingv1alpha1.LabelShard(shardingv1alpha1.KindClusterRing, "", b.clusterRingName),
- LabelDrain: shardingv1alpha1.LabelDrain(shardingv1alpha1.KindClusterRing, "", b.clusterRingName),
+ LabelShard: shardingv1alpha1.LabelShard(b.controllerRingName),
+ LabelDrain: shardingv1alpha1.LabelDrain(b.controllerRingName),
Do: r,
}, nil
}
diff --git a/pkg/shard/controller/predicate.go b/pkg/shard/controller/predicate.go
index 950a9895..f92e2d68 100644
--- a/pkg/shard/controller/predicate.go
+++ b/pkg/shard/controller/predicate.go
@@ -30,24 +30,24 @@ import (
// `builder.Builder.For` (i.e., the controller's main kind).
// It is not needed to use this predicate for secondary watches (e.g., for object kinds given to
// `builder.Builder.{Owns,Watches}`) as secondary objects are not drained by the sharder.
-func Predicate(clusterRingName, shardName string, predicates ...predicate.Predicate) predicate.Predicate {
+func Predicate(controllerRingName, shardName string, predicates ...predicate.Predicate) predicate.Predicate {
return predicate.Or(
// always enqueue if we need to acknowledge the drain operation, other predicates don't matter in this case
- isDrained(clusterRingName),
+ isDrained(controllerRingName),
// or enqueue if we are responsible and all other predicates match
- predicate.And(isAssigned(clusterRingName, shardName), predicate.And(predicates...)),
+ predicate.And(isAssigned(controllerRingName, shardName), predicate.And(predicates...)),
)
}
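+// isAssigned matches objects whose ring-specific shard label is set to the given shard name, i.e.,
+// objects that are currently assigned to this shard.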
-func isAssigned(clusterRingName, shardName string) predicate.Predicate {
+func isAssigned(controllerRingName, shardName string) predicate.Predicate {
return predicate.NewPredicateFuncs(func(object client.Object) bool {
- return object.GetLabels()[shardingv1alpha1.LabelShard(shardingv1alpha1.KindClusterRing, "", clusterRingName)] == shardName
+ return object.GetLabels()[shardingv1alpha1.LabelShard(controllerRingName)] == shardName
})
}
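+// isDrained matches objects that carry the ring-specific drain label, i.e., objects for which the
+// shard needs to acknowledge a drain operation requested by the sharder.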
-func isDrained(clusterRingName string) predicate.Predicate {
+func isDrained(controllerRingName string) predicate.Predicate {
return predicate.NewPredicateFuncs(func(object client.Object) bool {
- _, drain := object.GetLabels()[shardingv1alpha1.LabelDrain(shardingv1alpha1.KindClusterRing, "", clusterRingName)]
+ _, drain := object.GetLabels()[shardingv1alpha1.LabelDrain(controllerRingName)]
return drain
})
}
diff --git a/pkg/shard/controller/reconciler.go b/pkg/shard/controller/reconciler.go
index 9de8add7..98a7f477 100644
--- a/pkg/shard/controller/reconciler.go
+++ b/pkg/shard/controller/reconciler.go
@@ -37,9 +37,9 @@ type Reconciler struct {
Client client.Client
// ShardName is the shard ID of the manager.
ShardName string
- // LabelShard is the shard label specific to the manager's ClusterRing.
+ // LabelShard is the shard label specific to the manager's ControllerRing.
LabelShard string
- // LabelDrain is the drain label specific to the manager's ClusterRing.
+ // LabelDrain is the drain label specific to the manager's ControllerRing.
LabelDrain string
// Do is the actual Reconciler.
Do reconcile.Reconciler
diff --git a/pkg/shard/lease/lease.go b/pkg/shard/lease/lease.go
index e657b6f7..206296cf 100644
--- a/pkg/shard/lease/lease.go
+++ b/pkg/shard/lease/lease.go
@@ -35,8 +35,8 @@ import (
// Options provides the required configuration to create a new shard lease.
type Options struct {
- // ClusterRingName specifies the name of the ClusterRing that the shard belongs to.
- ClusterRingName string
+ // ControllerRingName specifies the name of the ControllerRing that the shard belongs to.
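+ // It is set as the value of the ControllerRing label on the shard Lease.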
+ ControllerRingName string
// LeaseNamespace determines the namespace in which the shard lease will be created.
// Defaults to the pod's namespace if running in-cluster.
LeaseNamespace string
@@ -88,7 +88,7 @@ func NewResourceLock(config *rest.Config, eventRecorder resourcelock.EventRecord
EventRecorder: eventRecorder,
},
Labels: map[string]string{
- shardingv1alpha1.LabelClusterRing: options.ClusterRingName,
+ shardingv1alpha1.LabelControllerRing: options.ControllerRingName,
},
}
diff --git a/pkg/sharding/metrics/metrics.go b/pkg/sharding/metrics/metrics.go
index 07bf6cb4..177b3152 100644
--- a/pkg/sharding/metrics/metrics.go
+++ b/pkg/sharding/metrics/metrics.go
@@ -25,35 +25,35 @@ import (
var (
// AssignmentsTotal is a prometheus counter metric which holds the total number of shard assignments by the sharder
// webhook per Ring and GroupResource.
- // It has three labels which refer to the Ring and two labels which refer to the object's GroupResource.
+ // It has a label which refers to the ControllerRing and two labels which refer to the object's GroupResource.
AssignmentsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_sharding_assignments_total",
Help: "Total number of shard assignments by the sharder webhook per Ring and GroupResource",
- }, []string{"ringKind", "ringNamespace", "ringName", "group", "resource"})
+ }, []string{"ringName", "group", "resource"})
// MovementsTotal is a prometheus counter metric which holds the total number of shard movements triggered by the
// sharder controller per Ring and GroupResource.
- // It has three labels which refer to the Ring and two labels which refer to the object's GroupResource.
+ // It has a label which refers to the ControllerRing and two labels which refer to the object's GroupResource.
MovementsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_sharding_movements_total",
Help: "Total number of shard movements triggered by the sharder controller per Ring and GroupResource",
- }, []string{"ringKind", "ringNamespace", "ringName", "group", "resource"})
+ }, []string{"ringName", "group", "resource"})
// DrainsTotal is a prometheus counter metric which holds the total number of shard drains triggered by the sharder
// controller per Ring and GroupResource.
- // It has three labels which refer to the Ring and two labels which refer to the object's GroupResource.
+ // It has a label which refers to the ControllerRing and two labels which refer to the object's GroupResource.
DrainsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_sharding_drains_total",
Help: "Total number of shard drains triggered by the sharder controller per Ring and GroupResource",
- }, []string{"ringKind", "ringNamespace", "ringName", "group", "resource"})
+ }, []string{"ringName", "group", "resource"})
// RingCalculationsTotal is a prometheus counter metric which holds the total
- // number of shard ring calculations per ring kind.
- // It has three labels which refer to the ring's kind, name, and namespace.
+ // number of shard ring calculations per ControllerRing.
+ // It has a label which refers to the ControllerRing.
RingCalculationsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_sharding_ring_calculations_total",
- Help: "Total number of shard ring calculations per ring kind",
- }, []string{"kind", "namespace", "name"})
+ Help: "Total number of shard ring calculations per ControllerRing",
+ }, []string{"name"})
)
func init() {
diff --git a/pkg/sharding/ring/ring.go b/pkg/sharding/ring/ring.go
index 5700f89f..9c7fa835 100644
--- a/pkg/sharding/ring/ring.go
+++ b/pkg/sharding/ring/ring.go
@@ -17,13 +17,11 @@ limitations under the License.
package ring
import (
- "fmt"
"time"
coordinationv1 "k8s.io/api/coordination/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
- shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
+ "github.com/timebertt/kubernetes-controller-sharding/pkg/sharding"
"github.com/timebertt/kubernetes-controller-sharding/pkg/sharding/consistenthash"
"github.com/timebertt/kubernetes-controller-sharding/pkg/sharding/leases"
shardingmetrics "github.com/timebertt/kubernetes-controller-sharding/pkg/sharding/metrics"
@@ -34,22 +32,13 @@ import (
// This is a central function in the sharding implementation bringing together the leases package with the
// consistenthash package.
// In short, it determines the subset of available shards and constructs a new consistenthash.Ring with it.
-func FromLeases(ringObj client.Object, leaseList *coordinationv1.LeaseList, now time.Time) (*consistenthash.Ring, leases.Shards) {
- var kind string
-
- switch ringObj.(type) {
- case *shardingv1alpha1.ClusterRing:
- kind = shardingv1alpha1.KindClusterRing
- default:
- panic(fmt.Errorf("unexpected kind %T", ringObj))
- }
-
+func FromLeases(ringObj sharding.Ring, leaseList *coordinationv1.LeaseList, now time.Time) (*consistenthash.Ring, leases.Shards) {
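+ // Within this function, the ring object is only used to label the ring calculations metric with the ring's name.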
// determine ready shards and calculate hash ring
shards := leases.ToShards(leaseList.Items, now)
availableShards := shards.AvailableShards().IDs()
ring := consistenthash.New(nil, 0, availableShards...)
- shardingmetrics.RingCalculationsTotal.WithLabelValues(kind, ringObj.GetNamespace(), ringObj.GetName()).Inc()
+ shardingmetrics.RingCalculationsTotal.WithLabelValues(ringObj.GetName()).Inc()
return ring, shards
}
diff --git a/pkg/test/matchers/condition.go b/pkg/test/matchers/condition.go
index 43839edd..aed4e90c 100644
--- a/pkg/test/matchers/condition.go
+++ b/pkg/test/matchers/condition.go
@@ -25,9 +25,9 @@ import (
// MatchCondition is an alias for gomega.And to make matching conditions more readable, e.g.,
//
-// Expect(clusterRing.Status.Conditions).To(ConsistOf(
+// Expect(controllerRing.Status.Conditions).To(ConsistOf(
// MatchCondition(
-// OfType(shardingv1alpha1.ClusterRingReady),
+// OfType(shardingv1alpha1.ControllerRingReady),
// WithStatus(metav1.ConditionTrue),
// ),
// ))
diff --git a/pkg/webhook/sharder/add.go b/pkg/webhook/sharder/add.go
index c92f72a5..def2dc30 100644
--- a/pkg/webhook/sharder/add.go
+++ b/pkg/webhook/sharder/add.go
@@ -56,14 +56,14 @@ func (h *Handler) AddToManager(mgr manager.Manager) error {
return nil
}
-const pathClusterRing = "clusterring"
+const pathControllerRing = "controllerring"
// WebhookPathFor returns the webhook handler path that should be used for implementing the given ring object.
// It is the reverse of RingForWebhookPath.
func WebhookPathFor(obj client.Object) (string, error) {
switch obj.(type) {
- case *shardingv1alpha1.ClusterRing:
- return path.Join(WebhookPathPrefix, pathClusterRing, obj.GetName()), nil
+ case *shardingv1alpha1.ControllerRing:
+ return path.Join(WebhookPathPrefix, pathControllerRing, obj.GetName()), nil
default:
return "", fmt.Errorf("unexpected kind %T", obj)
}
@@ -83,11 +83,11 @@ func RingForWebhookPath(requestPath string) (sharding.Ring, error) {
var ring sharding.Ring
switch parts[0] {
- case pathClusterRing:
+ case pathControllerRing:
if len(parts) != 2 {
return nil, fmt.Errorf("unexpected request path: %s", requestPath)
}
- ring = &shardingv1alpha1.ClusterRing{ObjectMeta: metav1.ObjectMeta{Name: parts[1]}}
+ ring = &shardingv1alpha1.ControllerRing{ObjectMeta: metav1.ObjectMeta{Name: parts[1]}}
default:
return nil, fmt.Errorf("unexpected request path: %s", requestPath)
}
diff --git a/pkg/webhook/sharder/handler.go b/pkg/webhook/sharder/handler.go
index 9339ed17..c92144b6 100644
--- a/pkg/webhook/sharder/handler.go
+++ b/pkg/webhook/sharder/handler.go
@@ -32,7 +32,6 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
"github.com/timebertt/kubernetes-controller-sharding/pkg/sharding"
shardingmetrics "github.com/timebertt/kubernetes-controller-sharding/pkg/sharding/metrics"
"github.com/timebertt/kubernetes-controller-sharding/pkg/sharding/ring"
@@ -86,7 +85,7 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R
// collect list of shards in the ring
leaseList := &coordinationv1.LeaseList{}
if err := h.Reader.List(ctx, leaseList, client.MatchingLabelsSelector{Selector: ringObj.LeaseSelector()}); err != nil {
- return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error listing Leases for ClusterRing: %w", err))
+ return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error listing Leases for ControllerRing: %w", err))
}
// get ring from cache and hash the object onto the ring
@@ -104,8 +103,7 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R
if !ptr.Deref(req.DryRun, false) {
shardingmetrics.AssignmentsTotal.WithLabelValues(
- shardingv1alpha1.KindClusterRing, ringObj.GetNamespace(), ringObj.GetName(),
- req.Resource.Group, req.Resource.Resource,
+ ringObj.GetName(), req.Resource.Group, req.Resource.Resource,
).Inc()
}
diff --git a/test/e2e/example_test.go b/test/e2e/example_test.go
index c51b56c1..a0e0998f 100644
--- a/test/e2e/example_test.go
+++ b/test/e2e/example_test.go
@@ -33,14 +33,14 @@ import (
)
var _ = Describe("Example Shard", Label("example"), Ordered, func() {
- const clusterRingName = "example"
+ const controllerRingName = "example"
var (
- clusterRing *shardingv1alpha1.ClusterRing
+ controllerRing *shardingv1alpha1.ControllerRing
)
BeforeAll(func() {
- clusterRing = &shardingv1alpha1.ClusterRing{ObjectMeta: metav1.ObjectMeta{Name: clusterRingName}}
+ controllerRing = &shardingv1alpha1.ControllerRing{ObjectMeta: metav1.ObjectMeta{Name: controllerRingName}}
})
Describe("setup", func() {
@@ -59,7 +59,7 @@ var _ = Describe("Example Shard", Label("example"), Ordered, func() {
Eventually(ctx, func(g Gomega) {
g.Expect(List(leaseList, client.InNamespace(metav1.NamespaceDefault), client.MatchingLabels{
- shardingv1alpha1.LabelClusterRing: clusterRingName,
+ shardingv1alpha1.LabelControllerRing: controllerRingName,
})(ctx)).To(Succeed())
g.Expect(leaseList.Items).To(And(
HaveLen(3),
@@ -68,14 +68,14 @@ var _ = Describe("Example Shard", Label("example"), Ordered, func() {
}).Should(Succeed())
}, SpecTimeout(ShortTimeout))
- It("the ClusterRing should be healthy", func(ctx SpecContext) {
+ It("the ControllerRing should be healthy", func(ctx SpecContext) {
Eventually(ctx, func(g Gomega) {
- g.Expect(Get(clusterRing)(ctx)).To(Succeed())
- g.Expect(clusterRing.Status.Shards).To(BeEquivalentTo(3))
- g.Expect(clusterRing.Status.AvailableShards).To(BeEquivalentTo(3))
- g.Expect(clusterRing.Status.Conditions).To(ConsistOf(
+ g.Expect(Get(controllerRing)(ctx)).To(Succeed())
+ g.Expect(controllerRing.Status.Shards).To(BeEquivalentTo(3))
+ g.Expect(controllerRing.Status.AvailableShards).To(BeEquivalentTo(3))
+ g.Expect(controllerRing.Status.Conditions).To(ConsistOf(
MatchCondition(
- OfType(shardingv1alpha1.ClusterRingReady),
+ OfType(shardingv1alpha1.ControllerRingReady),
WithStatus(metav1.ConditionTrue),
),
))
@@ -104,14 +104,14 @@ var _ = Describe("Example Shard", Label("example"), Ordered, func() {
})
It("should assign the main object to a healthy shard", func(ctx SpecContext) {
- shards := getReadyShards(ctx, clusterRingName)
+ shards := getReadyShards(ctx, controllerRingName)
Expect(testClient.Create(ctx, configMap)).To(Succeed())
log.Info("Created object", "configMap", client.ObjectKeyFromObject(configMap))
- shard = configMap.Labels[clusterRing.LabelShard()]
+ shard = configMap.Labels[controllerRing.LabelShard()]
Expect(shard).To(BeElementOf(shards))
- Expect(configMap).NotTo(HaveLabel(clusterRing.LabelDrain()))
+ Expect(configMap).NotTo(HaveLabel(controllerRing.LabelDrain()))
}, SpecTimeout(ShortTimeout))
It("should assign the controlled object to the same shard", func(ctx SpecContext) {
@@ -120,20 +120,20 @@ var _ = Describe("Example Shard", Label("example"), Ordered, func() {
secret.Namespace = configMap.Namespace
Eventually(ctx, Object(secret)).Should(And(
- HaveLabelWithValue(clusterRing.LabelShard(), Equal(shard)),
- Not(HaveLabel(clusterRing.LabelDrain())),
+ HaveLabelWithValue(controllerRing.LabelShard(), Equal(shard)),
+ Not(HaveLabel(controllerRing.LabelDrain())),
))
}, SpecTimeout(ShortTimeout))
})
})
-func getReadyShards(ctx SpecContext, clusterRingName string) []string {
+func getReadyShards(ctx SpecContext, controllerRingName string) []string {
GinkgoHelper()
leaseList := &coordinationv1.LeaseList{}
Eventually(ctx, List(leaseList, client.InNamespace(metav1.NamespaceDefault), client.MatchingLabels{
- shardingv1alpha1.LabelClusterRing: clusterRingName,
- shardingv1alpha1.LabelState: "ready",
+ shardingv1alpha1.LabelControllerRing: controllerRingName,
+ shardingv1alpha1.LabelState: "ready",
})).Should(Succeed())
return toShardNames(leaseList.Items)
diff --git a/webhosting-operator/cmd/webhosting-operator/main.go b/webhosting-operator/cmd/webhosting-operator/main.go
index 07cd91cf..05378344 100644
--- a/webhosting-operator/cmd/webhosting-operator/main.go
+++ b/webhosting-operator/cmd/webhosting-operator/main.go
@@ -116,7 +116,7 @@ func main() {
if err = (&webhosting.WebsiteReconciler{
Config: opts.config,
- }).SetupWithManager(mgr, opts.enableSharding, opts.clusterRingName, opts.shardName); err != nil {
+ }).SetupWithManager(mgr, opts.enableSharding, opts.controllerRingName, opts.shardName); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Website")
os.Exit(1)
}
@@ -147,12 +147,12 @@ func main() {
type options struct {
configFile string
- restConfig *rest.Config
- config *configv1alpha1.WebhostingOperatorConfig
- managerOptions ctrl.Options
- enableSharding bool
- clusterRingName string
- shardName string
+ restConfig *rest.Config
+ config *configv1alpha1.WebhostingOperatorConfig
+ managerOptions ctrl.Options
+ enableSharding bool
+ controllerRingName string
+ shardName string
}
func (o *options) AddFlags(fs *flag.FlagSet) {
@@ -273,9 +273,9 @@ func (o *options) applyOptionsOverrides() error {
}
// SHARD LEASE
- o.clusterRingName = "webhosting-operator"
+ o.controllerRingName = "webhosting-operator"
shardLease, err := shardlease.NewResourceLock(o.restConfig, nil, shardlease.Options{
- ClusterRingName: o.clusterRingName,
+ ControllerRingName: o.controllerRingName,
})
if err != nil {
return fmt.Errorf("failed creating shard lease: %w", err)
@@ -290,7 +290,7 @@ func (o *options) applyOptionsOverrides() error {
// FILTERED WATCH CACHE
// Configure cache to only watch objects that are assigned to this shard.
shardLabelSelector := labels.SelectorFromSet(labels.Set{
- shardingv1alpha1.LabelShard(shardingv1alpha1.KindClusterRing, "", o.clusterRingName): o.shardName,
+ shardingv1alpha1.LabelShard(o.controllerRingName): o.shardName,
})
// This operator watches sharded objects (Websites, etc.) as well as non-sharded objects (Themes),
diff --git a/webhosting-operator/config/manager/clusterring/clusterring.yaml b/webhosting-operator/config/manager/controllerring/controllerring.yaml
similarity index 95%
rename from webhosting-operator/config/manager/clusterring/clusterring.yaml
rename to webhosting-operator/config/manager/controllerring/controllerring.yaml
index 53fe3efc..83348e87 100644
--- a/webhosting-operator/config/manager/clusterring/clusterring.yaml
+++ b/webhosting-operator/config/manager/controllerring/controllerring.yaml
@@ -1,5 +1,5 @@
apiVersion: sharding.timebertt.dev/v1alpha1
-kind: ClusterRing
+kind: ControllerRing
metadata:
name: webhosting-operator
spec:
diff --git a/webhosting-operator/config/manager/clusterring/kustomization.yaml b/webhosting-operator/config/manager/controllerring/kustomization.yaml
similarity index 85%
rename from webhosting-operator/config/manager/clusterring/kustomization.yaml
rename to webhosting-operator/config/manager/controllerring/kustomization.yaml
index b7b9da4a..26f07650 100644
--- a/webhosting-operator/config/manager/clusterring/kustomization.yaml
+++ b/webhosting-operator/config/manager/controllerring/kustomization.yaml
@@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
-- clusterring.yaml
+- controllerring.yaml
- sharder_rbac.yaml
patches:
diff --git a/webhosting-operator/config/manager/clusterring/manager_patch.yaml b/webhosting-operator/config/manager/controllerring/manager_patch.yaml
similarity index 100%
rename from webhosting-operator/config/manager/clusterring/manager_patch.yaml
rename to webhosting-operator/config/manager/controllerring/manager_patch.yaml
diff --git a/webhosting-operator/config/manager/clusterring/sharder_rbac.yaml b/webhosting-operator/config/manager/controllerring/sharder_rbac.yaml
similarity index 80%
rename from webhosting-operator/config/manager/clusterring/sharder_rbac.yaml
rename to webhosting-operator/config/manager/controllerring/sharder_rbac.yaml
index 76347a86..e11e44ad 100644
--- a/webhosting-operator/config/manager/clusterring/sharder_rbac.yaml
+++ b/webhosting-operator/config/manager/controllerring/sharder_rbac.yaml
@@ -1,11 +1,11 @@
-# These manifests grant the sharder controller permissions to act on resources that we listed in the ClusterRing.
+# These manifests grant the sharder controller permissions to act on resources that we listed in the ControllerRing.
# We need to grant these permissions explicitly depending on what we configured. Otherwise, the sharder would require
# cluster-admin access.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: sharding:clusterring:webhosting-operator
+ name: sharding:controllerring:webhosting-operator
rules:
- apiGroups:
- webhosting.timebertt.dev
@@ -40,11 +40,11 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: sharding:clusterring:webhosting-operator
+ name: sharding:controllerring:webhosting-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
- name: sharding:clusterring:webhosting-operator
+ name: sharding:controllerring:webhosting-operator
subjects:
- kind: ServiceAccount
name: sharder
diff --git a/webhosting-operator/config/manager/overlays/default/kustomization.yaml b/webhosting-operator/config/manager/overlays/default/kustomization.yaml
index b32eaca5..480470fb 100644
--- a/webhosting-operator/config/manager/overlays/default/kustomization.yaml
+++ b/webhosting-operator/config/manager/overlays/default/kustomization.yaml
@@ -5,4 +5,4 @@ resources:
- ../../base
components:
-- ../../clusterring
+- ../../controllerring
diff --git a/webhosting-operator/config/monitoring/default/dashboards/sharding.json b/webhosting-operator/config/monitoring/default/dashboards/sharding.json
index 9980db2d..c6700878 100644
--- a/webhosting-operator/config/monitoring/default/dashboards/sharding.json
+++ b/webhosting-operator/config/monitoring/default/dashboards/sharding.json
@@ -98,7 +98,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(kube_shard_state{clusterring=\"$clusterring\", state=~\"ready|expired|uncertain\"}) or vector(0)",
+ "expr": "sum(kube_shard_state{controllerring=\"$controllerring\", state=~\"ready|expired|uncertain\"}) or vector(0)",
"hide": false,
"range": true,
"refId": "A"
@@ -165,7 +165,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(kube_shard_info{clusterring=\"$clusterring\"}) or vector(0)",
+ "expr": "sum(kube_shard_info{controllerring=\"$controllerring\"}) or vector(0)",
"range": true,
"refId": "A"
}
@@ -355,7 +355,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "sum(kube_shard_state{clusterring=\"$clusterring\"}) by (state)",
+ "expr": "sum(kube_shard_state{controllerring=\"$controllerring\"}) by (state)",
"hide": false,
"instant": false,
"interval": "",
@@ -433,7 +433,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "(sum(kube_shard_state{clusterring=\"$clusterring\",state=\"ready\"}) or vector(0)) / (sum(kube_shard_info{clusterring=\"$clusterring\"}) or vector(0)) * 100",
+ "expr": "(sum(kube_shard_state{controllerring=\"$controllerring\",state=\"ready\"}) or vector(0)) / (sum(kube_shard_info{controllerring=\"$controllerring\"}) or vector(0)) * 100",
"instant": true,
"range": false,
"refId": "A"
@@ -502,7 +502,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\"} or 0*sum(kube_shard_info{clusterring=\"$clusterring\"}) by (shard)) by (shard) / ignoring(shard) group_left sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\"}) or vector(0)",
+ "expr": "sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\"} or 0*sum(kube_shard_info{controllerring=\"$controllerring\"}) by (shard)) by (shard) / ignoring(shard) group_left sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\"}) or vector(0)",
"instant": true,
"legendFormat": "__auto",
"range": false,
@@ -615,7 +615,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\"} or 0*sum(kube_shard_info{clusterring=\"$clusterring\"}) by (shard)) by (shard)",
+ "expr": "sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\"} or 0*sum(kube_shard_info{controllerring=\"$controllerring\"}) by (shard)) by (shard)",
"instant": false,
"interval": "",
"legendFormat": "{{shard}}",
@@ -717,7 +717,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\"} or 0*sum(kube_shard_info{clusterring=\"$clusterring\"}) by (shard)) by (shard) / ignoring(shard) group_left sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\"})",
+ "expr": "sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\"} or 0*sum(kube_shard_info{controllerring=\"$controllerring\"}) by (shard)) by (shard) / ignoring(shard) group_left sum(namespace_shard:kube_website_shard:sum{namespace=~\"$project\"})",
"format": "time_series",
"instant": false,
"interval": "",
@@ -882,7 +882,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(namespace_shard_drain:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\",drain=\"\"} * on(shard) group_left kube_shard_state{clusterring=\"$clusterring\",state=\"ready\"}) or vector(0)",
+ "expr": "sum(namespace_shard_drain:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\",drain=\"\"} * on(shard) group_left kube_shard_state{controllerring=\"$controllerring\",state=\"ready\"}) or vector(0)",
"instant": false,
"interval": "",
"legendFormat": "Assigned",
@@ -896,7 +896,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(namespace_shard_drain:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\",drain=\"\"} * on(shard) group_left max(kube_shard_state{clusterring=\"$clusterring\",state!=\"ready\"}) by (shard)) or vector(0)",
+ "expr": "sum(namespace_shard_drain:kube_website_shard:sum{namespace=~\"$project\",shard!=\"\",drain=\"\"} * on(shard) group_left max(kube_shard_state{controllerring=\"$controllerring\",state!=\"ready\"}) by (shard)) or vector(0)",
"hide": false,
"instant": false,
"interval": "",
@@ -1035,7 +1035,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "sum(rate(controller_sharding_assignments_total{ringKind=\"clusterring\", ringName=\"$clusterring\", group=~\"$group\", resource=~\"$resource\"}[$__rate_interval]))",
+ "expr": "sum(rate(controller_sharding_assignments_total{group=~\"$group\", resource=~\"$resource\"}[$__rate_interval]))",
"hide": false,
"interval": "",
"legendFormat": "Assignments",
@@ -1049,7 +1049,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "sum(rate(controller_sharding_movements_total{ringKind=\"clusterring\", ringName=\"$clusterring\", group=~\"$group\", resource=~\"$resource\"}[$__rate_interval]))",
+ "expr": "sum(rate(controller_sharding_movements_total{group=~\"$group\", resource=~\"$resource\"}[$__rate_interval]))",
"hide": false,
"interval": "",
"legendFormat": "Movements",
@@ -1063,7 +1063,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "sum(rate(controller_sharding_drains_total{ringKind=\"clusterring\", ringName=\"$clusterring\", group=~\"$group\", resource=~\"$resource\"}[$__rate_interval]))",
+ "expr": "sum(rate(controller_sharding_drains_total{group=~\"$group\", resource=~\"$resource\"}[$__rate_interval]))",
"hide": false,
"interval": "",
"legendFormat": "Drains",
@@ -1159,7 +1159,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "sum(rate(controller_sharding_assignments_total{ringKind=\"clusterring\", ringName=\"$clusterring\", group=~\"$group\", resource=~\"$resource\"}[$__rate_interval])) by (resource)",
+ "expr": "sum(rate(controller_sharding_assignments_total{group=~\"$group\", resource=~\"$resource\"}[$__rate_interval])) by (resource)",
"hide": false,
"interval": "",
"legendFormat": "{{kind}}",
@@ -1475,7 +1475,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "sum(rate(controller_sharding_ring_calculations_total{kind=\"clusterring\", name=\"$clusterring\"}[$__rate_interval]))",
+ "expr": "sum(rate(controller_sharding_ring_calculations_total{name=\"$controllerring\"}[$__rate_interval]))",
"hide": false,
"interval": "",
"legendFormat": "Ring Calculations",
@@ -1524,14 +1524,14 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "definition": "label_values(kube_shard_info,clusterring)",
+ "definition": "label_values(kube_shard_info,controllerring)",
"hide": 0,
"includeAll": false,
"multi": false,
- "name": "clusterring",
+ "name": "controllerring",
"options": [],
"query": {
- "query": "label_values(kube_shard_info,clusterring)",
+ "query": "label_values(kube_shard_info,controllerring)",
"refId": "PrometheusVariableQueryEditor-VariableQuery"
},
"refresh": 2,
diff --git a/webhosting-operator/pkg/controllers/webhosting/website_controller.go b/webhosting-operator/pkg/controllers/webhosting/website_controller.go
index 78d88d5f..bfcfa8b6 100644
--- a/webhosting-operator/pkg/controllers/webhosting/website_controller.go
+++ b/webhosting-operator/pkg/controllers/webhosting/website_controller.go
@@ -468,7 +468,7 @@ const (
)
// SetupWithManager sets up the controller with the Manager.
-func (r *WebsiteReconciler) SetupWithManager(mgr ctrl.Manager, enableSharding bool, clusterRingName, shardName string) error {
+func (r *WebsiteReconciler) SetupWithManager(mgr ctrl.Manager, enableSharding bool, controllerRingName, shardName string) error {
if r.Client == nil {
r.Client = client.WithFieldOwner(mgr.GetClient(), ControllerName+"-controller")
}
@@ -500,12 +500,12 @@ func (r *WebsiteReconciler) SetupWithManager(mgr ctrl.Manager, enableSharding bo
// ACKNOWLEDGE DRAIN OPERATIONS
// Use the shardcontroller package as helpers for:
// - a predicate that triggers when the drain label is present (even if the actual predicates don't trigger)
- websitePredicate = shardcontroller.Predicate(clusterRingName, shardName, websitePredicate)
+ websitePredicate = shardcontroller.Predicate(controllerRingName, shardName, websitePredicate)
// - wrapping the actual reconciler in a reconciler that handles the drain operation for us
reconciler = shardcontroller.NewShardedReconciler(mgr).
For(&webhostingv1alpha1.Website{}).
- InClusterRing(clusterRingName).
+ InControllerRing(controllerRingName).
WithShardName(shardName).
MustBuild(reconciler)
}
diff --git a/webhosting-operator/pkg/experiment/scenario/base/base.go b/webhosting-operator/pkg/experiment/scenario/base/base.go
index 90b929da..3987fd6b 100644
--- a/webhosting-operator/pkg/experiment/scenario/base/base.go
+++ b/webhosting-operator/pkg/experiment/scenario/base/base.go
@@ -33,6 +33,7 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
+ shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
"github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/experiment/generator"
"github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/experiment/tracker"
"github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/utils"
@@ -249,7 +250,7 @@ func (s *Scenario) waitForShardLeases(ctx context.Context) error {
if err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, false, func(ctx context.Context) (done bool, err error) {
leaseList := &coordinationv1.LeaseList{}
if err := s.Client.List(ctx, leaseList,
- client.InNamespace("webhosting-system"), client.MatchingLabels{"alpha.sharding.timebertt.dev/clusterring": "webhosting-operator"},
+ client.InNamespace("webhosting-system"), client.MatchingLabels{shardingv1alpha1.LabelControllerRing: "webhosting-operator"},
); err != nil {
return true, err
}
diff --git a/webhosting-operator/pkg/metrics/website.go b/webhosting-operator/pkg/metrics/website.go
index 815d89ee..4fe865ae 100644
--- a/webhosting-operator/pkg/metrics/website.go
+++ b/webhosting-operator/pkg/metrics/website.go
@@ -26,6 +26,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/metrics"
+ shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
webhostingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/apis/webhosting/v1alpha1"
)
@@ -145,8 +146,8 @@ var (
prometheus.GaugeValue,
1,
append(staticLabels,
- website.Labels["shard.alpha.sharding.timebertt.dev/clusterring-ef3d63cd-webhosting-operator"],
- website.Labels["drain.alpha.sharding.timebertt.dev/clusterring-ef3d63cd-webhosting-operator"],
+ website.Labels[shardingv1alpha1.LabelShard("webhosting-operator")],
+ website.Labels[shardingv1alpha1.LabelDrain("webhosting-operator")],
)...,
)
},