From 45798fe882e7e0af59447e2c9d1d52867817d007 Mon Sep 17 00:00:00 2001 From: csuzhangxc Date: Tue, 17 Dec 2024 17:20:22 +0800 Subject: [PATCH] Initial commit for v2 Co-authored-by: liubo02 Co-authored-by: Hoshea --- .dockerignore | 3 + .github/licenserc.yaml | 28 + .github/workflows/ci.yaml | 107 + .github/workflows/e2e.yaml | 75 + .gitignore | 2 + .golangci.yml | 120 + LICENSE | 201 + Makefile | 186 + README.md | 47 + apis/core/go.mod | 29 + apis/core/go.sum | 97 + apis/core/v1alpha1/cluster_types.go | 220 + apis/core/v1alpha1/common_types.go | 371 + apis/core/v1alpha1/doc.go | 60 + apis/core/v1alpha1/errors.go | 21 + apis/core/v1alpha1/fake.go | 97 + apis/core/v1alpha1/pd_types.go | 351 + apis/core/v1alpha1/tidb_types.go | 502 + apis/core/v1alpha1/tiflash_types.go | 372 + apis/core/v1alpha1/tikv_types.go | 362 + apis/core/v1alpha1/zz_generated.deepcopy.go | 2005 ++++ apis/core/v1alpha1/zz_generated.register.go | 84 + cmd/operator/main.go | 293 + cmd/overlay-gen/generators/overlay.go | 493 + cmd/overlay-gen/generators/test.go | 669 ++ cmd/overlay-gen/generators/util.go | 460 + cmd/overlay-gen/main.go | 151 + cmd/prestop-checker/main.go | 146 + docs/CONTRIBUTING.md | 201 + docs/arch/README.md | 56 + docs/convention.md | 24 + docs/why.md | 66 + examples/basic/00-cluster.yaml | 5 + examples/basic/01-pd.yaml | 30 + examples/basic/02-tikv.yaml | 30 + examples/basic/03-tidb.yaml | 26 + examples/basic/04-tiflash.yaml | 32 + examples/bootstrap-sql/00-cluster.yaml | 5 + examples/bootstrap-sql/01-pd.yaml | 22 + examples/bootstrap-sql/02-tikv.yaml | 22 + examples/bootstrap-sql/03-tidb.yaml | 17 + examples/bootstrap-sql/bsql-cm.yaml | 7 + examples/overlay/00-cluster.yaml | 5 + examples/overlay/01-pd.yaml | 33 + examples/overlay/02-tikv.yaml | 22 + examples/overlay/03-tidb.yaml | 26 + .../overlay/dashboard-session-secret.yaml | 7 + examples/schedule-policy/00-cluster.yaml | 5 + examples/schedule-policy/01-pd.yaml | 33 + examples/schedule-policy/02-tikv.yaml | 33 + 
examples/schedule-policy/03-tidb.yaml | 29 + examples/tls/00-cluster.yaml | 7 + examples/tls/01-pd.yaml | 24 + examples/tls/02-tikv.yaml | 24 + examples/tls/03-tidb.yaml | 20 + examples/tls/04-tiflash.yaml | 27 + examples/tls/issuer-cert.yaml | 225 + go.mod | 142 + go.sum | 398 + go.work | 6 + go.work.sum | 86 + hack/boilerplate/boilerplate.go.txt | 13 + hack/build.sh | 26 + hack/download.sh | 78 + hack/e2e.sh | 25 + hack/image.sh | 26 + hack/kind.sh | 26 + hack/lib/build.sh | 73 + hack/lib/e2e.sh | 171 + hack/lib/image.sh | 105 + hack/lib/kind.sh | 112 + hack/lib/util.sh | 78 + hack/lib/vars.sh | 71 + hack/lib/verify.sh | 75 + hack/lib/version.sh | 102 + hack/verify.sh | 25 + image/Dockerfile | 52 + manifests/crd/core.pingcap.com_clusters.yaml | 217 + manifests/crd/core.pingcap.com_pdgroups.yaml | 410 + manifests/crd/core.pingcap.com_pds.yaml | 310 + .../crd/core.pingcap.com_tidbgroups.yaml | 527 + manifests/crd/core.pingcap.com_tidbs.yaml | 343 + manifests/crd/core.pingcap.com_tiflashes.yaml | 349 + .../crd/core.pingcap.com_tiflashgroups.yaml | 442 + .../crd/core.pingcap.com_tikvgroups.yaml | 411 + manifests/crd/core.pingcap.com_tikvs.yaml | 309 + manifests/deploy/deploy.yaml | 49 + manifests/rbac/namespace.yaml | 4 + manifests/rbac/role.yaml | 131 + manifests/rbac/role_binding.yaml | 12 + manifests/rbac/service_account.yaml | 5 + pkg/action/upgrader.go | 156 + pkg/action/upgrader_test.go | 185 + pkg/client/alias.go | 39 + pkg/client/client.go | 205 + pkg/client/client_test.go | 89 + pkg/client/fake.go | 596 + pkg/client/fake_test.go | 134 + pkg/configs/pd/config.go | 190 + pkg/configs/tidb/config.go | 163 + pkg/configs/tiflash/config.go | 243 + pkg/configs/tiflash/proxy_config.go | 98 + pkg/configs/tikv/config.go | 173 + pkg/controllers/cluster/controller.go | 92 + pkg/controllers/cluster/tasks/ctx.go | 118 + pkg/controllers/cluster/tasks/ctx_test.go | 86 + pkg/controllers/cluster/tasks/finalizer.go | 87 + .../cluster/tasks/finalizer_test.go | 77 + 
pkg/controllers/cluster/tasks/status.go | 195 + pkg/controllers/cluster/tasks/status_test.go | 104 + pkg/controllers/common/cond.go | 41 + pkg/controllers/common/task.go | 156 + pkg/controllers/pd/builder.go | 65 + pkg/controllers/pd/controller.go | 86 + pkg/controllers/pd/handler.go | 140 + pkg/controllers/pd/tasks/cm.go | 79 + pkg/controllers/pd/tasks/ctx.go | 182 + pkg/controllers/pd/tasks/finalizer.go | 66 + pkg/controllers/pd/tasks/pod.go | 270 + pkg/controllers/pd/tasks/pvc.go | 75 + pkg/controllers/pd/tasks/status.go | 180 + pkg/controllers/pd/tasks/util.go | 56 + pkg/controllers/pdgroup/controller.go | 120 + pkg/controllers/pdgroup/tasks/boot.go | 56 + pkg/controllers/pdgroup/tasks/ctx.go | 176 + pkg/controllers/pdgroup/tasks/finalizer.go | 94 + pkg/controllers/pdgroup/tasks/status.go | 96 + pkg/controllers/pdgroup/tasks/svc.go | 154 + pkg/controllers/pdgroup/tasks/updater.go | 212 + pkg/controllers/pdgroup/tasks/util.go | 39 + pkg/controllers/tidb/builder.go | 60 + pkg/controllers/tidb/controller.go | 76 + pkg/controllers/tidb/handler.go | 69 + pkg/controllers/tidb/tasks/cm.go | 92 + pkg/controllers/tidb/tasks/cm_test.go | 246 + pkg/controllers/tidb/tasks/ctx.go | 204 + pkg/controllers/tidb/tasks/finalizer.go | 47 + pkg/controllers/tidb/tasks/pod.go | 416 + pkg/controllers/tidb/tasks/pvc.go | 91 + pkg/controllers/tidb/tasks/server_labels.go | 98 + pkg/controllers/tidb/tasks/status.go | 158 + pkg/controllers/tidb/tasks/util.go | 38 + pkg/controllers/tidbgroup/controller.go | 116 + pkg/controllers/tidbgroup/tasks/ctx.go | 130 + pkg/controllers/tidbgroup/tasks/finalizer.go | 81 + pkg/controllers/tidbgroup/tasks/status.go | 111 + pkg/controllers/tidbgroup/tasks/svc.go | 152 + pkg/controllers/tidbgroup/tasks/updater.go | 188 + .../tidbgroup/tasks/updater_test.go | 95 + pkg/controllers/tidbgroup/tasks/util.go | 23 + pkg/controllers/tiflash/builder.go | 60 + pkg/controllers/tiflash/controller.go | 81 + pkg/controllers/tiflash/handler.go | 140 + 
pkg/controllers/tiflash/tasks/cm.go | 104 + pkg/controllers/tiflash/tasks/ctx.go | 194 + pkg/controllers/tiflash/tasks/finalizer.go | 78 + pkg/controllers/tiflash/tasks/pod.go | 258 + pkg/controllers/tiflash/tasks/pvc.go | 92 + pkg/controllers/tiflash/tasks/status.go | 171 + pkg/controllers/tiflash/tasks/store_labels.go | 105 + pkg/controllers/tiflash/tasks/util.go | 26 + pkg/controllers/tiflashgroup/controller.go | 116 + pkg/controllers/tiflashgroup/tasks/ctx.go | 121 + .../tiflashgroup/tasks/finalizer.go | 81 + pkg/controllers/tiflashgroup/tasks/status.go | 94 + pkg/controllers/tiflashgroup/tasks/svc.go | 118 + pkg/controllers/tiflashgroup/tasks/updater.go | 185 + pkg/controllers/tiflashgroup/tasks/util.go | 24 + pkg/controllers/tikv/builder.go | 61 + pkg/controllers/tikv/controller.go | 82 + pkg/controllers/tikv/handler.go | 140 + pkg/controllers/tikv/tasks/cm.go | 91 + pkg/controllers/tikv/tasks/ctx.go | 203 + pkg/controllers/tikv/tasks/evict_leader.go | 61 + pkg/controllers/tikv/tasks/finalizer.go | 80 + pkg/controllers/tikv/tasks/pod.go | 339 + pkg/controllers/tikv/tasks/pvc.go | 91 + pkg/controllers/tikv/tasks/status.go | 206 + pkg/controllers/tikv/tasks/store_labels.go | 91 + pkg/controllers/tikv/tasks/util.go | 27 + pkg/controllers/tikvgroup/controller.go | 116 + pkg/controllers/tikvgroup/tasks/ctx.go | 121 + pkg/controllers/tikvgroup/tasks/finalizer.go | 82 + pkg/controllers/tikvgroup/tasks/status.go | 94 + pkg/controllers/tikvgroup/tasks/svc.go | 106 + pkg/controllers/tikvgroup/tasks/updater.go | 184 + pkg/controllers/tikvgroup/tasks/util.go | 24 + pkg/image/image.go | 105 + pkg/image/image_test.go | 116 + pkg/metrics/metrics.go | 38 + pkg/overlay/overlay.go | 75 + pkg/overlay/overlay_test.go | 139 + pkg/overlay/zz_generated.overlay.go | 2432 ++++ pkg/overlay/zz_generated.overlay_test.go | 9764 +++++++++++++++++ pkg/pdapi/pd/duration.go | 64 + pkg/pdapi/pd/size.go | 66 + pkg/pdapi/v1/client.go | 561 + pkg/pdapi/v1/pd_config.go | 265 + 
pkg/pdapi/v1/types.go | 115 + pkg/runtime/group.go | 30 + pkg/runtime/instance.go | 39 + pkg/runtime/object.go | 33 + pkg/runtime/pd.go | 130 + pkg/runtime/tidb.go | 76 + pkg/runtime/tiflash.go | 76 + pkg/runtime/tikv.go | 76 + pkg/scheme/scheme.go | 35 + pkg/tidbapi/v1/client.go | 107 + pkg/tidbapi/v1/control.go | 82 + pkg/tidbapi/v1/types.go | 21 + pkg/tiflashapi/v1/client.go | 81 + pkg/tiflashapi/v1/control.go | 78 + pkg/tikvapi/v1/client.go | 100 + pkg/tikvapi/v1/control.go | 82 + pkg/timanager/apis/pd/v1/doc.go | 18 + pkg/timanager/apis/pd/v1/types.go | 130 + .../apis/pd/v1/zz_generated.deepcopy.go | 152 + .../apis/pd/v1/zz_generated.register.go | 70 + pkg/timanager/informer.go | 324 + pkg/timanager/manager.go | 370 + pkg/timanager/manager_test.go | 351 + pkg/timanager/pd/member.go | 141 + pkg/timanager/pd/pd.go | 161 + pkg/timanager/pd/store.go | 127 + pkg/timanager/poller.go | 255 + pkg/timanager/poller_test.go | 227 + pkg/timanager/util.go | 170 + pkg/updater/actor.go | 163 + pkg/updater/builder.go | 164 + pkg/updater/executor.go | 177 + pkg/updater/executor_test.go | 765 ++ pkg/updater/policy/keep.go | 34 + pkg/updater/policy/topology.go | 68 + pkg/updater/selector.go | 121 + pkg/updater/state.go | 67 + pkg/utils/fake/fake.go | 125 + pkg/utils/http/http.go | 84 + pkg/utils/http/http_test.go | 132 + pkg/utils/k8s/deletion.go | 117 + pkg/utils/k8s/finalizer.go | 44 + pkg/utils/k8s/node.go | 49 + pkg/utils/k8s/pod.go | 101 + pkg/utils/k8s/pod_test.go | 124 + pkg/utils/k8s/rate_limiter.go | 31 + pkg/utils/k8s/revision/controller_revision.go | 212 + .../k8s/revision/controller_revision_test.go | 193 + pkg/utils/kubefeat/feat.go | 19 + pkg/utils/kubefeat/gates.go | 149 + pkg/utils/map/map.go | 65 + pkg/utils/map/map_test.go | 103 + pkg/utils/random/random.go | 57 + pkg/utils/random/random_test.go | 34 + pkg/utils/task/mock_generated.go | 513 + pkg/utils/task/task.go | 267 + pkg/utils/task/task_test.go | 268 + pkg/utils/task/v2/result.go | 202 + 
pkg/utils/task/v2/runner.go | 69 + pkg/utils/task/v2/task.go | 233 + pkg/utils/task/v3/result.go | 214 + pkg/utils/task/v3/result_test.go | 169 + pkg/utils/task/v3/runner.go | 126 + pkg/utils/task/v3/runner_test.go | 180 + pkg/utils/task/v3/task.go | 126 + pkg/utils/task/v3/task_test.go | 290 + pkg/utils/time/clock.go | 50 + pkg/utils/tls/tls.go | 65 + pkg/utils/tls/tls_test.go | 104 + pkg/utils/toml/toml.go | 170 + pkg/utils/toml/toml_test.go | 240 + pkg/utils/topology/scheduler.go | 215 + pkg/utils/topology/scheduler_test.go | 324 + pkg/version/version.go | 83 + pkg/volumes/cloud/aws/ebs_modifier.go | 351 + pkg/volumes/cloud/aws/ebs_modifier_test.go | 287 + pkg/volumes/cloud/aws/fake.go | 119 + pkg/volumes/cloud/interface.go | 62 + pkg/volumes/mock.go | 85 + pkg/volumes/native_modifer.go | 157 + pkg/volumes/native_modifer_test.go | 270 + pkg/volumes/raw_modifier.go | 274 + pkg/volumes/raw_modifier_test.go | 240 + pkg/volumes/types.go | 135 + pkg/volumes/utils.go | 280 + pkg/volumes/utils_test.go | 136 + tests/e2e/cluster/cluster.go | 1761 +++ tests/e2e/cluster/tls.go | 305 + tests/e2e/config/restclientgetter.go | 99 + tests/e2e/data/cluster.go | 33 + tests/e2e/data/data.go | 41 + tests/e2e/data/ns.go | 45 + tests/e2e/data/pd.go | 53 + tests/e2e/e2e.go | 94 + tests/e2e/e2e_test.go | 35 + tests/e2e/framework/framework.go | 83 + tests/e2e/framework/pd.go | 57 + tests/e2e/framework/util.go | 43 + tests/e2e/label/well_known.go | 36 + tests/e2e/pd/pd.go | 160 + tests/e2e/utils/data/cluster.go | 191 + tests/e2e/utils/data/namespace.go | 45 + tests/e2e/utils/jwt/jwt.go | 95 + tests/e2e/utils/k8s/k8s.go | 125 + tests/e2e/utils/k8s/k8s_test.go | 125 + tests/e2e/utils/k8s/port_forward.go | 195 + tests/e2e/utils/k8s/yaml.go | 88 + tests/e2e/utils/tidb/tidb.go | 392 + tests/e2e/utils/waiter/cluster.go | 48 + tests/e2e/utils/waiter/common.go | 175 + tests/e2e/utils/waiter/pd.go | 54 + tests/e2e/utils/waiter/pod.go | 200 + third_party/kubernetes/LICENSE | 202 + 
.../controller/history/controller_history.go | 279 + .../statefulset/stateful_set_utils.go | 132 + third_party/kubernetes/pkg/util/hash/hash.go | 34 + tools/tools.go | 22 + 315 files changed, 57966 insertions(+) create mode 100644 .dockerignore create mode 100644 .github/licenserc.yaml create mode 100644 .github/workflows/ci.yaml create mode 100644 .github/workflows/e2e.yaml create mode 100644 .gitignore create mode 100644 .golangci.yml create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 apis/core/go.mod create mode 100644 apis/core/go.sum create mode 100644 apis/core/v1alpha1/cluster_types.go create mode 100644 apis/core/v1alpha1/common_types.go create mode 100644 apis/core/v1alpha1/doc.go create mode 100644 apis/core/v1alpha1/errors.go create mode 100644 apis/core/v1alpha1/fake.go create mode 100644 apis/core/v1alpha1/pd_types.go create mode 100644 apis/core/v1alpha1/tidb_types.go create mode 100644 apis/core/v1alpha1/tiflash_types.go create mode 100644 apis/core/v1alpha1/tikv_types.go create mode 100644 apis/core/v1alpha1/zz_generated.deepcopy.go create mode 100644 apis/core/v1alpha1/zz_generated.register.go create mode 100644 cmd/operator/main.go create mode 100644 cmd/overlay-gen/generators/overlay.go create mode 100644 cmd/overlay-gen/generators/test.go create mode 100644 cmd/overlay-gen/generators/util.go create mode 100644 cmd/overlay-gen/main.go create mode 100644 cmd/prestop-checker/main.go create mode 100644 docs/CONTRIBUTING.md create mode 100644 docs/arch/README.md create mode 100644 docs/convention.md create mode 100644 docs/why.md create mode 100644 examples/basic/00-cluster.yaml create mode 100644 examples/basic/01-pd.yaml create mode 100644 examples/basic/02-tikv.yaml create mode 100644 examples/basic/03-tidb.yaml create mode 100644 examples/basic/04-tiflash.yaml create mode 100644 examples/bootstrap-sql/00-cluster.yaml create mode 100644 examples/bootstrap-sql/01-pd.yaml create mode 100644 
examples/bootstrap-sql/02-tikv.yaml create mode 100644 examples/bootstrap-sql/03-tidb.yaml create mode 100644 examples/bootstrap-sql/bsql-cm.yaml create mode 100644 examples/overlay/00-cluster.yaml create mode 100644 examples/overlay/01-pd.yaml create mode 100644 examples/overlay/02-tikv.yaml create mode 100644 examples/overlay/03-tidb.yaml create mode 100644 examples/overlay/dashboard-session-secret.yaml create mode 100644 examples/schedule-policy/00-cluster.yaml create mode 100644 examples/schedule-policy/01-pd.yaml create mode 100644 examples/schedule-policy/02-tikv.yaml create mode 100644 examples/schedule-policy/03-tidb.yaml create mode 100644 examples/tls/00-cluster.yaml create mode 100644 examples/tls/01-pd.yaml create mode 100644 examples/tls/02-tikv.yaml create mode 100644 examples/tls/03-tidb.yaml create mode 100644 examples/tls/04-tiflash.yaml create mode 100644 examples/tls/issuer-cert.yaml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 go.work create mode 100644 go.work.sum create mode 100644 hack/boilerplate/boilerplate.go.txt create mode 100755 hack/build.sh create mode 100755 hack/download.sh create mode 100755 hack/e2e.sh create mode 100755 hack/image.sh create mode 100755 hack/kind.sh create mode 100644 hack/lib/build.sh create mode 100755 hack/lib/e2e.sh create mode 100644 hack/lib/image.sh create mode 100644 hack/lib/kind.sh create mode 100644 hack/lib/util.sh create mode 100644 hack/lib/vars.sh create mode 100644 hack/lib/verify.sh create mode 100644 hack/lib/version.sh create mode 100755 hack/verify.sh create mode 100644 image/Dockerfile create mode 100644 manifests/crd/core.pingcap.com_clusters.yaml create mode 100644 manifests/crd/core.pingcap.com_pdgroups.yaml create mode 100644 manifests/crd/core.pingcap.com_pds.yaml create mode 100644 manifests/crd/core.pingcap.com_tidbgroups.yaml create mode 100644 manifests/crd/core.pingcap.com_tidbs.yaml create mode 100644 manifests/crd/core.pingcap.com_tiflashes.yaml create 
mode 100644 manifests/crd/core.pingcap.com_tiflashgroups.yaml create mode 100644 manifests/crd/core.pingcap.com_tikvgroups.yaml create mode 100644 manifests/crd/core.pingcap.com_tikvs.yaml create mode 100644 manifests/deploy/deploy.yaml create mode 100644 manifests/rbac/namespace.yaml create mode 100644 manifests/rbac/role.yaml create mode 100644 manifests/rbac/role_binding.yaml create mode 100644 manifests/rbac/service_account.yaml create mode 100644 pkg/action/upgrader.go create mode 100644 pkg/action/upgrader_test.go create mode 100644 pkg/client/alias.go create mode 100644 pkg/client/client.go create mode 100644 pkg/client/client_test.go create mode 100644 pkg/client/fake.go create mode 100644 pkg/client/fake_test.go create mode 100644 pkg/configs/pd/config.go create mode 100644 pkg/configs/tidb/config.go create mode 100644 pkg/configs/tiflash/config.go create mode 100644 pkg/configs/tiflash/proxy_config.go create mode 100644 pkg/configs/tikv/config.go create mode 100644 pkg/controllers/cluster/controller.go create mode 100644 pkg/controllers/cluster/tasks/ctx.go create mode 100644 pkg/controllers/cluster/tasks/ctx_test.go create mode 100644 pkg/controllers/cluster/tasks/finalizer.go create mode 100644 pkg/controllers/cluster/tasks/finalizer_test.go create mode 100644 pkg/controllers/cluster/tasks/status.go create mode 100644 pkg/controllers/cluster/tasks/status_test.go create mode 100644 pkg/controllers/common/cond.go create mode 100644 pkg/controllers/common/task.go create mode 100644 pkg/controllers/pd/builder.go create mode 100644 pkg/controllers/pd/controller.go create mode 100644 pkg/controllers/pd/handler.go create mode 100644 pkg/controllers/pd/tasks/cm.go create mode 100644 pkg/controllers/pd/tasks/ctx.go create mode 100644 pkg/controllers/pd/tasks/finalizer.go create mode 100644 pkg/controllers/pd/tasks/pod.go create mode 100644 pkg/controllers/pd/tasks/pvc.go create mode 100644 pkg/controllers/pd/tasks/status.go create mode 100644 
pkg/controllers/pd/tasks/util.go create mode 100644 pkg/controllers/pdgroup/controller.go create mode 100644 pkg/controllers/pdgroup/tasks/boot.go create mode 100644 pkg/controllers/pdgroup/tasks/ctx.go create mode 100644 pkg/controllers/pdgroup/tasks/finalizer.go create mode 100644 pkg/controllers/pdgroup/tasks/status.go create mode 100644 pkg/controllers/pdgroup/tasks/svc.go create mode 100644 pkg/controllers/pdgroup/tasks/updater.go create mode 100644 pkg/controllers/pdgroup/tasks/util.go create mode 100644 pkg/controllers/tidb/builder.go create mode 100644 pkg/controllers/tidb/controller.go create mode 100644 pkg/controllers/tidb/handler.go create mode 100644 pkg/controllers/tidb/tasks/cm.go create mode 100644 pkg/controllers/tidb/tasks/cm_test.go create mode 100644 pkg/controllers/tidb/tasks/ctx.go create mode 100644 pkg/controllers/tidb/tasks/finalizer.go create mode 100644 pkg/controllers/tidb/tasks/pod.go create mode 100644 pkg/controllers/tidb/tasks/pvc.go create mode 100644 pkg/controllers/tidb/tasks/server_labels.go create mode 100644 pkg/controllers/tidb/tasks/status.go create mode 100644 pkg/controllers/tidb/tasks/util.go create mode 100644 pkg/controllers/tidbgroup/controller.go create mode 100644 pkg/controllers/tidbgroup/tasks/ctx.go create mode 100644 pkg/controllers/tidbgroup/tasks/finalizer.go create mode 100644 pkg/controllers/tidbgroup/tasks/status.go create mode 100644 pkg/controllers/tidbgroup/tasks/svc.go create mode 100644 pkg/controllers/tidbgroup/tasks/updater.go create mode 100644 pkg/controllers/tidbgroup/tasks/updater_test.go create mode 100644 pkg/controllers/tidbgroup/tasks/util.go create mode 100644 pkg/controllers/tiflash/builder.go create mode 100644 pkg/controllers/tiflash/controller.go create mode 100644 pkg/controllers/tiflash/handler.go create mode 100644 pkg/controllers/tiflash/tasks/cm.go create mode 100644 pkg/controllers/tiflash/tasks/ctx.go create mode 100644 pkg/controllers/tiflash/tasks/finalizer.go create mode 100644 
pkg/controllers/tiflash/tasks/pod.go create mode 100644 pkg/controllers/tiflash/tasks/pvc.go create mode 100644 pkg/controllers/tiflash/tasks/status.go create mode 100644 pkg/controllers/tiflash/tasks/store_labels.go create mode 100644 pkg/controllers/tiflash/tasks/util.go create mode 100644 pkg/controllers/tiflashgroup/controller.go create mode 100644 pkg/controllers/tiflashgroup/tasks/ctx.go create mode 100644 pkg/controllers/tiflashgroup/tasks/finalizer.go create mode 100644 pkg/controllers/tiflashgroup/tasks/status.go create mode 100644 pkg/controllers/tiflashgroup/tasks/svc.go create mode 100644 pkg/controllers/tiflashgroup/tasks/updater.go create mode 100644 pkg/controllers/tiflashgroup/tasks/util.go create mode 100644 pkg/controllers/tikv/builder.go create mode 100644 pkg/controllers/tikv/controller.go create mode 100644 pkg/controllers/tikv/handler.go create mode 100644 pkg/controllers/tikv/tasks/cm.go create mode 100644 pkg/controllers/tikv/tasks/ctx.go create mode 100644 pkg/controllers/tikv/tasks/evict_leader.go create mode 100644 pkg/controllers/tikv/tasks/finalizer.go create mode 100644 pkg/controllers/tikv/tasks/pod.go create mode 100644 pkg/controllers/tikv/tasks/pvc.go create mode 100644 pkg/controllers/tikv/tasks/status.go create mode 100644 pkg/controllers/tikv/tasks/store_labels.go create mode 100644 pkg/controllers/tikv/tasks/util.go create mode 100644 pkg/controllers/tikvgroup/controller.go create mode 100644 pkg/controllers/tikvgroup/tasks/ctx.go create mode 100644 pkg/controllers/tikvgroup/tasks/finalizer.go create mode 100644 pkg/controllers/tikvgroup/tasks/status.go create mode 100644 pkg/controllers/tikvgroup/tasks/svc.go create mode 100644 pkg/controllers/tikvgroup/tasks/updater.go create mode 100644 pkg/controllers/tikvgroup/tasks/util.go create mode 100644 pkg/image/image.go create mode 100644 pkg/image/image_test.go create mode 100644 pkg/metrics/metrics.go create mode 100644 pkg/overlay/overlay.go create mode 100644 
pkg/overlay/overlay_test.go create mode 100644 pkg/overlay/zz_generated.overlay.go create mode 100644 pkg/overlay/zz_generated.overlay_test.go create mode 100644 pkg/pdapi/pd/duration.go create mode 100644 pkg/pdapi/pd/size.go create mode 100644 pkg/pdapi/v1/client.go create mode 100644 pkg/pdapi/v1/pd_config.go create mode 100644 pkg/pdapi/v1/types.go create mode 100644 pkg/runtime/group.go create mode 100644 pkg/runtime/instance.go create mode 100644 pkg/runtime/object.go create mode 100644 pkg/runtime/pd.go create mode 100644 pkg/runtime/tidb.go create mode 100644 pkg/runtime/tiflash.go create mode 100644 pkg/runtime/tikv.go create mode 100644 pkg/scheme/scheme.go create mode 100644 pkg/tidbapi/v1/client.go create mode 100644 pkg/tidbapi/v1/control.go create mode 100644 pkg/tidbapi/v1/types.go create mode 100644 pkg/tiflashapi/v1/client.go create mode 100644 pkg/tiflashapi/v1/control.go create mode 100644 pkg/tikvapi/v1/client.go create mode 100644 pkg/tikvapi/v1/control.go create mode 100644 pkg/timanager/apis/pd/v1/doc.go create mode 100644 pkg/timanager/apis/pd/v1/types.go create mode 100644 pkg/timanager/apis/pd/v1/zz_generated.deepcopy.go create mode 100644 pkg/timanager/apis/pd/v1/zz_generated.register.go create mode 100644 pkg/timanager/informer.go create mode 100644 pkg/timanager/manager.go create mode 100644 pkg/timanager/manager_test.go create mode 100644 pkg/timanager/pd/member.go create mode 100644 pkg/timanager/pd/pd.go create mode 100644 pkg/timanager/pd/store.go create mode 100644 pkg/timanager/poller.go create mode 100644 pkg/timanager/poller_test.go create mode 100644 pkg/timanager/util.go create mode 100644 pkg/updater/actor.go create mode 100644 pkg/updater/builder.go create mode 100644 pkg/updater/executor.go create mode 100644 pkg/updater/executor_test.go create mode 100644 pkg/updater/policy/keep.go create mode 100644 pkg/updater/policy/topology.go create mode 100644 pkg/updater/selector.go create mode 100644 pkg/updater/state.go create 
mode 100644 pkg/utils/fake/fake.go create mode 100644 pkg/utils/http/http.go create mode 100644 pkg/utils/http/http_test.go create mode 100644 pkg/utils/k8s/deletion.go create mode 100644 pkg/utils/k8s/finalizer.go create mode 100644 pkg/utils/k8s/node.go create mode 100644 pkg/utils/k8s/pod.go create mode 100644 pkg/utils/k8s/pod_test.go create mode 100644 pkg/utils/k8s/rate_limiter.go create mode 100644 pkg/utils/k8s/revision/controller_revision.go create mode 100644 pkg/utils/k8s/revision/controller_revision_test.go create mode 100644 pkg/utils/kubefeat/feat.go create mode 100644 pkg/utils/kubefeat/gates.go create mode 100644 pkg/utils/map/map.go create mode 100644 pkg/utils/map/map_test.go create mode 100644 pkg/utils/random/random.go create mode 100644 pkg/utils/random/random_test.go create mode 100644 pkg/utils/task/mock_generated.go create mode 100644 pkg/utils/task/task.go create mode 100644 pkg/utils/task/task_test.go create mode 100644 pkg/utils/task/v2/result.go create mode 100644 pkg/utils/task/v2/runner.go create mode 100644 pkg/utils/task/v2/task.go create mode 100644 pkg/utils/task/v3/result.go create mode 100644 pkg/utils/task/v3/result_test.go create mode 100644 pkg/utils/task/v3/runner.go create mode 100644 pkg/utils/task/v3/runner_test.go create mode 100644 pkg/utils/task/v3/task.go create mode 100644 pkg/utils/task/v3/task_test.go create mode 100644 pkg/utils/time/clock.go create mode 100644 pkg/utils/tls/tls.go create mode 100644 pkg/utils/tls/tls_test.go create mode 100644 pkg/utils/toml/toml.go create mode 100644 pkg/utils/toml/toml_test.go create mode 100644 pkg/utils/topology/scheduler.go create mode 100644 pkg/utils/topology/scheduler_test.go create mode 100644 pkg/version/version.go create mode 100644 pkg/volumes/cloud/aws/ebs_modifier.go create mode 100644 pkg/volumes/cloud/aws/ebs_modifier_test.go create mode 100644 pkg/volumes/cloud/aws/fake.go create mode 100644 pkg/volumes/cloud/interface.go create mode 100644 pkg/volumes/mock.go 
create mode 100644 pkg/volumes/native_modifer.go create mode 100644 pkg/volumes/native_modifer_test.go create mode 100644 pkg/volumes/raw_modifier.go create mode 100644 pkg/volumes/raw_modifier_test.go create mode 100644 pkg/volumes/types.go create mode 100644 pkg/volumes/utils.go create mode 100644 pkg/volumes/utils_test.go create mode 100644 tests/e2e/cluster/cluster.go create mode 100644 tests/e2e/cluster/tls.go create mode 100644 tests/e2e/config/restclientgetter.go create mode 100644 tests/e2e/data/cluster.go create mode 100644 tests/e2e/data/data.go create mode 100644 tests/e2e/data/ns.go create mode 100644 tests/e2e/data/pd.go create mode 100644 tests/e2e/e2e.go create mode 100644 tests/e2e/e2e_test.go create mode 100644 tests/e2e/framework/framework.go create mode 100644 tests/e2e/framework/pd.go create mode 100644 tests/e2e/framework/util.go create mode 100644 tests/e2e/label/well_known.go create mode 100644 tests/e2e/pd/pd.go create mode 100644 tests/e2e/utils/data/cluster.go create mode 100644 tests/e2e/utils/data/namespace.go create mode 100644 tests/e2e/utils/jwt/jwt.go create mode 100644 tests/e2e/utils/k8s/k8s.go create mode 100644 tests/e2e/utils/k8s/k8s_test.go create mode 100644 tests/e2e/utils/k8s/port_forward.go create mode 100644 tests/e2e/utils/k8s/yaml.go create mode 100644 tests/e2e/utils/tidb/tidb.go create mode 100644 tests/e2e/utils/waiter/cluster.go create mode 100644 tests/e2e/utils/waiter/common.go create mode 100644 tests/e2e/utils/waiter/pd.go create mode 100644 tests/e2e/utils/waiter/pod.go create mode 100644 third_party/kubernetes/LICENSE create mode 100644 third_party/kubernetes/pkg/controller/history/controller_history.go create mode 100644 third_party/kubernetes/pkg/controller/statefulset/stateful_set_utils.go create mode 100644 third_party/kubernetes/pkg/util/hash/hash.go create mode 100644 tools/tools.go diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000000..51278ed9ac7 --- /dev/null +++ 
b/.dockerignore @@ -0,0 +1,3 @@ +_output +manifests +examples diff --git a/.github/licenserc.yaml b/.github/licenserc.yaml new file mode 100644 index 00000000000..4b06dc177c0 --- /dev/null +++ b/.github/licenserc.yaml @@ -0,0 +1,28 @@ +header: + license: + spdx-id: Apache-2.0 + copyright-owner: PingCAP, Inc. + copyright-year: '2024' + paths-ignore: + - "**/.git/**" + - ".github/" + - "_output/**" + - "examples/**" + - "image/**" + - "manifests/**" + - ".gitignore" + - ".dockerignore" + - ".golangci.yml" + - "LICENSES/" + - "**/*.md" + - ".codecov.yml" + - "**/go.mod" + - "**/go.sum" + - "**/go.work" + - "**/go.work.sum" + - "**/LICENSE" + - "third_party/**" + - "pkg/**/*mock.go" + + + comment: on-failure diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000000..f78d4bc9e87 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,107 @@ +name: ci + +on: + push: + branches: + - main + pull_request: + branches: + - main + types: [review_requested, ready_for_review, synchronize] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + image: + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + - name: cache image builder + uses: actions/cache@v4 + with: + path: ./_output/cache + key: ${{ runner.os }}-image-cache + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - uses: docker/setup-qemu-action@v3 + with: + image: gcr.io/pingcap-public/third-party/tonistiigi/binfmt:latest + + - name: setup buildx context + run: | + docker context create builder + + - uses: docker/setup-buildx-action@v3 + with: + platforms: linux/arm64,linux/amd64 + endpoint: builder + driver-opts: | + image=gcr.io/pingcap-public/third-party/moby/buildkit:buildx-stable-1 + + - name: image + run: | + V_PLATFORMS=linux/arm64,linux/amd64 make image + + 
build: + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + - name: setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.work + cache-dependency-path: "**/*.sum" + - name: build + run: | + make build + + lint: + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + - name: setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.work + cache-dependency-path: "**/*.sum" + - name: cache golangci-lint + uses: actions/cache@v4 + with: + path: ./_output/bin/golangci-lint + key: ${{ runner.os }}-golangci-lint + - name: lint + run: | + make lint + + unit: + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + - name: setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.work + cache-dependency-path: "**/*.sum" + - name: unit test + run: | + make unit + + verify: + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + - name: setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.work + cache-dependency-path: "**/*.sum" + - name: verify + run: | + make verify diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 00000000000..4de5bd73040 --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,75 @@ +name: e2e + +on: + pull_request: + branches: + - main + types: [review_requested, ready_for_review, synchronize] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + e2e: + runs-on: self-hosted + strategy: + fail-fast: false + matrix: + spec: + - "Basic" + - "Rolling Update" + - "Version Upgrade" + - "TLS" + - "TiDB Feature" + - "Overlay" + steps: + - uses: actions/checkout@v4 + - name: setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.work + cache-dependency-path: "**/*.sum" + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - 
uses: docker/setup-qemu-action@v3 + with: + image: gcr.io/pingcap-public/third-party/tonistiigi/binfmt:latest + + - name: setup buildx context + run: | + docker context create builder + + - uses: docker/setup-buildx-action@v3 + with: + platforms: linux/arm64,linux/amd64 + endpoint: builder + driver-opts: | + image=gcr.io/pingcap-public/third-party/moby/buildkit:buildx-stable-1 + + - name: E2E test + env: + CI: "true" + GINKGO_OPTS: "--focus=${{ matrix.spec }}" + run: | + make e2e/prepare + make e2e/run + + - name: Collect logs of Operator + if: always() + run: | + ./_output/bin/kubectl logs -n tidb-admin deployment/tidb-operator > tidb-operator.log + + - name: Upload logs + if: always() + uses: actions/upload-artifact@v4 + with: + retention-days: 7 + name: tidb-operator-log-${{ github.run_id }}-${{ github.job }} + path: tidb-operator.log + overwrite: 'true' diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..b6a35df2ab2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +_output +.idea/ diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000000..1cb34185736 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,120 @@ +linters-settings: + dupl: + threshold: 100 + funlen: + lines: -1 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + gocyclo: + min-complexity: 15 + godox: + keywords: + - FIXME + gofmt: + rewrite-rules: + - pattern: 'interface{}' + replacement: 'any' + goimports: + local-prefixes: github.com/pingcap/tidb-operator + mnd: + # don't include the "operation" and "assign" + checks: + - argument + - case + - condition + - return + ignored-numbers: + - '0' + - '1' + - '2' + - '3' + ignored-functions: + - strings.SplitN + govet: + enable: + - nilness + - shadow + errorlint: + 
asserts: false + lll: + line-length: 140 + misspell: + locale: US + ignore-words: + - "importas" # linter name + nolintlint: + allow-unused: false # report any unused nolint directives + require-explanation: true # require an explanation for nolint directives + require-specific: true # require nolint directives to be specific about which linter is being skipped + revive: + rules: + - name: indent-error-flow + - name: unexported-return + disabled: true + - name: unused-parameter + - name: unused-receiver + +linters: + disable-all: true + enable: + - bodyclose + - dogsled + - errcheck + - errorlint + - copyloopvar + - gocheckcompilerdirectives + - goconst + # - gochecknoinits + - gocritic + - gocyclo + - godox + - gofmt + - goimports + - mnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - noctx + - nolintlint + - revive + - stylecheck + - staticcheck + - testifylint + - unconvert + - unparam + - unused + - whitespace + + # This list of linters is not a recommendation (same thing for all this configuration file). + # We intentionally use a limited set of linters. + # See the comment on top of this file. + +issues: + exclude-rules: + - path: (.+)_test\.go + linters: + - dupl + - mnd + - lll + +run: + timeout: 15m diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000000..b67d9091009 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000000..ecc09f09300 --- /dev/null +++ b/Makefile @@ -0,0 +1,186 @@ +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +ROOT = $(CURDIR) +OUTPUT_DIR = $(ROOT)/_output +BIN_DIR = $(OUTPUT_DIR)/bin +CORE_API_PATH = $(ROOT)/apis/core +PD_API_PATH = $(ROOT)/pkg/timanager/apis/pd +GO_MODULE := github.com/pingcap/tidb-operator +OVERLAY_PKG_DIR = $(ROOT)/pkg/overlay +BOILERPLATE_FILE = $(ROOT)/hack/boilerplate/boilerplate.go.txt + +# TODO: use kubectl in _output +KUBECTL = kubectl -n tidb-admin --context kind-tidb-operator + +ALL_CMD = operator prestop-checker +.PHONY: build +build: $(addprefix build/,$(ALL_CMD)) +build/%: + $(ROOT)/hack/build.sh $* + +.PHONY: image +image: $(addprefix image/,$(ALL_CMD)) +image/%: + $(ROOT)/hack/image.sh $* + +.PHONY: push +push: $(addprefix push/,$(ALL_CMD)) +push/%: + $(ROOT)/hack/image.sh $* --push + +.PHONY: deploy +deploy: crd + $(KUBECTL) apply --server-side=true -f $(ROOT)/manifests/crd + $(KUBECTL) apply --server-side=true -f $(ROOT)/manifests/rbac + $(KUBECTL) apply --server-side=true -f $(ROOT)/manifests/deploy + +.PHONY: codegen +codegen: bin/deepcopy-gen bin/register-gen bin/overlay-gen + $(REGISTER_GEN) \ + --output-file=zz_generated.register.go \ + --go-header-file=$(BOILERPLATE_FILE) \ + $(CORE_API_PATH)/... + + $(DEEPCOPY_GEN) \ + --output-file=zz_generated.deepcopy.go \ + --go-header-file=$(BOILERPLATE_FILE) \ + $(CORE_API_PATH)/... + + $(REGISTER_GEN) \ + --output-file=zz_generated.register.go \ + --go-header-file=$(BOILERPLATE_FILE) \ + $(PD_API_PATH)/... + + $(DEEPCOPY_GEN) \ + --output-file=zz_generated.deepcopy.go \ + --go-header-file=$(BOILERPLATE_FILE) \ + $(PD_API_PATH)/... + +.PHONY: overlaygen +overlaygen: bin/overlay-gen + $(OVERLAY_GEN) \ + --output-dir=$(OVERLAY_PKG_DIR) \ + --go-header-file=$(BOILERPLATE_FILE) \ + k8s.io/api/core/v1 + +.PHONY: crd +crd: bin/controller-gen + $(CONTROLLER_GEN) crd:generateEmbeddedObjectMeta=true output:crd:artifacts:config=$(ROOT)/manifests/crd paths=$(CORE_API_PATH)/... 
+# Deprecated: this generator should not be used, as the rbac generator cannot handle nonResourceURLs well
+bin/register-gen: + $(ROOT)/hack/download.sh go_install $(REGISTER_GEN) k8s.io/code-generator/cmd/register-gen + +MOCKGEN = $(BIN_DIR)/mockgen +bin/mockgen: + $(ROOT)/hack/download.sh go_install $(MOCKGEN) go.uber.org/mock/mockgen v0.5.0 "--version" + +OVERLAY_GEN = $(BIN_DIR)/overlay-gen +bin/overlay-gen: + $(ROOT)/hack/build.sh overlay-gen + + +.PHONY: bin/golangci-lint +GOLANGCI_LINT = $(BIN_DIR)/golangci-lint +bin/golangci-lint: + # DON'T track the version of this cmd by go.mod + $(ROOT)/hack/download.sh go_install $(GOLANGCI_LINT) github.com/golangci/golangci-lint/cmd/golangci-lint v1.62.2 "version --format=short" + +.PHONY: bin/kind +KIND = $(BIN_DIR)/kind +bin/kind: + $(ROOT)/hack/download.sh go_install $(KIND) sigs.k8s.io/kind v0.24.0 "version | awk '{print \$$2}'" + +.PHONY: bin/license-eye +LICENSE_EYE = $(BIN_DIR)/license-eye +bin/license-eye: + if [ ! -f $(LICENSE_EYE) ]; then $(ROOT)/hack/download.sh go_install $(LICENSE_EYE) github.com/apache/skywalking-eyes/cmd/license-eye v0.6.0; fi + diff --git a/README.md b/README.md new file mode 100644 index 00000000000..775eba66be2 --- /dev/null +++ b/README.md @@ -0,0 +1,47 @@ +# TiDB Operator + +**NOTE: The v2 is expermential now, PLEASE don't use it in production** + +- [**Stack Overflow**](https://stackoverflow.com/questions/tagged/tidb) +- [**Community Slack Channel**](https://slack.tidb.io/invite?team=tidb-community&channel=sig-k8s&ref=pingcap-tidb-operator) +- [**Reddit**](https://www.reddit.com/r/TiDB/) +- **Mailing list**: [Google Group](https://groups.google.com/forum/#!forum/tidb-user) +- [**For support, please contact PingCAP**](http://bit.ly/contact_us_via_github) + +[![codecov](https://codecov.io/gh/pingcap/tidb-operator/branch/master/graph/badge.svg)](https://codecov.io/gh/pingcap/tidb-operator) +[![LICENSE](https://img.shields.io/github/license/pingcap/tidb-operator.svg)](https://github.com/pingcap/tidb-operator/blob/master/LICENSE) 
+[![Language](https://img.shields.io/badge/Language-Go-blue.svg)](https://golang.org/) +[![Go Report Card](https://goreportcard.com/badge/github.com/pingcap/tidb-operator)](https://goreportcard.com/report/github.com/pingcap/tidb-operator) +[![GitHub release](https://img.shields.io/github/tag/pingcap/tidb-operator.svg?label=release)](https://github.com/pingcap/tidb-operator/releases) +[![GoDoc](https://img.shields.io/badge/Godoc-reference-blue.svg)](https://godoc.org/github.com/pingcap/tidb-operator) + +TiDB Operator manages [TiDB](https://github.com/pingcap/tidb) clusters on [Kubernetes](https://kubernetes.io) and automates tasks related to operating a TiDB cluster. It makes TiDB a truly cloud-native database. + +## Why a new TiDB Operator + +See [Why we need a new TiDB Operator](./docs/why.md) + +## Arch + +See [arch.md](./docs/arch/README.md) + +## Roadmap + +See [rfcs](./docs/rfcs) + +## Documentation(TODO) + +You can see our documentation at PingCAP website for more in-depth installation and instructions for production: + +- [English](https://docs.pingcap.com/tidb-in-kubernetes/stable) +- [简体中文](https://docs.pingcap.com/zh/tidb-in-kubernetes/stable) + +All the TiDB Operator documentation is maintained in the [docs-tidb-operator repository](https://github.com/pingcap/docs-tidb-operator). + +## Contributing + +Contributions are welcome and greatly appreciated. See [CONTRIBUTING.md](./docs/CONTRIBUTING.md) for details on submitting patches and the contribution workflow. + +## License + +TiDB Operator is under the Apache 2.0 license. See the [LICENSE](./LICENSE) file for details. 
diff --git a/apis/core/go.mod b/apis/core/go.mod new file mode 100644 index 00000000000..30408bec688 --- /dev/null +++ b/apis/core/go.mod @@ -0,0 +1,29 @@ +module github.com/pingcap/tidb-operator/apis/core + +go 1.22.0 + +toolchain go1.22.3 + +require ( + k8s.io/api v0.31.0 + k8s.io/apimachinery v0.31.0 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 +) + +require ( + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/text v0.17.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) diff --git a/apis/core/go.sum b/apis/core/go.sum new file mode 100644 index 00000000000..d5b938d762c --- /dev/null +++ b/apis/core/go.sum @@ -0,0 +1,97 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/apis/core/v1alpha1/cluster_types.go b/apis/core/v1alpha1/cluster_types.go new file mode 100644 index 00000000000..cd767e5df48 --- /dev/null +++ b/apis/core/v1alpha1/cluster_types.go @@ -0,0 +1,220 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + PDClusterTLSVolumeName = NamePrefix + "pd-tls" + PDClusterTLSMountPath = "/var/lib/pd-tls" + + TiKVClusterTLSVolumeName = NamePrefix + "tikv-tls" + TiKVClusterTLSMountPath = "/var/lib/tikv-tls" + + TiDBClusterTLSVolumeName = NamePrefix + "tidb-tls" + TiDBClusterTLSMountPath = "/var/lib/tidb-tls" + + TiFlashClusterTLSVolumeName = NamePrefix + "tiflash-tls" + TiFlashClusterTLSMountPath = "/var/lib/tiflash-tls" + + ClusterTLSClientVolumeName = NamePrefix + "cluster-client-tls" + ClusterTLSClientMountPath = "/var/lib/cluster-client-tls" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// ClusterList defines a list of TiDB clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Cluster `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories=tc +// +kubebuilder:printcolumn:name="PD",type="integer",JSONPath=".status.components[?(@.kind==\"PD\")].replicas" +// +kubebuilder:printcolumn:name="TiKV",type="integer",JSONPath=".status.components[?(@.kind==\"TiKV\")].replicas" +// +kubebuilder:printcolumn:name="TiDB",type="integer",JSONPath=".status.components[?(@.kind==\"TiDB\")].replicas" +// +kubebuilder:printcolumn:name="TiFlash",type="integer",JSONPath=".status.components[?(@.kind==\"TiFlash\")].replicas" +// +kubebuilder:printcolumn:name="Available",type=string,JSONPath=`.status.conditions[?(@.type=="Available")].status` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// Cluster defines a TiDB cluster +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSpec `json:"spec,omitempty"` 
+	Status ClusterStatus `json:"status,omitempty"`
+}
+
+type ClusterSpec struct {
+	// SuspendAction defines the suspend actions for the cluster.
+	SuspendAction *SuspendAction `json:"suspendAction,omitempty"`
+
+	// Whether to enable the TLS connection between TiDB cluster components.
+	TLSCluster *TLSCluster `json:"tlsCluster,omitempty"`
+
+	// UpgradePolicy defines the upgrade policy for the cluster.
+	UpgradePolicy UpgradePolicy `json:"upgradePolicy,omitempty"`
+
+	// Paused specifies whether to pause the reconciliation loop for all components of the cluster.
+	Paused bool `json:"paused,omitempty"`
+
+	// RevisionHistoryLimit is the maximum number of revisions that will
+	// be maintained in each Group's revision history.
+	// The revision history consists of all revisions not represented by a currently applied version.
+	// The default value is 10.
+	// +kubebuilder:validation:Minimum=0
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+}
+
+type SuspendAction struct {
+	// SuspendCompute indicates delete the pods but keep the PVCs.
+	SuspendCompute bool `json:"suspendCompute,omitempty"`
+}
+
+// TLSCluster is used to enable mutual TLS connection between TiDB cluster components.
+// https://docs.pingcap.com/tidb/stable/enable-tls-between-components
+type TLSCluster struct {
+	// Enable mutual TLS connection between TiDB cluster components.
+	// Once enabled, the mutual authentication applies to all components,
+	// and it does not support applying to only part of the components.
+	// The steps to enable this feature:
+	//   1. Generate TiDB cluster components certificates and a client-side certificate for them.
+	//	  There are multiple ways to generate these certificates:
+	//	  - user-provided certificates: https://docs.pingcap.com/tidb/stable/generate-self-signed-certificates
+	//	  - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+	//	  - or use cert-manager signed certificates: https://cert-manager.io/
+	//	  2. Create one secret object for one component group which contains the certificates created above.
+	//	  The name of this Secret must be: <clusterName>-<groupName>-cluster-secret.
+	//	  For PD: kubectl create secret generic <clusterName>-<groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+	//	  For TiKV: kubectl create secret generic <clusterName>-<groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+	//	  For TiDB: kubectl create secret generic <clusterName>-<groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+	//	  For Client: kubectl create secret generic <clusterName>-cluster-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+	//	  Same for other components.
+	// +optional
+	Enabled bool `json:"enabled,omitempty"`
+}
+
+type UpgradePolicy string
+
+const (
+	// UpgradePolicyDefault means the cluster will be upgraded in the following order:
+	// PD, TiProxy, TiFlash, TiKV, TiDB.
+	UpgradePolicyDefault UpgradePolicy = "Default"
+
+	// UpgradePolicyNoConstraints means the cluster will be upgraded without any constraints,
+	// all components will be upgraded at the same time.
+	UpgradePolicyNoConstraints UpgradePolicy = "NoConstraints"
+)
+
+type ClusterStatus struct {
+	// observedGeneration is the most recent generation observed for this Cluster. It corresponds to the
+	// Cluster's generation, which is updated on mutation by the API Server.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// Components is the status of each component in the cluster.
+	// +patchMergeKey=kind
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=kind
+	Components []ComponentStatus `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"kind" protobuf:"bytes,2,rep,name=components"`
+
+	// Conditions contains the current status of the cluster.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,3,rep,name=conditions"`
+
+	// PD means URL of the pd service, it's prepared for internal use
+	// e.g. https://pd:2379
+	PD string `json:"pd,omitempty"`
+}
+
+type ComponentKind string
+
+const (
+	ComponentKindPD      ComponentKind = "PD"
+	ComponentKindTiKV    ComponentKind = "TiKV"
+	ComponentKindTiDB    ComponentKind = "TiDB"
+	ComponentKindTiFlash ComponentKind = "TiFlash"
+)
+
+// ComponentStatus is the status of a component in the cluster.
+type ComponentStatus struct {
+	// Kind is the kind of the component, e.g., PD, TiKV, TiDB, TiFlash.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum=PD;TiKV;TiDB;TiFlash
+	Kind ComponentKind `json:"kind"`
+
+	// Replicas is the number of desired replicas of the component.
+	// +kubebuilder:validation:Required
+	Replicas int32 `json:"replicas"`
+}
+
+// ShouldSuspendCompute returns whether the cluster should suspend compute.
+func (c *Cluster) ShouldSuspendCompute() bool {
+	return c.Spec.SuspendAction != nil && c.Spec.SuspendAction.SuspendCompute
+}
+
+// IsTLSClusterEnabled returns whether the cluster has enabled mTLS.
+func (c *Cluster) IsTLSClusterEnabled() bool {
+	return c.Spec.TLSCluster != nil && c.Spec.TLSCluster.Enabled
+}
+
+// TLSClusterSecretName returns the mTLS secret name for a component group.
+func (c *Cluster) TLSClusterSecretName(groupName string) string { + return fmt.Sprintf("%s-%s-cluster-secret", c.Name, groupName) +} + +// ClusterClientTLSSecretName returns the mTLS secret name for the cluster client. +func (c *Cluster) ClusterClientTLSSecretName() string { + return TLSClusterClientSecretName(c.Name) +} + +// TLSClusterClientSecretName returns the mTLS secret name for the cluster client. +func TLSClusterClientSecretName(clusterName string) string { + return fmt.Sprintf("%s-cluster-client-secret", clusterName) +} + +func (c *Cluster) ShouldPauseReconcile() bool { + return c.Spec.Paused +} + +const ( + // ClusterCondAvailable means the cluster is available, i.e. the cluster can be used. + // But it does not mean all members in the cluster are healthy. + ClusterCondAvailable = "Available" + + // ClusterCondProgressing means the cluster is progressing, i.e. the cluster is being created, updated, scaled, etc. + ClusterCondProgressing = "Progressing" + ClusterCreationReason = "ClusterCreation" + ClusterDeletionReason = "ClusterDeletion" + ClusterAvailableReason = "ClusterAvailable" + + ClusterCondSuspended = "Suspended" + ClusterSuspendReason = "ClusterSuspend" +) diff --git a/apis/core/v1alpha1/common_types.go b/apis/core/v1alpha1/common_types.go new file mode 100644 index 00000000000..8b0b318fc6b --- /dev/null +++ b/apis/core/v1alpha1/common_types.go @@ -0,0 +1,371 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // Finalizer is the finalizer used by all resources managed by TiDB Operator. + Finalizer = "core.pingcap.com/finalizer" + + // LabelKeyPrefix defines key prefix of well known labels + LabelKeyPrefix = "pingcap.com/" + + // LabelKeyManagedBy means resources are managed by tidb operator + LabelKeyManagedBy = LabelKeyPrefix + "managed-by" + LabelValManagedByOperator = "tidb-operator" + + // LabelKeyCluster means which tidb cluster the resource belongs to + LabelKeyCluster = LabelKeyPrefix + "cluster" + + // LabelKeyComponent means the component of the resource + LabelKeyComponent = LabelKeyPrefix + "component" + LabelValComponentPD = "pd" + LabelValComponentTiDB = "tidb" + LabelValComponentTiKV = "tikv" + LabelValComponentTiFlash = "tiflash" + + // LabelKeyGroup means the component group of the resource + LabelKeyGroup = LabelKeyPrefix + "group" + // LabelKeyInstance means the instance of the resource + LabelKeyInstance = LabelKeyPrefix + "instance" + + // LabelKeyPodSpecHash is the hash of the pod spec. + LabelKeyPodSpecHash = LabelKeyPrefix + "pod-spec-hash" + + LabelKeyInstanceRevisionHash = LabelKeyPrefix + "instance-revision-hash" + + // LabelKeyConfigHash is the hash of the user-specified config (i.e., `.Spec.Config`), + // which will be used to determine whether the config has changed. + // Since the tidb operator will overlay the user-specified config with some operator-managed fields, + // if we hash the overlayed config, with the evolving TiDB Operator, the hash may change, + // potentially triggering an unexpected rolling update. 
+ // Instead, we choose to hash the user-specified config, + // and the worst case is that users expect a reboot but it doesn't happen. + LabelKeyConfigHash = LabelKeyPrefix + "config-hash" +) + +const ( + // NamePrefix for "names" in k8s resources + // Users may overlay some fields in managed resource such as pods. Names with this + // prefix is preserved to avoid conflicts with fields defined by users. + NamePrefix = "ti-" +) + +const ( + // All volume names + // + // VolumeNameConfig defines volume name for main config file + VolumeNameConfig = NamePrefix + "config" + // VolumeNamePrestopChecker defines volume name for pre stop checker cmd + VolumeNamePrestopChecker = NamePrefix + "prestop-checker" + + // All container names + // + // Main component containers of the tidb cluster + ContainerNamePD = "pd" + ContainerNameTiKV = "tikv" + ContainerNameTiDB = "tidb" + ContainerNameTiFlash = "tiflash" + // An init container to copy pre stop checker cmd to main container + ContainerNamePrestopChecker = NamePrefix + "prestop-checker" +) + +const ( + DirNameConfigPD = "/etc/pd" + DirNameConfigTiKV = "/etc/tikv" + DirNameConfigTiDB = "/etc/tidb" + DirNameConfigTiFlash = "/etc/tiflash" + + // ConfigFileName defines default name of config file + ConfigFileName = "config.toml" + + // ConfigFileTiFlashProxyName defines default name of tiflash proxy config file + ConfigFileTiFlashProxyName = "proxy.toml" + + // PrestopDirName defines dir path of pre stop checker cmd + DirNamePrestop = "/prestop" +) + +const ( + DefaultHelperImage = "busybox:1.37.0" +) + +// ConfigUpdateStrategy represents the strategy to update configuration. +type ConfigUpdateStrategy string + +const ( + // ConfigUpdateStrategyInPlace updates config without restarting. + ConfigUpdateStrategyInPlace ConfigUpdateStrategy = "InPlace" + + // ConfigUpdateStrategyRollingUpdate performs a rolling-update to apply changed configs. 
+ ConfigUpdateStrategyRollingUpdate ConfigUpdateStrategy = "RollingUpdate" +) + +// ObjectMeta is defined for replacing the embedded metav1.ObjectMeta +// Now only labels and annotations are allowed +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty"` + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + +type ClusterReference struct { + Name string `json:"name"` +} + +// Topology means the topo for scheduling +// e.g. topology.kubernetes.io/zone: us-west-1a +// It will be translated to pod.spec.nodeSelector +// IMPORTANT: Topology is immutable for an instance +type Topology map[string]string + +// Overlay defines some templates of k8s native resources. +// Users can specify this field to overlay the spec of managed resources(pod, pvcs, ...). 
+type Overlay struct { + Pod *PodOverlay `json:"pod,omitempty"` + PersistentVolumeClaims []PersistentVolumeClaimOverlay `json:"volumeClaims,omitempty"` +} + +type PodOverlay struct { + ObjectMeta `json:"metadata,omitempty"` + Spec *corev1.PodSpec `json:"spec,omitempty"` +} + +type PersistentVolumeClaimOverlay struct { + ObjectMeta `json:"metadata,omitempty"` + Spec *corev1.PersistentVolumeClaimSpec `json:"spec,omitempty"` +} + +type ConfigFile string + +// Volume defines a persistent volume, it will be mounted at a specified root path +// A volume can be mounted for multiple different usages. +// For example, a volume can be mounted for both data and raft log. +type Volume struct { + // Name is volume name. + // If not specified, the PVC name will be "{component}-{podName}" + Name string `json:"name,omitempty"` + + // Path is mount path of this volume + Path string `json:"path"` + + // For defines the usage of this volume + // At least one usage is needed for a new volume + For []VolumeUsage `json:"for"` + + // Storage defines the request size of this volume + Storage resource.Quantity `json:"storage"` + + // StorageClassName means the storage class the volume used. + // You can modify volumes' attributes by changing the StorageClass + // when VolumeAttributesClass is not available. + // Note that only newly created PV will use the new StorageClass. + StorageClassName *string `json:"storageClassName,omitempty"` + + // VolumeAttributesClassName means the VolumeAttributesClass the volume used. + // You can modify volumes' attributes by changing it. + // This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + // It's only available when the feature is enabled. + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` +} + +type VolumeUsage struct { + // Type is a usage type of the volume. + // A volume can be defined for multiple usages. 
+	Type VolumeUsageType `json:"type"`
+	// SubPath is the relative path of the volume's mount path.
+	// The default value of sub path is determined by the usage type.
+	SubPath string `json:"subPath,omitempty"`
+}
+
+type VolumeUsageType string
+
+// Port defines a listen port
+type Port struct {
+	Port int32 `json:"port"`
+}
+
+// SchedulePolicy defines how instances of the group schedule their pods.
+type SchedulePolicy struct {
+	Type SchedulePolicyType `json:"type"`
+	EvenlySpread *SchedulePolicyEvenlySpread `json:"evenlySpread,omitempty"`
+}
+
+type SchedulePolicyType string
+
+const (
+	// This policy is defined to evenly spread all instances of a group
+	// e.g. we may hope tikvs can evenly spread in 3 az
+	SchedulePolicyTypeEvenlySpread SchedulePolicyType = "EvenlySpread"
+)
+
+type SchedulePolicyEvenlySpread struct {
+	// All instances of a group will evenly spread in different topologies
+	Topologies []ScheduleTopology `json:"topologies"`
+}
+
+type ScheduleTopology struct {
+	// Topology means the topo for scheduling
+	Topology Topology `json:"topology"`
+	// Weight defines how many pods will be scheduled to this topo
+	// default is 1
+	Weight *int32 `json:"weight,omitempty"`
+}
+
+// ResourceRequirements describes the compute resource requirements.
+// It's simplified from corev1.ResourceRequirements to fit the most common use cases.
+// This field will be translated to requests=limits for all resources.
+// If users need to specify more advanced resource requirements, just try to use overlay to override it
+type ResourceRequirements struct {
+	CPU *resource.Quantity `json:"cpu,omitempty"`
+	Memory *resource.Quantity `json:"memory,omitempty"`
+}
+
+// CommonStatus defines common status fields for instances and groups managed by TiDB Operator.
+type CommonStatus struct {
+	// Conditions contain details of the current state.
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// ObservedGeneration is the most recent generation observed by the controller.
+ // It's used to determine whether the controller has reconciled the latest spec. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // CurrentRevision is the revision of the Controller that created the resource. + CurrentRevision string `json:"currentRevision,omitempty"` + + // UpdateRevision is the revision of the Controller that should modify the resource. + UpdateRevision string `json:"updateRevision,omitempty"` + + // CollisionCount is the count of hash collisions. The controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty"` +} + +// GroupStatus defines the common status fields for all component groups. +type GroupStatus struct { + // Version is the version of all instances in the group. + // It will be same as the `spec.version` only when all instances are upgraded to the desired version. + Version string `json:"version,omitempty"` + + // Replicas is the number of Instance created by the controller. + Replicas int32 `json:"replicas"` + + // ReadyReplicas is the number of Instances created for this ComponentGroup with a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty"` + + // CurrentReplicas is the number of Instances created by the Group controller from the Group version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty"` + + // UpdatedReplicas is the number of Instances created by the Group controller from the Group version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` +} + +// ComponentAccessor is the interface to access details of instances/groups managed by TiDB Operator. +type ComponentAccessor interface { + GetName() string + GetNamespace() string + GetClusterName() string + ComponentKind() ComponentKind + + // GVK returns the GroupVersionKind of the instance/group. 
+	GVK() schema.GroupVersionKind
+	GetGeneration() int64
+	ObservedGeneration() int64
+	CurrentRevision() string
+	UpdateRevision() string
+	CollisionCount() *int32
+
+	IsHealthy() bool
+}
+
+func IsUpToDate(a ComponentAccessor) bool {
+	return IsReconciled(a) && a.CurrentRevision() == a.UpdateRevision()
+}
+
+func StatusChanged(a ComponentAccessor, s CommonStatus) bool {
+	return a.CurrentRevision() != s.CurrentRevision || a.UpdateRevision() != s.UpdateRevision || (a.CollisionCount() == nil) != (s.CollisionCount == nil) || (a.CollisionCount() != nil && s.CollisionCount != nil && *a.CollisionCount() != *s.CollisionCount)
+}
+
+func IsReconciled(a ComponentAccessor) bool {
+	return a.ObservedGeneration() == a.GetGeneration()
+}
+
+// Instance is the interface for all components.
+type Instance interface {
+	ComponentAccessor
+	*PD | *TiDB | *TiKV | *TiFlash
+}
+
+func AllInstancesSynced[T Instance](instances []T, rev string) bool {
+	for _, instance := range instances {
+		if !IsUpToDate(instance) || instance.CurrentRevision() != rev {
+			return false
+		}
+	}
+	return true
+}
+
+// Group is the interface for all component groups.
+type Group interface {
+	ComponentAccessor
+
+	GetDesiredReplicas() int32
+	GetDesiredVersion() string
+	GetActualVersion() string
+	GetStatus() GroupStatus
+}
+
+func IsGroupHealthyAndUpToDate(g Group) bool {
+	return g.IsHealthy() && IsUpToDate(g) && g.GetStatus().ReadyReplicas == g.GetDesiredReplicas()
+}
+
+// GroupType is used for generic functions.
+type GroupType interface {
+	Group
+
+	*PDGroup | *TiDBGroup | *TiKVGroup | *TiFlashGroup
+}
+
+type GroupList interface {
+	ToSlice() []Group
+}
diff --git a/apis/core/v1alpha1/doc.go b/apis/core/v1alpha1/doc.go
new file mode 100644
index 00000000000..d39a6bba61e
--- /dev/null
+++ b/apis/core/v1alpha1/doc.go
@@ -0,0 +1,60 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +groupName=core.pingcap.com +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete +// +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch +// +// +kubebuilder:rbac:groups=apps,resources=controllerrevisions,verbs=get;list;watch;create;update;patch;delete +// +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=storage.k8s.io,resources=volumeattributesclasses,verbs=get;list;watch +// +// +kubebuilder:rbac:groups=core.pingcap.com,resources=clusters,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=clusters/status,verbs=get;update;patch +// +// +kubebuilder:rbac:groups=core.pingcap.com,resources=pdgroups,verbs=get;list;watch;delete;update +// 
+kubebuilder:rbac:groups=core.pingcap.com,resources=pdgroups/status,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=pds,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core.pingcap.com,resources=pds/status,verbs=get;list;watch;update;patch +// +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tikvgroups,verbs=get;list;watch;delete;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tikvgroups/status,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tikvs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tikvs/status,verbs=get;list;watch;update;patch +// +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tiflashgroups,verbs=get;list;watch;delete;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tiflashgroups/status,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tiflashes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tiflashes/status,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tidbgroups,verbs=get;list;watch;delete;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tidbgroups/status,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tidbs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core.pingcap.com,resources=tidbs/status,verbs=get;list;watch;update;patch +// +// +kubebuilder:rbac:resources="",verbs=get,urls=/metrics +// +// Package v1alpha1 is the v1alpha1 version of core tidb operator api +package v1alpha1 diff --git a/apis/core/v1alpha1/errors.go b/apis/core/v1alpha1/errors.go new file mode 100644 index 00000000000..6cbe193a9cd --- /dev/null +++ b/apis/core/v1alpha1/errors.go @@ -0,0 +1,21 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "errors" +) + +var ErrFieldIsManagedByOperator = errors.New("field is managed by operator, cannot be set in config file") diff --git a/apis/core/v1alpha1/fake.go b/apis/core/v1alpha1/fake.go new file mode 100644 index 00000000000..dc0fd138271 --- /dev/null +++ b/apis/core/v1alpha1/fake.go @@ -0,0 +1,97 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import "k8s.io/apimachinery/pkg/runtime/schema" + +var _ Group = &FakeGroup{} + +// FakeGroup is a mock implementation of the Group interface for testing purposes. 
+type FakeGroup struct { + Name string + Namespace string + ClusterName string + ComponentKindVal ComponentKind + Generation int64 + ObservedGen int64 + CurrentRev string + UpdateRev string + CollisionCountVal *int32 + Healthy bool + DesiredReplicas int32 + DesiredVersion string + ActualVersion string + Status GroupStatus +} + +func (f *FakeGroup) GetName() string { + return f.Name +} + +func (f *FakeGroup) GetNamespace() string { + return f.Namespace +} + +func (f *FakeGroup) GetClusterName() string { + return f.ClusterName +} + +func (f *FakeGroup) ComponentKind() ComponentKind { + return f.ComponentKindVal +} + +func (f *FakeGroup) GVK() schema.GroupVersionKind { + return schema.GroupVersionKind{} +} + +func (f *FakeGroup) GetGeneration() int64 { + return f.Generation +} + +func (f *FakeGroup) ObservedGeneration() int64 { + return f.ObservedGen +} + +func (f *FakeGroup) CurrentRevision() string { + return f.CurrentRev +} + +func (f *FakeGroup) UpdateRevision() string { + return f.UpdateRev +} + +func (f *FakeGroup) CollisionCount() *int32 { + return f.CollisionCountVal +} + +func (f *FakeGroup) IsHealthy() bool { + return f.Healthy +} + +func (f *FakeGroup) GetDesiredReplicas() int32 { + return f.DesiredReplicas +} + +func (f *FakeGroup) GetDesiredVersion() string { + return f.DesiredVersion +} + +func (f *FakeGroup) GetActualVersion() string { + return f.ActualVersion +} + +func (f *FakeGroup) GetStatus() GroupStatus { + return f.Status +} diff --git a/apis/core/v1alpha1/pd_types.go b/apis/core/v1alpha1/pd_types.go new file mode 100644 index 00000000000..7734da07db9 --- /dev/null +++ b/apis/core/v1alpha1/pd_types.go @@ -0,0 +1,351 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" +) + +var ( + _ GroupList = &PDGroupList{} + _ Group = &PDGroup{} + _ ComponentAccessor = &PD{} +) + +const ( + // VolumeUsageTypePDData means data dir of PD + // The default sub path is "data" + VolumeUsageTypePDData VolumeUsageType = "data" +) + +const ( + PDPortNameClient = "client" + PDPortNamePeer = "peer" + DefaultPDPortClient = 2379 + DefaultPDPortPeer = 2380 +) + +const ( + // TODO: combine all Health condition + PDCondHealth = "Health" + PDHealthReason = "PDHealth" + + // PDCondInitialized means the operator detects that the PD instance has joined the cluster + PDCondInitialized = "Initialized" + + PDCondSuspended = "Suspended" + PDSuspendReason = "PDSuspend" + + PDGroupCondSuspended = "Suspended" + PDGroupSuspendReason = "PDGroupSuspend" +) + +const ( + AnnoKeyInitialClusterNum = "pd.core.pingcap.com/initial-cluster-num" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// PDGroupList defines a list of PD groups +type PDGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PDGroup `json:"items"` +} + +func (l *PDGroupList) ToSlice() []Group { + groups := make([]Group, 0, len(l.Items)) + for i := range l.Items { + groups = append(groups, &l.Items[i]) + } + return groups +} + +// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories=tc +// +kubebuilder:resource:categories=tg +// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name` +// +kubebuilder:printcolumn:name="Available",type=string,JSONPath=`.status.conditions[?(@.type=="Available")].status` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// PDGroup defines a group of similar PD instances +type PDGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PDGroupSpec `json:"spec,omitempty"` + Status PDGroupStatus `json:"status,omitempty"` +} + +func (in *PDGroup) GetClusterName() string { + return in.Spec.Cluster.Name +} + +func (in *PDGroup) GetDesiredReplicas() int32 { + if in.Spec.Replicas == nil { + return 0 + } + return *in.Spec.Replicas +} + +func (in *PDGroup) GetDesiredVersion() string { + return in.Spec.Version +} + +func (in *PDGroup) GetActualVersion() string { + return in.Status.Version +} + +func (in *PDGroup) GetStatus() GroupStatus { + return in.Status.GroupStatus +} + +func (in *PDGroup) ComponentKind() ComponentKind { + return ComponentKindPD +} + +func (in *PDGroup) GVK() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("PDGroup") +} + +func (in *PDGroup) ObservedGeneration() int64 { + return in.Status.ObservedGeneration +} + +func (in *PDGroup) CurrentRevision() string { + return in.Status.CurrentRevision +} + +func (in *PDGroup) UpdateRevision() string { + return in.Status.UpdateRevision +} + +func (in *PDGroup) CollisionCount() *int32 { + if in.Status.CollisionCount == nil { + return nil + } + return ptr.To(*in.Status.CollisionCount) +} + +func (in *PDGroup) IsHealthy() bool { + // TODO implement me + return true +} + +func (in *PDGroup) MountClusterClientSecret() bool { + return in.Spec.MountClusterClientSecret 
!= nil && *in.Spec.MountClusterClientSecret +} + +func (in *PDGroup) GetClientPort() int32 { + if in.Spec.Template.Spec.Server.Ports.Client != nil { + return in.Spec.Template.Spec.Server.Ports.Client.Port + } + return DefaultPDPortClient +} + +func (in *PDGroup) GetPeerPort() int32 { + if in.Spec.Template.Spec.Server.Ports.Peer != nil { + return in.Spec.Template.Spec.Server.Ports.Peer.Port + } + return DefaultPDPortPeer +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// PDList defines a list of PD instances +type PDList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PD `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories=tc +// +kubebuilder:resource:categories=peer +// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name` +// +kubebuilder:printcolumn:name="Leader",type=string,JSONPath=`.status.isLeader` +// +kubebuilder:printcolumn:name="Initialized",type=string,JSONPath=`.status.conditions[?(@.type=="Initialized")].status` +// +kubebuilder:printcolumn:name="Healthy",type=string,JSONPath=`.status.conditions[?(@.type=="Health")].status` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// PD defines a PD instance +type PD struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PDSpec `json:"spec,omitempty"` + Status PDStatus `json:"status,omitempty"` +} + +func (in *PD) GetClusterName() string { + return in.Spec.Cluster.Name +} + +func (in *PD) ComponentKind() ComponentKind { + return ComponentKindPD +} + +func (in *PD) GVK() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("PD") +} + +func (in *PD) ObservedGeneration() int64 { + return in.Status.ObservedGeneration +} + +func 
(in *PD) CurrentRevision() string { + return in.Status.CurrentRevision +} + +func (in *PD) UpdateRevision() string { + return in.Status.UpdateRevision +} + +func (in *PD) CollisionCount() *int32 { + if in.Status.CollisionCount == nil { + return nil + } + return ptr.To(*in.Status.CollisionCount) +} + +func (in *PD) IsHealthy() bool { + return meta.IsStatusConditionTrue(in.Status.Conditions, PDCondInitialized) && meta.IsStatusConditionTrue(in.Status.Conditions, PDCondHealth) && in.DeletionTimestamp.IsZero() +} + +func (in *PD) GetClientPort() int32 { + if in.Spec.Server.Ports.Client != nil { + return in.Spec.Server.Ports.Client.Port + } + return DefaultPDPortClient +} + +func (in *PD) GetPeerPort() int32 { + if in.Spec.Server.Ports.Peer != nil { + return in.Spec.Server.Ports.Peer.Port + } + return DefaultPDPortPeer +} + +// PDGroupSpec describes the common attributes of a PDGroup +type PDGroupSpec struct { + Cluster ClusterReference `json:"cluster"` + Replicas *int32 `json:"replicas"` + Version string `json:"version"` + + // Bootstrapped means that pd cluster has been bootstrapped + // It's no need to initialize a new cluster + // Normally, this field is automatically changed by operator. + // If it's true, it cannot be set to false for security + Bootstrapped bool `json:"bootstrapped,omitempty"` + + // MountClusterClientSecret indicates whether to mount `cluster-client-secret` to the Pod. + MountClusterClientSecret *bool `json:"mountClusterClientSecret,omitempty"` + + SchedulePolicies []SchedulePolicy `json:"schedulePolicies,omitempty"` + + // ConfigUpdateStrategy determines how the configuration change is applied to the cluster. + // Valid values are "RollingUpdate" (by default) and "InPlace". 
+ // +kubebuilder:validation:Enum=RollingUpdate;InPlace + // +kubebuilder:default="RollingUpdate" + ConfigUpdateStrategy ConfigUpdateStrategy `json:"configUpdateStrategy,omitempty"` + + Template PDTemplate `json:"template"` +} + +type PDTemplate struct { + ObjectMeta `json:"metadata,omitempty"` + Spec PDTemplateSpec `json:"spec"` +} + +// PDTemplateSpec can only be specified in PDGroup +// TODO: It's name may need to be changed to distinguish from PodTemplateSpec +type PDTemplateSpec struct { + // Image is pd's image + // If tag is omitted, version will be used as the image tag. + // Default is pingcap/pd + Image *string `json:"image,omitempty"` + // Server defines server config for PD + Server PDServer `json:"server,omitempty"` + Resources ResourceRequirements `json:"resources,omitempty"` + // Config defines config file of PD + Config ConfigFile `json:"config"` + // Volumes defines persistent volumes of PD + Volumes []Volume `json:"volumes"` + // Overlay defines a k8s native resource template patch + // All resources(pod, pvcs, ...) 
managed by PD can be overlayed by this field + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Overlay *Overlay `json:"overlay,omitempty"` +} + +type PDServer struct { + // Ports defines all ports listened by pd + Ports PDPorts `json:"ports,omitempty"` +} + +type PDPorts struct { + // Client defines port for pd's api service + Client *Port `json:"client,omitempty"` + // Peer defines port for peer communication + Peer *Port `json:"peer,omitempty"` +} + +type PDGroupStatus struct { + CommonStatus `json:",inline"` + GroupStatus `json:",inline"` +} + +// PDSpec describes the common attributes of a PD instance +type PDSpec struct { + // Cluster is a reference of tidb cluster + Cluster ClusterReference `json:"cluster"` + + // Topology defines the topology domain of this pd instance + // It will be translated into a node affinity config + // Topology cannot be changed + Topology Topology `json:"topology,omitempty"` + + // Version specifies the PD version + Version string `json:"version"` + + // Subdomain means the subdomain of the exported pd dns. + // A same pd cluster will use a same subdomain + Subdomain string `json:"subdomain"` + + // PDTemplateSpec embedded some fields managed by PDGroup + PDTemplateSpec `json:",inline"` +} + +type PDStatus struct { + CommonStatus `json:",inline"` + + // ID is the member id of this pd instance + ID string `json:"id"` + + // IsLeader indicates whether this pd is the leader + // NOTE: it's a snapshot from PD, not always up to date + IsLeader bool `json:"isLeader"` +} diff --git a/apis/core/v1alpha1/tidb_types.go b/apis/core/v1alpha1/tidb_types.go new file mode 100644 index 00000000000..b2b8482dfb8 --- /dev/null +++ b/apis/core/v1alpha1/tidb_types.go @@ -0,0 +1,502 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" +) + +var ( + _ GroupList = &TiDBGroupList{} + _ Group = &TiDBGroup{} + _ ComponentAccessor = &TiDB{} +) + +const ( + TiDBPortNameClient = "mysql-client" + TiDBPortNameStatus = "status" + DefaultTiDBPortClient = 4000 + DefaultTiDBPortStatus = 10080 +) + +const ( + // TCPProbeType represents the readiness prob method with TCP. + TCPProbeType string = "tcp" + // CommandProbeType represents the readiness prob method with arbitrary unix `exec` call format commands. + CommandProbeType string = "command" +) + +const ( + // TiDBServerTLSVolumeName is the volume name for the TLS secret used by TLS communication between TiDB server and MySQL client. + TiDBServerTLSVolumeName = NamePrefix + "tidb-server-tls" + // TiDBServerTLSMountPath is the volume mount path for the TLS secret used by TLS communication between TiDB server and MySQL client. 
+	TiDBServerTLSMountPath = "/var/lib/tidb-server-tls"
+)
+
+const (
+	BootstrapSQLVolumeName   = NamePrefix + "tidb-bootstrap-sql"
+	BootstrapSQLFilePath     = "/etc/tidb-bootstrap"
+	BootstrapSQLFileName     = "bootstrap.sql"
+	BootstrapSQLConfigMapKey = "bootstrap-sql"
+)
+
+const (
+	TiDBAuthTokenVolumeName = NamePrefix + "tidb-auth-token"
+	TiDBAuthTokenPath       = "/var/lib/tidb-auth-token"
+	TiDBAuthTokenJWKS       = "tidb_auth_token_jwks.json"
+)
+
+const (
+	TiDBCondHealth   = "Health"
+	TiDBHealthReason = "TiDBHealth"
+
+	TiDBCondSuspended = "Suspended"
+	TiDBSuspendReason = "TiDBSuspend"
+
+	TiDBGroupCondAvailable   = "Available"
+	TiDBGroupAvailableReason = "TiDBGroupAvailable"
+
+	TiDBGroupCondSuspended = "Suspended"
+	TiDBGroupSuspendReason = "TiDBGroupSuspend"
+)
+
+const (
+	TiDBSlowLogContainerName     = NamePrefix + "slowlog"
+	TiDBDefaultSlowLogVolumeName = NamePrefix + "slowlog"
+	TiDBDefaultSlowLogDir        = "/var/log/tidb"
+	TiDBSlowLogFileName          = "slowlog"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+
+// TiDBGroupList defines a list of TiDB groups
+type TiDBGroupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []TiDBGroup `json:"items"`
+}
+
+func (l *TiDBGroupList) ToSlice() []Group {
+	groups := make([]Group, 0, len(l.Items))
+	for i := range l.Items {
+		groups = append(groups, &l.Items[i])
+	}
+	return groups
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:categories=tc;tg
+// NOTE(review): merged duplicate +kubebuilder:resource markers — they override each other, so "tc" was silently dropped before.
+// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name`
+// +kubebuilder:printcolumn:name="Available",type=string,JSONPath=`.status.conditions[?(@.type=="Available")].status`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
+// TiDBGroup defines a group of similar TiDB instances.
+type TiDBGroup struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   TiDBGroupSpec   `json:"spec,omitempty"`
+	Status TiDBGroupStatus `json:"status,omitempty"`
+}
+
+func (in *TiDBGroup) GetClusterName() string {
+	return in.Spec.Cluster.Name
+}
+
+func (in *TiDBGroup) GetDesiredReplicas() int32 {
+	if in.Spec.Replicas == nil {
+		return 0
+	}
+	return *in.Spec.Replicas
+}
+
+func (in *TiDBGroup) GetDesiredVersion() string {
+	return in.Spec.Version
+}
+
+func (in *TiDBGroup) GetActualVersion() string {
+	return in.Status.Version
+}
+
+func (in *TiDBGroup) GetStatus() GroupStatus {
+	return in.Status.GroupStatus
+}
+
+func (in *TiDBGroup) GVK() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("TiDBGroup")
+}
+
+func (in *TiDBGroup) ObservedGeneration() int64 {
+	return in.Status.ObservedGeneration
+}
+
+func (in *TiDBGroup) CurrentRevision() string {
+	return in.Status.CurrentRevision
+}
+
+func (in *TiDBGroup) UpdateRevision() string {
+	return in.Status.UpdateRevision
+}
+
+func (in *TiDBGroup) CollisionCount() *int32 {
+	if in.Status.CollisionCount == nil {
+		return nil
+	}
+	return ptr.To(*in.Status.CollisionCount)
+}
+
+func (in *TiDBGroup) ComponentKind() ComponentKind {
+	return ComponentKindTiDB
+}
+
+func (in *TiDBGroup) IsHealthy() bool {
+	return meta.IsStatusConditionTrue(in.Status.Conditions, TiDBGroupCondAvailable) && in.DeletionTimestamp.IsZero()
+}
+
+func (in *TiDBGroup) GetClientPort() int32 {
+	if in.Spec.Template.Spec.Server.Ports.Client != nil {
+		return in.Spec.Template.Spec.Server.Ports.Client.Port
+	}
+	return DefaultTiDBPortClient
+}
+
+func (in *TiDBGroup) GetStatusPort() int32 {
+	if in.Spec.Template.Spec.Server.Ports.Status != nil {
+		return in.Spec.Template.Spec.Server.Ports.Status.Port
+	}
+	return DefaultTiDBPortStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+
+// TiDBList defines a list of TiDB instances.
+type TiDBList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []TiDB `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:categories=tc;tidb
+// NOTE(review): merged duplicate +kubebuilder:resource markers — they override each other, so "tc" was silently dropped before.
+// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name`
+// +kubebuilder:printcolumn:name="Healthy",type=string,JSONPath=`.status.conditions[?(@.type=="Health")].status`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
+// TiDB defines a TiDB instance.
+type TiDB struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   TiDBSpec   `json:"spec,omitempty"`
+	Status TiDBStatus `json:"status,omitempty"`
+}
+
+func (in *TiDB) GetClusterName() string {
+	return in.Spec.Cluster.Name
+}
+
+func (in *TiDB) GetName() string {
+	return in.Name
+}
+
+func (in *TiDB) ComponentKind() ComponentKind {
+	return ComponentKindTiDB
+}
+
+func (in *TiDB) GVK() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("TiDB")
+}
+
+// IsSeperateSlowLogEnabled: NOTE(review) "Seperate" is a misspelling of "Separate"; kept as-is for API compatibility.
+func (in *TiDB) IsSeperateSlowLogEnabled() bool {
+	if in.Spec.SlowLog == nil {
+		return true // enabled by default
+	}
+	return !in.Spec.SlowLog.Disabled
+}
+
+func (in *TiDB) ObservedGeneration() int64 {
+	return in.Status.ObservedGeneration
+}
+
+func (in *TiDB) CurrentRevision() string {
+	return in.Status.CurrentRevision
+}
+
+func (in *TiDB) UpdateRevision() string {
+	return in.Status.UpdateRevision
+}
+
+func (in *TiDB) CollisionCount() *int32 {
+	if in.Status.CollisionCount == nil {
+		return nil
+	}
+	return ptr.To(*in.Status.CollisionCount)
+}
+
+func (in *TiDB) IsHealthy() bool {
+	return meta.IsStatusConditionTrue(in.Status.Conditions, TiDBCondHealth) && in.DeletionTimestamp.IsZero()
+}
+
+func (in *TiDB) GetClientPort() int32 {
+	if in.Spec.Server.Ports.Client != nil {
+		return in.Spec.Server.Ports.Client.Port
+	}
+	return DefaultTiDBPortClient
+}
+
+func (in *TiDB) GetStatusPort() int32 {
+	if in.Spec.Server.Ports.Status != nil {
+		return in.Spec.Server.Ports.Status.Port
+	}
+	return DefaultTiDBPortStatus
+}
+
+// TiDBGroupSpec describes the common attributes of a TiDBGroup.
+type TiDBGroupSpec struct {
+	Cluster  ClusterReference `json:"cluster"`
+	Replicas *int32           `json:"replicas"`
+	Version  string           `json:"version"`
+
+	// Service defines some fields used to override the default service.
+	Service *TiDBService `json:"service,omitempty"`
+
+	// Whether enable the TLS connection between the TiDB server and MySQL client.
+	TLSClient *TiDBTLSClient `json:"tlsClient,omitempty"`
+
+	// BootstrapSQLConfigMapName is the name of the ConfigMap which contains the bootstrap SQL file with the key `bootstrap-sql`,
+	// which will only be executed when a TiDB cluster bootstraps for the first time.
+	// The field should be set ONLY when creating the first TiDB group for a cluster, since it only takes effect on the first bootstrap.
+	// Only v6.5.1+ supports this feature.
+	BootstrapSQLConfigMapName *string `json:"bootstrapSQLConfigMapName,omitempty"`
+
+	// Whether enable `tidb_auth_token` authentication method.
+	// To enable this feature, a K8s secret named `-tidb-auth-token-jwks-secret` must be created to store the JWKs.
+	// ref: https://docs.pingcap.com/tidb/stable/security-compatibility-with-mysql#tidb_auth_token
+	// Defaults to false.
+	TiDBAuthToken *TiDBAuthToken `json:"tidbAuthToken,omitempty"`
+
+	SchedulePolicies []SchedulePolicy `json:"schedulePolicies,omitempty"`
+
+	// ConfigUpdateStrategy determines how the configuration change is applied to the cluster.
+	// Valid values are "RollingUpdate" (by default) and "InPlace".
+ // +kubebuilder:validation:Enum=RollingUpdate;InPlace + // +kubebuilder:default="RollingUpdate" + ConfigUpdateStrategy ConfigUpdateStrategy `json:"configUpdateStrategy,omitempty"` + + Template TiDBTemplate `json:"template"` +} + +type TiDBTemplate struct { + ObjectMeta `json:"metadata,omitempty"` + Spec TiDBTemplateSpec `json:"spec"` +} + +// TiDBTemplateSpec can only be specified in TiDBGroup. +type TiDBTemplateSpec struct { + // Image is tidb's image + // If tag is omitted, version will be used as the image tag. + // Default is pingcap/tidb + Image *string `json:"image,omitempty"` + // Server defines the server configuration of TiDB. + Server TiDBServer `json:"server,omitempty"` + // Probes defines probes for TiDB. + Probes TiDBProbes `json:"probes,omitempty"` + // Resources defines resource required by TiDB. + Resources ResourceRequirements `json:"resources,omitempty"` + // Config defines config file of TiDB. + Config ConfigFile `json:"config"` + // Volumes defines data volume of TiDB, it is optional. + Volumes []Volume `json:"volumes,omitempty"` + + // SlowLog defines the separate slow log configuration for TiDB. + // When enabled, a sidecar container will be created to output the slow log to its stdout. + SlowLog *TiDBSlowLog `json:"slowLog,omitempty"` + + // Overlay defines a k8s native resource template patch. + // All resources(pod, pvcs, ...) managed by TiDB can be overlayed by this field. + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Overlay *Overlay `json:"overlay,omitempty"` +} + +type TiDBServer struct { + // Port defines all ports listened by TiDB. + Ports TiDBPorts `json:"ports,omitempty"` +} + +type TiDBPorts struct { + // Client defines port for TiDB's SQL service. + Client *Port `json:"client,omitempty"` + // Status defines port for TiDB status API. + Status *Port `json:"status,omitempty"` +} + +type TiDBProbes struct { + // Readiness defines the readiness probe for TiDB. 
+ // The default handler is a TCP socket on the client port. + Readiness *TiDBProb `json:"readiness,omitempty"` +} + +type TiDBProb struct { + // "tcp" will use TCP socket to connect component port. + // "command" will probe the status api of tidb. + // +kubebuilder:validation:Enum=tcp;command + Type *string `json:"type,omitempty"` +} + +type TiDBSlowLog struct { + // Disabled indicates whether the separate slow log is disabled. + // Defaults to false. In other words, the separate slow log is enabled by default. + Disabled bool `json:"disable,omitempty"` + + // Image to tail slowlog to stdout + // Default is busybox:1.37.0 + Image *string `json:"image,omitempty"` + + // VolumeName is the name of the volume used to share the slow log file between the main container and the sidecar. + // If not set, a temparary volume will be used. + // Otherwise, it should be a name of a volume defined in the `volumes` field of the TiDBTemplateSpec. + VolumeName string `json:"volumeName,omitempty"` + + // ResourceRequirements defines the resource requirements for the slow log sidecar. + Resources ResourceRequirements `json:"resources,omitempty"` +} + +// TiDBService defines some fields used to override the default service. +type TiDBService struct { + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing + // to endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object or + // EndpointSlice objects. If clusterIP is "None", no virtual IP is + // allocated and the endpoints are published as a set of endpoints rather + // than a virtual IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the same endpoints as the clusterIP. 
+ // "LoadBalancer" builds on NodePort and creates an external load-balancer + // (if supported in the current cloud) which routes to the same endpoints + // as the clusterIP. + // "ExternalName" aliases this service to the specified externalName. + // Several other fields do not apply to ExternalName services. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + // +optional + Type corev1.ServiceType `json:"type,omitempty"` +} + +type TiDBTLSClient struct { + // When enabled, TiDB will accept TLS encrypted connections from MySQL clients. + // The steps to enable this feature: + // 1. Generate a TiDB server-side certificate and a client-side certifiacete for the TiDB cluster. + // There are multiple ways to generate certificates: + // - user-provided certificates: https://docs.pingcap.com/tidb/stable/generate-self-signed-certificates + // - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ + // - or use cert-manager signed certificates: https://cert-manager.io/ + // 2. Create a K8s Secret object which contains the TiDB server-side certificate created above. + // The name of this Secret must be: --server-secret. + // kubectl create secret generic --server-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + // 3. Create a K8s Secret object which contains the TiDB client-side certificate created above which will be used by TiDB Operator. + // The name of this Secret must be: --client-secret. + // kubectl create secret generic --client-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + // 4. Set Enabled to `true`. + Enabled bool `json:"enabled,omitempty"` + + // TODO(csuzhangxc): usage of the following fields + + // DisableClientAuthn will skip client's certificate validation from the TiDB server. 
+ // Optional: defaults to false + DisableClientAuthn bool `json:"disableClientAuthn,omitempty"` + + // SkipInternalClientCA will skip TiDB server's certificate validation for internal components like Initializer, Dashboard, etc. + // Optional: defaults to false + SkipInternalClientCA bool `json:"skipInternalClientCA,omitempty"` +} + +type TiDBAuthToken struct { + // Enabled indicates whether the `tidb_auth_token` authentication method is enabled. + // Defaults to false. + Enabled bool `json:"enabled,omitempty"` +} + +type TiDBGroupStatus struct { + CommonStatus `json:",inline"` + GroupStatus `json:",inline"` +} + +type TiDBSpec struct { + Cluster ClusterReference `json:"cluster"` + + // Topology defines the topology domain of this TiDB instance. + // It will be translated into a node affnity config. + // Topology cannot be changed. + Topology Topology `json:"topology,omitempty"` + + // Version specifies the TiDB version. + Version string `json:"version"` + + // Subdomain means the subdomain of the exported pd dns. + // A same pd cluster will use a same subdomain + Subdomain string `json:"subdomain"` + + // TiDBTemplateSpec embeded some fields managed by TiDBGroup. + TiDBTemplateSpec `json:",inline"` +} + +type TiDBStatus struct { + CommonStatus `json:",inline"` +} + +// IsTLSClientEnabled returns whether the TLS between TiDB server and MySQL client is enabled. +func (in *TiDBGroup) IsTLSClientEnabled() bool { + return in.Spec.TLSClient != nil && in.Spec.TLSClient.Enabled +} + +// TiDBServerTLSSecretName returns the secret name used in TiDB server for the TLS between TiDB server and MySQL client. +func (in *TiDBGroup) TiDBServerTLSSecretName() string { + return fmt.Sprintf("%s-%s-server-secret", in.Spec.Cluster.Name, in.Name) +} + +// TiDBClientTLSSecretName returns the secret name used in MySQL client for the TLS between TiDB server and MySQL client. 
+func (in *TiDBGroup) TiDBClientTLSSecretName() string { + return fmt.Sprintf("%s-%s-client-secret", in.Spec.Cluster.Name, in.Name) +} + +func (in *TiDBGroup) IsBootstrapSQLEnabled() bool { + return in.Spec.BootstrapSQLConfigMapName != nil && *in.Spec.BootstrapSQLConfigMapName != "" +} + +func (dbg *TiDBGroup) IsTokenBasedAuthEnabled() bool { + return dbg.Spec.TiDBAuthToken != nil && dbg.Spec.TiDBAuthToken.Enabled +} + +func (dbg *TiDBGroup) TiDBAuthTokenJWKSSecretName() string { + return fmt.Sprintf("%s-tidb-auth-token-jwks-secret", dbg.Spec.Cluster.Name) +} diff --git a/apis/core/v1alpha1/tiflash_types.go b/apis/core/v1alpha1/tiflash_types.go new file mode 100644 index 00000000000..df8d28b6363 --- /dev/null +++ b/apis/core/v1alpha1/tiflash_types.go @@ -0,0 +1,372 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/utils/ptr"
+)
+
+var (
+	_ GroupList         = &TiFlashGroupList{}
+	_ Group             = &TiFlashGroup{}
+	_ ComponentAccessor = &TiFlash{}
+)
+
+const (
+	TiFlashPortNameFlash       = "tiflash"
+	TiFlashPortNameProxy       = "proxy"
+	TiFlashPortNameMetrics     = "metrics"
+	TiFlashPortNameProxyStatus = "proxy-metrics" // both used for metrics and status, same name as v1
+
+	DefaultTiFlashPortFlash       = 3930
+	DefaultTiFlashPortProxy       = 20170
+	DefaultTiFlashPortMetrics     = 8234
+	DefaultTiFlashPortProxyStatus = 20292
+)
+
+const (
+	TiFlashCondHealth   = "Health"
+	TiFlashHealthReason = "TiFlashHealth"
+
+	TiFlashCondSuspended = "Suspended"
+	TiFlashSuspendReason = "TiFlashSuspend"
+
+	TiFlashGroupCondSuspended = "Suspended"
+	TiFlashGroupSuspendReason = "TiFlashGroupSuspend"
+)
+
+const (
+	TiFlashServerLogContainerName = NamePrefix + "serverlog"
+	TiFlashErrorLogContainerName  = NamePrefix + "errorlog"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+
+// TiFlashGroupList defines a list of TiFlash groups
+type TiFlashGroupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []TiFlashGroup `json:"items"`
+}
+
+func (l *TiFlashGroupList) ToSlice() []Group {
+	groups := make([]Group, 0, len(l.Items))
+	for i := range l.Items {
+		groups = append(groups, &l.Items[i])
+	}
+	return groups
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:categories=tc;tg
+// NOTE(review): merged duplicate +kubebuilder:resource markers — they override each other, so "tc" was silently dropped before.
+// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name`
+// +kubebuilder:printcolumn:name="Available",type=string,JSONPath=`.status.conditions[?(@.type=="Available")].status`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
+// TiFlashGroup defines a group of similar TiFlash instances
+type TiFlashGroup struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   TiFlashGroupSpec   `json:"spec,omitempty"`
+	Status TiFlashGroupStatus `json:"status,omitempty"`
+}
+
+func (in *TiFlashGroup) GetClusterName() string {
+	return in.Spec.Cluster.Name
+}
+
+func (in *TiFlashGroup) GetDesiredReplicas() int32 {
+	if in.Spec.Replicas == nil {
+		return 0
+	}
+	return *in.Spec.Replicas
+}
+
+func (in *TiFlashGroup) GetDesiredVersion() string {
+	return in.Spec.Version
+}
+
+func (in *TiFlashGroup) GetActualVersion() string {
+	return in.Status.Version
+}
+
+func (in *TiFlashGroup) GetStatus() GroupStatus {
+	return in.Status.GroupStatus
+}
+
+func (in *TiFlashGroup) ComponentKind() ComponentKind {
+	return ComponentKindTiFlash
+}
+
+func (in *TiFlashGroup) GVK() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("TiFlashGroup")
+}
+
+func (in *TiFlashGroup) ObservedGeneration() int64 {
+	return in.Status.ObservedGeneration
+}
+
+func (in *TiFlashGroup) CurrentRevision() string {
+	return in.Status.CurrentRevision
+}
+
+func (in *TiFlashGroup) UpdateRevision() string {
+	return in.Status.UpdateRevision
+}
+
+func (in *TiFlashGroup) CollisionCount() *int32 {
+	if in.Status.CollisionCount == nil {
+		return nil
+	}
+	return ptr.To(*in.Status.CollisionCount)
+}
+
+func (in *TiFlashGroup) IsHealthy() bool {
+	// TODO implement me
+	return true
+}
+
+func (in *TiFlashGroup) GetFlashPort() int32 {
+	if in.Spec.Template.Spec.Server.Ports.Flash != nil {
+		return in.Spec.Template.Spec.Server.Ports.Flash.Port
+	}
+	return DefaultTiFlashPortFlash
+}
+
+func (in *TiFlashGroup) GetProxyPort() int32 {
+	if in.Spec.Template.Spec.Server.Ports.Proxy != nil {
+		return in.Spec.Template.Spec.Server.Ports.Proxy.Port
+	}
+	return DefaultTiFlashPortProxy
+}
+
+func (in *TiFlashGroup) GetMetricsPort() int32 {
+	if in.Spec.Template.Spec.Server.Ports.Metrics != nil {
+		return in.Spec.Template.Spec.Server.Ports.Metrics.Port
+	}
+	return DefaultTiFlashPortMetrics
+}
+
+func (in *TiFlashGroup) GetProxyStatusPort() int32 {
+	if in.Spec.Template.Spec.Server.Ports.ProxyStatus != nil {
+		return in.Spec.Template.Spec.Server.Ports.ProxyStatus.Port
+	}
+	return DefaultTiFlashPortProxyStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+
+// TiFlashList defines a list of TiFlash instances
+type TiFlashList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []TiFlash `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:categories=tc;tiflash
+// NOTE(review): merged duplicate +kubebuilder:resource markers — they override each other, so "tc" was silently dropped before.
+// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name`
+// +kubebuilder:printcolumn:name="StoreID",type=string,JSONPath=`.status.id`
+// +kubebuilder:printcolumn:name="StoreState",type=string,JSONPath=`.status.state`
+// +kubebuilder:printcolumn:name="Healthy",type=string,JSONPath=`.status.conditions[?(@.type=="Health")].status`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
+// TiFlash defines a TiFlash instance
+type TiFlash struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   TiFlashSpec   `json:"spec,omitempty"`
+	Status TiFlashStatus `json:"status,omitempty"`
+}
+
+func (in *TiFlash) GetClusterName() string {
+	return in.Spec.Cluster.Name
+}
+
+func (in *TiFlash) ComponentKind() ComponentKind {
+	return ComponentKindTiFlash
+}
+
+func (in *TiFlash) GVK() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("TiFlash")
+}
+
+func (in *TiFlash) ObservedGeneration() int64 {
+	return in.Status.ObservedGeneration
+}
+
+func (in *TiFlash) CurrentRevision() string {
+	return in.Status.CurrentRevision
+}
+
+func (in *TiFlash) UpdateRevision() string {
+	return in.Status.UpdateRevision
+}
+
+func (in *TiFlash) CollisionCount() *int32 {
+	if in.Status.CollisionCount == nil {
+		return nil
+	}
+	return ptr.To(*in.Status.CollisionCount)
+}
+
+func (in *TiFlash) IsHealthy() bool {
+	return meta.IsStatusConditionTrue(in.Status.Conditions, TiFlashCondHealth) && in.DeletionTimestamp.IsZero()
+}
+
+func (in *TiFlash) GetFlashPort() int32 {
+	if in.Spec.Server.Ports.Flash != nil {
+		return in.Spec.Server.Ports.Flash.Port
+	}
+	return DefaultTiFlashPortFlash
+}
+
+func (in *TiFlash) GetProxyPort() int32 {
+	if in.Spec.Server.Ports.Proxy != nil {
+		return in.Spec.Server.Ports.Proxy.Port
+	}
+	return DefaultTiFlashPortProxy
+}
+
+func (in *TiFlash) GetMetricsPort() int32 {
+	if in.Spec.Server.Ports.Metrics != nil {
+		return in.Spec.Server.Ports.Metrics.Port
+	}
+	return DefaultTiFlashPortMetrics
+}
+
+func (in *TiFlash) GetProxyStatusPort() int32 {
+	if in.Spec.Server.Ports.ProxyStatus != nil {
+		return in.Spec.Server.Ports.ProxyStatus.Port
+	}
+	return DefaultTiFlashPortProxyStatus
+}
+
+type TiFlashGroupSpec struct {
+	Cluster  ClusterReference `json:"cluster"`
+	Replicas *int32           `json:"replicas"`
+	Version  string           `json:"version"`
+
+	// ConfigUpdateStrategy determines how the configuration change is applied to the cluster.
+	// Valid values are "RollingUpdate" (by default) and "InPlace".
+ // +kubebuilder:validation:Enum=RollingUpdate;InPlace + // +kubebuilder:default="RollingUpdate" + ConfigUpdateStrategy ConfigUpdateStrategy `json:"configUpdateStrategy,omitempty"` + SchedulePolicies []SchedulePolicy `json:"schedulePolicies,omitempty"` + Template TiFlashTemplate `json:"template"` +} + +type TiFlashTemplate struct { + ObjectMeta `json:"metadata,omitempty"` + Spec TiFlashTemplateSpec `json:"spec"` +} + +type TiFlashTemplateSpec struct { + // Image is tiflash's image + // If tag is omitted, version will be used as the image tag. + // Default is pingcap/tiflash + Image *string `json:"image,omitempty"` + // Server defines the server config of TiFlash + Server TiFlashServer `json:"server,omitempty"` + // Resources defines resource required by TiFlash + Resources ResourceRequirements `json:"resources,omitempty"` + + // Config defines config file of TiFlash + Config ConfigFile `json:"config"` + // ProxyConfig defines config file of TiFlash proxy + ProxyConfig ConfigFile `json:"proxyConfig,omitempty"` + + // Volumes defines data volume of TiFlash + Volumes []Volume `json:"volumes"` + + // LogTailer defines the sidecar log tailer config of TiFlash. + // We always use sidecar to tail the log of TiFlash now. + LogTailer *TiFlashLogTailer `json:"logTailer,omitempty"` + + // Overlay defines a k8s native resource template patch + // All resources(pod, pvcs, ...)
managed by TiFlash can be overlayed by this field + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Overlay *Overlay `json:"overlay,omitempty"` +} + +type TiFlashServer struct { + // Ports defines all ports listened by tiflash + Ports TiFlashPorts `json:"ports,omitempty"` +} + +type TiFlashPorts struct { + Flash *Port `json:"flash,omitempty"` + Metrics *Port `json:"metrics,omitempty"` + + Proxy *Port `json:"proxy,omitempty"` + ProxyStatus *Port `json:"proxyStatus,omitempty"` +} + +type TiFlashLogTailer struct { + // Image to tail log to stdout + // Default is busybox:1.37.0 + Image *string `json:"image,omitempty"` + + // ResourceRequirements defines the resource requirements for the log sidecar. + Resources ResourceRequirements `json:"resources,omitempty"` +} + +type TiFlashGroupStatus struct { + CommonStatus `json:",inline"` + GroupStatus `json:",inline"` +} + +type TiFlashSpec struct { + // Cluster is a reference of tidb cluster + Cluster ClusterReference `json:"cluster"` + // Topology defines the topology domain of this tiflash instance + // It will be translated into a node affinity config + // Topology cannot be changed + Topology Topology `json:"topology,omitempty"` + // Version specifies the TiFlash version + Version string `json:"version"` + // Subdomain means the subdomain of the exported TiFlash dns. + // A same TiFlash group will use a same subdomain + Subdomain string `json:"subdomain"` + + // TiFlashTemplateSpec embedded some fields managed by TiFlashGroup + TiFlashTemplateSpec `json:",inline"` +} + +type TiFlashStatus struct { + CommonStatus `json:",inline"` + + // Store ID + ID string `json:"id,omitempty"` + + // Store State + State string `json:"state,omitempty"` +} diff --git a/apis/core/v1alpha1/tikv_types.go b/apis/core/v1alpha1/tikv_types.go new file mode 100644 index 00000000000..5c904c98ce2 --- /dev/null +++ b/apis/core/v1alpha1/tikv_types.go @@ -0,0 +1,362 @@ +// Copyright 2024 PingCAP, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" +) + +var ( + _ GroupList = &TiKVGroupList{} + _ Group = &TiKVGroup{} + _ ComponentAccessor = &TiKV{} +) + +const ( + // VolumeUsageTypeTiKVData is the main data dir for the tikv + // The default sub path of this type is "data" + VolumeUsageTypeTiKVData VolumeUsageType = "data" + // VolumeUsageTypeTiKVRaftEngine is the dir for the raft log of tikv + // The default sub path of this type is "raft-engine" + VolumeUsageTypeTiKVRaftEngine VolumeUsageType = "raft-engine" + // VolumeUsageTypeTiKVWAL is the dir for the WAL of tikv + // The default sub path of this type is "wal" + VolumeUsageTypeTiKVWAL VolumeUsageType = "wal" +) + +const ( + TiKVPortNameClient = "client" + TiKVPortNameStatus = "status" + DefaultTiKVPortClient = 20160 + DefaultTiKVPortStatus = 20180 +) + +const ( + TiKVCondHealth = "Health" + TiKVHealthReason = "TiKVHealth" + + TiKVCondLeadersEvicted = "LeadersEvicted" + + TiKVCondSuspended = "Suspended" + TiKVSuspendReason = "TiKVSuspend" + + TiKVGroupCondSuspended = "Suspended" + TiKVGroupSuspendReason = "TiKVGroupSuspend" +) + +const ( + // store state for both TiKV and TiFlash stores + + StoreStateUnknown = "Unknown" + StoreStatePreparing = "Preparing" + StoreStateServing = "Serving" + StoreStateRemoving = "Removing" + 
StoreStateRemoved = "Removed" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// TiKVGroupList defines a list of TiKV groups +type TiKVGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []TiKVGroup `json:"items"` +} + +func (l *TiKVGroupList) ToSlice() []Group { + groups := make([]Group, 0, len(l.Items)) + for i := range l.Items { + groups = append(groups, &l.Items[i]) + } + return groups +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories=tc +// +kubebuilder:resource:categories=tg +// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name` +// +kubebuilder:printcolumn:name="Available",type=string,JSONPath=`.status.conditions[?(@.type=="Available")].status` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// TiKVGroup defines a group of similar TiKV instances +type TiKVGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TiKVGroupSpec `json:"spec,omitempty"` + Status TiKVGroupStatus `json:"status,omitempty"` +} + +func (in *TiKVGroup) GetClusterName() string { + return in.Spec.Cluster.Name +} + +func (in *TiKVGroup) ComponentKind() ComponentKind { + return ComponentKindTiKV +} + +func (in *TiKVGroup) GVK() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("TiKVGroup") +} + +func (in *TiKVGroup) ObservedGeneration() int64 { + return in.Status.ObservedGeneration +} + +func (in *TiKVGroup) CurrentRevision() string { + return in.Status.CurrentRevision +} + +func (in *TiKVGroup) UpdateRevision() string { + return in.Status.UpdateRevision +} + +func (in *TiKVGroup) CollisionCount() *int32 { + if in.Status.CollisionCount == nil { + return nil + } + return 
ptr.To(*in.Status.CollisionCount) +} + +func (in *TiKVGroup) IsHealthy() bool { + // TODO implement me + return true +} + +func (in *TiKVGroup) GetDesiredReplicas() int32 { + if in.Spec.Replicas == nil { + return 0 + } + return *in.Spec.Replicas +} + +func (in *TiKVGroup) MountClusterClientSecret() bool { + return in.Spec.MountClusterClientSecret != nil && *in.Spec.MountClusterClientSecret +} + +func (in *TiKVGroup) GetDesiredVersion() string { + return in.Spec.Version +} + +func (in *TiKVGroup) GetActualVersion() string { + return in.Status.Version +} + +func (in *TiKVGroup) GetStatus() GroupStatus { + return in.Status.GroupStatus +} + +func (in *TiKVGroup) GetClientPort() int32 { + if in.Spec.Template.Spec.Server.Ports.Client != nil { + return in.Spec.Template.Spec.Server.Ports.Client.Port + } + return DefaultTiKVPortClient +} + +func (in *TiKVGroup) GetStatusPort() int32 { + if in.Spec.Template.Spec.Server.Ports.Status != nil { + return in.Spec.Template.Spec.Server.Ports.Status.Port + } + return DefaultTiKVPortStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// TiKVList defines a list of TiKV instances +type TiKVList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []TiKV `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories=tc +// +kubebuilder:resource:categories=tikv +// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.cluster.name` +// +kubebuilder:printcolumn:name="StoreID",type=string,JSONPath=`.status.id` +// +kubebuilder:printcolumn:name="StoreState",type=string,JSONPath=`.status.state` +// +kubebuilder:printcolumn:name="Healthy",type=string,JSONPath=`.status.conditions[?(@.type=="Health")].status` +// 
+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// TiKV defines a TiKV instance +type TiKV struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TiKVSpec `json:"spec,omitempty"` + Status TiKVStatus `json:"status,omitempty"` +} + +func (in *TiKV) GetClusterName() string { + return in.Spec.Cluster.Name +} + +func (in *TiKV) ComponentKind() ComponentKind { + return ComponentKindTiKV +} + +func (in *TiKV) GVK() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("TiKV") +} + +func (in *TiKV) ObservedGeneration() int64 { + return in.Status.ObservedGeneration +} + +func (in *TiKV) CurrentRevision() string { + return in.Status.CurrentRevision +} + +func (in *TiKV) UpdateRevision() string { + return in.Status.UpdateRevision +} + +func (in *TiKV) CollisionCount() *int32 { + if in.Status.CollisionCount == nil { + return nil + } + return ptr.To(*in.Status.CollisionCount) +} + +func (in *TiKV) IsHealthy() bool { + return meta.IsStatusConditionTrue(in.Status.Conditions, TiKVCondHealth) && in.DeletionTimestamp.IsZero() +} + +func (in *TiKV) GetClientPort() int32 { + if in.Spec.Server.Ports.Client != nil { + return in.Spec.Server.Ports.Client.Port + } + return DefaultTiKVPortClient +} + +func (in *TiKV) GetStatusPort() int32 { + if in.Spec.Server.Ports.Status != nil { + return in.Spec.Server.Ports.Status.Port + } + return DefaultTiKVPortStatus +} + +// TiKVGroupSpec describes the common attributes of a TiKVGroup +type TiKVGroupSpec struct { + Cluster ClusterReference `json:"cluster"` + Replicas *int32 `json:"replicas"` + Version string `json:"version"` + + // MountClusterClientSecret indicates whether to mount `cluster-client-secret` to the Pod. 
+ MountClusterClientSecret *bool `json:"mountClusterClientSecret,omitempty"` + + // +listType=map + // +listMapKey=type + SchedulePolicies []SchedulePolicy `json:"schedulePolicies,omitempty"` + + // ConfigUpdateStrategy determines how the configuration change is applied to the cluster. + // Valid values are "RollingUpdate" (by default) and "InPlace". + // +kubebuilder:validation:Enum=RollingUpdate;InPlace + // +kubebuilder:default="RollingUpdate" + ConfigUpdateStrategy ConfigUpdateStrategy `json:"configUpdateStrategy,omitempty"` + + Template TiKVTemplate `json:"template"` +} + +type TiKVTemplate struct { + ObjectMeta `json:"metadata,omitempty"` + Spec TiKVTemplateSpec `json:"spec"` +} + +// TiKVTemplateSpec can only be specified in TiKVGroup +// TODO: Its name may need to be changed to distinguish from PodTemplateSpec +type TiKVTemplateSpec struct { + // Image is tikv's image + // If tag is omitted, version will be used as the image tag. + // Default is pingcap/tikv + Image *string `json:"image,omitempty"` + // Server defines the server config of TiKV + Server TiKVServer `json:"server,omitempty"` + // Resources defines resource required by TiKV + Resources ResourceRequirements `json:"resources,omitempty"` + // Config defines config file of TiKV + Config ConfigFile `json:"config"` + // Volumes defines data volume of TiKV + Volumes []Volume `json:"volumes"` + + // PreStop defines preStop config + PreStop *TiKVPreStop `json:"preStop,omitempty"` + // Overlay defines a k8s native resource template patch + // All resources(pod, pvcs, ...)
managed by TiKV can be overlayed by this field + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Overlay *Overlay `json:"overlay,omitempty"` +} + +type TiKVPreStop struct { + // Image of pre stop checker + // Default is pingcap/prestop-checker:latest + Image *string `json:"image,omitempty"` +} + +type TiKVServer struct { + // Ports defines all ports listened by tikv + Ports TiKVPorts `json:"ports,omitempty"` +} + +type TiKVPorts struct { + // Client defines port for tikv's api service + Client *Port `json:"client,omitempty"` + // Status defines port for tikv status api + Status *Port `json:"status,omitempty"` +} + +type TiKVGroupStatus struct { + CommonStatus `json:",inline"` + GroupStatus `json:",inline"` +} + +type TiKVSpec struct { + // Cluster is a reference of tidb cluster + Cluster ClusterReference `json:"cluster"` + // Topology defines the topology domain of this tikv instance + // It will be translated into a node affinity config + // Topology cannot be changed + Topology Topology `json:"topology,omitempty"` + // Version specifies the TiKV version + Version string `json:"version"` + // Subdomain means the subdomain of the exported tikv dns. + // A same tikv group will use a same subdomain + Subdomain string `json:"subdomain"` + + // TiKVTemplateSpec embedded some fields managed by TiKVGroup + TiKVTemplateSpec `json:",inline"` +} + +type TiKVStatus struct { + CommonStatus `json:",inline"` + + // Store ID + ID string `json:"id,omitempty"` + + // Store State + State string `json:"state,omitempty"` +} diff --git a/apis/core/v1alpha1/zz_generated.deepcopy.go b/apis/core/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..d70d2468919 --- /dev/null +++ b/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,2005 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2024 PingCAP, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterReference) DeepCopyInto(out *ClusterReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReference. +func (in *ClusterReference) DeepCopy() *ClusterReference { + if in == nil { + return nil + } + out := new(ClusterReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + if in.SuspendAction != nil { + in, out := &in.SuspendAction, &out.SuspendAction + *out = new(SuspendAction) + **out = **in + } + if in.TLSCluster != nil { + in, out := &in.TLSCluster, &out.TLSCluster + *out = new(TLSCluster) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. 
+func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ComponentStatus, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonStatus) DeepCopyInto(out *CommonStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonStatus. +func (in *CommonStatus) DeepCopy() *CommonStatus { + if in == nil { + return nil + } + out := new(CommonStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FakeGroup) DeepCopyInto(out *FakeGroup) { + *out = *in + if in.CollisionCountVal != nil { + in, out := &in.CollisionCountVal, &out.CollisionCountVal + *out = new(int32) + **out = **in + } + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeGroup. +func (in *FakeGroup) DeepCopy() *FakeGroup { + if in == nil { + return nil + } + out := new(FakeGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupStatus) DeepCopyInto(out *GroupStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStatus. +func (in *GroupStatus) DeepCopy() *GroupStatus { + if in == nil { + return nil + } + out := new(GroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Overlay) DeepCopyInto(out *Overlay) { + *out = *in + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(PodOverlay) + (*in).DeepCopyInto(*out) + } + if in.PersistentVolumeClaims != nil { + in, out := &in.PersistentVolumeClaims, &out.PersistentVolumeClaims + *out = make([]PersistentVolumeClaimOverlay, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overlay. +func (in *Overlay) DeepCopy() *Overlay { + if in == nil { + return nil + } + out := new(Overlay) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PD) DeepCopyInto(out *PD) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PD. 
+func (in *PD) DeepCopy() *PD { + if in == nil { + return nil + } + out := new(PD) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PD) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDGroup) DeepCopyInto(out *PDGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDGroup. +func (in *PDGroup) DeepCopy() *PDGroup { + if in == nil { + return nil + } + out := new(PDGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDGroupList) DeepCopyInto(out *PDGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PDGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDGroupList. +func (in *PDGroupList) DeepCopy() *PDGroupList { + if in == nil { + return nil + } + out := new(PDGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PDGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDGroupSpec) DeepCopyInto(out *PDGroupSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.MountClusterClientSecret != nil { + in, out := &in.MountClusterClientSecret, &out.MountClusterClientSecret + *out = new(bool) + **out = **in + } + if in.SchedulePolicies != nil { + in, out := &in.SchedulePolicies, &out.SchedulePolicies + *out = make([]SchedulePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDGroupSpec. +func (in *PDGroupSpec) DeepCopy() *PDGroupSpec { + if in == nil { + return nil + } + out := new(PDGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDGroupStatus) DeepCopyInto(out *PDGroupStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + out.GroupStatus = in.GroupStatus + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDGroupStatus. +func (in *PDGroupStatus) DeepCopy() *PDGroupStatus { + if in == nil { + return nil + } + out := new(PDGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDList) DeepCopyInto(out *PDList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PD, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDList. +func (in *PDList) DeepCopy() *PDList { + if in == nil { + return nil + } + out := new(PDList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDPorts) DeepCopyInto(out *PDPorts) { + *out = *in + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(Port) + **out = **in + } + if in.Peer != nil { + in, out := &in.Peer, &out.Peer + *out = new(Port) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDPorts. +func (in *PDPorts) DeepCopy() *PDPorts { + if in == nil { + return nil + } + out := new(PDPorts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDServer) DeepCopyInto(out *PDServer) { + *out = *in + in.Ports.DeepCopyInto(&out.Ports) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDServer. +func (in *PDServer) DeepCopy() *PDServer { + if in == nil { + return nil + } + out := new(PDServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDSpec) DeepCopyInto(out *PDSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(Topology, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.PDTemplateSpec.DeepCopyInto(&out.PDTemplateSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDSpec. +func (in *PDSpec) DeepCopy() *PDSpec { + if in == nil { + return nil + } + out := new(PDSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDStatus) DeepCopyInto(out *PDStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDStatus. +func (in *PDStatus) DeepCopy() *PDStatus { + if in == nil { + return nil + } + out := new(PDStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDTemplate) DeepCopyInto(out *PDTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDTemplate. +func (in *PDTemplate) DeepCopy() *PDTemplate { + if in == nil { + return nil + } + out := new(PDTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDTemplateSpec) DeepCopyInto(out *PDTemplateSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + in.Server.DeepCopyInto(&out.Server) + in.Resources.DeepCopyInto(&out.Resources) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Overlay != nil { + in, out := &in.Overlay, &out.Overlay + *out = new(Overlay) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDTemplateSpec. +func (in *PDTemplateSpec) DeepCopy() *PDTemplateSpec { + if in == nil { + return nil + } + out := new(PDTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimOverlay) DeepCopyInto(out *PersistentVolumeClaimOverlay) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(corev1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimOverlay. +func (in *PersistentVolumeClaimOverlay) DeepCopy() *PersistentVolumeClaimOverlay { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimOverlay) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodOverlay) DeepCopyInto(out *PodOverlay) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(corev1.PodSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodOverlay. +func (in *PodOverlay) DeepCopy() *PodOverlay { + if in == nil { + return nil + } + out := new(PodOverlay) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Port) DeepCopyInto(out *Port) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. +func (in *Port) DeepCopy() *Port { + if in == nil { + return nil + } + out := new(Port) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + x := (*in).DeepCopy() + *out = &x + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. +func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulePolicy) DeepCopyInto(out *SchedulePolicy) { + *out = *in + if in.EvenlySpread != nil { + in, out := &in.EvenlySpread, &out.EvenlySpread + *out = new(SchedulePolicyEvenlySpread) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicy. +func (in *SchedulePolicy) DeepCopy() *SchedulePolicy { + if in == nil { + return nil + } + out := new(SchedulePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulePolicyEvenlySpread) DeepCopyInto(out *SchedulePolicyEvenlySpread) { + *out = *in + if in.Topologies != nil { + in, out := &in.Topologies, &out.Topologies + *out = make([]ScheduleTopology, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicyEvenlySpread. +func (in *SchedulePolicyEvenlySpread) DeepCopy() *SchedulePolicyEvenlySpread { + if in == nil { + return nil + } + out := new(SchedulePolicyEvenlySpread) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleTopology) DeepCopyInto(out *ScheduleTopology) { + *out = *in + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(Topology, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleTopology. 
+func (in *ScheduleTopology) DeepCopy() *ScheduleTopology { + if in == nil { + return nil + } + out := new(ScheduleTopology) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuspendAction) DeepCopyInto(out *SuspendAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendAction. +func (in *SuspendAction) DeepCopy() *SuspendAction { + if in == nil { + return nil + } + out := new(SuspendAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCluster) DeepCopyInto(out *TLSCluster) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCluster. +func (in *TLSCluster) DeepCopy() *TLSCluster { + if in == nil { + return nil + } + out := new(TLSCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDB) DeepCopyInto(out *TiDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDB. +func (in *TiDB) DeepCopy() *TiDB { + if in == nil { + return nil + } + out := new(TiDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiDBAuthToken) DeepCopyInto(out *TiDBAuthToken) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBAuthToken. +func (in *TiDBAuthToken) DeepCopy() *TiDBAuthToken { + if in == nil { + return nil + } + out := new(TiDBAuthToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBGroup) DeepCopyInto(out *TiDBGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBGroup. +func (in *TiDBGroup) DeepCopy() *TiDBGroup { + if in == nil { + return nil + } + out := new(TiDBGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiDBGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBGroupList) DeepCopyInto(out *TiDBGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TiDBGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBGroupList. +func (in *TiDBGroupList) DeepCopy() *TiDBGroupList { + if in == nil { + return nil + } + out := new(TiDBGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TiDBGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBGroupSpec) DeepCopyInto(out *TiDBGroupSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(TiDBService) + **out = **in + } + if in.TLSClient != nil { + in, out := &in.TLSClient, &out.TLSClient + *out = new(TiDBTLSClient) + **out = **in + } + if in.BootstrapSQLConfigMapName != nil { + in, out := &in.BootstrapSQLConfigMapName, &out.BootstrapSQLConfigMapName + *out = new(string) + **out = **in + } + if in.TiDBAuthToken != nil { + in, out := &in.TiDBAuthToken, &out.TiDBAuthToken + *out = new(TiDBAuthToken) + **out = **in + } + if in.SchedulePolicies != nil { + in, out := &in.SchedulePolicies, &out.SchedulePolicies + *out = make([]SchedulePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBGroupSpec. +func (in *TiDBGroupSpec) DeepCopy() *TiDBGroupSpec { + if in == nil { + return nil + } + out := new(TiDBGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBGroupStatus) DeepCopyInto(out *TiDBGroupStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + out.GroupStatus = in.GroupStatus + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBGroupStatus. 
+func (in *TiDBGroupStatus) DeepCopy() *TiDBGroupStatus { + if in == nil { + return nil + } + out := new(TiDBGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBList) DeepCopyInto(out *TiDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TiDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBList. +func (in *TiDBList) DeepCopy() *TiDBList { + if in == nil { + return nil + } + out := new(TiDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBPorts) DeepCopyInto(out *TiDBPorts) { + *out = *in + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(Port) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(Port) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBPorts. +func (in *TiDBPorts) DeepCopy() *TiDBPorts { + if in == nil { + return nil + } + out := new(TiDBPorts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiDBProb) DeepCopyInto(out *TiDBProb) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBProb. +func (in *TiDBProb) DeepCopy() *TiDBProb { + if in == nil { + return nil + } + out := new(TiDBProb) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBProbes) DeepCopyInto(out *TiDBProbes) { + *out = *in + if in.Readiness != nil { + in, out := &in.Readiness, &out.Readiness + *out = new(TiDBProb) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBProbes. +func (in *TiDBProbes) DeepCopy() *TiDBProbes { + if in == nil { + return nil + } + out := new(TiDBProbes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBServer) DeepCopyInto(out *TiDBServer) { + *out = *in + in.Ports.DeepCopyInto(&out.Ports) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBServer. +func (in *TiDBServer) DeepCopy() *TiDBServer { + if in == nil { + return nil + } + out := new(TiDBServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBService) DeepCopyInto(out *TiDBService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBService. 
+func (in *TiDBService) DeepCopy() *TiDBService { + if in == nil { + return nil + } + out := new(TiDBService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBSlowLog) DeepCopyInto(out *TiDBSlowLog) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBSlowLog. +func (in *TiDBSlowLog) DeepCopy() *TiDBSlowLog { + if in == nil { + return nil + } + out := new(TiDBSlowLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBSpec) DeepCopyInto(out *TiDBSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(Topology, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.TiDBTemplateSpec.DeepCopyInto(&out.TiDBTemplateSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBSpec. +func (in *TiDBSpec) DeepCopy() *TiDBSpec { + if in == nil { + return nil + } + out := new(TiDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBStatus) DeepCopyInto(out *TiDBStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBStatus. 
+func (in *TiDBStatus) DeepCopy() *TiDBStatus { + if in == nil { + return nil + } + out := new(TiDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBTLSClient) DeepCopyInto(out *TiDBTLSClient) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBTLSClient. +func (in *TiDBTLSClient) DeepCopy() *TiDBTLSClient { + if in == nil { + return nil + } + out := new(TiDBTLSClient) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiDBTemplate) DeepCopyInto(out *TiDBTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBTemplate. +func (in *TiDBTemplate) DeepCopy() *TiDBTemplate { + if in == nil { + return nil + } + out := new(TiDBTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiDBTemplateSpec) DeepCopyInto(out *TiDBTemplateSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + in.Server.DeepCopyInto(&out.Server) + in.Probes.DeepCopyInto(&out.Probes) + in.Resources.DeepCopyInto(&out.Resources) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SlowLog != nil { + in, out := &in.SlowLog, &out.SlowLog + *out = new(TiDBSlowLog) + (*in).DeepCopyInto(*out) + } + if in.Overlay != nil { + in, out := &in.Overlay, &out.Overlay + *out = new(Overlay) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBTemplateSpec. +func (in *TiDBTemplateSpec) DeepCopy() *TiDBTemplateSpec { + if in == nil { + return nil + } + out := new(TiDBTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlash) DeepCopyInto(out *TiFlash) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlash. +func (in *TiFlash) DeepCopy() *TiFlash { + if in == nil { + return nil + } + out := new(TiFlash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiFlash) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiFlashGroup) DeepCopyInto(out *TiFlashGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashGroup. +func (in *TiFlashGroup) DeepCopy() *TiFlashGroup { + if in == nil { + return nil + } + out := new(TiFlashGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiFlashGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashGroupList) DeepCopyInto(out *TiFlashGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TiFlashGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashGroupList. +func (in *TiFlashGroupList) DeepCopy() *TiFlashGroupList { + if in == nil { + return nil + } + out := new(TiFlashGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiFlashGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiFlashGroupSpec) DeepCopyInto(out *TiFlashGroupSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.SchedulePolicies != nil { + in, out := &in.SchedulePolicies, &out.SchedulePolicies + *out = make([]SchedulePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashGroupSpec. +func (in *TiFlashGroupSpec) DeepCopy() *TiFlashGroupSpec { + if in == nil { + return nil + } + out := new(TiFlashGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashGroupStatus) DeepCopyInto(out *TiFlashGroupStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + out.GroupStatus = in.GroupStatus + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashGroupStatus. +func (in *TiFlashGroupStatus) DeepCopy() *TiFlashGroupStatus { + if in == nil { + return nil + } + out := new(TiFlashGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashList) DeepCopyInto(out *TiFlashList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TiFlash, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashList. 
+func (in *TiFlashList) DeepCopy() *TiFlashList { + if in == nil { + return nil + } + out := new(TiFlashList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiFlashList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashLogTailer) DeepCopyInto(out *TiFlashLogTailer) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashLogTailer. +func (in *TiFlashLogTailer) DeepCopy() *TiFlashLogTailer { + if in == nil { + return nil + } + out := new(TiFlashLogTailer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashPorts) DeepCopyInto(out *TiFlashPorts) { + *out = *in + if in.Flash != nil { + in, out := &in.Flash, &out.Flash + *out = new(Port) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(Port) + **out = **in + } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(Port) + **out = **in + } + if in.ProxyStatus != nil { + in, out := &in.ProxyStatus, &out.ProxyStatus + *out = new(Port) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashPorts. +func (in *TiFlashPorts) DeepCopy() *TiFlashPorts { + if in == nil { + return nil + } + out := new(TiFlashPorts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiFlashServer) DeepCopyInto(out *TiFlashServer) { + *out = *in + in.Ports.DeepCopyInto(&out.Ports) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashServer. +func (in *TiFlashServer) DeepCopy() *TiFlashServer { + if in == nil { + return nil + } + out := new(TiFlashServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashSpec) DeepCopyInto(out *TiFlashSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(Topology, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.TiFlashTemplateSpec.DeepCopyInto(&out.TiFlashTemplateSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashSpec. +func (in *TiFlashSpec) DeepCopy() *TiFlashSpec { + if in == nil { + return nil + } + out := new(TiFlashSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashStatus) DeepCopyInto(out *TiFlashStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashStatus. +func (in *TiFlashStatus) DeepCopy() *TiFlashStatus { + if in == nil { + return nil + } + out := new(TiFlashStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashTemplate) DeepCopyInto(out *TiFlashTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashTemplate. 
+func (in *TiFlashTemplate) DeepCopy() *TiFlashTemplate { + if in == nil { + return nil + } + out := new(TiFlashTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiFlashTemplateSpec) DeepCopyInto(out *TiFlashTemplateSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + in.Server.DeepCopyInto(&out.Server) + in.Resources.DeepCopyInto(&out.Resources) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogTailer != nil { + in, out := &in.LogTailer, &out.LogTailer + *out = new(TiFlashLogTailer) + (*in).DeepCopyInto(*out) + } + if in.Overlay != nil { + in, out := &in.Overlay, &out.Overlay + *out = new(Overlay) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashTemplateSpec. +func (in *TiFlashTemplateSpec) DeepCopy() *TiFlashTemplateSpec { + if in == nil { + return nil + } + out := new(TiFlashTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKV) DeepCopyInto(out *TiKV) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKV. +func (in *TiKV) DeepCopy() *TiKV { + if in == nil { + return nil + } + out := new(TiKV) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TiKV) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVGroup) DeepCopyInto(out *TiKVGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVGroup. +func (in *TiKVGroup) DeepCopy() *TiKVGroup { + if in == nil { + return nil + } + out := new(TiKVGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiKVGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVGroupList) DeepCopyInto(out *TiKVGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TiKVGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVGroupList. +func (in *TiKVGroupList) DeepCopy() *TiKVGroupList { + if in == nil { + return nil + } + out := new(TiKVGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiKVGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiKVGroupSpec) DeepCopyInto(out *TiKVGroupSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.MountClusterClientSecret != nil { + in, out := &in.MountClusterClientSecret, &out.MountClusterClientSecret + *out = new(bool) + **out = **in + } + if in.SchedulePolicies != nil { + in, out := &in.SchedulePolicies, &out.SchedulePolicies + *out = make([]SchedulePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVGroupSpec. +func (in *TiKVGroupSpec) DeepCopy() *TiKVGroupSpec { + if in == nil { + return nil + } + out := new(TiKVGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVGroupStatus) DeepCopyInto(out *TiKVGroupStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + out.GroupStatus = in.GroupStatus + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVGroupStatus. +func (in *TiKVGroupStatus) DeepCopy() *TiKVGroupStatus { + if in == nil { + return nil + } + out := new(TiKVGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVList) DeepCopyInto(out *TiKVList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TiKV, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVList. 
+func (in *TiKVList) DeepCopy() *TiKVList { + if in == nil { + return nil + } + out := new(TiKVList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TiKVList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVPorts) DeepCopyInto(out *TiKVPorts) { + *out = *in + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(Port) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(Port) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVPorts. +func (in *TiKVPorts) DeepCopy() *TiKVPorts { + if in == nil { + return nil + } + out := new(TiKVPorts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVPreStop) DeepCopyInto(out *TiKVPreStop) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVPreStop. +func (in *TiKVPreStop) DeepCopy() *TiKVPreStop { + if in == nil { + return nil + } + out := new(TiKVPreStop) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVServer) DeepCopyInto(out *TiKVServer) { + *out = *in + in.Ports.DeepCopyInto(&out.Ports) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVServer. 
+func (in *TiKVServer) DeepCopy() *TiKVServer { + if in == nil { + return nil + } + out := new(TiKVServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVSpec) DeepCopyInto(out *TiKVSpec) { + *out = *in + out.Cluster = in.Cluster + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(Topology, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.TiKVTemplateSpec.DeepCopyInto(&out.TiKVTemplateSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVSpec. +func (in *TiKVSpec) DeepCopy() *TiKVSpec { + if in == nil { + return nil + } + out := new(TiKVSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVStatus) DeepCopyInto(out *TiKVStatus) { + *out = *in + in.CommonStatus.DeepCopyInto(&out.CommonStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVStatus. +func (in *TiKVStatus) DeepCopy() *TiKVStatus { + if in == nil { + return nil + } + out := new(TiKVStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiKVTemplate) DeepCopyInto(out *TiKVTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVTemplate. +func (in *TiKVTemplate) DeepCopy() *TiKVTemplate { + if in == nil { + return nil + } + out := new(TiKVTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiKVTemplateSpec) DeepCopyInto(out *TiKVTemplateSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + in.Server.DeepCopyInto(&out.Server) + in.Resources.DeepCopyInto(&out.Resources) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(TiKVPreStop) + (*in).DeepCopyInto(*out) + } + if in.Overlay != nil { + in, out := &in.Overlay, &out.Overlay + *out = new(Overlay) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVTemplateSpec. +func (in *TiKVTemplateSpec) DeepCopy() *TiKVTemplateSpec { + if in == nil { + return nil + } + out := new(TiKVTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Topology) DeepCopyInto(out *Topology) { + { + in := &in + *out = make(Topology, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology. +func (in Topology) DeepCopy() Topology { + if in == nil { + return nil + } + out := new(Topology) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + if in.For != nil { + in, out := &in.For, &out.For + *out = make([]VolumeUsage, len(*in)) + copy(*out, *in) + } + out.Storage = in.Storage.DeepCopy() + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeUsage) DeepCopyInto(out *VolumeUsage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeUsage. +func (in *VolumeUsage) DeepCopy() *VolumeUsage { + if in == nil { + return nil + } + out := new(VolumeUsage) + in.DeepCopyInto(out) + return out +} diff --git a/apis/core/v1alpha1/zz_generated.register.go b/apis/core/v1alpha1/zz_generated.register.go new file mode 100644 index 00000000000..1121e9ab57c --- /dev/null +++ b/apis/core/v1alpha1/zz_generated.register.go @@ -0,0 +1,84 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by register-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. +const GroupName = "core.pingcap.com" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Deprecated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Cluster{}, + &ClusterList{}, + &PD{}, + &PDGroup{}, + &PDGroupList{}, + &PDList{}, + &TiDB{}, + &TiDBGroup{}, + &TiDBGroupList{}, + &TiDBList{}, + &TiFlash{}, + &TiFlashGroup{}, + &TiFlashGroupList{}, + &TiFlashList{}, + &TiKV{}, + &TiKVGroup{}, + &TiKVGroupList{}, + &TiKVList{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/cmd/operator/main.go b/cmd/operator/main.go new file mode 100644 index 00000000000..78b7aed18c1 --- /dev/null +++ b/cmd/operator/main.go @@ -0,0 +1,293 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "flag" + "fmt" + "os" + + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlcli "sigs.k8s.io/controller-runtime/pkg/client" + runtimeConfig "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/cluster" + "github.com/pingcap/tidb-operator/pkg/controllers/pd" + "github.com/pingcap/tidb-operator/pkg/controllers/pdgroup" + "github.com/pingcap/tidb-operator/pkg/controllers/tidb" + "github.com/pingcap/tidb-operator/pkg/controllers/tidbgroup" + "github.com/pingcap/tidb-operator/pkg/controllers/tiflash" + "github.com/pingcap/tidb-operator/pkg/controllers/tiflashgroup" + "github.com/pingcap/tidb-operator/pkg/controllers/tikv" + "github.com/pingcap/tidb-operator/pkg/controllers/tikvgroup" + "github.com/pingcap/tidb-operator/pkg/metrics" + "github.com/pingcap/tidb-operator/pkg/scheme" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/kubefeat" + "github.com/pingcap/tidb-operator/pkg/version" + "github.com/pingcap/tidb-operator/pkg/volumes" +) + +var setupLog = ctrl.Log.WithName("setup").WithValues(version.Get().KeysAndValues()...) 
+ +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + var maxConcurrentReconciles int + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", true, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + //nolint:mnd // easy to understand + flag.IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 4, "Max concurrent reconciles") + opts := zap.Options{ + Development: false, + StacktraceLevel: zapcore.PanicLevel, // stacktrace on panic only + // use console encoder now for development, switch to json if needed later + Encoder: zapcore.NewConsoleEncoder(zapcore.EncoderConfig{ + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }), + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + utilruntime.PanicHandlers = append(utilruntime.PanicHandlers, func(_ context.Context, _ any) { + metrics.ControllerPanic.WithLabelValues().Inc() + }) + + kubeconfig := ctrl.GetConfigOrDie() + kubefeat.MustInitFeatureGates(kubeconfig) + + mgr, err := ctrl.NewManager(kubeconfig, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{BindAddress: metricsAddr}, + HealthProbeBindAddress: probeAddr, + Controller: runtimeConfig.Controller{ + MaxConcurrentReconciles: maxConcurrentReconciles, + }, + Cache: cache.Options{ + // Disable label selector for our own CRs + // These CRs 
don't need to be filtered + ByObject: BuildCacheByObject(), + DefaultLabelSelector: labels.SelectorFromSet(labels.Set{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + }), + }, + NewClient: func(cfg *rest.Config, opts ctrlcli.Options) (ctrlcli.Client, error) { + return client.New(cfg, opts) + }, + LeaderElection: enableLeaderElection, + LeaderElectionID: "c6a50700.pingcap.com", + }) + if err != nil { + setupLog.Error(err, "unable to new manager") + os.Exit(1) + } + + if err := setup(context.Background(), mgr); err != nil { + setupLog.Error(err, "failed to setup") + os.Exit(1) + } +} + +func setup(ctx context.Context, mgr ctrl.Manager) error { + // client of manager should be newed by options.NewClient + // which actually returns tidb-operator/pkg/client.Client + c := mgr.GetClient().(client.Client) + + if err := addIndexer(ctx, mgr); err != nil { + return fmt.Errorf("unable to add indexer: %w", err) + } + + logger := mgr.GetLogger() + + logger.Info("setup pd client manager") + pdcm := pdm.NewPDClientManager(mgr.GetLogger(), c) + + logger.Info("setup volume modifier") + vm, err := volumes.NewModifier(ctx, mgr.GetLogger().WithName("VolumeModifier"), c) + if err != nil { + return fmt.Errorf("failed to create volume modifier: %w", err) + } + + setupLog.Info("setup controllers") + if err := setupControllers(mgr, c, pdcm, vm); err != nil { + setupLog.Error(err, "unable to setup controllers") + os.Exit(1) + } + + logger.Info("start pd client manager") + pdcm.Start(ctx) + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + return fmt.Errorf("unable to set up health check: %w", err) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + return fmt.Errorf("unable to set up ready check: %w", err) + } + + logger.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + return fmt.Errorf("start manager failed: %w", err) + } + + return nil +} + +func addIndexer(ctx context.Context, mgr 
ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField(ctx, &v1alpha1.PDGroup{}, "spec.cluster.name", func(obj client.Object) []string { + pdGroup := obj.(*v1alpha1.PDGroup) + return []string{pdGroup.Spec.Cluster.Name} + }); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField(ctx, &v1alpha1.TiKVGroup{}, "spec.cluster.name", func(obj client.Object) []string { + tikvGroup := obj.(*v1alpha1.TiKVGroup) + return []string{tikvGroup.Spec.Cluster.Name} + }); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField(ctx, &v1alpha1.TiDBGroup{}, "spec.cluster.name", func(obj client.Object) []string { + tidbGroup := obj.(*v1alpha1.TiDBGroup) + return []string{tidbGroup.Spec.Cluster.Name} + }); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField(ctx, &v1alpha1.TiFlashGroup{}, "spec.cluster.name", func(obj client.Object) []string { + tiflashGroup := obj.(*v1alpha1.TiFlashGroup) + return []string{tiflashGroup.Spec.Cluster.Name} + }); err != nil { + return err + } + + return nil +} + +func setupControllers(mgr ctrl.Manager, c client.Client, pdcm pdm.PDClientManager, vm volumes.Modifier) error { + if err := cluster.Setup(mgr, c); err != nil { + return fmt.Errorf("unable to create controller Cluster: %w", err) + } + if err := pdgroup.Setup(mgr, c, pdcm); err != nil { + return fmt.Errorf("unable to create controller PDGroup: %w", err) + } + if err := pd.Setup(mgr, c, pdcm, vm); err != nil { + return fmt.Errorf("unable to create controller PD: %w", err) + } + if err := tidbgroup.Setup(mgr, c); err != nil { + return fmt.Errorf("unable to create controller TiDBGroup: %w", err) + } + if err := tidb.Setup(mgr, c, vm); err != nil { + return fmt.Errorf("unable to create controller TiDB: %w", err) + } + if err := tikvgroup.Setup(mgr, c); err != nil { + return fmt.Errorf("unable to create controller TiKVGroup: %w", err) + } + if err := tikv.Setup(mgr, c, pdcm, vm); err != nil { + return fmt.Errorf("unable to 
create controller TiKV: %w", err) + } + if err := tiflashgroup.Setup(mgr, c); err != nil { + return fmt.Errorf("unable to create controller TiFlashGroup: %w", err) + } + if err := tiflash.Setup(mgr, c, pdcm, vm); err != nil { + return fmt.Errorf("unable to create controller TiFlash: %w", err) + } + return nil +} + +func BuildCacheByObject() map[client.Object]cache.ByObject { + byObj := map[client.Object]cache.ByObject{ + &v1alpha1.Cluster{}: { + Label: labels.Everything(), + }, + &v1alpha1.PDGroup{}: { + Label: labels.Everything(), + }, + &v1alpha1.PD{}: { + Label: labels.Everything(), + }, + &v1alpha1.TiKVGroup{}: { + Label: labels.Everything(), + }, + &v1alpha1.TiKV{}: { + Label: labels.Everything(), + }, + &v1alpha1.TiDBGroup{}: { + Label: labels.Everything(), + }, + &v1alpha1.TiDB{}: { + Label: labels.Everything(), + }, + &v1alpha1.TiFlashGroup{}: { + Label: labels.Everything(), + }, + &v1alpha1.TiFlash{}: { + Label: labels.Everything(), + }, + &corev1.Secret{}: { + // TLS secrets managed by cert-manager or user + Label: labels.Everything(), + }, + &corev1.Node{}: { + // need to sync some labels from nodes to TiKV, TiDB, ... + Label: labels.Everything(), + }, + &corev1.PersistentVolume{}: { + Label: labels.Everything(), + }, + &storagev1.StorageClass{}: { + Label: labels.Everything(), + }, + } + if kubefeat.FeatureGates.Stage(kubefeat.VolumeAttributesClass).Enabled(kubefeat.BETA) { + byObj[&storagev1beta1.VolumeAttributesClass{}] = cache.ByObject{ + Label: labels.Everything(), + } + } + + return byObj +} diff --git a/cmd/overlay-gen/generators/overlay.go b/cmd/overlay-gen/generators/overlay.go new file mode 100644 index 00000000000..76c8c43c2b3 --- /dev/null +++ b/cmd/overlay-gen/generators/overlay.go @@ -0,0 +1,493 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package generators + +import ( + "fmt" + "io" + "log/slog" + "reflect" + "strings" + + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" +) + +// overlayGenerator produces a file with autogenerated functions. +type overlayGenerator struct { + generator.GoGenerator + targetPackage string + imports namer.ImportTracker + funcTracker map[string]map[string]struct{} +} + +func NewOverlayGenerator(outputFilename, targetPackage string) generator.Generator { + g := &overlayGenerator{ + GoGenerator: generator.GoGenerator{ + OutputFilename: outputFilename, + }, + targetPackage: targetPackage, + imports: generator.NewImportTrackerForPackage(targetPackage), + funcTracker: map[string]map[string]struct{}{}, + } + + g.trackFunc("overlay", types.Ref("k8s.io/apimachinery/pkg/api/resource", "Quantity")) + g.trackFunc("overlay", types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "ObjectMeta")) + return g +} + +// Filter returns true if this Generator cares about this type. +// This will be called for every type which made it through this Package's +// Filter method. +func (*overlayGenerator) Filter(_ *generator.Context, t *types.Type) bool { + // We only handle exported structs. + return t.Name.Name == "PodSpec" +} + +// Namers returns a set of NameSystems which will be merged with the namers +// provided when executing this package. In case of a name collision, the +// values produced here will win. 
+func (g *overlayGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + // This elides package names when the name is in "this" package. + "raw": namer.NewRawNamer(g.targetPackage, g.imports), + "public": namer.NewPublicNamer(0), + } +} + +// GenerateType should emit code for the specified type. This will be called +// for every type which made it through this Generator's Filter method. +func (g *overlayGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + slog.Info("generating overlay", "type", t.String()) + + sw := generator.NewSnippetWriter(w, c, "$", "$") + + p := NewParams(".Spec", t, nil) + g.generateFunc(sw, p) + return sw.Error() +} + +func (g *overlayGenerator) Imports(_ *generator.Context) (imports []string) { + importLines := []string{} + importLines = append(importLines, g.imports.ImportLines()...) + + return importLines +} + +func (g *overlayGenerator) trackFunc(ns string, t *types.Type) (hasAdded bool) { + tracker, ok := g.funcTracker[ns] + if !ok { + tracker = map[string]struct{}{} + g.funcTracker[ns] = tracker + } + + if _, ok := tracker[t.Name.String()]; ok { + return true + } + + tracker[t.Name.String()] = struct{}{} + + return false +} + +func (g *overlayGenerator) generateFunc(sw *generator.SnippetWriter, p Params) { + slog.Debug("generate overlay function", "field", p.Name(), "type", p.Type()) + switch p.Type().Kind { + case types.Struct: + g.generateStructFunc(sw, p) + case types.Slice, types.Array: + g.generateSliceFunc(sw, p) + case types.Map: + g.generateMapFunc(sw, p) + case types.Pointer: + g.generateFunc(sw, p.Pointer()) + case types.Builtin: + // builtin will be handled by inline assignment + case types.Alias: + g.generateAliasFunc(sw, p) + default: + Panic("unhandled func generation", "typeName", p.Type(), "kind", p.Type().Kind) + } +} + +func (g *overlayGenerator) generateAliasFunc(sw *generator.SnippetWriter, p Params) { + if g.trackFunc("overlay", p.Type()) { + return + 
} + underlying := p.Underlying() + + DoLine(sw, "", "func overlay$.|public$(dst, src *$.|raw$) {", p.Type()) + DoLine(sw, "1", "ndst := (*$.|raw$)(dst)", underlying.Type()) + DoLine(sw, "1", "nsrc := (*$.|raw$)(src)", underlying.Type()) + g.generateSafeAssignment(sw, underlying) + DoLine(sw, "", `}`, nil) + + g.generateFunc(sw, underlying) +} + +func (g *overlayGenerator) generateStructFunc(sw *generator.SnippetWriter, p Params) { + switch p.AnnotateType() { + case AtomicStruct: + if g.trackFunc("overlayAtomicStruct", p.Type()) { + return + } + DoLine(sw, "", "func overlayAtomic$.|public$(dst, src *$.|raw$) {", p.Type()) + DoLine(sw, "1", "*dst = *src", nil) + DoLine(sw, "", `}`, nil) + case GranularStruct: + if g.trackFunc("overlay", p.Type()) { + return + } + DoLine(sw, "", "func overlay$.|public$(dst, src *$.|raw$) {", p.Type()) + + ms := p.Members() + for _, m := range ms { + g.generateAssignment(sw, m) + } + + DoLine(sw, "", `}`, nil) + for _, m := range ms { + g.generateFunc(sw, m) + } + + default: + Panic("unhandled struct type", "type", p.Type(), "annoType", p.AnnotateType()) + } +} + +func (g *overlayGenerator) generateMapFunc(sw *generator.SnippetWriter, p Params) { + switch p.AnnotateType() { + case AtomicMap: + if g.trackFunc("overlayAtomicMap", p.Type()) { + return + } + DoLine(sw, "", "func overlayAtomic$.|public$(dst, src *$.|raw$) {", p.Type()) + DoLine(sw, "1", "if len(*src) == 0 {", nil) + DoLine(sw, "1", "return", nil) + DoLine(sw, "1", "}", nil) + DoLine(sw, "1", "*dst = *src", nil) + DoLine(sw, "", `}`, nil) + case GranularMap: + if g.trackFunc("overlay", p.Type()) { + return + } + + elem := p.Map() + + DoLine(sw, "", "func overlay$.|public$(dst, src *$.|raw$) {", p.Type()) + DoLine(sw, "1", "for k := range *src {", nil) + DoLine(sw, "22", "vdst, ok := (*dst)[k]", nil) + DoLine(sw, "22", "if !ok {", nil) + DoLine(sw, "333", "(*dst)[k] = (*src)[k]", nil) + DoLine(sw, "22", "continue", nil) + DoLine(sw, "22", "}", nil) + DoLine(sw, "22", "vsrc := 
(*src)[k]", nil) + g.generateAssignment(sw, elem) + DoLine(sw, "22", "(*dst)[k] = vdst", nil) + DoLine(sw, "1", "}", nil) + DoLine(sw, "", "}", nil) + + g.generateFunc(sw, elem) + default: + Panic("unhandled map type", "type", p.Type(), "annoType", p.AnnotateType()) + } +} + +func (g *overlayGenerator) generateSliceFunc(sw *generator.SnippetWriter, p Params) { + switch p.AnnotateType() { + case AtomicList: + if g.trackFunc("overlayAtomicList", p.Type()) { + return + } + DoLine(sw, "", "func overlayAtomicList$.|public$(dst, src *$.|raw$) {", p.Type()) + DoLine(sw, "1", "if len(*src) == 0 {", nil) + DoLine(sw, "1", "return", nil) + DoLine(sw, "1", "}", nil) + DoLine(sw, "1", "*dst = *src", nil) + DoLine(sw, "", `}`, nil) + case SetList: + elem := p.Slice("di", "si") + + if g.trackFunc("overlaySetList", p.Type()) { + return + } + // TODO: maybe validate elem is a scalar? + DoLine(sw, "", "func overlaySetList$.|public$(dst, src *$.|raw$) {", p.Type()) + DoLine(sw, "1", "m := map[$.|raw$]struct{}{}", elem.Type()) + DoLine(sw, "1", "for i := range *dst {", nil) + DoLine(sw, "22", "item := (*dst)[i]", nil) + DoLine(sw, "22", "m[item] = struct{}{}", nil) + DoLine(sw, "1", "}", nil) + DoLine(sw, "1", "for i := range *src {", nil) + DoLine(sw, "22", "item := (*src)[i]", nil) + DoLine(sw, "22", "if _, ok := m[item]; !ok {", nil) + DoLine(sw, "333", "*dst = append(*dst, item)", nil) + DoLine(sw, "22", "}", nil) + DoLine(sw, "1", "}", nil) + DoLine(sw, "", `}`, nil) + + g.generateFunc(sw, elem) + + case MapList: + if p.Type().Elem.Kind != types.Struct { + Panic("elem of map list is not struct", "type", p.Type()) + } + + elem := p.Slice("di", "si") + + fields, err := getFieldsByKeys(elem.Type(), p.Keys()...) 
+ if err != nil { + Panic("get field names of map list failed", "err", err, "type", p.Type()) + } + if g.trackFunc("overlayMapList", p.Type()) { + return + } + joinFunc := types.Ref("strings", "Join") + DoLine(sw, "", "func overlayMapList$.|public$(dst, src *$.|raw$) {", p.Type()) + DoLine(sw, "1", "m := map[string]int{}", nil) + DoLine(sw, "1", "for i := range *dst {", nil) + DoLine(sw, "22", "item := (*dst)[i]", nil) + DoLine(sw, "22", "keys := []string{}", nil) + for _, field := range fields { + DoLine(sw, "22", "keys = append(keys, $.Type|public$ToString(item.$.Name$))", &field) + } + DoLine(sw, "22", `key := $.|raw$(keys, ",")`, joinFunc) + DoLine(sw, "22", "m[key] = i", nil) + DoLine(sw, "1", "}", nil) + DoLine(sw, "1", "for si := range *src{", nil) + DoLine(sw, "22", "item := (*src)[si]", nil) + DoLine(sw, "22", "keys := []string{}", nil) + for _, field := range fields { + DoLine(sw, "22", "keys = append(keys, $.Type|public$ToString(item.$.Name$))", &field) + } + DoLine(sw, "22", `key := $.|raw$(keys, ",")`, joinFunc) + DoLine(sw, "22", "di, ok := m[key]", nil) + DoLine(sw, "22", "if !ok {", nil) + DoLine(sw, "333", "*dst = append(*dst, item)", nil) + DoLine(sw, "22", "continue", nil) + DoLine(sw, "22", "}", nil) + g.generateAssignment(sw, elem) + DoLine(sw, "1", "}", nil) + DoLine(sw, "", `}`, nil) + + for _, field := range fields { + g.generateToStringFunc(sw, field.Type) + } + + g.generateFunc(sw, elem) + } +} + +func (g *overlayGenerator) generateToStringFunc(sw *generator.SnippetWriter, t *types.Type) { + if g.trackFunc("toString", t) { + return + } + DoLine(sw, "", "func $.|public$ToString(val $.|raw$) string {", t) + switch t.Kind { + case types.Builtin: + switch t { + case types.Int, types.Int64, types.Int32, types.Int16: + DoLine(sw, "1", `return $.|raw$(int64(val), 10)`, types.Ref("strconv", "FormatInt")) + case types.Uint, types.Uint64, types.Uint32, types.Uint16, types.Byte: + DoLine(sw, "1", `return $.|raw$(uint64(val), 10)`, 
types.Ref("strconv", "FormatUint")) + case types.Float, types.Float32, types.Float64: + DoLine(sw, "1", `return $.|raw$(float64(val), 'E', -1, 64)`, types.Ref("strconv", "FormatFloat")) + case types.String: + DoLine(sw, "1", `return val`, nil) + case types.Bool: + DoLine(sw, "1", `return $.|raw$(val)`, types.Ref("strconv", "FormatBool")) + default: + Panic("unhandled builtin to string func", "name", t.Name) + } + case types.Alias: + DoLine(sw, "1", "return $.|public$ToString($.|raw$(val))", t.Underlying) + default: + Panic("unhandled to string func", "name", t.Name, "kind", t.Kind) + } + DoLine(sw, "", "}", t) +} + +type StructField struct { + Name string + Type *types.Type +} + +func getFieldsByKeys(t *types.Type, keys ...string) ([]StructField, error) { + fields := []StructField{} + for _, key := range keys { + for _, m := range t.Members { + if m.Embedded { + field, err := getFieldsByKeys(m.Type, key) + if err != nil { + continue + } + fields = append(fields, field...) + break + } + tag := reflect.StructTag(m.Tags) + jsonTag := tag.Get("json") + name := strings.Split(jsonTag, ",") + if name[0] == key { + fields = append(fields, StructField{ + Name: m.Name, + Type: m.Type, + }) + } + } + } + + if len(fields) != len(keys) { + return nil, fmt.Errorf("cannot get field names by keys %v, only %v are found", keys, fields) + } + + return fields, nil +} + +func (g *overlayGenerator) generateAssignment(sw *generator.SnippetWriter, p Params) { + args := generator.Args{ + "type": p.Type(), + "dst": p.Dst(), + "src": p.Src(), + } + + p.CheckNil() + + needNilCheck := g.needNilCheck(p.Type()) + if needNilCheck { + DoLine(sw, "", `if $.dst$ != nil && $.src$ != nil {`, args) + } + + g.generateSafeAssignment(sw, p) + + if needNilCheck { + DoLine(sw, "", "} else if $.dst$ == nil {", args) + DoLine(sw, "", "$.dst$ = $.src$", args) + DoLine(sw, "", "}", nil) + } +} + +func (g *overlayGenerator) generateSafeAssignment(sw *generator.SnippetWriter, p Params) { + switch p.Type().Kind { 
+ case types.Struct: + g.generateStructAssignment(sw, p) + case types.Alias: + g.generateAliasAssignment(sw, p) + case types.Slice, types.Array: + g.generateSliceAssignment(sw, p) + case types.Map: + g.generateMapAssignment(sw, p) + case types.Builtin: + g.generateBuiltinAssignment(sw, p) + case types.Pointer: + g.generatePointerAssignment(sw, p) + default: + Panic("unhandled assignment generation", "dst", p.Dst(), "src", p.Src(), "typeName", p.Type()) + } +} + +func (g *overlayGenerator) needNilCheck(t *types.Type) bool { + switch t.Kind { + case types.Alias: + return g.needNilCheck(t.Underlying) + case types.Slice, types.Array, types.Map, types.Pointer: + return true + } + + return false +} + +func (g *overlayGenerator) generatePointerAssignment(sw *generator.SnippetWriter, p Params) { + to := p.Pointer() + g.generateSafeAssignment(sw, to) +} + +func (*overlayGenerator) generateSliceAssignment(sw *generator.SnippetWriter, p Params) { + args := generator.Args{ + "type": p.Type(), + "dst": p.Dst(), + "src": p.Src(), + } + switch p.AnnotateType() { + case AtomicList: + DoLine(sw, "1", `overlayAtomicList$.type|public$($.dst$, $.src$)`, args) + case SetList: + DoLine(sw, "1", `overlaySetList$.type|public$($.dst$, $.src$)`, args) + case MapList: + DoLine(sw, "1", `overlayMapList$.type|public$($.dst$, $.src$)`, args) + default: + Panic("unhandled list type", "type", p.Type(), "annoType", p.AnnotateType()) + } +} + +func (*overlayGenerator) generateMapAssignment(sw *generator.SnippetWriter, p Params) { + args := generator.Args{ + "type": p.Type(), + "dst": p.Dst(), + "src": p.Src(), + } + switch p.AnnotateType() { + case AtomicMap: + DoLine(sw, "1", `overlayAtomic$.type|public$($.dst$, $.src$)`, args) + case GranularMap: + DoLine(sw, "1", `overlay$.type|public$($.dst$, $.src$)`, args) + default: + Panic("unhandled map type", "type", p.Type(), "annoType", p.AnnotateType()) + } +} + +func (*overlayGenerator) generateStructAssignment(sw *generator.SnippetWriter, p Params) { 
+ args := generator.Args{ + "type": p.Type(), + "dst": p.Dst(), + "src": p.Src(), + } + switch p.AnnotateType() { + case AtomicStruct: + DoLine(sw, "1", `overlayAtomic$.type|public$($.dst$, $.src$)`, args) + case GranularStruct: + DoLine(sw, "1", `overlay$.type|public$($.dst$, $.src$)`, args) + default: + Panic("unhandled struct type", "type", p.Type(), "annoType", p.AnnotateType()) + } +} + +func (*overlayGenerator) generateAliasAssignment(sw *generator.SnippetWriter, p Params) { + args := generator.Args{ + "type": p.Type(), + "dst": p.Dst(), + "src": p.Src(), + } + DoLine(sw, "1", `overlay$.type|public$($.dst$, $.src$)`, args) +} + +func (*overlayGenerator) generateBuiltinAssignment(sw *generator.SnippetWriter, p Params) { + switch p.Type() { + case types.Int, types.Int64, types.Int32, types.Int16, types.Uint, types.Uint64, + types.Uint32, types.Uint16, types.Byte, types.Float, types.Float32, types.Float64: + DoLine(sw, "1", `if $.$ != 0 {`, p.Src()) + case types.String: + DoLine(sw, "1", `if $.$ != "" {`, p.Src()) + case types.Bool: + DoLine(sw, "1", `if $.$ {`, p.Src()) + default: + Panic("unhandled builtin assignment", "dst", p.Dst(), "src", p.Src(), "typeName", p.Type()) + } + DoLine(sw, "22", `$.dst$ = $.src$`, generator.Args{ + "dst": p.Dst(), + "src": p.Src(), + }) + DoLine(sw, "1", `}`, nil) +} diff --git a/cmd/overlay-gen/generators/test.go b/cmd/overlay-gen/generators/test.go new file mode 100644 index 00000000000..24c397e1c0f --- /dev/null +++ b/cmd/overlay-gen/generators/test.go @@ -0,0 +1,669 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package generators + +import ( + "io" + "log/slog" + "strconv" + "strings" + + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" +) + +type overlayTestGenerator struct { + generator.GoGenerator + targetPackage string + imports namer.ImportTracker + funcTracker map[string]map[string]struct{} +} + +func NewOverlayTestGenerator(outputFilename, targetPackage string) generator.Generator { + g := &overlayTestGenerator{ + GoGenerator: generator.GoGenerator{ + OutputFilename: outputFilename, + }, + targetPackage: targetPackage, + imports: generator.NewImportTrackerForPackage(targetPackage), + funcTracker: map[string]map[string]struct{}{}, + } + + g.trackFunc("construct", types.Ref("k8s.io/apimachinery/pkg/api/resource", "Quantity")) + g.trackFunc("construct", types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "ObjectMeta")) + g.trackFunc("construct", types.Ref("", "map[string]string")) + return g +} + +// Filter returns true if this Generator cares about this type. +// This will be called for every type which made it through this Package's +// Filter method. +func (*overlayTestGenerator) Filter(_ *generator.Context, t *types.Type) bool { + // We only handle exported structs. + return t.Name.Name == "PodSpec" +} + +// Namers returns a set of NameSystems which will be merged with the namers +// provided when executing this package. In case of a name collision, the +// values produced here will win. 
+func (g *overlayTestGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + // This elides package names when the name is in "this" package. + "raw": namer.NewRawNamer(g.targetPackage, g.imports), + "public": namer.NewPublicNamer(0), + } +} + +// GenerateType should emit code for the specified type. This will be called +// for every type which made it through this Generator's Filter method. +func (g *overlayTestGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + slog.Info("generating overlay test", "type", t.String()) + + sw := generator.NewSnippetWriter(w, c, "$", "$") + + p := NewParams(".Spec", t, nil) + g.generateConstructFunc(sw, p) + return sw.Error() +} + +func (g *overlayTestGenerator) Imports(_ *generator.Context) (imports []string) { + importLines := []string{} + importLines = append(importLines, g.imports.ImportLines()...) + + return importLines +} + +func (g *overlayTestGenerator) trackFunc(ns string, t *types.Type) (hasAdded bool) { + tracker, ok := g.funcTracker[ns] + if !ok { + tracker = map[string]struct{}{} + g.funcTracker[ns] = tracker + } + + if _, ok := tracker[t.Name.String()]; ok { + return true + } + + tracker[t.Name.String()] = struct{}{} + + return false +} + +func (g *overlayTestGenerator) generateConstructFunc(sw *generator.SnippetWriter, p Params, ignoreKeys ...string) { + if g.trackFunc("construct"+p.AnnotateType().FuncPrefix()+IgnoreKeys(ignoreKeys...), p.Type()) { + return + } + switch p.Type().Kind { + case types.Struct: + g.generateConstructStructFunc(sw, p, ignoreKeys...) 
+ case types.Slice, types.Array: + g.generateConstructSliceFunc(sw, p) + case types.Map: + g.generateConstructMapFunc(sw, p) + case types.Pointer: + g.generateConstructPointerFunc(sw, p) + case types.Builtin: + g.generateConstructBuiltinFunc(sw, p) + case types.Alias: + g.generateConstructAliasFunc(sw, p) + default: + Panic("unhandled func generation", "type", p.Type(), "kind", p.Type().Kind) + } +} + +func isMatched(m *types.Member, fs []StructField) bool { + if m.Embedded && m.Type.Kind == types.Struct { + for _, mm := range m.Type.Members { + if isMatched(&mm, fs) { + return true + } + } + } + for _, f := range fs { + if f.Name == m.Name { + return true + } + } + + return false +} + +//nolint:gocyclo // refactor if possible +func (g *overlayTestGenerator) generateConstructStructFunc(sw *generator.SnippetWriter, p Params, ignoreKeys ...string) { + fields, err := getFieldsByKeys(p.Type(), ignoreKeys...) + if err != nil { + Panic("get fields failed", "err", err, "type", p.Type(), "keys", ignoreKeys) + } + g.generateConstructFuncName(sw, p.Type(), p.AnnotateType(), ignoreKeys...) + DoLine(sw, "", "cases := []Case[$.|raw$] {}", p.Type()) + ms := p.Members() + for index, m := range ms { + isKey := isMatched(m.Member(), fields) + policy := "NoLimit" + if isKey { + policy = "NoNotEqual | NoZero | NoNil" + } + + if m.Member().Embedded && isKey { + g.generateConstructFuncCall(sw, "cs"+strconv.Itoa(index)+" :=", policy, m.Type(), m.AnnotateType(), ignoreKeys...) 
+ } else { + g.generateConstructFuncCall(sw, "cs"+strconv.Itoa(index)+" :=", policy, m.Type(), m.AnnotateType()) + } + } + DoLine(sw, "", "maxCount := max(", nil) + for i := range ms { + DoLine(sw, "", "len(cs$.$),", i) + } + DoLine(sw, "", ")", nil) + + for i := range ms { + DoLine(sw, "", "k$.$ := 0", i) + } + + DoLine(sw, "", "for i := range maxCount {", nil) + DoLine(sw, "", "nc := Case[$.|raw$]{}", p.Type()) + for index, m := range ms { + isKey := isMatched(m.Member(), fields) + DoLine(sw, "", "if i / len(cs$.$) > k$.$ {", index) + policy := "NoLimit" + if isKey { + policy = "NoNotEqual | NoZero | NoNil" + } + + if m.Member().Embedded && isKey { + g.generateConstructFuncCall(sw, "cs"+strconv.Itoa(index)+" =", policy, m.Type(), m.AnnotateType(), ignoreKeys...) + } else { + g.generateConstructFuncCall(sw, "cs"+strconv.Itoa(index)+" =", policy, m.Type(), m.AnnotateType()) + } + + DoLine(sw, "", "k$.$ += 1", index) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "c$.$ := &cs$.$[i % len(cs$.$)]", index) + + switch p.AnnotateType() { + case AtomicStruct: + // all fields will be overrided by src + DoLine(sw, "", "nc.expected.$.name$ = c$.index$.src", generator.Args{ + "name": m.Member().Name, + "index": index, + }) + DoLine(sw, "", "nc.dst.$.name$ = c$.index$.dst", generator.Args{ + "name": m.Member().Name, + "index": index, + }) + DoLine(sw, "", "nc.src.$.name$ = c$.index$.src", generator.Args{ + "name": m.Member().Name, + "index": index, + }) + case GranularStruct: + DoLine(sw, "", "nc.expected.$.name$ = c$.index$.expected", generator.Args{ + "name": m.Member().Name, + "index": index, + }) + DoLine(sw, "", "nc.dst.$.name$ = c$.index$.dst", generator.Args{ + "name": m.Member().Name, + "index": index, + }) + DoLine(sw, "", "nc.src.$.name$ = c$.index$.src", generator.Args{ + "name": m.Member().Name, + "index": index, + }) + } + } + DoLine(sw, "", "cases = append(cases, nc)", nil) + DoLine(sw, "", "}", nil) + + DoLine(sw, "", "return cases", nil) + DoLine(sw, "", "}", nil) 
+
+	for _, m := range ms {
+		isKey := isMatched(m.Member(), fields)
+		if m.Member().Embedded && isKey {
+			g.generateConstructFunc(sw, m, ignoreKeys...)
+		} else {
+			g.generateConstructFunc(sw, m)
+		}
+	}
+}
+
+func IgnoreKeys(keys ...string) string {
+	key := ""
+	if len(keys) != 0 {
+		key = "Ignore_" + strings.Join(keys, "_")
+	}
+
+	return key
+}
+
+func (g *overlayTestGenerator) generateConstructSliceFunc(sw *generator.SnippetWriter, p Params) {
+	g.generateConstructFuncName(sw, p.Type(), p.AnnotateType())
+	elem := p.Slice("", "")
+	DoLine(sw, "", "cases := []Case[$.|raw$] {", p.Type())
+	DoLine(sw, "", "{", nil)
+	DoLine(sw, "", "expected: nil,", nil)
+	DoLine(sw, "", "dst: nil,", nil)
+	DoLine(sw, "", "src: nil,", nil)
+	DoLine(sw, "", "},", nil)
+	DoLine(sw, "", "}", nil)
+	g.generateConstructFuncCall(sw, "cs :=", "NoLimit", elem.Type(), elem.AnnotateType(), p.Keys()...)
+	DoLine(sw, "", "var nc Case[$.|raw$]", p.Type())
+
+	switch p.AnnotateType() {
+	case AtomicList:
+		// always use src
+		DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type())
+		DoLine(sw, "", "for _, c := range cs {", nil)
+		DoLine(sw, "", "nc.expected = append(nc.expected, c.src)", nil)
+		DoLine(sw, "", "nc.dst = append(nc.dst, c.dst)", nil)
+		DoLine(sw, "", "nc.src = append(nc.src, c.src)", nil)
+		DoLine(sw, "", "}", nil)
+		DoLine(sw, "", "cases = append(cases, nc)", nil)
+
+		// no overlay if src is empty
+		DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type())
+		DoLine(sw, "", "for _, c := range cs {", nil)
+		DoLine(sw, "", "nc.expected = append(nc.expected, c.dst)", nil)
+		DoLine(sw, "", "nc.dst = append(nc.dst, c.dst)", nil)
+		DoLine(sw, "", "}", nil)
+		DoLine(sw, "", "nc.src = $.|raw${}", p.Type())
+		DoLine(sw, "", "cases = append(cases, nc)", nil)
+
+	case MapList:
+		// all keys in src are in dst
+		// all keys in dst are also in src
+		DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type())
+		DoLine(sw, "", "for _, c := range cs {", nil)
+		DoLine(sw, "", "nc.expected = append(nc.expected, c.expected)",
nil) + DoLine(sw, "", "nc.dst = append(nc.dst, c.dst)", nil) + DoLine(sw, "", "nc.src = append(nc.src, c.src)", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // some keys in src are in dst, some keys are not + // some keys in dst are also not in src + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "srcs := $.|raw${}", p.Type()) + DoLine(sw, "", "for i, c := range cs {", nil) + DoLine(sw, "", "switch i % 3 {", nil) + DoLine(sw, "", "case 0:", nil) + DoLine(sw, "", "nc.expected = append(nc.expected, c.expected)", nil) + DoLine(sw, "", "nc.dst = append(nc.dst, c.dst)", nil) + DoLine(sw, "", "nc.src = append(nc.src, c.src)", nil) + DoLine(sw, "", "case 1:", nil) + DoLine(sw, "", "nc.expected = append(nc.expected, c.dst)", nil) + DoLine(sw, "", "nc.dst = append(nc.dst, c.dst)", nil) + DoLine(sw, "", "case 2:", nil) + DoLine(sw, "", "srcs = append(srcs, c.src)", nil) + DoLine(sw, "", "nc.src = append(nc.src, c.src)", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "nc.expected = append(nc.expected, srcs...)", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // all keys in dst are not in src + // all keys in src are also not in dst + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "srcs = $.|raw${}", p.Type()) + DoLine(sw, "", "for i, c := range cs {", nil) + DoLine(sw, "", "switch i % 2 {", nil) + DoLine(sw, "", "case 0:", nil) + DoLine(sw, "", "nc.expected = append(nc.expected, c.dst)", nil) + DoLine(sw, "", "nc.dst = append(nc.dst, c.dst)", nil) + DoLine(sw, "", "case 1:", nil) + DoLine(sw, "", "srcs = append(srcs, c.src)", nil) + DoLine(sw, "", "nc.src = append(nc.src, c.src)", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "nc.expected = append(nc.expected, srcs...)", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // dst is empty + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "for _, c := 
range cs {", nil) + DoLine(sw, "", "nc.expected = append(nc.expected, c.src)", nil) + DoLine(sw, "", "nc.src = append(nc.src, c.src)", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // src is empty + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "for _, c := range cs {", nil) + DoLine(sw, "", "nc.expected = append(nc.expected, c.dst)", nil) + DoLine(sw, "", "nc.dst = append(nc.dst, c.dst)", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + } + DoLine(sw, "", "return cases", nil) + DoLine(sw, "", "}", nil) + + g.generateConstructFunc(sw, elem, p.Keys()...) +} + +func (*overlayTestGenerator) generateConstructFuncCall(sw *generator.SnippetWriter, + prefix, policy string, t *types.Type, at AnnotateType, keys ...string) { + DoLine(sw, "", "$.prefix$ construct$.annoType$$.type|public$$.key$($.policy$)", generator.Args{ + "prefix": prefix, + "policy": policy, + "type": t, + "annoType": at.FuncPrefix(), + "key": IgnoreKeys(keys...), + }) +} + +func (*overlayTestGenerator) generateConstructFuncName(sw *generator.SnippetWriter, t *types.Type, at AnnotateType, keys ...string) { + DoLine(sw, "", "func construct$.annoType$$.type|public$$.key$(p Policy) []Case[$.type|raw$] {", generator.Args{ + "type": t, + "annoType": at.FuncPrefix(), + "key": IgnoreKeys(keys...), + }) +} + +func (g *overlayTestGenerator) generateConstructMapFunc(sw *generator.SnippetWriter, p Params) { + g.generateConstructFuncName(sw, p.Type(), p.AnnotateType()) + + elem := p.Map() + DoLine(sw, "", "cases := []Case[$.|raw$] {", p.Type()) + DoLine(sw, "", "{", nil) + DoLine(sw, "", "expected: nil,", nil) + DoLine(sw, "", "dst: nil,", nil) + DoLine(sw, "", "src: nil,", nil) + DoLine(sw, "", "},", nil) + DoLine(sw, "", "}", nil) + g.generateConstructFuncCall(sw, "keys :=", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + g.generateConstructFuncCall(sw, "vals :=", "NoLimit", elem.Type(), elem.AnnotateType()) + 
DoLine(sw, "", "keyIndex := 0", nil) + DoLine(sw, "", "var nc Case[$.|raw$]", p.Type()) + + switch p.AnnotateType() { + case AtomicMap: + // all keys in src are in dst + DoLine(sw, "", "for _, val := range vals {", nil) + DoLine(sw, "", "keyIndex += 1", nil) + DoLine(sw, "", "if keyIndex >= len(keys) {", nil) + g.generateConstructFuncCall(sw, "keys =", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + DoLine(sw, "", "keyIndex = 0", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "key := keys[keyIndex]", nil) + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "nc.expected = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.dst = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.src = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.expected[key.expected] = val.src", nil) + DoLine(sw, "", "nc.dst[key.expected] = val.dst", nil) + DoLine(sw, "", "nc.src[key.expected] = val.src", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // src is empty + DoLine(sw, "", "for _, val := range vals {", nil) + DoLine(sw, "", "keyIndex += 1", nil) + DoLine(sw, "", "if keyIndex >= len(keys) {", nil) + g.generateConstructFuncCall(sw, "keys =", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + DoLine(sw, "", "keyIndex = 0", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "key := keys[keyIndex]", nil) + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "nc.expected = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.dst = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.src = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.expected[key.expected] = val.dst", nil) + DoLine(sw, "", "nc.dst[key.expected] = val.dst", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + case GranularMap: + // all keys in src are in dst + // all keys in dst are also in src + DoLine(sw, "", "for _, val := range vals {", nil) + DoLine(sw, "", "keyIndex += 1", nil) + DoLine(sw, "", "if keyIndex >= len(keys) {", nil) + 
g.generateConstructFuncCall(sw, "keys =", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + DoLine(sw, "", "keyIndex = 0", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "key := keys[keyIndex]", nil) + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "nc.expected = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.dst = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.src = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.expected[key.expected] = val.expected", nil) + DoLine(sw, "", "nc.dst[key.expected] = val.dst", nil) + DoLine(sw, "", "nc.src[key.expected] = val.src", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // some keys in src are in dst, some keys are not + // some keys in dst are also not in src + DoLine(sw, "", "for i, val := range vals {", nil) + DoLine(sw, "", "keyIndex += 1", nil) + DoLine(sw, "", "if keyIndex >= len(keys) {", nil) + g.generateConstructFuncCall(sw, "keys =", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + DoLine(sw, "", "keyIndex = 0", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "key := keys[keyIndex]", nil) + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "nc.expected = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.dst = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.src = make($.|raw$)", p.Type()) + DoLine(sw, "", "switch i % 3 {", nil) + DoLine(sw, "", "case 0:", nil) + DoLine(sw, "", "nc.expected[key.expected] = val.expected", nil) + DoLine(sw, "", "nc.dst[key.expected] = val.dst", nil) + DoLine(sw, "", "nc.src[key.expected] = val.src", nil) + DoLine(sw, "", "case 1:", nil) + DoLine(sw, "", "nc.expected[key.expected] = val.dst", nil) + DoLine(sw, "", "nc.dst[key.expected] = val.dst", nil) + DoLine(sw, "", "case 2:", nil) + DoLine(sw, "", "nc.expected[key.expected] = val.src", nil) + DoLine(sw, "", "nc.src[key.expected] = val.src", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // 
all keys in dst are not in src + // all keys in src are also not in dst + DoLine(sw, "", "for i, val := range vals {", nil) + DoLine(sw, "", "keyIndex += 1", nil) + DoLine(sw, "", "if keyIndex >= len(keys) {", nil) + g.generateConstructFuncCall(sw, "keys =", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + DoLine(sw, "", "keyIndex = 0", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "key := keys[keyIndex]", nil) + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "nc.expected = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.dst = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.src = make($.|raw$)", p.Type()) + DoLine(sw, "", "switch i % 2 {", nil) + DoLine(sw, "", "case 0:", nil) + DoLine(sw, "", "nc.expected[key.expected] = val.dst", nil) + DoLine(sw, "", "nc.dst[key.expected] = val.dst", nil) + DoLine(sw, "", "case 1:", nil) + DoLine(sw, "", "nc.expected[key.expected] = val.src", nil) + DoLine(sw, "", "nc.src[key.expected] = val.src", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // dst is empty + DoLine(sw, "", "for _, val := range vals {", nil) + DoLine(sw, "", "keyIndex += 1", nil) + DoLine(sw, "", "if keyIndex >= len(keys) {", nil) + g.generateConstructFuncCall(sw, "keys =", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + DoLine(sw, "", "keyIndex = 0", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "key := keys[keyIndex]", nil) + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "nc.expected = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.dst = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.src = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.expected[key.expected] = val.src", nil) + DoLine(sw, "", "nc.src[key.expected] = val.src", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + + // src is empty + DoLine(sw, "", "for _, val := range vals {", nil) + DoLine(sw, "", "keyIndex += 1", nil) + DoLine(sw, "", "if 
keyIndex >= len(keys) {", nil) + g.generateConstructFuncCall(sw, "keys =", "NoNil | NoZero | NoNotEqual", p.Type().Key, None) + DoLine(sw, "", "keyIndex = 0", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "key := keys[keyIndex]", nil) + DoLine(sw, "", "nc = Case[$.|raw$]{}", p.Type()) + DoLine(sw, "", "nc.expected = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.dst = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.src = make($.|raw$)", p.Type()) + DoLine(sw, "", "nc.expected[key.expected] = val.dst", nil) + DoLine(sw, "", "nc.dst[key.expected] = val.dst", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "cases = append(cases, nc)", nil) + } + DoLine(sw, "", "return cases", nil) + DoLine(sw, "", "}", nil) + + g.generateConstructFunc(sw, elem) +} + +func (g *overlayTestGenerator) generateConstructPointerFunc(sw *generator.SnippetWriter, p Params) { + g.generateConstructFuncName(sw, p.Type(), p.AnnotateType()) + + elem := p.Pointer() + DoLine(sw, "", "cases := []Case[$.|raw$] {", p.Type()) + DoLine(sw, "", "{", nil) + DoLine(sw, "", "expected: nil,", nil) + DoLine(sw, "", "dst: nil,", nil) + DoLine(sw, "", "src: nil,", nil) + DoLine(sw, "", "},", nil) + DoLine(sw, "", "}", nil) + + g.generateConstructFuncCall(sw, "cs :=", "p", elem.Type(), elem.AnnotateType()) + + DoLine(sw, "", "for _, c := range cs {", nil) + DoLine(sw, "", "cases = append(cases, Case[$.|raw$] {", p.Type()) + DoLine(sw, "", "expected: &c.expected,", nil) + DoLine(sw, "", "dst: &c.dst,", nil) + DoLine(sw, "", "src: &c.src,", nil) + DoLine(sw, "", "})", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "return cases", nil) + DoLine(sw, "", "}", nil) + + g.generateConstructFunc(sw, elem) +} + +//nolint:gocyclo // refactor if possible +func (g *overlayTestGenerator) generateConstructBuiltinFunc(sw *generator.SnippetWriter, p Params) { + g.generateConstructFuncName(sw, p.Type(), p.AnnotateType()) + + DoLine(sw, "", "cases := []Case[$.|raw$] {}", p.Type()) + t := p.Type() + // dst==src && contains 0 + 
DoLine(sw, "", "if p&(NoZero) == 0 {", nil) + switch t { + case types.Int, types.Int64, types.Int32, types.Int16: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 0, dst: 0, src: 0})`, t) + case types.Uint, types.Uint64, types.Uint32, types.Uint16, types.Byte: + case types.Float, types.Float32, types.Float64: + case types.String: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: "", dst: "", src: ""})`, t) + case types.Bool: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: false, dst: false, src: false})`, t) + } + DoLine(sw, "", "}", nil) + + // dst!=src && no 0 + DoLine(sw, "", "if p&(NoNotEqual) == 0 {", nil) + switch t { + case types.Int, types.Int64, types.Int32, types.Int16: + DoLine(sw, "", "if p&(NoNotEqual) == 0 {", nil) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 1, dst: 2, src: 1})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: -1, dst: 2, src: -1})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 1, dst: -2, src: 1})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: -1, dst: -2, src: -1})`, t) + DoLine(sw, "", "}", nil) + + case types.Uint, types.Uint64, types.Uint32, types.Uint16, types.Byte: + case types.Float, types.Float32, types.Float64: + case types.String: + DoLine(sw, "", `dst, src := randString(), randString()`, nil) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: src, dst: dst, src: src})`, t) + case types.Bool: + } + DoLine(sw, "", "}", nil) + + // dst!=src && contains 0 + DoLine(sw, "", "if p&(NoZero|NoNotEqual) == 0 {", nil) + switch t { + case types.Int, types.Int64, types.Int32, types.Int16: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 1, dst: 1, src: 0})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: -1, dst: -1, src: 0})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 1, dst: 0, src: 1})`, t) + DoLine(sw, "", `cases = append(cases, 
Case[$.|raw$]{expected: -1, dst: 0, src: -1})`, t) + case types.Uint, types.Uint64, types.Uint32, types.Uint16, types.Byte: + case types.Float, types.Float32, types.Float64: + case types.String: + DoLine(sw, "", `dst, src := randString(), randString()`, nil) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: dst, dst: dst, src: ""})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: src, dst: "", src: src})`, t) + case types.Bool: + } + DoLine(sw, "", "}", nil) + + // dst==src && no 0 + switch t { + case types.Int, types.Int64, types.Int32, types.Int16: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 1, dst: 1, src: 1})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: -1, dst: -1, src: -1})`, t) + + case types.Uint, types.Uint64, types.Uint32, types.Uint16, types.Byte: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 1, dst: 1, src: 1})`, t) + + case types.Float, types.Float32, types.Float64: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: 1.0, dst: 1.0, src: 1.0})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: -1.0, dst: -1.0, src: -1.0})`, t) + + case types.String: + DoLine(sw, "", `var val string`, nil) + for range 3 { + DoLine(sw, "", `val = randString()`, nil) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: val, dst: val, src: val})`, t) + } + + case types.Bool: + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: false, dst: false, src: false})`, t) + DoLine(sw, "", `cases = append(cases, Case[$.|raw$]{expected: true, dst: true, src: true})`, t) + } + DoLine(sw, "", "return cases", nil) + DoLine(sw, "", "}", nil) +} + +func (g *overlayTestGenerator) generateConstructAliasFunc(sw *generator.SnippetWriter, p Params) { + g.generateConstructFuncName(sw, p.Type(), p.AnnotateType()) + + underlying := p.Underlying() + DoLine(sw, "", "cases := []Case[$.|raw$] {}", p.Type()) + g.generateConstructFuncCall(sw, "cs :=", "p", 
underlying.Type(), underlying.AnnotateType()) + DoLine(sw, "", "for _, c := range cs {", nil) + DoLine(sw, "", "cases = append(cases, Case[$.|raw$] {", p.Type()) + DoLine(sw, "", "expected: $.|raw$(c.expected),", p.Type()) + DoLine(sw, "", "dst: $.|raw$(c.dst),", p.Type()) + DoLine(sw, "", "src: $.|raw$(c.src),", p.Type()) + DoLine(sw, "", "})", nil) + DoLine(sw, "", "}", nil) + DoLine(sw, "", "return cases", nil) + DoLine(sw, "", "}", nil) + + g.generateConstructFunc(sw, p.Underlying()) +} diff --git a/cmd/overlay-gen/generators/util.go b/cmd/overlay-gen/generators/util.go new file mode 100644 index 00000000000..51d4a90e324 --- /dev/null +++ b/cmd/overlay-gen/generators/util.go @@ -0,0 +1,460 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package generators + +import ( + "context" + "log/slog" + "os" + "runtime" + "time" + + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/types" +) + +func DoLine(sw *generator.SnippetWriter, indent, format string, args any) { + for range indent { + sw.Do("\t", nil) + } + sw.Do(format, args) + sw.Do("\n", nil) +} + +// Params defines the messages of overlay params +type Params interface { + Name() string + // Type returns the type dst and src + Type() *types.Type + // Dst returns the variable name of dst + Dst() string + // Src returns the variable name of src + Src() string + + // AnnotateType returns the type from comments + AnnotateType() AnnotateType + // Keys returns additional keys of specified annotate type + Keys() []string + + // some types need check nil before get address + CheckNil() + + Member() *types.Member + + Members() []Params + Pointer() Params + Slice(dst, src string) Params + Map() Params + Underlying() Params +} + +type Refer interface { + Ref(root string) string +} + +// for slice, array, map +type indexRefer struct { + index string +} + +func (r *indexRefer) Ref(root string) string { + return "(" + root + ")[" + r.index + "]" +} + +// for struct +type fieldRefer struct { + field string +} + +func (r *fieldRefer) Ref(root string) string { + return root + "." 
+ r.field +} + +type pointerRefer struct{} + +func (*pointerRefer) Ref(root string) string { + return "*" + root +} + +type addrRefer struct { + notNil *bool +} + +func (r *addrRefer) Ref(root string) string { + if *r.notNil { + return "&(" + root + ")" + } + return root +} + +type prefixRefer struct { + prefix string +} + +func (r *prefixRefer) Ref(root string) string { + return r.prefix + root +} + +func Index(i string) Refer { + return &indexRefer{ + index: i, + } +} + +func Field(f string) Refer { + return &fieldRefer{ + field: f, + } +} + +func Pointer() Refer { + return &pointerRefer{} +} + +func Addr(notNil *bool) Refer { + return &addrRefer{ + notNil: notNil, + } +} + +func Prefix(p string) Refer { + return &prefixRefer{prefix: p} +} + +type AnnotateType int + +const ( + None AnnotateType = iota + AtomicList + SetList + MapList + + AtomicMap + GranularMap + + AtomicStruct + GranularStruct +) + +func (at AnnotateType) FuncPrefix() string { + switch at { + case AtomicList, AtomicMap, AtomicStruct: + return "Atomic" + case SetList: + return "Set" + case MapList: + return "Map" + case GranularMap: + return "" + case GranularStruct: + return "" + } + return "" +} + +type params struct { + t *types.Type + refers []Refer + // if srcRefers is empty, use refers to generate src + srcRefers []Refer + + // name is field name from root + name string + + // means nil has been checked, addr can be called + checkNil bool + + m *types.Member + + annotateType AnnotateType + keys []string +} + +// TODO: handle panic +func NewParams(name string, t *types.Type, comments []string) Params { + return newParams(name, t, comments) +} + +func newParams(name string, t *types.Type, comments []string) *params { + p := parseComment(t, comments) + p.t = t + p.name = name + return p +} + +const ( + typeAtomic = "atomic" + typeMap = "map" + typeGranular = "granular" + typeSet = "set" +) + +//nolint:gocyclo // refactor if possible +func parseComment(t *types.Type, comments []string) *params 
{
+	p := &params{}
+	switch t.Kind {
+	case types.Alias:
+		return parseComment(t.Underlying, comments)
+	case types.Pointer:
+		return parseComment(t.Elem, comments)
+	case types.Map:
+		tags := gengo.ExtractCommentTags("+", comments)
+		ts := tags["mapType"]
+		if len(ts) > 1 {
+			panic("value of map type cannot be set twice")
+		}
+
+		if len(ts) == 0 {
+			p.annotateType = GranularMap
+			return p
+		}
+
+		switch ts[0] {
+		case typeAtomic:
+			p.annotateType = AtomicMap
+		case typeGranular:
+			p.annotateType = GranularMap
+		default:
+			panic("unknown map type " + ts[0])
+		}
+		return p
+
+	case types.Slice, types.Array:
+		// ignore []byte
+		if t.Elem == types.Byte {
+			p.annotateType = AtomicList
+			return p
+		}
+		tags := gengo.ExtractCommentTags("+", comments)
+		ts := tags["listType"]
+		if len(ts) != 1 {
+			panic("value of list type must be set exactly once")
+		}
+
+		switch ts[0] {
+		case typeAtomic:
+			p.annotateType = AtomicList
+		case typeMap:
+			p.annotateType = MapList
+			p.keys = tags["listMapKey"]
+		case typeSet:
+			p.annotateType = SetList
+		default:
+			panic("unknown list type " + ts[0])
+		}
+
+		return p
+	case types.Struct:
+		// member-level tag takes precedence; else fall back to the struct's own comment
+		sp := parseCommentForStruct(comments)
+		if sp == nil {
+			sp = parseCommentForStruct(t.CommentLines)
+		}
+		if sp != nil {
+			return sp
+		}
+
+		// no structType tag on either comment: default to granular
+		p.annotateType = GranularStruct
+	}
+
+	return p
+}
+
+func parseCommentForStruct(comments []string) *params {
+	tags := gengo.ExtractCommentTags("+", comments)
+	ts := tags["structType"]
+	if len(ts) > 1 {
+		panic("value of struct type cannot be set twice")
+	}
+	if len(ts) != 0 {
+		p := &params{}
+		switch ts[0] {
+		case typeAtomic:
+			p.annotateType = AtomicStruct
+		case typeGranular:
+			p.annotateType = GranularStruct
+		default:
+			panic("unknown struct type " + ts[0])
+		}
+
+		return p
+	}
+
+	return nil
+}
+
+func (p *params) Name() string {
+	return p.name
+}
+
+func (p *params) Type() *types.Type {
+	return p.t
+}
+
+func (p
*params) Member() *types.Member { + return p.m +} + +func (p *params) Dst() string { + return p.variable("dst", p.refers) +} + +func (p *params) Src() string { + if len(p.srcRefers) == 0 { + return p.variable("src", p.refers) + } + return p.variable("src", p.srcRefers) +} + +func (*params) variable(root string, refers []Refer) string { + path := root + for _, refer := range refers { + path = refer.Ref(path) + } + return path +} + +func (p *params) AnnotateType() AnnotateType { + if p.annotateType == 0 { + return None + } + return p.annotateType +} + +func (p *params) Keys() []string { + return p.keys +} + +func (p *params) CheckNil() { + p.checkNil = true +} + +func (p *params) Members() []Params { + ms := []Params{} + for i, m := range p.t.Members { + mp := newParams(p.name+"."+m.Name, m.Type, m.CommentLines) + mp.m = &p.t.Members[i] + mp.refers = append(mp.refers, Field(m.Name)) + // always pass addr to overlay func + // for pointer, it's no need to addr + // for builtin, it will be handled inline and it's no need to pass to overlay func + switch m.Type.Kind { + case types.Pointer, types.Builtin: + default: + mp.refers = append(mp.refers, Addr(&mp.checkNil)) + } + ms = append(ms, mp) + } + return ms +} + +func (p *params) Pointer() Params { + switch p.t.Kind { + case types.Pointer: + // preserve annotateType + ep := ¶ms{ + name: p.name, + t: p.t.Elem, + annotateType: p.annotateType, + keys: p.keys, + refers: p.refers, + } + // builtin type will be handled inline but not passed into overlay function + // e.g. 
for *string + // *dst.string = *src.string + // but for *struct + // overlayXXX(dst.struct, src.struct) + if p.t.Elem.Kind == types.Builtin { + ep.refers = append(ep.refers, Pointer()) + } + return ep + default: + panic("Pointer() called by unexpected kind " + p.t.Kind) + } +} + +func (p *params) Slice(dst, src string) Params { + switch p.t.Kind { + case types.Slice, types.Array: + ep := newParams(p.name+"[]", p.t.Elem, nil) + ep.refers = append(ep.refers, Pointer(), Index(dst)) + ep.srcRefers = append(ep.srcRefers, Pointer(), Index(src)) + switch p.t.Elem.Kind { + case types.Struct, types.Alias: + ep.refers = append(ep.refers, Addr(&ep.checkNil)) + ep.srcRefers = append(ep.srcRefers, Addr(&ep.checkNil)) + case types.Builtin: + // do nothing + default: + panic("unexpected elem kind of slice: " + p.t.Elem.Kind) + } + return ep + default: + panic("Slice() called by unexpected kind " + p.t.Kind) + } +} + +func (p *params) Map() Params { + switch p.t.Kind { + case types.Map: + ep := newParams(p.name+"[]", p.t.Elem, nil) + ep.refers = append(ep.refers, Prefix("v")) + switch p.t.Elem.Kind { + case types.Struct, types.Alias: + ep.refers = append(ep.refers, Addr(&ep.checkNil)) + case types.Builtin: + // do nothing + default: + panic("unexpected elem kind of map: " + p.t.Elem.Kind) + } + return ep + default: + panic("Map() called by unexpected kind " + p.t.Kind) + } +} + +func (p *params) Underlying() Params { + switch p.t.Kind { + case types.Alias: + ep := &params{ + name: p.name, + t: p.t.Underlying, + annotateType: p.annotateType, + keys: p.keys, + } + ep.refers = append(ep.refers, Prefix("n")) + if p.t.Underlying.Kind == types.Builtin { + ep.refers = append(ep.refers, Pointer()) + } + return ep + default: + panic("Underlying() called by unexpected kind " + p.t.Kind) + } +} + +func Panic(msg string, args ...any) { + logger := slog.Default() + if !logger.Enabled(context.Background(), slog.LevelError) { + return + } + var pcs [1]uintptr + runtime.Callers(2, pcs[:]) // skip [Callers, 
Infof] + r := slog.NewRecord(time.Now(), slog.LevelError, msg, pcs[0]) + r.Add(args...) + _ = logger.Handler().Handle(context.Background(), r) + os.Exit(1) +} diff --git a/cmd/overlay-gen/main.go b/cmd/overlay-gen/main.go new file mode 100644 index 00000000000..db985a7ef9c --- /dev/null +++ b/cmd/overlay-gen/main.go @@ -0,0 +1,151 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + "fmt" + "log/slog" + "os" + + "github.com/spf13/pflag" + _ "k8s.io/api/core/v1" + _ "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" + + "github.com/pingcap/tidb-operator/cmd/overlay-gen/generators" +) + +func main() { + args := &Args{} + + // Collect and parse flags. + args.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := args.Validate(); err != nil { + slog.Error("validate args failed", "err", err) + os.Exit(1) + } + + targets := func(context *generator.Context) []generator.Target { + return getTargets(context, args) + } + + // Run the tool. 
+ if err := gengo.Execute( + getNameSystems(), + getDefaultNameSystem(), + targets, + gengo.StdBuildTag, + pflag.Args(), + ); err != nil { + slog.Error("execute failed", "err", err) + os.Exit(1) + } + slog.Info("completed successfully") +} + +type Args struct { + outputFile string + outputDir string + goHeaderFile string +} + +// AddFlags adds this tool's flags to the flagset. +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.outputFile, "output-file", "zz_generated.overlay", + "the name of the file to be generated (without extensions)") + fs.StringVar(&args.outputDir, "output-dir", "", + "the output dir, default is dir of input") + fs.StringVar(&args.goHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") +} + +// Validate checks the arguments. +func (args *Args) Validate() error { + if args.goHeaderFile == "" { + return fmt.Errorf("--go-header-file must be specified") + } + + return nil +} + +// getNameSystems returns the name system used by the generators in this package. +func getNameSystems() namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer("", nil), + } +} + +// getDefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func getDefaultNameSystem() string { + return "raw" +} + +// getTargets is called after the inputs have been loaded. It is expected to +// examine the provided context and return a list of Packages which will be +// executed further. 
+func getTargets(c *generator.Context, args *Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.goHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) + if err != nil { + slog.Error("failed loading boilerplate", "err", err) + os.Exit(1) + } + + targetPackage := "github.com/pingcap/tidb-operator/pkg/overlay" + targets := []generator.Target{} + for _, input := range c.Inputs { + slog.Info("processing", "pkg", input) + + pkg := c.Universe[input] + outputDir := pkg.Dir + if args.outputDir != "" { + outputDir = args.outputDir + } + + targets = append(targets, &generator.SimpleTarget{ + PkgName: "overlay", + PkgPath: targetPackage, + PkgDir: outputDir, + HeaderComment: boilerplate, + + // FilterFunc returns true if this Package cares about this type. + // Each Generator has its own Filter method which will be checked + // subsequently. This will be called for every type in every + // loaded package, not just things in our inputs. + FilterFunc: func(_ *generator.Context, t *types.Type) bool { + return t.Name.Package == pkg.Path + }, + + // GeneratorsFunc returns a list of Generators, each of which is + // responsible for a single output file (though multiple generators + // may write to the same one). + GeneratorsFunc: func(_ *generator.Context) []generator.Generator { + return []generator.Generator{ + generators.NewOverlayGenerator(args.outputFile+".go", targetPackage), + generators.NewOverlayTestGenerator(args.outputFile+"_test.go", targetPackage), + } + }, + }) + } + + return targets +} diff --git a/cmd/prestop-checker/main.go b/cmd/prestop-checker/main.go new file mode 100644 index 00000000000..df850348eab --- /dev/null +++ b/cmd/prestop-checker/main.go @@ -0,0 +1,146 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "flag" + "fmt" + "net/url" + "os" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/pingcap/tidb-operator/pkg/pdapi/v1" +) + +const ( + defaultPDRequestTimeout = 10 * time.Second +) + +var ( + pd string + addr string + ca string + cert string + key string +) + +func main() { + flag.StringVar(&pd, "pd", "", "pd url") + flag.StringVar(&addr, "addr", "", "tikv advertised client url") + flag.StringVar(&ca, "ca", "", "ca path") + flag.StringVar(&cert, "cert", "", "cert path") + flag.StringVar(&key, "key", "", "key path") + flag.Parse() + + u, err := url.Parse(pd) + if err != nil { + panic("cannot recognize pd addr: " + err.Error()) + } + + var tlsCfg *tls.Config + if u.Scheme == "https" { + cfg, err := loadTLSConfig(ca, cert, key) + if err != nil { + panic("cannot load tls config: " + err.Error()) + } + tlsCfg = cfg + } + + c := pdapi.NewPDClient(pd, defaultPDRequestTimeout, tlsCfg) + var storeID string + + //nolint:mnd // refactor to a constant if needed + if err := wait.PollUntilContextTimeout(context.TODO(), time.Second, time.Second*30, + true, func(ctx context.Context) (done bool, err error) { + info, err := c.GetStores(ctx) + if err != nil { + fmt.Printf("cannot list stores, try again: %v\n", err) + return false, nil + } + + for _, s := range info.Stores { + if s.Store == nil { + continue + } + if s.Store.Address != addr { + continue + } + + storeID = strconv.FormatUint(s.Store.Id, 10) + return true, nil + } + + return false, nil + }); err != nil { 
+ panic("cannot find the store, arg addr is wrong or the store has been deleted") + } + + fmt.Println("pre stop checking, store id:", storeID) + + if err := wait.PollUntilContextCancel(context.TODO(), time.Second, true, func(ctx context.Context) (done bool, err error) { + s, err := c.GetStore(ctx, storeID) + if err != nil { + fmt.Printf("cannot get store, try again: %v\n", err) + return false, nil + } + if s.Status.LeaderCount != 0 { + fmt.Printf("pre stop checking, current leader count: %v\n", s.Status.LeaderCount) + return false, nil + } + return true, nil + }); err != nil { + panic("poll error: " + err.Error()) + } + + fmt.Printf("all leaders have been evicted\n") +} + +func loadTLSConfig(ca, cert, key string) (*tls.Config, error) { + rootCAs := x509.NewCertPool() + caData, err := os.ReadFile(ca) + if err != nil { + return nil, err + } + + if !rootCAs.AppendCertsFromPEM(caData) { + return nil, fmt.Errorf("failed to append ca certs") + } + + certData, err := os.ReadFile(cert) + if err != nil { + return nil, err + } + keyData, err := os.ReadFile(key) + if err != nil { + return nil, err + } + pair, err := tls.X509KeyPair(certData, keyData) + if err != nil { + return nil, err + } + + //nolint:gosec // we didn't force to use a specific TLS version yet + return &tls.Config{ + RootCAs: rootCAs, + ClientCAs: rootCAs, + Certificates: []tls.Certificate{pair}, + }, nil +} diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 00000000000..57a2849ed03 --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,201 @@ +# TiDB Operator(v2) Development Guide + +## Prerequisites + +Please install [Go 1.23.x](https://go.dev/doc/install). If you want to run TiDB Operator locally, please also install the latest version of [Docker](https://www.docker.com/get-started/). + +## Workflow + +### Step 1: Fork TiDB Operator on GitHub + +Visit [TiDB Operator](https://github.com/pingcap/tidb-operator) + +Click `Fork` button (top right) to establish a cloud-based fork. 
+ +### Step 2: Clone fork to local machine + +Define a local working directory: + +```sh +working_dir=$GOPATH/src/github.com/pingcap +``` + +Set `user` to match your github profile name: + +```sh +user={your github profile name} +``` + +Create your clone: + +```sh +mkdir -p $working_dir +cd $working_dir +git clone git@github.com:$user/tidb-operator.git +``` + +Set your clone to track upstream repository. + +```sh +cd $working_dir/tidb-operator +git remote add upstream https://github.com/pingcap/tidb-operator +``` + +Since you don't have write access to the upstream repository, you need to disable pushing to upstream master: + +```sh +git remote set-url --push upstream no_push +git remote -v +``` + +The output should look like: + +```sh +origin git@github.com:$(user)/tidb-operator.git (fetch) +origin git@github.com:$(user)/tidb-operator.git (push) +upstream https://github.com/pingcap/tidb-operator (fetch) +upstream no_push (push) +``` + +### Step 3: Branch + +Get your local master up to date: + +```sh +cd $working_dir/tidb-operator +git fetch upstream +git checkout v2 +git rebase upstream/v2 +``` + +Branch from v2: + +```sh +git checkout -b myfeature +``` + +### Step 4: Develop + +#### Edit the code + +You can now edit the code on the `myfeature` branch. + +#### Genearate and check + +Sometimes you may have to re-generate code by the following commands. If you don't know whether you need to run it, just run it. + +```sh +make generate +``` + +Run following commands to check your code change. + +```sh +make check +``` + +This will show errors if your code change does not pass checks (e.g. unit, lint). Please fix them before submitting the PR. + + +#### Start TiDB Operator locally and do manual tests + +At first, you must have [Docker](https://www.docker.com/get-started/) installed and running. + +We use [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) to +start a Kubernetes cluster locally. 
+ +Run following commands to run e2e + +```sh +make e2e +``` + +We use [Ginkgo](https://github.com/onsi/ginkgo) to write our e2e cases, So you can run a specified case by following commands + +```sh +GINKGO_OPTS='--focus "regexp of case"' make e2e +``` + +You can also skip preparing e2e environment and run e2e directly by following commands + +```sh +GINKGO_OPTS='--focus "regexp of case"' make e2e/run +``` + +You can see logs of operator by following commands + +```sh +make logs/operator +``` + +And if you have some changes but just want to update operator, you can + +```sh +make push && make reload/operator +``` + +You can also deploy and re-deploy manifests by + +```sh +make deploy +``` + + +### Step 5: Keep your branch in sync + +While on your `myfeature` branch, run the following commands: + +```sh +git fetch upstream +git rebase upstream/v2 +``` + +### Step 6: Commit + +Before you commit, make sure that all checks are passed: + +```sh +make check +``` + +Then commit your changes. + +```sh +git commit +``` + +Likely you'll go back and edit/build/test some more than `commit --amend` +in a few cycles. + +### Step 7: Push + +When your commit is ready for review (or just to establish an offsite backup of your work), +push your branch to your fork on `github.com`: + +```sh +git push origin myfeature +``` + +### Step 8: Create a pull request + +1. Visit your fork at `https://github.com/$user/tidb-operator` (replace `$user` obviously). +2. Click the `Compare & pull request` button next to your `myfeature` branch. +3. Edit the description of the pull request to match your change, and if your pull request introduce a user-facing change, a release note is required. + +### Step 9: Get a code review + +Once your pull request has been opened, it will be assigned to at least two +reviewers. Those reviewers will do a thorough code review, looking for +correctness, bugs, opportunities for improvement, documentation and comments, +and style. 
+ +Commit changes made in response to review comments to the same branch on your +fork. + +Very small PRs are easy to review. Very large PRs are very difficult to +review. + +## Developer Docs + +If you hope to submit a new feature, please see [RFCs Template](./rfcs/0000-template.md) + diff --git a/docs/arch/README.md b/docs/arch/README.md new file mode 100644 index 00000000000..b08754a76ab --- /dev/null +++ b/docs/arch/README.md @@ -0,0 +1,56 @@ +# Architecture of the TiDB Operator(v2) + +## Overview + +TiDB Operator is a software to manage multiple TiDB clusters in the Kubernetes platform. It's designed based on the [operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/). + +The v2 version of TiDB Operator aims to resolve some painful issues and make the operator more extensible and user friendly. + +## Goals + +- Flexible. Advanced users can redefine many default behaviors of TiDB Operator to manage TiDBs in thier own way. +- Extensible. It's easy to extend TiDB Operator(v2) to manage a new component of TiDB. +- User friendly. The validation and kubectl plugin will be supported to make ops more secure and easy. + +## Key changes + +- Split the huge TidbCluster CR into multi-smaller CRs, including Cluster, PDGroup, PD, TiKVGroup, TiKV, ... +- Add more controllers for these new CRs +- Remove StatefulSet dependency + +## Arch + +[controller runtime](https://github.com/kubernetes-sigs/controller-runtime) is used as the underlying framework. + +## CRD + +TODO: need a picture + +### Cluster + +Cluster is a CRD that abstracts a TiDB Cluster. It contains some common configurations and feature gates of the TiDB Cluster and displays the overview status of the whole cluster. This CRD is designed like a "namespace". All components of the TiDB Cluster should refer to a Cluster CR. + +### Component Group + +Component Group means a set of instances for a specified component of TiDB Clusters. For example, PDGroup, TiKVGroup. 
In a cluster, one component can have more than one Group CR with a different name. A use case is one TiDBGroup for TP workloads and another TiDBGroup for AP workloads. + +Normally, the controller of component groups is responsible for: + +- Instance lifecycle management +- Control replicas +- Specify behavior policy + - Schedule + - Scale + - Update + +Users can specify the template of instance CRs by the field spec.template in component group CR to create or update instances. + +### Instance + +Instance means an instance of a component, for example, TiKV, PD. It manages an individual pod and its volumes. All "states" such as URI and volumes are bound with the instance CR. Users can easily create or remove a PD/TiKV/TiDB by creating or removing an instance CR. + +The controller of instances is responsible for: +- Manage pods, configmaps and volumes +- Provide APIs at the instance level + +Instance CRs are normally managed by their component group. Most fields of an instance CR are immutable for users. diff --git a/docs/convention.md b/docs/convention.md new file mode 100644 index 00000000000..03598f2e553 --- /dev/null +++ b/docs/convention.md @@ -0,0 +1,24 @@ +# Conventions of TiDB Operator + +Conventions are copied from [Kubernetes Conventions](https://www.kubernetes.dev/docs/guide/coding-convention), with some unused items removed. + +## Code conventions + +- [Bash Style Guide](https://google.github.io/styleguide/shellguide.html) +- [Effective Go](https://go.dev/doc/effective_go) +- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +- [Go Test Comments](https://go.dev/wiki/TestComments) +- [Go Style Guide](https://google.github.io/styleguide/go/decisions) +- [Kubernetes API Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md) + +## Directory and file conventions + +- Avoid package sprawl. Find an appropriate subdirectory for new packages. +- Avoid general utility packages. 
Packages called "util" are suspect. Instead, derive a name that describes your desired function. For example, the utility functions dealing with waiting for operations are in the wait package and include functionality like Poll. The full name is wait.Poll. +- All filenames should be lowercase. +- Go source files and directories use underscores, not dashes. +- Package directories should generally avoid using separators as much as possible. When package names are multiple words, they usually should be in nested subdirectories. +- Document directories and filenames should use dashes rather than underscores. +- Forked third party Go code goes in third_party. +- Third-party code must include licenses. This includes modified third-party code and excerpts, as well. + diff --git a/docs/why.md b/docs/why.md new file mode 100644 index 00000000000..0f77b7b86db --- /dev/null +++ b/docs/why.md @@ -0,0 +1,66 @@ +# Why we need a new TiDB Operator + +From its initial design through multiple iterations, TiDB Operator(v1) has faced and resolved many challenges. However, some issues remain inadequately addressed and have grown more pressing as TiDB and Kubernetes have evolved. These unresolved problems hinder the TiDB Operator(v1)'s maintenance and development. + +## Issues + +### StatefulSet is not flexible enough + +StatefulSet is Kubernetes' native abstraction for managing stateful applications. Compared to Deployment, it provides critical features such as stable network identities and per-Pod PVC management, which are essential for stateful workloads. Consequently, TiDB Operator(v1) initially adopted StatefulSet as the foundational resource for managing TiDB clusters. However, it has become evident that StatefulSet has significant limitations as a low-level abstraction for effectively managing TiDB. + +StatefulSet presents three significant limitations: +- Cannot scale in a specific Pod by its index. 
+- Lack of native support for pre/post hooks during scale-in or scale-out operations. +- Inability to modify the volume template after creation. + +In a Deployment, deleting a Pod triggers the creation of a new Pod to replace the old one. However, in a StatefulSet, deleting a Pod means the Pod being "restarted" and no new Pod with different identity will be created. This behavior is useful for typical stateful applications, as their state is tied to the Pod's name. Changing the Pod's identity often involves complex operations, such as a raft membership change. + +However, for TiKV, both "restart" and "replace" operations are required to effectively manage the cluster. It's because the TiKV is designed like "Cattle" more than "Pet". We hope TiKVs can be easily scaled in/out and replaced if one of them is "sick". In other words, we hope TiKVs: + +- have stable network identity and per-Pod PVC just like StatefulSets +- can "restart" a specified Pod like StatefulSets +- can "replace" a specified Pod like Deployments + +Unfortunately, it's very hard to "replace" a specific Pod in a StatefulSet because the index of Pod cannot be specified when scaling in. Try to resolve this problem, we introduce a forked version of StatefulSet called AdvancedStatefulSet. AdvancedStatefulSet can partially address this issue, but the solution is not particularly elegant. For example, we have to specify indexies of Pods that we want to delete in annotations of TidbCluster forever. + +Another limitation of (Advanced)StatefulSet is the lack of native support for pre/post hooks. (Advanced)StatefulSet is designed as a declarative API, where users typically specify the desired state directly. However, certain additional tasks, such as leader eviction and raft membership changes, must be performed before or after scaling operations. 
To handle this, we modify the [partitions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions) field each time a Pod is deleted or created to control the scale-in/scale-out process. This workaround deviates from the intended purpose of a declarative API. + +The inability to modify the volume template in StatefulSet is also annoying. To resize a volume, we are forced to delete and re-create the StatefulSet, which is very hacky. + +### TidbCluster is tooooo large + +TidbCluster is an all-in-one CRD. This design was originally intended to simplify TiDB deployment and make it easy to understand and use by users. By consolidating all TiDB cluster components into a single CRD, users only needed to focus on and inspect the information and structure of that CRD. + +However, as the TiDB Operator(v1) evolved, more fields and components were added to the all-in-one CRD (TidbCluster). To date, it encompasses **eight** components in total. This has caused the TidbCluster CRD to deviate entirely from its original design goals, instead significantly increasing the cognitive burden on users when understanding and using it. + +Additionally, the all-in-one CRD currently stores the state information for all components. As cluster size grows, using `kubectl` to view the state of a specific component or instance becomes increasingly challenging. This not only undermines usability but can also lead to performance issues. + +And the implementation for heterogeneous clusters is also weird because of the all-in-one TidbCluster design. Two TidbCluster CRs will be created for heterogeneous cluster but one of them may only contains a sepcific component. + +### Too many unmanaged kubernetes fields + +Kubernetes provides a rich set of capabilities for running Pods, most of which are defined in a structure called `PodTemplate`. As Kubernetes evolves, more fields are continuously added to `PodTemplate`. 
However, the TiDB Operator(v1) does not handle these fields from `PodTemplate` in a unified way. Instead, each new field is added to the TidbCluster on a case-by-case basis. + +Take `securityContext` as an example. While the TiDB Operator itself does not directly interact with the user's security configurations, `securityContext` is essential for almost all users running TiDB on Kubernetes. + +All of these fields may be reformatted in the TidbCluster CRD without any actual changes to their functionality. This leads to the content of TidbCluster diverging significantly from the original `PodTemplate`, ultimately reducing usability. Even Kubernetes experts need to manually verify whether the TiDB Operator supports some specific fields when deploying TiDB. + +Additionally, this approach prevents the TiDB Operator from quickly supporting new Kubernetes features. As a result, many practical and valuable Kubernetes functionalities remain unsupported by the current TiDB Operator. + +### Missing validation + +Another issue is the need for a validation webhook enhance the user experience when managing TiDB clusters with the TiDB Operator in Kubernetes. + +### Compatibility with Kubernetes and TiDB + +Both Kubernetes and TiDB evolve rapidly. A more robust design is required to maintain compatibility with the continuous updates and changes in both Kubernetes and TiDB. + +## Future + +### Autoscaling + +Autoscaling is a valuable but complex feature for TiDB. We aim to explore practical scenarios where it can address challenges that are difficult to resolve in traditional IDC environments. + +### Kubectl plugin + +We hope to provide a similar user experience like [tiup](https://github.com/pingcap/tiup) by a kubectl plugin. 
diff --git a/examples/basic/00-cluster.yaml b/examples/basic/00-cluster.yaml new file mode 100644 index 00000000000..95275e02901 --- /dev/null +++ b/examples/basic/00-cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: Cluster +metadata: + name: basic +spec: {} diff --git a/examples/basic/01-pd.yaml b/examples/basic/01-pd.yaml new file mode 100644 index 00000000000..676a4ca0c60 --- /dev/null +++ b/examples/basic/01-pd.yaml @@ -0,0 +1,30 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: PDGroup +metadata: + name: pd + labels: + pingcap.com/group: pd + pingcap.com/component: pd + pingcap.com/cluster: basic +spec: + cluster: + name: basic + version: v8.1.0 + replicas: 3 + template: + metadata: + annotations: + author: pingcap + spec: + resources: + cpu: "4" + memory: 8Gi + config: | + [log] + level = "debug" + volumes: + - name: data + path: /var/lib/pd + for: + - type: data + storage: 20Gi diff --git a/examples/basic/02-tikv.yaml b/examples/basic/02-tikv.yaml new file mode 100644 index 00000000000..282a37d16c4 --- /dev/null +++ b/examples/basic/02-tikv.yaml @@ -0,0 +1,30 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiKVGroup +metadata: + name: tikv + labels: + pingcap.com/group: tikv + pingcap.com/component: tikv + pingcap.com/cluster: basic +spec: + cluster: + name: basic + version: v8.1.0 + replicas: 3 + template: + metadata: + annotations: + author: pingcap + spec: + resources: + cpu: "4" + memory: 8Gi + config: | + [log] + level = "info" + volumes: + - name: data + path: /var/lib/tikv + for: + - type: data + storage: 100Gi diff --git a/examples/basic/03-tidb.yaml b/examples/basic/03-tidb.yaml new file mode 100644 index 00000000000..e9e9581a481 --- /dev/null +++ b/examples/basic/03-tidb.yaml @@ -0,0 +1,26 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiDBGroup +metadata: + name: tidb + labels: + pingcap.com/group: tidb + pingcap.com/component: tidb + pingcap.com/cluster: basic +spec: + cluster: + name: basic + version: v8.1.0 + 
replicas: 2 + service: + type: ClusterIP + template: + metadata: + annotations: + author: pingcap + spec: + resources: + cpu: "1" + memory: 2Gi + config: | + [log] + level = "debug" diff --git a/examples/basic/04-tiflash.yaml b/examples/basic/04-tiflash.yaml new file mode 100644 index 00000000000..ffda664547c --- /dev/null +++ b/examples/basic/04-tiflash.yaml @@ -0,0 +1,32 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiFlashGroup +metadata: + name: tiflash + labels: + pingcap.com/group: tiflash + pingcap.com/component: tiflash + pingcap.com/cluster: basic +spec: + cluster: + name: basic + version: v8.1.0 + replicas: 1 + template: + metadata: + annotations: + author: pingcap + spec: + resources: + cpu: "4" + memory: 8Gi + config: | + mark_cache_size = 1073741824 + proxyConfig: "" + volumes: + - name: data + # to compatible with the cluster managed by TiDB Operator v1 + # this path should often be /data0 + path: /data0 + for: + - type: data + storage: 100Gi diff --git a/examples/bootstrap-sql/00-cluster.yaml b/examples/bootstrap-sql/00-cluster.yaml new file mode 100644 index 00000000000..8c0f0e64b37 --- /dev/null +++ b/examples/bootstrap-sql/00-cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: Cluster +metadata: + name: bsql +spec: {} diff --git a/examples/bootstrap-sql/01-pd.yaml b/examples/bootstrap-sql/01-pd.yaml new file mode 100644 index 00000000000..469a7fcddd2 --- /dev/null +++ b/examples/bootstrap-sql/01-pd.yaml @@ -0,0 +1,22 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: PDGroup +metadata: + name: pd + labels: + pingcap.com/group: pd + pingcap.com/component: pd + pingcap.com/cluster: bsql +spec: + cluster: + name: bsql + version: v8.1.0 + replicas: 3 + template: + spec: + config: "" + volumes: + - name: data + path: /var/lib/pd + for: + - type: data + storage: 20Gi diff --git a/examples/bootstrap-sql/02-tikv.yaml b/examples/bootstrap-sql/02-tikv.yaml new file mode 100644 index 00000000000..19f88bfd4e1 --- /dev/null +++ 
b/examples/bootstrap-sql/02-tikv.yaml @@ -0,0 +1,22 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiKVGroup +metadata: + name: tikv + labels: + pingcap.com/group: tikv + pingcap.com/component: tikv + pingcap.com/cluster: bsql +spec: + cluster: + name: bsql + version: v8.1.0 + replicas: 3 + template: + spec: + config: "" + volumes: + - name: data + path: /var/lib/tikv + for: + - type: data + storage: 100Gi diff --git a/examples/bootstrap-sql/03-tidb.yaml b/examples/bootstrap-sql/03-tidb.yaml new file mode 100644 index 00000000000..f06958ea75d --- /dev/null +++ b/examples/bootstrap-sql/03-tidb.yaml @@ -0,0 +1,17 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiDBGroup +metadata: + name: tidb + labels: + pingcap.com/group: tidb + pingcap.com/component: tidb + pingcap.com/cluster: bsql +spec: + cluster: + name: bsql + version: v8.1.0 + replicas: 1 + bootstrapSQLConfigMapName: bsql + template: + spec: + config: "" diff --git a/examples/bootstrap-sql/bsql-cm.yaml b/examples/bootstrap-sql/bsql-cm.yaml new file mode 100644 index 00000000000..e9cd8a32fb5 --- /dev/null +++ b/examples/bootstrap-sql/bsql-cm.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + bootstrap-sql: | + SET PASSWORD FOR 'root'@'%' = 'pingcap'; +kind: ConfigMap +metadata: + name: bsql diff --git a/examples/overlay/00-cluster.yaml b/examples/overlay/00-cluster.yaml new file mode 100644 index 00000000000..bdef67363ed --- /dev/null +++ b/examples/overlay/00-cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: Cluster +metadata: + name: overlay +spec: {} diff --git a/examples/overlay/01-pd.yaml b/examples/overlay/01-pd.yaml new file mode 100644 index 00000000000..46edbcc8584 --- /dev/null +++ b/examples/overlay/01-pd.yaml @@ -0,0 +1,33 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: PDGroup +metadata: + name: pd + labels: + pingcap.com/group: pd + pingcap.com/component: pd + pingcap.com/cluster: overlay +spec: + cluster: + name: overlay + version: v8.1.0 + replicas: 3 + template: + 
spec: + config: "" + volumes: + - name: data + path: /var/lib/pd + for: + - type: data + storage: 20Gi + overlay: + pod: + spec: + containers: + - name: pd + env: + - name: "DASHBOARD_SESSION_SECRET" + valueFrom: + secretKeyRef: + name: "dashboard-session-secret" + key: "encryption_key" diff --git a/examples/overlay/02-tikv.yaml b/examples/overlay/02-tikv.yaml new file mode 100644 index 00000000000..d942c88e184 --- /dev/null +++ b/examples/overlay/02-tikv.yaml @@ -0,0 +1,22 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiKVGroup +metadata: + name: tikv + labels: + pingcap.com/group: tikv + pingcap.com/component: tikv + pingcap.com/cluster: overlay +spec: + cluster: + name: overlay + version: v8.1.0 + replicas: 3 + template: + spec: + config: "" + volumes: + - name: data + path: /var/lib/tikv + for: + - type: data + storage: 100Gi \ No newline at end of file diff --git a/examples/overlay/03-tidb.yaml b/examples/overlay/03-tidb.yaml new file mode 100644 index 00000000000..2fe5177013c --- /dev/null +++ b/examples/overlay/03-tidb.yaml @@ -0,0 +1,26 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiDBGroup +metadata: + name: tidb + labels: + pingcap.com/group: tidb + pingcap.com/component: tidb + pingcap.com/cluster: overlay +spec: + cluster: + name: overlay + version: v8.1.0 + replicas: 2 + template: + spec: + config: "" + overlay: + pod: + spec: + terminationGracePeriodSeconds: 120 + securityContext: + sysctls: + - name: "net.ipv4.tcp_keepalive_intvl" + value: "75" + - name: "net.ipv4.tcp_keepalive_time" + value: "300" diff --git a/examples/overlay/dashboard-session-secret.yaml b/examples/overlay/dashboard-session-secret.yaml new file mode 100644 index 00000000000..c8cbcb7e98d --- /dev/null +++ b/examples/overlay/dashboard-session-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: dashboard-session-secret +type: Opaque +data: + encryption_key: ZW5jcnlwdGVkLWtleQo= \ No newline at end of file diff --git 
a/examples/schedule-policy/00-cluster.yaml b/examples/schedule-policy/00-cluster.yaml new file mode 100644 index 00000000000..8134b654f51 --- /dev/null +++ b/examples/schedule-policy/00-cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: Cluster +metadata: + name: schedule +spec: {} diff --git a/examples/schedule-policy/01-pd.yaml b/examples/schedule-policy/01-pd.yaml new file mode 100644 index 00000000000..34392483f7e --- /dev/null +++ b/examples/schedule-policy/01-pd.yaml @@ -0,0 +1,33 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: PDGroup +metadata: + name: pd + labels: + pingcap.com/group: pd + pingcap.com/component: pd + pingcap.com/cluster: schedule +spec: + cluster: + name: schedule + version: v8.1.0 + replicas: 3 + # All PD instances in the group will evenly spread in differnet zones + schedulePolicies: + - type: EvenlySpread + evenlySpread: + topologies: + - topology: + topology.kubernetes.io/zone: us-west-2a + - topology: + topology.kubernetes.io/zone: us-west-2b + - topology: + topology.kubernetes.io/zone: us-west-2c + template: + spec: + config: "" + volumes: + - name: data + path: /var/lib/pd + for: + - type: data + storage: 20Gi \ No newline at end of file diff --git a/examples/schedule-policy/02-tikv.yaml b/examples/schedule-policy/02-tikv.yaml new file mode 100644 index 00000000000..69500b0cf49 --- /dev/null +++ b/examples/schedule-policy/02-tikv.yaml @@ -0,0 +1,33 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiKVGroup +metadata: + name: tikv + labels: + pingcap.com/group: tikv + pingcap.com/component: tikv + pingcap.com/cluster: schedule +spec: + cluster: + name: schedule + version: v8.1.0 + replicas: 3 + # All TiKV instances in the group will evenly spread in differnet zones + schedulePolicies: + - type: EvenlySpread + evenlySpread: + topologies: + - topology: + topology.kubernetes.io/zone: us-west-2a + - topology: + topology.kubernetes.io/zone: us-west-2b + - topology: + topology.kubernetes.io/zone: us-west-2c + 
template: + spec: + config: "" + volumes: + - name: data + path: /var/lib/tikv + for: + - type: data + storage: 100Gi \ No newline at end of file diff --git a/examples/schedule-policy/03-tidb.yaml b/examples/schedule-policy/03-tidb.yaml new file mode 100644 index 00000000000..0183f25960f --- /dev/null +++ b/examples/schedule-policy/03-tidb.yaml @@ -0,0 +1,29 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiDBGroup +metadata: + name: tidb + labels: + pingcap.com/group: tidb + pingcap.com/component: tidb + pingcap.com/cluster: schedule +spec: + cluster: + name: schedule + version: v8.1.0 + replicas: 2 + # All TiDB instances in the group will evenly spread in differnet zones + schedulePolicies: + - type: EvenlySpread + evenlySpread: + topologies: + - topology: + topology.kubernetes.io/zone: us-west-2a + - topology: + topology.kubernetes.io/zone: us-west-2b + - topology: + topology.kubernetes.io/zone: us-west-2c + template: + spec: + config: "" + + diff --git a/examples/tls/00-cluster.yaml b/examples/tls/00-cluster.yaml new file mode 100644 index 00000000000..378e759a6e7 --- /dev/null +++ b/examples/tls/00-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: Cluster +metadata: + name: tls +spec: + tlsCluster: + enabled: true diff --git a/examples/tls/01-pd.yaml b/examples/tls/01-pd.yaml new file mode 100644 index 00000000000..9e1afee9664 --- /dev/null +++ b/examples/tls/01-pd.yaml @@ -0,0 +1,24 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: PDGroup +metadata: + name: pd + labels: + pingcap.com/group: pd + pingcap.com/component: pd + pingcap.com/cluster: tls +spec: + cluster: + name: tls + version: v8.1.0 + replicas: 3 + template: + spec: + config: | + [security] + cert-allowed-cn = ["TiDB"] + volumes: + - name: data + path: /var/lib/pd + for: + - type: data + storage: 20Gi diff --git a/examples/tls/02-tikv.yaml b/examples/tls/02-tikv.yaml new file mode 100644 index 00000000000..59ca81fbf2f --- /dev/null +++ b/examples/tls/02-tikv.yaml @@ -0,0 
+1,24 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiKVGroup +metadata: + name: tikv + labels: + pingcap.com/group: tikv + pingcap.com/component: tikv + pingcap.com/cluster: tls +spec: + cluster: + name: tls + version: v8.1.0 + replicas: 3 + template: + spec: + config: | + [security] + cert-allowed-cn = ["TiDB"] + volumes: + - name: data + path: /var/lib/tikv + for: + - type: data + storage: 100Gi diff --git a/examples/tls/03-tidb.yaml b/examples/tls/03-tidb.yaml new file mode 100644 index 00000000000..8158ecdca8f --- /dev/null +++ b/examples/tls/03-tidb.yaml @@ -0,0 +1,20 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiDBGroup +metadata: + name: tidb + labels: + pingcap.com/group: tidb + pingcap.com/component: tidb + pingcap.com/cluster: tls +spec: + cluster: + name: tls + version: v8.1.0 + replicas: 1 + tlsClient: + enabled: true + template: + spec: + config: | + [security] + cluster-verify-cn = ["TiDB"] diff --git a/examples/tls/04-tiflash.yaml b/examples/tls/04-tiflash.yaml new file mode 100644 index 00000000000..acc14a98ee9 --- /dev/null +++ b/examples/tls/04-tiflash.yaml @@ -0,0 +1,27 @@ +apiVersion: core.pingcap.com/v1alpha1 +kind: TiFlashGroup +metadata: + name: tiflash + labels: + pingcap.com/group: tiflash + pingcap.com/component: tiflash + pingcap.com/cluster: tls +spec: + cluster: + name: tls + version: v8.1.0 + replicas: 1 + template: + spec: + config: | + [security] + cert_allowed_cn = ["TiDB"] + proxyConfig: | + [security] + cert-allowed-cn = ["TiDB"] + volumes: + - name: data + path: /data0 + for: + - type: data + storage: 100Gi diff --git a/examples/tls/issuer-cert.yaml b/examples/tls/issuer-cert.yaml new file mode 100644 index 00000000000..68bb30b5b05 --- /dev/null +++ b/examples/tls/issuer-cert.yaml @@ -0,0 +1,225 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: tls-selfsigned-ca-issuer +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-ca +spec: + secretName: 
tls-ca-secret + commonName: "TiDB CA" + isCA: true + issuerRef: + name: tls-selfsigned-ca-issuer + kind: Issuer +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: tls-tidb-issuer +spec: + ca: + secretName: tls-ca-secret +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-tidb-server-secret +spec: + secretName: tls-tidb-server-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB Server" + usages: + - server auth + dnsNames: + - "tls-tidb" + - "tls-tidb.default" + - "tls-tidb.default.svc" + - "*.tls-tidb" + - "*.tls-tidb.default" + - "*.tls-tidb.default.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: tls-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-tidb-client-secret +spec: + secretName: tls-tidb-client-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB Client" + usages: + - client auth + issuerRef: + name: tls-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-pd-cluster-secret +spec: + secretName: tls-pd-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "tls-pd" + - "tls-pd.default" + - "tls-pd.default.svc" + - "tls-pd-peer" + - "tls-pd-peer.default" + - "tls-pd-peer.default.svc" + - "*.tls-pd-peer" + - "*.tls-pd-peer.default" + - "*.tls-pd-peer.default.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: tls-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-tikv-cluster-secret +spec: + secretName: tls-tikv-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + 
organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "tls-tikv" + - "tls-tikv.default" + - "tls-tikv.default.svc" + - "tls-tikv-peer" + - "tls-tikv-peer.default" + - "tls-tikv-peer.default.svc" + - "*.tls-tikv-peer" + - "*.tls-tikv-peer.default" + - "*.tls-tikv-peer.default.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: tls-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-tidb-cluster-secret +spec: + secretName: tls-tidb-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "tls-tidb" + - "tls-tidb.default" + - "tls-tidb.default.svc" + - "tls-tidb-peer" + - "tls-tidb-peer.default" + - "tls-tidb-peer.default.svc" + - "*.tls-tidb-peer" + - "*.tls-tidb-peer.default" + - "*.tls-tidb-peer.default.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: tls-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-tiflash-cluster-secret +spec: + secretName: tls-tiflash-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "tls-tiflash" + - "tls-tiflash.default" + - "tls-tiflash.default.svc" + - "tls-tiflash-peer" + - "tls-tiflash-peer.default" + - "tls-tiflash-peer.default.svc" + - "*.tls-tiflash-peer" + - "*.tls-tiflash-peer.default" + - "*.tls-tiflash-peer.default.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: tls-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-cluster-client-secret +spec: + secretName: tls-cluster-client-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: 
+ organizations: + - PingCAP + commonName: "TiDB" + usages: + - client auth + issuerRef: + name: tls-tidb-issuer + kind: Issuer + group: cert-manager.io diff --git a/go.mod b/go.mod new file mode 100644 index 00000000000..e7f9b751562 --- /dev/null +++ b/go.mod @@ -0,0 +1,142 @@ +module github.com/pingcap/tidb-operator + +go 1.23.0 + +require ( + github.com/Masterminds/semver/v3 v3.3.0 + github.com/aws/aws-sdk-go-v2 v1.30.5 + github.com/aws/aws-sdk-go-v2/config v1.27.35 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3 + github.com/aws/smithy-go v1.20.4 + github.com/distribution/reference v0.5.0 + github.com/docker/go-units v0.5.0 + github.com/evanphx/json-patch v4.12.0+incompatible + github.com/go-logr/logr v1.4.2 + github.com/go-sql-driver/mysql v1.8.1 + github.com/mitchellh/mapstructure v1.5.0 + github.com/olekukonko/tablewriter v0.0.5 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.34.1 + github.com/pelletier/go-toml/v2 v2.2.2 + github.com/pingcap/kvproto v0.0.0-20240403065636-c699538f7aa1 + github.com/pingcap/tidb-operator/apis/core v0.0.0-00000000000000-000000000000 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.55.0 + github.com/prometheus/prom2json v1.3.3 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.9.0 + go.uber.org/mock v0.4.0 + go.uber.org/zap v1.27.0 + k8s.io/api v0.31.0 + k8s.io/apimachinery v0.31.0 + k8s.io/cli-runtime v0.31.0 + k8s.io/client-go v0.31.0 + k8s.io/code-generator v0.31.0 + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 + k8s.io/klog/v2 v2.130.1 + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 + k8s.io/kubectl v0.31.0 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + sigs.k8s.io/controller-runtime v0.19.0-beta.0 + sigs.k8s.io/controller-tools v0.16.1 + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 + sigs.k8s.io/yaml v1.4.0 +) + +replace github.com/pingcap/tidb-operator/apis/core => ./apis/core + +require ( + 
filippo.io/edwards25519 v1.1.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.33 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.8 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/fatih/camelcase v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree 
v1.0.1 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // 
indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.23.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.24.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/component-base v0.31.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000000..59dfaf001a3 --- /dev/null +++ b/go.sum @@ -0,0 +1,398 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod 
h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= +github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/config v1.27.35 h1:jeFgiWYNV0vrgdZqB4kZBjYNdy0IKkwrAjr2fwpHIig= +github.com/aws/aws-sdk-go-v2/config v1.27.35/go.mod h1:qnpEvTq8ZfjrCqmJGRfWZuF+lGZ/vG8LK2K0L/TY1gQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.33 h1:lBHAQQznENv0gLHAZ73ONiTSkCtr8q3pSqWrpbBBZz0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.33/go.mod h1:MBuqCUOT3ChfLuxNDGyra67eskx7ge9e3YKYBce7wpI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3 
h1:dqdCh1M8h+j8OGNUpxTs7eBPFr6lOdLpdlE6IPLLSq4= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3/go.mod h1:TFSALWR7Xs7+KyMM87ZAYxncKFBvzEt2rpK/BJCH2ps= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.8 h1:JRwuL+S1Qe1owZQoxblV7ORgRf2o0SrtzDVIbaVCdQ0= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.8/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.8 h1:+HpGETD9463PFSj7lX5+eq7aLDs85QUIA+NBkeAsscA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.8/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.8 h1:bAi+4p5EKnni+jrfcAhb7iHFQ24bthOAV9t0taf3DCE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.8/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= +github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 
v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= 
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af 
h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term 
v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pingcap/kvproto v0.0.0-20240403065636-c699538f7aa1 h1:vDWWJKU6ztczn24XixahtLwcnJ15DOtSRIRM3jVtZNU= +github.com/pingcap/kvproto v0.0.0-20240403065636-c699538f7aa1/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 
h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prom2json v1.3.3 h1:IYfSMiZ7sSOfliBoo89PcufjWO4eAR0gznGcETyaUgo= +github.com/prometheus/prom2json v1.3.3/go.mod h1:Pv4yIPktEkK7btWsrUTWDDDrnpUrAELaOCj+oFwlgmc= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod 
h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.31.0 h1:V2Q1gj1u3/WfhD475HBQrIYsoryg/LrhhK4RwpN+DhA= +k8s.io/cli-runtime v0.31.0/go.mod h1:vg3H94wsubuvWfSmStDbekvbla5vFGC+zLWqcf+bGDw= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/code-generator v0.31.0 h1:w607nrMi1KeDKB3/F/J4lIoOgAwc+gV9ZKew4XRfMp8= +k8s.io/code-generator v0.31.0/go.mod h1:84y4w3es8rOJOUUP1rLsIiGlO1JuEaPFXQPA9e/K6U0= +k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= +k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.31.0 h1:kANwAAPVY02r4U4jARP/C+Q1sssCcN/1p9Nk+7BQKVg= +k8s.io/kubectl v0.31.0/go.mod 
h1:pB47hhFypGsaHAPjlwrNbvhXgmuAr01ZBvAIIUaI8d4= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.0-beta.0 h1:2dhsJeWBmzrnSE+NMourFWen0lSRg3JYs3Pp04+cJss= +sigs.k8s.io/controller-runtime v0.19.0-beta.0/go.mod h1:DsWafTWWtE45ewmWCXm3Tsend5uwveZCkpYfod82SXE= +sigs.k8s.io/controller-tools v0.16.1 h1:gvIsZm+2aimFDIBiDKumR7EBkc+oLxljoUVfRbDI6RI= +sigs.k8s.io/controller-tools v0.16.1/go.mod h1:0I0xqjR65YTfoO12iR+mZR6s6UAVcUARgXRlsu0ljB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/go.work b/go.work new file mode 100644 index 00000000000..0fc2262c126 --- /dev/null +++ b/go.work @@ -0,0 +1,6 @@ +go 1.23.0 + +use ( + . 
+ ./apis/core +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 00000000000..49ad8ebc1c6 --- /dev/null +++ b/go.work.sum @@ -0,0 +1,86 @@ +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod 
h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU= +go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI= +go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg= +go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk= +go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0= +go.etcd.io/etcd/raft/v3 v3.5.13/go.mod 
h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw= +go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8= +google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= +k8s.io/component-helpers v0.31.0/go.mod h1:MrNIvT4iB7wXIseYSWfHUJB/aNUiFvbilp4qDfBQi6s= +k8s.io/kms v0.31.0/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94= +k8s.io/metrics v0.31.0/go.mod h1:UNsz6swyX8FWkDoKN9ixPF75TBREMbHZIKjD7fydaOY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/kustomize/kustomize/v5 v5.4.2/go.mod h1:5ypfJVYlPb2MKKeoGknVLxvHemDlQT+szI4+KOhnD6k= diff --git a/hack/boilerplate/boilerplate.go.txt b/hack/boilerplate/boilerplate.go.txt new file mode 100644 index 00000000000..1c775619699 --- /dev/null +++ b/hack/boilerplate/boilerplate.go.txt @@ -0,0 +1,13 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/hack/build.sh b/hack/build.sh new file mode 100755 index 00000000000..fd359e3b1c5 --- /dev/null +++ b/hack/build.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/..; pwd -P) + +source $ROOT/hack/lib/vars.sh +source $ROOT/hack/lib/build.sh + +build::all ${@} diff --git a/hack/download.sh b/hack/download.sh new file mode 100755 index 00000000000..06be45f35d9 --- /dev/null +++ b/hack/download.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/..; pwd -P) + +# download +# go_install +# github.com/golangci/golangci-lint/cmd/golangci-lint +# v1.59.1 +# "version --format=short" +function download() { + local type=$1 + shift + + case $type in + go_install) + go_install "$@" + ;; + *) + echo "unknown download type: $type" + exit 1 + ;; + esac +} + +function go_install() { + local output=$1 + local path=$2 + local version=${3:-""} + local cond=${4:-""} + + if [[ -n $cond && -f $output ]]; then + local curVersion=$(eval "$output $cond") + if [[ $curVersion == $version ]]; then + echo "$output@$version has been installed" + return + else + echo "$output@$curVersion is outdated, try to re-install" + fi + fi + + local pkgPath=$path + if [[ -n $version ]]; then + echo "Install $output with version $version" + pkgPath=$path@$version + else + echo "Install $output" + fi + + local output_dir=$(dirname $output) + mkdir -p $output_dir + + local tmp_dir=$(mktemp -d) + + # TODO: define a var that holds the absolute path of the go command + GOBIN=${tmp_dir} go install -v $pkgPath + mv ${tmp_dir}/* $output + rm -rf $tmp_dir +} + +download "$@" diff --git a/hack/e2e.sh b/hack/e2e.sh new file mode 100755 index 00000000000..a8dacb37f3d --- /dev/null +++ b/hack/e2e.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/..; pwd -P) + +source $ROOT/hack/lib/e2e.sh + +e2e::e2e "$@" diff --git a/hack/image.sh b/hack/image.sh new file mode 100755 index 00000000000..d1a5b221cd0 --- /dev/null +++ b/hack/image.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/..; pwd -P) + +source $ROOT/hack/lib/vars.sh +source $ROOT/hack/lib/image.sh + +image::build $@ diff --git a/hack/kind.sh b/hack/kind.sh new file mode 100755 index 00000000000..ba8962da500 --- /dev/null +++ b/hack/kind.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/..; pwd -P) + +source $ROOT/hack/lib/vars.sh +source $ROOT/hack/lib/kind.sh + +kind::ensure_cluster diff --git a/hack/lib/build.sh b/hack/lib/build.sh new file mode 100644 index 00000000000..5494847866d --- /dev/null +++ b/hack/lib/build.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/../..; pwd -P) + +source $ROOT/hack/lib/vars.sh +source $ROOT/hack/lib/version.sh + +OUTPUT_DIR=$ROOT/_output + +function build::go() { + local target=$1 + local os=${2:-""} + local arch=${3:-""} + + CGO_ENABLED=0 \ + GOOS=${os} \ + GOARCH=${arch} \ + go build -v \ + -ldflags "$(version::ldflags)" \ + -o ${OUTPUT_DIR}/${os}/${arch}/bin/${target} \ + ${ROOT}/cmd/${target}/main.go +} + +function build::all() { + local targets=() + while [[ $# -gt 0 ]]; do + targets+=("$1") + shift + done + if [[ ${#targets[@]} -eq 0 ]]; then + targets=("operator" "prestop-checker") + fi + + local platforms + IFS=, read -ra platforms <<< "${V_PLATFORMS}" + + for target in ${targets[@]}; do + if [[ ${#platforms[@]} -eq 0 ]]; then + build::go $target + else + for platform in ${platforms[@]}; do + case $platform in + linux/arm64) + build::go $target linux arm64 ;; + linux/amd64) + build::go $target linux amd64 ;; + *) + echo "unsupported platform 
${platform}" + exit 1 + ;; + esac + done + fi + done +} diff --git a/hack/lib/e2e.sh b/hack/lib/e2e.sh new file mode 100755 index 00000000000..90cc6ba92de --- /dev/null +++ b/hack/lib/e2e.sh @@ -0,0 +1,171 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/../..; pwd -P) + +source $ROOT/hack/lib/vars.sh +source $ROOT/hack/lib/kind.sh +source $ROOT/hack/lib/image.sh + +OUTPUT_DIR=$ROOT/_output +KIND=$OUTPUT_DIR/bin/kind +KUBECTL=$OUTPUT_DIR/bin/kubectl +GINKGO=$OUTPUT_DIR/bin/ginkgo +GENERATEJWT=$OUTPUT_DIR/bin/generate_jwt + +CI=${CI:-""} + +function e2e::ensure_kubectl() { + if ! command -v $KUBECTL &>/dev/null; then + echo "kubectl not found, installing..." + curl -L -o $KUBECTL https://dl.k8s.io/release/v1.30.2/bin/${V_OS}/${V_ARCH}/kubectl + chmod +x $KUBECTL + fi +} + +function e2e::switch_kube_context() { + echo "switching to kind context ${V_KIND_CLUSTER}" + $KUBECTL config use-context kind-${V_KIND_CLUSTER} +} + +function e2e::ensure_cert_manager() { + echo "checking if cert-manager is installed..." + if $KUBECTL -n cert-manager get deployment cert-manager &>/dev/null; then + echo "cert-manager already installed, skipping..." + return + fi + + echo "installing cert-manager..." 
+ $KUBECTL apply --server-side=true -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.2/cert-manager.yaml + + echo "waiting for cert-manager to be ready..." + $KUBECTL -n cert-manager wait --for=condition=Available --timeout=5m deployment/cert-manager +} + +function e2e::install_crds() { + echo "installing CRDs..." + $KUBECTL apply --server-side=true -f $ROOT/manifests/crd +} + +function e2e::install_rbac() { + echo "installing RBAC..." + $KUBECTL -n $V_DEPLOY_NAMESPACE apply --server-side=true -f $ROOT/manifests/rbac +} + +function e2e::install_operator() { + echo "installing operator..." + $KUBECTL -n $V_DEPLOY_NAMESPACE apply --server-side=true -f $ROOT/manifests/deploy + + echo "waiting for operator to be ready..." + $KUBECTL -n $V_DEPLOY_NAMESPACE wait --for=condition=Available --timeout=5m deployment/tidb-operator +} + +function e2e::uninstall_operator() { + echo "checking if operator is installed..." + if ! $KUBECTL -n $V_DEPLOY_NAMESPACE get deployment tidb-operator &>/dev/null; then + echo "operator not found, skipping uninstall..." + return + fi + + echo "uninstalling operator..." + $KUBECTL -n $V_DEPLOY_NAMESPACE delete -f $ROOT/manifests/deploy + + echo "waiting for operator to be deleted..." + $KUBECTL -n $V_DEPLOY_NAMESPACE wait --for=delete --timeout=5m deployment/tidb-operator +} + +function e2e::install_ginkgo() { + if ! command -v $GINKGO &>/dev/null; then + echo "ginkgo not found, installing..." + $ROOT/hack/download.sh go_install $GINKGO github.com/onsi/ginkgo/v2/ginkgo + fi +} + +# generate_jwt is a tool to generate JWT token for `tidb_auth_token` test +# ref: https://docs.pingcap.com/tidb/stable/security-compatibility-with-mysql#tidb_auth_token +function e2e::install_generate_jwt() { + if ! command -v $GENERATEJWT &>/dev/null; then + echo "generate_jwt not found, installing..." 
+ $ROOT/hack/download.sh go_install $GENERATEJWT github.com/cbcwestwolf/generate_jwt@latest + fi +} + +function e2e::run() { + if [[ "$CI" == "true" ]]; then + echo "running e2e tests in CI mode with options: $*" + $GINKGO -v -r --timeout=2h --procs=1 --compilers=1 --randomize-all --randomize-suites --fail-on-empty --keep-going --race --trace --github-output "$@" "$ROOT/tests/e2e/..." + else + echo "running e2e tests locally..." + $GINKGO -r -v "$@" "$ROOT/tests/e2e/..." + fi +} + +function e2e::prepare() { + e2e::install_ginkgo + e2e::install_generate_jwt + e2e::ensure_kubectl + kind::ensure_cluster + e2e::switch_kube_context + e2e::ensure_cert_manager + + e2e::install_crds + e2e::install_rbac + + # build the operator image and load it into the kind cluster + image::build prestop-checker operator --push + e2e::uninstall_operator + e2e::install_operator + + image:prepare +} + +function e2e::e2e() { + local ginkgo_opts=() + local prepare=0 + local run=0 + while [[ $# -gt 0 ]]; do + case $1 in + --prepare) + prepare=1 + shift + ;; + run) + run=1 + shift + ;; + *) + if [[ $run -eq 1 ]]; then + ginkgo_opts+=("${1}") + else + echo "Unknown option $1" + exit 1 + fi + shift + ;; + esac + done + + if [[ $prepare -eq 1 ]]; then + e2e::prepare + fi + if [[ $run -eq 1 ]]; then + e2e::run "${ginkgo_opts[@]}" + fi +} diff --git a/hack/lib/image.sh b/hack/lib/image.sh new file mode 100644 index 00000000000..2b4b4bb977b --- /dev/null +++ b/hack/lib/image.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/../..; pwd -P) + +source $ROOT/hack/lib/vars.sh + +OUTPUT_DIR=$ROOT/_output +IMAGE_DIR=$OUTPUT_DIR/image +CACHE_DIR=$OUTPUT_DIR/cache +KIND=$OUTPUT_DIR/bin/kind + +function image::build() { + local targets=() + local with_push=0 + while [[ $# -gt 0 ]]; do + case $1 in + --push) + with_push=1 + shift + ;; + -*|--*) + echo "Unknown option $1" + exit 1 + ;; + *) + targets+=("$1") # save positional arg + shift # past argument + ;; + esac + done + + + + mkdir -p ${IMAGE_DIR} + mkdir -p ${CACHE_DIR} + + local args="" + if [[ -n "$V_PLATFORMS" ]]; then + args="--platform $V_PLATFORMS" + fi + + # Check if current builder's driver is 'docker-container' + if docker buildx ls | grep "*" | grep -q "docker-container"; then + echo "'docker-container' exists, no need to execute the 'docker buildx create --use' command." + else + echo "'docker-container' does not exist, executing 'docker buildx create --use'..." + docker buildx create --use + fi + + for target in ${targets[@]}; do + docker buildx build \ + --target $target \ + -o type=oci,dest=$IMAGE_DIR/${target}.tar \ + -t ${V_IMG_PROJECT}/${target}:latest \ + --cache-from=type=local,src=$CACHE_DIR \ + --cache-to=type=local,dest=$CACHE_DIR \ + $args \ + -f $ROOT/image/Dockerfile $ROOT + done + + case $V_IMG_HUB in + kind) + for target in ${targets[@]}; do + if [[ $with_push -eq 1 ]]; then + echo "load ${target} image into kind cluster" + $KIND load image-archive $IMAGE_DIR/${target}.tar --name ${V_KIND_CLUSTER} + fi + done + ;; + *) + echo "Unknown image hub: ${V_IMG_HUB}" + echo "Please see ./hack/lib/vars.sh#V_IMG_HUB" + return 1 + ;; + esac +} + +# Prepare tidb components' images for e2e tests. 
+function image:prepare() { + echo "load tidb components' images into kind cluster" + for component in pd tikv tidb tiflash; do + for version in "$V_TIDB_CLUSTER_VERSION" "$V_TIDB_CLUSTER_VERSION_PREV"; do + docker pull gcr.io/pingcap-public/dbaas/$component:"$version" -q && \ + $KIND load docker-image gcr.io/pingcap-public/dbaas/$component:"$version" --name ${V_KIND_CLUSTER} + done + done +} diff --git a/hack/lib/kind.sh b/hack/lib/kind.sh new file mode 100644 index 00000000000..f909efbcc57 --- /dev/null +++ b/hack/lib/kind.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/../..; pwd -P) + +# source only once +[[ $(type -t kind::loaded) == function ]] && return 0 + +source $ROOT/hack/lib/vars.sh + +OUTPUT_DIR=$ROOT/_output +KIND=$OUTPUT_DIR/bin/kind +KUBECTL=$OUTPUT_DIR/bin/kubectl +KIND_CFG_DIR=$OUTPUT_DIR/kind + +METAL_LB_NS=metallb-system +METAL_LB_DEPLOYMENT=controller +METAL_LB_MANIFEST=https://raw.githubusercontent.com/metallb/metallb/v0.14.7/config/manifests/metallb-native.yaml +KIND_NETWORK_NAME=kind + +declare -A KIND_IMAGE=( + ["v1.31.0"]="kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865" +) + +function kind::ensure_cluster() { + if ! 
command -v $KIND &>/dev/null; then + echo "kind not found, please run 'make bin/kind'" + fi + + mkdir -p $KIND_CFG_DIR + cat << EOF > $KIND_CFG_DIR/config.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +featureGates: + VolumeAttributesClass: true +runtimeConfig: + "storage.k8s.io/v1beta1": "true" +networking: + ipFamily: dual +nodes: +- role: control-plane +EOF + + + if ! $KIND get clusters | grep -q ${V_KIND_CLUSTER}; then + local image=${KIND_IMAGE[${V_KUBE_VERSION}]} + echo "kind cluster ${V_KIND_CLUSTER} not found, creating" + + local opt="--image $image" + if [[ -z $image ]]; then + echo "$V_KUBE_VERSION is not supported, use default one supported by kind" + opt="" + else + echo "create cluster with image $image" + fi + $KIND create cluster --name ${V_KIND_CLUSTER} --config $KIND_CFG_DIR/config.yaml ${opt} + fi +} + +# marker function +function kind::loaded() { + return 0 +} + +function kind::ensure_metal_lb() { + echo "installing MetalLB..." + $KUBECTL apply -f $METAL_LB_MANIFEST + + echo "waiting for MetalLB to be ready..." + $KUBECTL -n $METAL_LB_NS wait --for=condition=Available --timeout=5m deployment/$METAL_LB_DEPLOYMENT + + echo "getting IPv4 subnet from Kind..." + subnet=$(docker network inspect -f '{{index .IPAM.Config 0 "Subnet"}}' $KIND_NETWORK_NAME | cut -d. -f1-2) + if [[ $subnet == *"/"* ]]; then + subnet=$(docker network inspect -f '{{index .IPAM.Config 1 "Subnet"}}' $KIND_NETWORK_NAME | cut -d. -f1-2) + fi + + echo "configuring address pool for MetalLB..." + cat </dev/null; do + echo -e "\nUnexpected dirty working directory:\n" + if tty -s && [[ $interactive -eq 1 ]]; then + git status -s + else + git diff -a # be more verbose in log files without tty + exit 1 + fi | sed 's/^/ /' + echo -e "\nCommit your changes in another terminal and then continue here by pressing enter." 
+ read -r + done 1>&2 +} + +# marker function +function util::loaded() { + return 0 +} diff --git a/hack/lib/vars.sh b/hack/lib/vars.sh new file mode 100644 index 00000000000..4646e41b243 --- /dev/null +++ b/hack/lib/vars.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +# source only once +[[ $(type -t vars::loaded) == function ]] && return 0 + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/../..; pwd -P) + +# --- +# Global variables definitions. 
+# All variables should be in format V_XXX +# --- + +# V_ARCH defines architecture, which is used to build and test +# - amd64 +# - arm64 +readonly V_ARCH=$(go env GOARCH) + +# V_OS defines operating system, which is used to build and test +readonly V_OS=$(uname -s | tr '[:upper:]' '[:lower:]') + +# V_PLATFORMS defines platforms of build binary, values can be split by comma +# If empty, host platform will be used automatically +# - linux/amd64 +# - linux/arm64 +readonly V_PLATFORMS=${V_PLATFORMS:-""} + +# V_IMG_HUB defines image hub of image +# - kind +readonly V_IMG_HUB=${V_IMG_HUB:-"kind"} + +# V_IMG_PROJECT defines default project name of image +readonly V_IMG_PROJECT=${V_IMG_PROJECT:-"pingcap"} + +# V_KIND_CLUSTER defines default cluster name of kind, the default value is tidb-operator +readonly V_KIND_CLUSTER=${V_KIND_CLUSTER:-"tidb-operator"} + +# V_DEPLOY_NAMESPACE defines namespace of deploy, the default value is tidb-admin +readonly V_DEPLOY_NAMESPACE=${V_DEPLOY_NAMESPACE:-"tidb-admin"} + +# V_TIDB_CLUSTER_VERSION defines version of TiDB cluster, the default value is v8.2.0 +readonly V_TIDB_CLUSTER_VERSION=${V_TIDB_CLUSTER_VERSION:-"v8.2.0"} + +# V_TIDB_CLUSTER_VERSION_PREV defines an older version of TiDB cluster, the default value is v8.1.0 +readonly V_TIDB_CLUSTER_VERSION_PREV=${V_TIDB_CLUSTER_VERSION_PREV:-"v8.1.0"} + +# V_KUBE_VERSION defines default test version of kubernetes +readonly V_KUBE_VERSION=${V_KUBE_VERSION:-"v1.31.0"} + + +# marker function +function vars::loaded() { + return 0 +} diff --git a/hack/lib/verify.sh b/hack/lib/verify.sh new file mode 100644 index 00000000000..1dbcd85a69a --- /dev/null +++ b/hack/lib/verify.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -o errexit +set -o nounset +set -o pipefail + +# source only once +[[ $(type -t verify::loaded) == function ]] && return 0 + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/../..; pwd -P) + +source $ROOT/hack/lib/util.sh + +# NOTE: this function is forked from kubernetes/hack/lib/verify-generated.sh#kube::verify:generated +# +# This function verifies whether generated files are up-to-date. The first two +# parameters are messages that get printed to stderr when changes are found, +# the rest are the function or command and its parameters for generating files +# in the work tree. +# +# Example: kube::verify::generated "Mock files are out of date" "Please run 'hack/update-mocks.sh'" hack/update-mocks.sh +function verify::generated() { + ( # a subshell prevents environment changes from leaking out of this function + local failure_header=$1 + shift + local failure_tail=$1 + shift + + util::ensure_clean_working_dir + + local tmpdir="$(mktemp -d -t "tidb-operator-verify.XXXXXX")" + git worktree add -f -q "${tmpdir}" HEAD + util::trap_add "git worktree remove -f ${tmpdir}" EXIT + cd "${tmpdir}" + + echo "cmd: $@" + # Update generated files. 
+ "$@" + + # Test for diffs + diffs=$(git status --porcelain | wc -l) + if [[ ${diffs} -gt 0 ]]; then + if [[ -n "${failure_header}" ]]; then + echo "${failure_header}" >&2 + fi + git status >&2 + git diff >&2 + if [[ -n "${failure_tail}" ]]; then + echo "" >&2 + echo "${failure_tail}" >&2 + fi + return 1 + fi + ) +} + + +# marker function +function verify::loaded() { + return 0 +} diff --git a/hack/lib/version.sh b/hack/lib/version.sh new file mode 100644 index 00000000000..8a7eb04c64d --- /dev/null +++ b/hack/lib/version.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is copied and modified from https://github.com/kubernetes/kubernetes/blob/master/hack/lib/version.sh + +set -o errexit +set -o nounset +set -o pipefail + +# ----------------------------------------------------------------------------- +# Version management helpers. These functions help to set the +# following variables: +# +# GIT_COMMIT - The git commit id corresponding to this +# source code. +# GIT_TREE_STATE - "clean" indicates no changes since the git commit id +# "dirty" indicates source code changes after the git commit id +# "archive" indicates the tree was produced by 'git archive' +# GIT_VERSION - "vX.Y" used to indicate the last release version. 
+function version::get_version_vars() { + if [[ -n ${GIT_COMMIT-} ]] || GIT_COMMIT=$(git rev-parse "HEAD^{commit}" 2>/dev/null); then + if [[ -z ${GIT_TREE_STATE-} ]]; then + # Check if the tree is dirty. default to dirty + if git_status=$(git status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then + GIT_TREE_STATE="clean" + else + GIT_TREE_STATE="dirty" + fi + fi + + # Use git describe to find the version based on tags. + if [[ -n ${GIT_VERSION-} ]] || GIT_VERSION=$(git describe --tags --abbrev=14 "${GIT_COMMIT}^{commit}" 2>/dev/null | awk -F / '{print $NF}'); then + # This translates the "git describe" to an actual semver.org + # compatible semantic version that looks something like this: + # v1.0.0-beta.0.10+4c183422345d8f + # + # TODO: We continue calling this "git version" because so many + # downstream consumers are expecting it there. + DASHES_IN_VERSION=$(echo "${GIT_VERSION}" | sed "s/[^-]//g") + if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then + # We have distance to subversion (v1.1.0-subversion-1-gCommitHash) + GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/") + elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then + # We have distance to base tag (v1.1.0-1-gCommitHash) + GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/") + fi + if [[ "${GIT_TREE_STATE}" == "dirty" ]]; then + # git describe --dirty only considers changes to existing files, but + # that is problematic since new untracked .go files affect the build, + # so use our idea of "dirty" from git status instead. + GIT_VERSION+="-dirty" + fi + + + # If GIT_VERSION is not a valid Semantic Version, then refuse to build. + if ! [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then + echo "GIT_VERSION should be a valid Semantic Version. 
Current value: ${GIT_VERSION}" + echo "Please see more details here: https://semver.org" + exit 1 + fi + fi + fi +} + +function version::ldflag() { + local key=${1} + local val=${2} + + echo "-X 'github.com/pingcap/tidb-operator/pkg/version.${key}=${val}'" +} + +# Prints the value that needs to be passed to the -ldflags parameter of go build +function version::ldflags() { + version::get_version_vars + + local buildDate= + [[ -z ${SOURCE_DATE_EPOCH-} ]] || buildDate="--date=@${SOURCE_DATE_EPOCH}" + local -a ldflags=($(version::ldflag "buildDate" "$(date ${buildDate} -u +'%Y-%m-%dT%H:%M:%SZ')")) + if [[ -n ${GIT_COMMIT-} ]]; then + ldflags+=($(version::ldflag "gitCommit" "${GIT_COMMIT}")) + ldflags+=($(version::ldflag "gitTreeState" "${GIT_TREE_STATE}")) + fi + + if [[ -n ${GIT_VERSION-} ]]; then + ldflags+=($(version::ldflag "gitVersion" "${GIT_VERSION}")) + fi + + # The -ldflags parameter takes a single string, so join the output. + echo "${ldflags[*]-}" +} diff --git a/hack/verify.sh b/hack/verify.sh new file mode 100755 index 00000000000..5f9a80c6038 --- /dev/null +++ b/hack/verify.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Copyright 2024 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/..; pwd -P) + +source $ROOT/hack/lib/verify.sh + +verify::generated "generated files are out of date" "Please run: $*" "$@" diff --git a/image/Dockerfile b/image/Dockerfile new file mode 100644 index 00000000000..ba7cd690746 --- /dev/null +++ b/image/Dockerfile @@ -0,0 +1,52 @@ +# syntax=docker.io/docker/dockerfile:1.8.1@sha256:e87caa74dcb7d46cd820352bfea12591f3dba3ddc4285e19c7dcd13359f7cefd +FROM --platform=$BUILDPLATFORM gcr.io/pingcap-public/third-party/baseimage/golang:1.23 AS builder + +ARG TARGETPLATFORM + +WORKDIR / + +ENV GOMODCACHE=/go/pkg/mod +ENV GOCACHE=/go/cache + +COPY go.mod . +COPY go.sum . +COPY apis/core/go.mod apis/core/go.mod +COPY apis/core/go.sum apis/core/go.sum + +RUN --mount=type=cache,target=/go/pkg/mod/ \ + go mod download -x +RUN --mount=type=cache,target=/go/pkg/mod/ \ + cd apis/core && go mod download -x + +COPY . . + +RUN --mount=type=cache,target=/go/pkg/mod/ \ + --mount=type=cache,target=/go/cache \ + V_PLATFORMS=$TARGETPLATFORM ./hack/build.sh + +FROM --platform=$TARGETPLATFORM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.2 AS operator + +ARG TARGETPLATFORM + +WORKDIR / + +COPY --from=builder ./_output/$TARGETPLATFORM/bin/operator operator + +# nonroot user of distroless +USER 65532:65532 + +ENTRYPOINT ["/operator"] + + +FROM --platform=$TARGETPLATFORM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.2 AS prestop-checker + +ARG TARGETPLATFORM + +WORKDIR / + +COPY --from=builder ./_output/$TARGETPLATFORM/bin/prestop-checker prestop-checker + +# nonroot user of distroless +USER 65532:65532 + +ENTRYPOINT ["/prestop-checker"] diff --git a/manifests/crd/core.pingcap.com_clusters.yaml b/manifests/crd/core.pingcap.com_clusters.yaml new file mode 100644 index 00000000000..2c9234c89ee --- /dev/null +++ b/manifests/crd/core.pingcap.com_clusters.yaml @@ -0,0 +1,217 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + 
annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: clusters.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tc + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.components[?(@.kind=="PD")].replicas + name: PD + type: integer + - jsonPath: .status.components[?(@.kind=="TiKV")].replicas + name: TiKV + type: integer + - jsonPath: .status.components[?(@.kind=="TiDB")].replicas + name: TiDB + type: integer + - jsonPath: .status.components[?(@.kind=="TiFlash")].replicas + name: TiFlash + type: integer + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster defines a TiDB cluster + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + paused: + description: Paused specifies whether to pause the reconciliation + loop for all components of the cluster. + type: boolean + revisionHistoryLimit: + description: |- + RevisionHistoryLimit is the maximum number of revisions that will + be maintained in each Group's revision history. 
+ The revision history consists of all revisions not represented by a currently applied version. + The default value is 10. + format: int32 + minimum: 0 + type: integer + suspendAction: + description: SuspendAction defines the suspend actions for the cluster. + properties: + suspendCompute: + description: SuspendCompute indicates delete the pods but keep + the PVCs. + type: boolean + type: object + tlsCluster: + description: Whether enable the TLS connection between TiDB cluster + components. + properties: + enabled: + description: |- + Enable mutual TLS connection between TiDB cluster components. + Once enabled, the mutual authentication applies to all components, + and it does not support applying to only part of the components. + The steps to enable this feature: + 1. Generate TiDB cluster components certificates and a client-side certificate for them. + There are multiple ways to generate these certificates: + - user-provided certificates: https://docs.pingcap.com/tidb/stable/generate-self-signed-certificates + - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ + - or use cert-manager signed certificates: https://cert-manager.io/ + 2. Create one secret object for one component group which contains the certificates created above. + The name of this Secret must be: --cluster-secret. + For PD: kubectl create secret generic --cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + For TiKV: kubectl create secret generic --cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + For TiDB: kubectl create secret generic --cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + For Client: kubectl create secret generic -cluster-client-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + Same for other components. 
+ type: boolean + type: object + upgradePolicy: + description: UpgradePolicy defines the upgrade policy for the cluster. + type: string + type: object + status: + properties: + components: + description: Components is the status of each component in the cluster. + items: + description: ComponentStatus is the status of a component in the + cluster. + properties: + kind: + description: Kind is the kind of the component, e.g., PD, TiKV, + TiDB, TiFlash. + enum: + - PD + - TiKV + - TiDB + - TiFlash + type: string + replicas: + description: Replicas is the number of desired replicas of the + component. + format: int32 + type: integer + required: + - kind + - replicas + type: object + type: array + x-kubernetes-list-map-keys: + - kind + x-kubernetes-list-type: map + conditions: + description: Conditions contains the current status of the cluster. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + observedGeneration is the most recent generation observed for this Cluster. It corresponds to the + Cluster's generation, which is updated on mutation by the API Server. + format: int64 + type: integer + pd: + description: |- + PD means url of the pd service, it's prepared for internal use + e.g. 
https://pd:2379 + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_pdgroups.yaml b/manifests/crd/core.pingcap.com_pdgroups.yaml new file mode 100644 index 00000000000..c50a575bfce --- /dev/null +++ b/manifests/crd/core.pingcap.com_pdgroups.yaml @@ -0,0 +1,410 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: pdgroups.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tg + kind: PDGroup + listKind: PDGroupList + plural: pdgroups + singular: pdgroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PDGroup defines a group of similar PD instances + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PDGroupSpec describes the common attributes of a PDGroup + properties: + bootstrapped: + description: |- + Bootstrapped means that pd cluster has been bootstrapped + It's no need to initialize a new cluster + Normally, this field is automatically changed by operator. + If it's true, it cannot be set to false for security + type: boolean + cluster: + properties: + name: + type: string + required: + - name + type: object + configUpdateStrategy: + default: RollingUpdate + description: |- + ConfigUpdateStrategy determines how the configuration change is applied to the cluster. + Valid values are "RollingUpdate" (by default) and "InPlace". + enum: + - RollingUpdate + - InPlace + type: string + mountClusterClientSecret: + description: MountClusterClientSecret indicates whether to mount `cluster-client-secret` + to the Pod. + type: boolean + replicas: + format: int32 + type: integer + schedulePolicies: + items: + description: SchedulePolicy defines how instances of the group schedules + its pod. 
+ properties: + evenlySpread: + properties: + topologies: + description: All instances of a group will evenly spread + in different topologies + items: + properties: + topology: + additionalProperties: + type: string + description: Topology means the topo for scheduling + type: object + weight: + description: |- + Weight defines how many pods will be scheduled to this topo + default is 1 + format: int32 + type: integer + required: + - topology + type: object + type: array + required: + - topologies + type: object + type: + type: string + required: + - type + type: object + type: array + template: + properties: + metadata: + description: |- + ObjectMeta is defined for replacing the embedded metav1.ObjectMeta + Now only labels and annotations are allowed + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. 
+ More info: http://kubernetes.io/docs/user-guide/identifiers#names + type: string + type: object + spec: + description: PDTemplateSpec can only be specified in PDGroup + properties: + config: + description: Config defines config file of PD + type: string + image: + description: |- + Image is pd's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/pd + type: string + overlay: + description: |- + Overlay defines a k8s native resource template patch + All resources(pod, pvcs, ...) managed by PD can be overlayed by this field + x-kubernetes-preserve-unknown-fields: true + resources: + description: |- + ResourceRequirements describes the compute resource requirements. + It's simplified from corev1.ResourceRequirements to fit the most common use cases. + This field will be translated to requests=limits for all resources. + If users need to specify more advanced resource requirements, just try to use overlay to override it + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines server config for PD + properties: + ports: + description: Ports defines all ports listened by pd + properties: + client: + description: Client defines port for pd's api service + properties: + port: + format: int32 + type: integer + required: + - port + type: object + peer: + description: Peer defines port for peer communication + properties: + port: + format: int32 + type: integer + required: + - port + type: object + type: object + type: object + volumes: + description: Volumes defines persistent volumes of PD + items: + 
description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. + For example, a volume can be mounted for both data and raft log. + properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. + A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. + If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this + volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. + type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. 
+ type: string + required: + - for + - path + - storage + type: object + type: array + required: + - config + - volumes + type: object + required: + - spec + type: object + version: + type: string + required: + - cluster + - replicas + - template + - version + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentReplicas: + description: |- + CurrentReplicas is the number of Instances created by the Group controller from the Group version + indicated by currentRevision. + format: int32 + type: integer + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + readyReplicas: + description: ReadyReplicas is the number of Instances created for + this ComponentGroup with a Ready Condition. + format: int32 + type: integer + replicas: + description: Replicas is the number of Instance created by the controller. + format: int32 + type: integer + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. + type: string + updatedReplicas: + description: |- + UpdatedReplicas is the number of Instances created by the Group controller from the Group version + indicated by updateRevision. + format: int32 + type: integer + version: + description: |- + Version is the version of all instances in the group. + It will be same as the `spec.version` only when all instances are upgraded to the desired version. 
+ type: string + required: + - replicas + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_pds.yaml b/manifests/crd/core.pingcap.com_pds.yaml new file mode 100644 index 00000000000..b4830e3f35b --- /dev/null +++ b/manifests/crd/core.pingcap.com_pds.yaml @@ -0,0 +1,310 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: pds.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - peer + kind: PD + listKind: PDList + plural: pds + singular: pd + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.isLeader + name: Leader + type: string + - jsonPath: .status.conditions[?(@.type=="Initialized")].status + name: Initialized + type: string + - jsonPath: .status.conditions[?(@.type=="Health")].status + name: Healthy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PD defines a PD instance + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PDSpec describes the common attributes of a PD instance + properties: + cluster: + description: Cluster is a reference of tidb cluster + properties: + name: + type: string + required: + - name + type: object + config: + description: Config defines config file of PD + type: string + image: + description: |- + Image is pd's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/pd + type: string + overlay: + description: |- + Overlay defines a k8s native resource template patch + All resources(pod, pvcs, ...) managed by PD can be overlayed by this field + x-kubernetes-preserve-unknown-fields: true + resources: + description: |- + ResourceRequirements describes the compute resource requirements. + It's simplified from corev1.ResourceRequirements to fit the most common use cases. + This field will be translated to requests=limits for all resources. 
+ If users need to specify more advanced resource requirements, just try to use overlay to override it + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines server config for PD + properties: + ports: + description: Ports defines all ports listened by pd + properties: + client: + description: Client defines port for pd's api service + properties: + port: + format: int32 + type: integer + required: + - port + type: object + peer: + description: Peer defines port for peer communication + properties: + port: + format: int32 + type: integer + required: + - port + type: object + type: object + type: object + subdomain: + description: |- + Subdomain means the subdomain of the exported pd dns. + A same pd cluster will use a same subdomain + type: string + topology: + additionalProperties: + type: string + description: |- + Topology defines the topology domain of this pd instance + It will be translated into a node affinity config + Topology cannot be changed + type: object + version: + description: Version specifies the PD version + type: string + volumes: + description: Volumes defines persistent volumes of PD + items: + description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. + For example, a volume can be mounted for both data and raft log. 
+ properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. + A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. + If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. + type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. + type: string + required: + - for + - path + - storage + type: object + type: array + required: + - cluster + - config + - subdomain + - version + - volumes + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. 
+ format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + id: + description: ID is the member id of this pd instance + type: string + isLeader: + description: |- + IsLeader indicates whether this pd is the leader + NOTE: it's a snapshot from PD, not always up to date + type: boolean + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. + type: string + required: + - id + - isLeader + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_tidbgroups.yaml b/manifests/crd/core.pingcap.com_tidbgroups.yaml new file mode 100644 index 00000000000..f4bcc915d44 --- /dev/null +++ b/manifests/crd/core.pingcap.com_tidbgroups.yaml @@ -0,0 +1,527 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tidbgroups.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tg + kind: TiDBGroup + listKind: TiDBGroupList + plural: tidbgroups + singular: tidbgroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + 
schema: + openAPIV3Schema: + description: TiDBGroup defines a group of similar TiDB instances. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TiDBGroupSpec describes the common attributes of a TiDBGroup. + properties: + bootstrapSQLConfigMapName: + description: |- + BootstrapSQLConfigMapName is the name of the ConfigMap which contains the bootstrap SQL file with the key `bootstrap-sql`, + which will only be executed when a TiDB cluster bootstrap on the first time. + The field should be set ONLY when create the first TiDB group for a cluster, since it only take effect on the first time bootstrap. + Only v6.5.1+ supports this feature. + type: string + cluster: + properties: + name: + type: string + required: + - name + type: object + configUpdateStrategy: + default: RollingUpdate + description: |- + ConfigUpdateStrategy determines how the configuration change is applied to the cluster. + Valid values are "RollingUpdate" (by default) and "InPlace". + enum: + - RollingUpdate + - InPlace + type: string + replicas: + format: int32 + type: integer + schedulePolicies: + items: + description: SchedulePolicy defines how instances of the group schedules + its pod. 
+ properties: + evenlySpread: + properties: + topologies: + description: All instances of a group will evenly spread + in different topologies + items: + properties: + topology: + additionalProperties: + type: string + description: Topology means the topo for scheduling + type: object + weight: + description: |- + Weight defines how many pods will be scheduled to this topo + default is 1 + format: int32 + type: integer + required: + - topology + type: object + type: array + required: + - topologies + type: object + type: + type: string + required: + - type + type: object + type: array + service: + description: Service defines some fields used to override the default + service. + properties: + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + template: + properties: + metadata: + description: |- + ObjectMeta is defined for replacing the embedded metav1.ObjectMeta + Now only labels and annotations are allowed + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names + type: string + type: object + spec: + description: TiDBTemplateSpec can only be specified in TiDBGroup. + properties: + config: + description: Config defines config file of TiDB. + type: string + image: + description: |- + Image is tidb's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/tidb + type: string + overlay: + description: |- + Overlay defines a k8s native resource template patch. + All resources(pod, pvcs, ...) managed by TiDB can be overlayed by this field. 
+ x-kubernetes-preserve-unknown-fields: true + probes: + description: Probes defines probes for TiDB. + properties: + readiness: + description: |- + Readiness defines the readiness probe for TiDB. + The default handler is a TCP socket on the client port. + properties: + type: + description: |- + "tcp" will use TCP socket to connect component port. + "command" will probe the status api of tidb. + enum: + - tcp + - command + type: string + type: object + type: object + resources: + description: Resources defines resource required by TiDB. + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines the server configuration of TiDB. + properties: + ports: + description: Port defines all ports listened by TiDB. + properties: + client: + description: Client defines port for TiDB's SQL service. + properties: + port: + format: int32 + type: integer + required: + - port + type: object + status: + description: Status defines port for TiDB status API. + properties: + port: + format: int32 + type: integer + required: + - port + type: object + type: object + type: object + slowLog: + description: |- + SlowLog defines the separate slow log configuration for TiDB. + When enabled, a sidecar container will be created to output the slow log to its stdout. + properties: + disable: + description: |- + Disabled indicates whether the separate slow log is disabled. + Defaults to false. In other words, the separate slow log is enabled by default. 
+ type: boolean + image: + description: |- + Image to tail slowlog to stdout + Default is busybox:1.37.0 + type: string + resources: + description: ResourceRequirements defines the resource + requirements for the slow log sidecar. + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + volumeName: + description: |- + VolumeName is the name of the volume used to share the slow log file between the main container and the sidecar. + If not set, a temporary volume will be used. + Otherwise, it should be a name of a volume defined in the `volumes` field of the TiDBTemplateSpec. + type: string + type: object + volumes: + description: Volumes defines data volume of TiDB, it is optional. + items: + description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. + For example, a volume can be mounted for both data and raft log. + properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. + A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. 
+ If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this + volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. + type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. + type: string + required: + - for + - path + - storage + type: object + type: array + required: + - config + type: object + required: + - spec + type: object + tidbAuthToken: + description: |- + Whether to enable the `tidb_auth_token` authentication method. + To enable this feature, a K8s secret named `-tidb-auth-token-jwks-secret` must be created to store the JWKs. + ref: https://docs.pingcap.com/tidb/stable/security-compatibility-with-mysql#tidb_auth_token + Defaults to false. + properties: + enabled: + description: |- + Enabled indicates whether the `tidb_auth_token` authentication method is enabled. + Defaults to false. + type: boolean + type: object + tlsClient: + description: Whether to enable the TLS connection between the TiDB server + and MySQL client. + properties: + disableClientAuthn: + description: |- + DisableClientAuthn will skip client's certificate validation from the TiDB server. 
+ Optional: defaults to false + type: boolean + enabled: + description: |- + When enabled, TiDB will accept TLS encrypted connections from MySQL clients. + The steps to enable this feature: + 1. Generate a TiDB server-side certificate and a client-side certificate for the TiDB cluster. + There are multiple ways to generate certificates: + - user-provided certificates: https://docs.pingcap.com/tidb/stable/generate-self-signed-certificates + - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ + - or use cert-manager signed certificates: https://cert-manager.io/ + 2. Create a K8s Secret object which contains the TiDB server-side certificate created above. + The name of this Secret must be: --server-secret. + kubectl create secret generic --server-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + 3. Create a K8s Secret object which contains the TiDB client-side certificate created above which will be used by TiDB Operator. + The name of this Secret must be: --client-secret. + kubectl create secret generic --client-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + 4. Set Enabled to `true`. + type: boolean + skipInternalClientCA: + description: |- + SkipInternalClientCA will skip TiDB server's certificate validation for internal components like Initializer, Dashboard, etc. + Optional: defaults to false + type: boolean + type: object + version: + type: string + required: + - cluster + - replicas + - template + - version + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. 
+ items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentReplicas: + description: |- + CurrentReplicas is the number of Instances created by the Group controller from the Group version + indicated by currentRevision. + format: int32 + type: integer + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + readyReplicas: + description: ReadyReplicas is the number of Instances created for + this ComponentGroup with a Ready Condition. + format: int32 + type: integer + replicas: + description: Replicas is the number of Instance created by the controller. + format: int32 + type: integer + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. + type: string + updatedReplicas: + description: |- + UpdatedReplicas is the number of Instances created by the Group controller from the Group version + indicated by updateRevision. + format: int32 + type: integer + version: + description: |- + Version is the version of all instances in the group. + It will be same as the `spec.version` only when all instances are upgraded to the desired version. 
+ type: string + required: + - replicas + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_tidbs.yaml b/manifests/crd/core.pingcap.com_tidbs.yaml new file mode 100644 index 00000000000..bc079490272 --- /dev/null +++ b/manifests/crd/core.pingcap.com_tidbs.yaml @@ -0,0 +1,343 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tidbs.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tidb + kind: TiDB + listKind: TiDBList + plural: tidbs + singular: tidb + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.conditions[?(@.type=="Health")].status + name: Healthy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TiDB defines a TiDB instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + cluster: + properties: + name: + type: string + required: + - name + type: object + config: + description: Config defines config file of TiDB. 
+ type: string + image: + description: |- + Image is tidb's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/tidb + type: string + overlay: + description: |- + Overlay defines a k8s native resource template patch. + All resources(pod, pvcs, ...) managed by TiDB can be overlayed by this field. + x-kubernetes-preserve-unknown-fields: true + probes: + description: Probes defines probes for TiDB. + properties: + readiness: + description: |- + Readiness defines the readiness probe for TiDB. + The default handler is a TCP socket on the client port. + properties: + type: + description: |- + "tcp" will use TCP socket to connect component port. + "command" will probe the status api of tidb. + enum: + - tcp + - command + type: string + type: object + type: object + resources: + description: Resources defines resource required by TiDB. + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines the server configuration of TiDB. + properties: + ports: + description: Port defines all ports listened by TiDB. + properties: + client: + description: Client defines port for TiDB's SQL service. + properties: + port: + format: int32 + type: integer + required: + - port + type: object + status: + description: Status defines port for TiDB status API. + properties: + port: + format: int32 + type: integer + required: + - port + type: object + type: object + type: object + slowLog: + description: |- + SlowLog defines the separate slow log configuration for TiDB. 
+ When enabled, a sidecar container will be created to output the slow log to its stdout. + properties: + disable: + description: |- + Disabled indicates whether the separate slow log is disabled. + Defaults to false. In other words, the separate slow log is enabled by default. + type: boolean + image: + description: |- + Image to tail slowlog to stdout + Default is busybox:1.37.0 + type: string + resources: + description: ResourceRequirements defines the resource requirements + for the slow log sidecar. + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + volumeName: + description: |- + VolumeName is the name of the volume used to share the slow log file between the main container and the sidecar. + If not set, a temporary volume will be used. + Otherwise, it should be a name of a volume defined in the `volumes` field of the TiDBTemplateSpec. + type: string + type: object + subdomain: + description: |- + Subdomain means the subdomain of the exported pd dns. + A same pd cluster will use a same subdomain + type: string + topology: + additionalProperties: + type: string + description: |- + Topology defines the topology domain of this TiDB instance. + It will be translated into a node affinity config. + Topology cannot be changed. + type: object + version: + description: Version specifies the TiDB version. + type: string + volumes: + description: Volumes defines data volume of TiDB, it is optional. + items: + description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. 
+ For example, a volume can be mounted for both data and raft log. + properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. + A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. + If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. + type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. + type: string + required: + - for + - path + - storage + type: object + type: array + required: + - cluster + - config + - subdomain + - version + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. 
The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_tiflashes.yaml b/manifests/crd/core.pingcap.com_tiflashes.yaml new file mode 100644 index 00000000000..16ccdfdad58 --- /dev/null +++ b/manifests/crd/core.pingcap.com_tiflashes.yaml @@ -0,0 +1,349 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tiflashes.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tiflash + kind: TiFlash + listKind: TiFlashList + plural: tiflashes + singular: tiflash + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.id + name: StoreID + type: string + - jsonPath: .status.state + name: StoreState + type: string + - jsonPath: .status.conditions[?(@.type=="Health")].status + name: Healthy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TiFlash defines a TiFlash instance + properties: + apiVersion: + description: |- + APIVersion defines the versioned 
schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + cluster: + description: Cluster is a reference of tidb cluster + properties: + name: + type: string + required: + - name + type: object + config: + description: Config defines config file of TiFlash + type: string + image: + description: |- + Image is tiflash's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/tiflash + type: string + logTailer: + description: |- + LogTailer defines the sidecar log tailer config of TiFlash. + We always use sidecar to tail the log of TiFlash now. + properties: + image: + description: |- + Image to tail log to stdout + Default is busybox:1.37.0 + type: string + resources: + description: ResourceRequirements defines the resource requirements + for the log sidecar. + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + overlay: + description: |- + Overlay defines a k8s native resource template patch + All resources(pod, pvcs, ...) 
managed by TiFlash can be overlayed by this field + x-kubernetes-preserve-unknown-fields: true + proxyConfig: + description: ProxyConfig defines config file of TiFlash proxy + type: string + resources: + description: Resources defines resource required by TiFlash + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines the server config of TiFlash + properties: + ports: + description: Ports defines all ports listened by tiflash + properties: + flash: + description: Port defines a listen port + properties: + port: + format: int32 + type: integer + required: + - port + type: object + metrics: + description: Port defines a listen port + properties: + port: + format: int32 + type: integer + required: + - port + type: object + proxy: + description: Port defines a listen port + properties: + port: + format: int32 + type: integer + required: + - port + type: object + proxyStatus: + description: Port defines a listen port + properties: + port: + format: int32 + type: integer + required: + - port + type: object + type: object + type: object + subdomain: + description: |- + Subdomain means the subdomain of the exported TiFlash dns. 
+ A same TiFlash group will use a same subdomain + type: string + topology: + additionalProperties: + type: string + description: |- + Topology defines the topology domain of this pd instance + It will be translated into a node affinity config + Topology cannot be changed + type: object + version: + description: Version specifies the TiFlash version + type: string + volumes: + description: Volumes defines data volume of TiFlash + items: + description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. + For example, a volume can be mounted for both data and raft log. + properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. + A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. + If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. 
+ type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. + type: string + required: + - for + - path + - storage + type: object + type: array + required: + - cluster + - config + - subdomain + - version + - volumes + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + id: + description: Store ID + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + state: + description: Store State + type: string + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_tiflashgroups.yaml b/manifests/crd/core.pingcap.com_tiflashgroups.yaml new file mode 100644 index 00000000000..73d1bd21dbc --- /dev/null +++ b/manifests/crd/core.pingcap.com_tiflashgroups.yaml @@ -0,0 +1,442 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tiflashgroups.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tg + kind: TiFlashGroup + listKind: TiFlashGroupList + plural: tiflashgroups + singular: tiflashgroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TiFlashGroup defines a group of similar TiFlash instances + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + cluster: + properties: + name: + type: string + required: + - name + type: object + configUpdateStrategy: + default: RollingUpdate + description: |- + ConfigUpdateStrategy determines how the configuration change is applied to the cluster. + Valid values are "RollingUpdate" (by default) and "InPlace". + enum: + - RollingUpdate + - InPlace + type: string + replicas: + format: int32 + type: integer + schedulePolicies: + items: + description: SchedulePolicy defines how instances of the group schedules + its pod. + properties: + evenlySpread: + properties: + topologies: + description: All instances of a group will evenly spread + in different topologies + items: + properties: + topology: + additionalProperties: + type: string + description: Topology means the topo for scheduling + type: object + weight: + description: |- + Weight defines how many pods will be scheduled to this topo + default is 1 + format: int32 + type: integer + required: + - topology + type: object + type: array + required: + - topologies + type: object + type: + type: string + required: + - type + type: object + type: array + template: + properties: + metadata: + description: |- + ObjectMeta is defined for replacing the embedded metav1.ObjectMeta + Now only labels and annotations are allowed + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names + type: string + type: object + spec: + properties: + config: + description: Config defines config file of TiFlash + type: string + image: + description: |- + Image is tiflash's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/tiflash + type: string + logTailer: + description: |- + LogTailer defines the sidecar log tailer config of TiFlash. + We always use sidecar to tail the log of TiFlash now. + properties: + image: + description: |- + Image to tail log to stdout + Default is busybox:1.37.0 + type: string + resources: + description: ResourceRequirements defines the resource + requirements for the log sidecar.
+ properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + overlay: + description: |- + Overlay defines a k8s native resource template patch + All resources(pod, pvcs, ...) managed by TiFlash can be overlayed by this field + x-kubernetes-preserve-unknown-fields: true + proxyConfig: + description: ProxyConfig defines config file of TiFlash proxy + type: string + resources: + description: Resources defines resource required by TiFlash + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines the server config of TiFlash + properties: + ports: + description: Ports defines all ports listened by tiflash + properties: + flash: + description: Port defines a listen port + properties: + port: + format: int32 + type: integer + required: + - port + type: object + metrics: + description: Port defines a listen port + properties: + port: + format: int32 + type: integer + required: + - port + type: object + proxy: + description: Port defines a listen port + properties: + port: + format: int32 + type: integer + required: + - port + type: object + proxyStatus: + description: Port defines a listen port + properties: + port: + format: int32 + type: 
integer + required: + - port + type: object + type: object + type: object + volumes: + description: Volumes defines data volume of TiFlash + items: + description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. + For example, a volume can be mounted for both data and raft log. + properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. + A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. + If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this + volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. + type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. 
+ type: string + required: + - for + - path + - storage + type: object + type: array + required: + - config + - volumes + type: object + required: + - spec + type: object + version: + type: string + required: + - cluster + - replicas + - template + - version + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentReplicas: + description: |- + CurrentReplicas is the number of Instances created by the Group controller from the Group version + indicated by currentRevision. + format: int32 + type: integer + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + readyReplicas: + description: ReadyReplicas is the number of Instances created for + this ComponentGroup with a Ready Condition. + format: int32 + type: integer + replicas: + description: Replicas is the number of Instance created by the controller. + format: int32 + type: integer + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. + type: string + updatedReplicas: + description: |- + UpdatedReplicas is the number of Instances created by the Group controller from the Group version + indicated by updateRevision. + format: int32 + type: integer + version: + description: |- + Version is the version of all instances in the group. + It will be same as the `spec.version` only when all instances are upgraded to the desired version. 
+ type: string + required: + - replicas + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_tikvgroups.yaml b/manifests/crd/core.pingcap.com_tikvgroups.yaml new file mode 100644 index 00000000000..facd86b086d --- /dev/null +++ b/manifests/crd/core.pingcap.com_tikvgroups.yaml @@ -0,0 +1,411 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tikvgroups.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tg + kind: TiKVGroup + listKind: TiKVGroupList + plural: tikvgroups + singular: tikvgroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TiKVGroup defines a group of similar TiKV instances + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TiKVGroupSpec describes the common attributes of a TiKVGroup + properties: + cluster: + properties: + name: + type: string + required: + - name + type: object + configUpdateStrategy: + default: RollingUpdate + description: |- + ConfigUpdateStrategy determines how the configuration change is applied to the cluster. + Valid values are "RollingUpdate" (by default) and "InPlace". + enum: + - RollingUpdate + - InPlace + type: string + mountClusterClientSecret: + description: MountClusterClientSecret indicates whether to mount `cluster-client-secret` + to the Pod. + type: boolean + replicas: + format: int32 + type: integer + schedulePolicies: + items: + description: SchedulePolicy defines how instances of the group schedules + its pod. + properties: + evenlySpread: + properties: + topologies: + description: All instances of a group will evenly spread + in different topologies + items: + properties: + topology: + additionalProperties: + type: string + description: Topology means the topo for scheduling + type: object + weight: + description: |- + Weight defines how many pods will be scheduled to this topo + default is 1 + format: int32 + type: integer + required: + - topology + type: object + type: array + required: + - topologies + type: object + type: + type: string + required: + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + template: + properties: + metadata: + description: |- + ObjectMeta is defined for replacing the embedded metav1.ObjectMeta + Now only labels and annotations are allowed + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata.
They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names + type: string + type: object + spec: + description: TiKVTemplateSpec can only be specified in TiKVGroup + properties: + config: + description: Config defines config file of TiKV + type: string + image: + description: |- + Image is tikv's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/tikv + type: string + overlay: + description: |- + Overlay defines a k8s native resource template patch + All resources(pod, pvcs, ...) 
managed by TiKV can be overlayed by this field + x-kubernetes-preserve-unknown-fields: true + preStop: + description: PreStop defines preStop config + properties: + image: + description: |- + Image of pre stop checker + Default is pingcap/prestop-checker:latest + type: string + type: object + resources: + description: Resources defines resource required by TiKV + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines the server config of TiKV + properties: + ports: + description: Ports defines all ports listened by tikv + properties: + client: + description: Client defines port for tikv's api service + properties: + port: + format: int32 + type: integer + required: + - port + type: object + peer: + description: Status defines port for tikv status api + properties: + port: + format: int32 + type: integer + required: + - port + type: object + type: object + type: object + volumes: + description: Volumes defines data volume of TiKV + items: + description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. + For example, a volume can be mounted for both data and raft log. + properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. 
+ A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. + If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this + volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. + type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. + type: string + required: + - for + - path + - storage + type: object + type: array + required: + - config + - volumes + type: object + required: + - spec + type: object + version: + type: string + required: + - cluster + - replicas + - template + - version + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentReplicas: + description: |- + CurrentReplicas is the number of Instances created by the Group controller from the Group version + indicated by currentRevision. 
+ format: int32 + type: integer + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + readyReplicas: + description: ReadyReplicas is the number of Instances created for + this ComponentGroup with a Ready Condition. + format: int32 + type: integer + replicas: + description: Replicas is the number of Instance created by the controller. + format: int32 + type: integer + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. + type: string + updatedReplicas: + description: |- + UpdatedReplicas is the number of Instances created by the Group controller from the Group version + indicated by updateRevision. + format: int32 + type: integer + version: + description: |- + Version is the version of all instances in the group. + It will be same as the `spec.version` only when all instances are upgraded to the desired version. 
+ type: string + required: + - replicas + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/crd/core.pingcap.com_tikvs.yaml b/manifests/crd/core.pingcap.com_tikvs.yaml new file mode 100644 index 00000000000..6e0e7089a81 --- /dev/null +++ b/manifests/crd/core.pingcap.com_tikvs.yaml @@ -0,0 +1,309 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tikvs.core.pingcap.com +spec: + group: core.pingcap.com + names: + categories: + - tikv + kind: TiKV + listKind: TiKVList + plural: tikvs + singular: tikv + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.id + name: StoreID + type: string + - jsonPath: .status.state + name: StoreState + type: string + - jsonPath: .status.conditions[?(@.type=="Health")].status + name: Healthy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TiKV defines a TiKV instance + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + cluster: + description: Cluster is a reference of tidb cluster + properties: + name: + type: string + required: + - name + type: object + config: + description: Config defines config file of TiKV + type: string + image: + description: |- + Image is tikv's image + If tag is omitted, version will be used as the image tag. + Default is pingcap/tikv + type: string + overlay: + description: |- + Overlay defines a k8s native resource template patch + All resources(pod, pvcs, ...) managed by TiKV can be overlayed by this field + x-kubernetes-preserve-unknown-fields: true + preStop: + description: PreStop defines preStop config + properties: + image: + description: |- + Image of pre stop checker + Default is pingcap/prestop-checker:latest + type: string + type: object + resources: + description: Resources defines resource required by TiKV + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + server: + description: Server defines the server config of TiKV + properties: + ports: + description: Ports defines all ports listened by tikv + properties: + client: + description: Client defines port for tikv's api service + properties: + port: + format: int32 + type: integer + required: + - port + type: object + peer: + description: Status defines port for tikv status api + properties: + port: + format: int32 + type: integer + required: + - port + type: object + type: object + type: object + subdomain: + description: 
|- + Subdomain means the subdomain of the exported tikv dns. + A same tikv group will use a same subdomain + type: string + topology: + additionalProperties: + type: string + description: |- + Topology defines the topology domain of this pd instance + It will be translated into a node affinity config + Topology cannot be changed + type: object + version: + description: Version specifies the TiKV version + type: string + volumes: + description: Volumes defines data volume of TiKV + items: + description: |- + Volume defines a persistent volume, it will be mounted at a specified root path + A volume can be mounted for multiple different usages. + For example, a volume can be mounted for both data and raft log. + properties: + for: + description: |- + For defines the usage of this volume + At least one usage is needed for a new volume + items: + properties: + subPath: + description: |- + SubPath is the relative path of the volume's mount path. + The default value of sub path is determined by the usage type. + type: string + type: + description: |- + Type is a usage type of the volume. + A volume can be defined for multiple usages. + type: string + required: + - type + type: object + type: array + name: + description: |- + Name is volume name. + If not specified, the PVC name will be "{component}-{podName}" + type: string + path: + description: Path is mount path of this volume + type: string + storage: + anyOf: + - type: integer + - type: string + description: Storage defines the request size of this volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClassName: + description: |- + StorageClassName means the storage class the volume used. + You can modify volumes' attributes by changing the StorageClass + when VolumeAttributesClass is not available. + Note that only newly created PV will use the new StorageClass. 
+ type: string + volumeAttributesClassName: + description: |- + VolumeAttributesClassName means the VolumeAttributesClass the volume used. + You can modify volumes' attributes by changing it. + This feature is introduced since K8s 1.29 as alpha feature and disabled by default. + It's only available when the feature is enabled. + type: string + required: + - for + - path + - storage + type: object + type: array + required: + - cluster + - config + - subdomain + - version + - volumes + type: object + status: + properties: + collisionCount: + description: |- + CollisionCount is the count of hash collisions. The controller + uses this field as a collision avoidance mechanism when it needs to create the name for the + newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Conditions contain details of the current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + currentRevision: + description: CurrentRevision is the revision of the Controller that + created the resource. + type: string + id: + description: Store ID + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed by the controller. + It's used to determine whether the controller has reconciled the latest spec. + format: int64 + type: integer + state: + description: Store State + type: string + updateRevision: + description: UpdateRevision is the revision of the Controller that + should modify the resource. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/manifests/deploy/deploy.yaml b/manifests/deploy/deploy.yaml new file mode 100644 index 00000000000..5311e78fc87 --- /dev/null +++ b/manifests/deploy/deploy.yaml @@ -0,0 +1,49 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tidb-operator + namespace: tidb-admin +spec: + selector: + matchLabels: + app.kubernetes.io/name: tidb-operator + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: tidb-operator + spec: + securityContext: + runAsNonRoot: true + containers: + - command: + - /operator + image: pingcap/operator:latest + imagePullPolicy: IfNotPresent + name: operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: tidb-operator + terminationGracePeriodSeconds: 10 diff --git a/manifests/rbac/namespace.yaml b/manifests/rbac/namespace.yaml new file mode 100644 index 00000000000..2389d489b89 --- /dev/null +++ b/manifests/rbac/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: tidb-admin diff --git a/manifests/rbac/role.yaml b/manifests/rbac/role.yaml new file mode 100644 index 00000000000..f4b3f39210c --- /dev/null +++ b/manifests/rbac/role.yaml @@ -0,0 +1,131 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tidb-operator +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - 
delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - events + - persistentvolumeclaims + - pods + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - nodes + - persistentvolumes + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - core.pingcap.com + resources: + - clusters + - pdgroups/status + - tidbgroups/status + - tiflashgroups/status + - tikvgroups/status + verbs: + - get + - list + - update + - watch +- apiGroups: + - core.pingcap.com + resources: + - clusters/status + verbs: + - get + - patch + - update +- apiGroups: + - core.pingcap.com + resources: + - pdgroups + - tidbgroups + - tiflashgroups + - tikvgroups + verbs: + - delete + - get + - list + - update + - watch +- apiGroups: + - core.pingcap.com + resources: + - pds + - tidbs + - tiflashes + - tikvs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - core.pingcap.com + resources: + - pds/status + - tidbs/status + - tiflashes/status + - tikvs/status + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + - volumeattributesclasses + verbs: + - get + - list + - watch diff --git a/manifests/rbac/role_binding.yaml b/manifests/rbac/role_binding.yaml new file mode 100644 index 00000000000..e6ba8e5b123 --- /dev/null +++ b/manifests/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tidb-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tidb-operator +subjects: +- kind: ServiceAccount + name: tidb-operator + namespace: tidb-admin diff --git a/manifests/rbac/service_account.yaml b/manifests/rbac/service_account.yaml new file mode 100644 index 00000000000..3abf5840ce0 --- /dev/null +++ b/manifests/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: 
v1
+kind: ServiceAccount
+metadata:
+  name: tidb-operator
+  namespace: tidb-admin
diff --git a/pkg/action/upgrader.go b/pkg/action/upgrader.go
new file mode 100644
index 00000000000..7bf9b80820b
--- /dev/null
+++ b/pkg/action/upgrader.go
@@ -0,0 +1,156 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package action
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Masterminds/semver/v3"
+	"github.com/go-logr/logr"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+)
+
+// UpgradePolicy decides whether a component group is allowed to start upgrading.
+type UpgradePolicy interface {
+	// ArePreconditionsMet reports whether the given group may begin its upgrade now.
+	ArePreconditionsMet(ctx context.Context, cli client.Client, group v1alpha1.Group) (bool, error)
+}
+
+var (
+	_ UpgradePolicy = &defaultPolicy{}
+	_ UpgradePolicy = &noConstraint{}
+)
+
+// defaultPolicy enforces the component upgrade order: a group may upgrade only
+// after all the groups it depends on have reached the desired version.
+type defaultPolicy struct{}
+
+func (defaultPolicy) ArePreconditionsMet(ctx context.Context, cli client.Client, group v1alpha1.Group) (bool, error) {
+	groups, err := getDependentGroups(ctx, cli, group)
+	if err != nil {
+		return false, fmt.Errorf("cannot get dependent groups for %s/%s: %w", group.GetNamespace(), group.GetName(), err)
+	}
+	return areGroupsUpgraded(group.GetDesiredVersion(), groups)
+}
+
+// getDependentGroups returns the groups that the given group depends on when
+// upgrading, i.e. the groups that must finish upgrading before this one starts.
+func getDependentGroups(ctx context.Context, cli client.Client, group v1alpha1.Group) (groups []v1alpha1.Group, err error) {
+	switch group.ComponentKind() {
+	case v1alpha1.ComponentKindPD:
+		// PD is upgraded first; it has no prerequisite groups.
+
+	case v1alpha1.ComponentKindTiDB:
+		// TiDB is upgraded after TiKV.
+		var kvgList v1alpha1.TiKVGroupList
+		if err = cli.List(ctx, &kvgList, client.InNamespace(group.GetNamespace()), client.MatchingLabels{
+			v1alpha1.LabelKeyCluster:   group.GetClusterName(),
+			v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV,
+		}); err != nil {
+			return nil, fmt.Errorf("cannot list TiKVGroups: %w", err)
+		}
+		groups = kvgList.ToSlice()
+
+	case v1alpha1.ComponentKindTiKV:
+		// TiKV is upgraded after TiFlash.
+		var tiflashGroupList v1alpha1.TiFlashGroupList
+		if err = cli.List(ctx, &tiflashGroupList, client.InNamespace(group.GetNamespace()), client.MatchingLabels{
+			v1alpha1.LabelKeyCluster:   group.GetClusterName(),
+			v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash,
+		}); err != nil {
+			return nil, fmt.Errorf("cannot list TiFlashGroups: %w", err)
+		}
+		groups = tiflashGroupList.ToSlice()
+		// If there is no TiFlashGroup, should check PDGroups.
+		if len(groups) == 0 {
+			// listPDGroups already wraps its error with context; don't wrap again.
+			if groups, err = listPDGroups(ctx, cli, group.GetNamespace(), group.GetClusterName()); err != nil {
+				return nil, err
+			}
+		}
+
+	case v1alpha1.ComponentKindTiFlash:
+		// TiFlash is upgraded after PD.
+		if groups, err = listPDGroups(ctx, cli, group.GetNamespace(), group.GetClusterName()); err != nil {
+			return nil, err
+		}
+	}
+
+	return groups, nil
+}
+
+// listPDGroups lists all PDGroups of the given cluster in the given namespace.
+func listPDGroups(ctx context.Context, cli client.Client, ns, clusterName string) ([]v1alpha1.Group, error) {
+	var pdgList v1alpha1.PDGroupList
+	if err := cli.List(ctx, &pdgList, client.InNamespace(ns), client.MatchingLabels{
+		v1alpha1.LabelKeyCluster:   clusterName,
+		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD,
+	}); err != nil {
+		return nil, fmt.Errorf("cannot list PDGroups: %w", err)
+	}
+	return pdgList.ToSlice(), nil
+}
+
+// areGroupsUpgraded checks if all groups are healthy, up to date, and at a
+// version greater than or equal to the desired version.
+func areGroupsUpgraded(version string, groups []v1alpha1.Group) (bool, error) {
+	desiredVer, err := semver.NewVersion(version)
+	if err != nil {
+		// Wrap with %w so callers can inspect the underlying parse error.
+		return false, fmt.Errorf("cannot parse the desired version %s: %w", version, err)
+	}
+
+	for _, group := range groups {
+		v, e := semver.NewVersion(group.GetActualVersion())
+		if e != nil {
+			return false, fmt.Errorf("cannot parse the group status version %s: %w", group.GetActualVersion(), e)
+		}
+		if !v.GreaterThanEqual(desiredVer) || !v1alpha1.IsGroupHealthyAndUpToDate(group) {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+// noConstraint allows upgrading unconditionally.
+type noConstraint struct{}
+
+func (noConstraint) ArePreconditionsMet(_ context.Context, _ client.Client, _ v1alpha1.Group) (bool, error) {
+	return true, nil
+}
+
+// UpgradeChecker reports whether a group may start upgrading now.
+type UpgradeChecker interface {
+	CanUpgrade(context.Context, v1alpha1.Group) bool
+}
+
+type upgradeChecker struct {
+	cli    client.Client
+	logger logr.Logger
+	policy UpgradePolicy
+}
+
+// NewUpgradeChecker builds an UpgradeChecker honoring the cluster's upgrade
+// policy; an unrecognized policy falls back to the default one.
+func NewUpgradeChecker(cli client.Client, cluster *v1alpha1.Cluster, logger logr.Logger) UpgradeChecker {
+	var policy UpgradePolicy
+	switch cluster.Spec.UpgradePolicy {
+	case v1alpha1.UpgradePolicyNoConstraints:
+		policy = &noConstraint{}
+	case v1alpha1.UpgradePolicyDefault:
+		policy = &defaultPolicy{}
+	default:
+		logger.Info("unknown upgrade policy, use the default one", "policy", cluster.Spec.UpgradePolicy)
+		policy = &defaultPolicy{}
+	}
+	return &upgradeChecker{cli: cli, logger: logger, policy: policy}
+}
+
+// CanUpgrade returns true when the policy's preconditions are met. Errors are
+// logged and yield false (the policy returns false alongside a non-nil error).
+func (c *upgradeChecker) CanUpgrade(ctx context.Context, group v1alpha1.Group) bool {
+	yes, err := c.policy.ArePreconditionsMet(ctx, c.cli, group)
+	if err != nil {
+		c.logger.Error(err, "failed to check preconditions for upgrading", "group_ns",
+			group.GetNamespace(), "group_name", group.GetName(), "component", group.ComponentKind())
+	}
+	return yes
+}
diff --git a/pkg/action/upgrader_test.go b/pkg/action/upgrader_test.go
new file mode 100644
index 00000000000..f59a0fae66d
--- /dev/null
+++ b/pkg/action/upgrader_test.go
@@ -0,0 +1,185 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package action + +import ( + "context" + "reflect" + "testing" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +func Test_areGroupsUpgraded(t *testing.T) { + tests := []struct { + name string + version string + groups []v1alpha1.Group + want bool + wantError bool + }{ + { + name: "invalid version", + version: "foo", + groups: []v1alpha1.Group{}, + want: false, + wantError: true, + }, + { + name: "no groups", + version: "v8.1.0", + groups: []v1alpha1.Group{}, + want: true, + wantError: false, + }, + { + name: "all groups upgraded to the same version", + version: "v8.1.0", + groups: []v1alpha1.Group{ + &v1alpha1.FakeGroup{Healthy: true, ActualVersion: "v8.1.0"}, + &v1alpha1.FakeGroup{Healthy: true, ActualVersion: "v8.1.0"}, + }, + want: true, + wantError: false, + }, + { + name: "all groups upgraded to the newer version", + version: "v8.1.0", + groups: []v1alpha1.Group{ + &v1alpha1.FakeGroup{Healthy: true, ActualVersion: "v8.1.1"}, + &v1alpha1.FakeGroup{Healthy: true, ActualVersion: "v8.1.1"}, + }, + want: true, + wantError: false, + }, + { + name: "one group not upgraded", + version: "v6.5.1", + groups: []v1alpha1.Group{ + &v1alpha1.FakeGroup{Healthy: true, ActualVersion: "v6.5.1"}, + &v1alpha1.FakeGroup{Healthy: true, ActualVersion: "v6.5.0"}, + }, + want: false, + wantError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := areGroupsUpgraded(tt.version, tt.groups) + if (err != nil) != tt.wantError { + t.Errorf("areGroupsUpgraded() error = %v, wantError %v", err, tt.wantError) + return + } + if got != tt.want { + t.Errorf("areGroupsUpgraded() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getDependentGroups(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + group v1alpha1.Group + wantGroups []v1alpha1.Group + wantErr bool + }{ + { + name: "pd: no dependent 
groups", + group: &v1alpha1.FakeGroup{ComponentKindVal: v1alpha1.ComponentKindPD}, + }, + { + name: "tidb depends on tikv but not found tikv groups", + group: &v1alpha1.FakeGroup{ComponentKindVal: v1alpha1.ComponentKindTiDB}, + wantGroups: []v1alpha1.Group{}, + }, + { + name: "tikv depends on tiflash when has tiflash", + existingObjs: []client.Object{ + fake.FakeObj[v1alpha1.TiFlashGroup]("tiflash", + fake.SetNamespace[v1alpha1.TiFlashGroup]("test"), + fake.Label[v1alpha1.TiFlashGroup](v1alpha1.LabelKeyCluster, "tc"), + fake.Label[v1alpha1.TiFlashGroup](v1alpha1.LabelKeyComponent, v1alpha1.LabelValComponentTiFlash), + ), + fake.FakeObj[v1alpha1.PDGroup]("pd", + fake.SetNamespace[v1alpha1.PDGroup]("test"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyCluster, "tc"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyComponent, v1alpha1.LabelValComponentPD), + ), + }, + group: &v1alpha1.FakeGroup{ComponentKindVal: v1alpha1.ComponentKindTiKV, Namespace: "test", ClusterName: "tc"}, + wantGroups: []v1alpha1.Group{ + fake.FakeObj[v1alpha1.TiFlashGroup]("tiflash", + fake.SetNamespace[v1alpha1.TiFlashGroup]("test"), + fake.Label[v1alpha1.TiFlashGroup](v1alpha1.LabelKeyCluster, "tc"), + fake.Label[v1alpha1.TiFlashGroup](v1alpha1.LabelKeyComponent, v1alpha1.LabelValComponentTiFlash), + ), + }, + }, + { + name: "tikv depends on pd when has no tiflash", + existingObjs: []client.Object{ + fake.FakeObj[v1alpha1.PDGroup]("pd", + fake.SetNamespace[v1alpha1.PDGroup]("test"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyCluster, "tc"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyComponent, v1alpha1.LabelValComponentPD), + ), + }, + group: &v1alpha1.FakeGroup{ComponentKindVal: v1alpha1.ComponentKindTiKV, Namespace: "test", ClusterName: "tc"}, + wantGroups: []v1alpha1.Group{ + fake.FakeObj[v1alpha1.PDGroup]("pd", + fake.SetNamespace[v1alpha1.PDGroup]("test"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyCluster, "tc"), + 
fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyComponent, v1alpha1.LabelValComponentPD), + ), + }, + }, + { + name: "tiflash depends on pd", + existingObjs: []client.Object{ + fake.FakeObj[v1alpha1.PDGroup]("pd", + fake.SetNamespace[v1alpha1.PDGroup]("test"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyCluster, "tc"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyComponent, v1alpha1.LabelValComponentPD), + ), + }, + group: &v1alpha1.FakeGroup{ComponentKindVal: v1alpha1.ComponentKindTiFlash, Namespace: "test", ClusterName: "tc"}, + wantGroups: []v1alpha1.Group{ + fake.FakeObj[v1alpha1.PDGroup]("pd", + fake.SetNamespace[v1alpha1.PDGroup]("test"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyCluster, "tc"), + fake.Label[v1alpha1.PDGroup](v1alpha1.LabelKeyComponent, v1alpha1.LabelValComponentPD), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cli := client.NewFakeClient(tt.existingObjs...) + gotGroups, err := getDependentGroups(context.TODO(), cli, tt.group) + if (err != nil) != tt.wantErr { + t.Errorf("getDependentGroups() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotGroups, tt.wantGroups) { + t.Errorf("getDependentGroups() gotGroups = %v, want %v", gotGroups, tt.wantGroups) + } + }) + } +} diff --git a/pkg/client/alias.go b/pkg/client/alias.go new file mode 100644 index 00000000000..08b4a1853d9 --- /dev/null +++ b/pkg/client/alias.go @@ -0,0 +1,39 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import "sigs.k8s.io/controller-runtime/pkg/client" + +// Add alias of client.XXX to avoid import +// two client pkgs +type Object = client.Object +type ObjectList = client.ObjectList +type ObjectKey = client.ObjectKey + +type Options = client.Options +type DeleteOption = client.DeleteOption + +type MatchingLabels = client.MatchingLabels +type MatchingFields = client.MatchingFields +type InNamespace = client.InNamespace +type ListOptions = client.ListOptions + +type PropagationPolicy = client.PropagationPolicy + +var ObjectKeyFromObject = client.ObjectKeyFromObject + +var IgnoreNotFound = client.IgnoreNotFound + +type GracePeriodSeconds = client.GracePeriodSeconds diff --git a/pkg/client/client.go b/pkg/client/client.go new file mode 100644 index 00000000000..443f53a658d --- /dev/null +++ b/pkg/client/client.go @@ -0,0 +1,205 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package client
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/managedfields"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/kube-openapi/pkg/util/proto"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/structured-merge-diff/v4/typed"
+
+	"github.com/pingcap/tidb-operator/pkg/scheme"
+)
+
+const (
+	// DefaultFieldManager is the field manager name used for server-side apply patches.
+	DefaultFieldManager = "tidb-operator"
+)
+
+// Client extends controller-runtime's WithWatch client with server-side apply helpers.
+type Client interface {
+	client.WithWatch
+	// Apply server-side applies obj, creating it if it does not exist.
+	Apply(ctx context.Context, obj client.Object) error
+	// ApplyWithResult is like Apply but also reports what happened to the object.
+	ApplyWithResult(ctx context.Context, obj client.Object) (ApplyResult, error)
+}
+
+// ApplyResult describes the outcome of an apply call.
+type ApplyResult int
+
+const (
+	ApplyResultUpdated ApplyResult = iota
+	ApplyResultUnchanged
+	ApplyResultCreated
+)
+
+// applier implements Client. The parser is used to extract the fields
+// previously applied by DefaultFieldManager so unchanged applies can be skipped.
+type applier struct {
+	client.WithWatch
+	parser GVKParser
+}
+
+func (p *applier) Apply(ctx context.Context, obj client.Object) error {
+	_, err := p.ApplyWithResult(ctx, obj)
+	return err
+}
+
+// ApplyWithResult fetches the current object, compares the last applied state
+// (extracted via managed fields) with the desired state, and only issues a
+// server-side apply patch when they differ. Note that obj is mutated: it is
+// overwritten by Get and then by the patch response.
+func (p *applier) ApplyWithResult(ctx context.Context, obj client.Object) (ApplyResult, error) {
+	gvks, _, err := scheme.Scheme.ObjectKinds(obj)
+	if err != nil {
+		return ApplyResultUnchanged, fmt.Errorf("cannot get gvks of the obj %T: %w", obj, err)
+	}
+	if len(gvks) == 0 {
+		return ApplyResultUnchanged, fmt.Errorf("cannot get gvk of obj %T", obj)
+	}
+
+	// Snapshot the desired state before Get overwrites obj.
+	expected, ok := obj.DeepCopyObject().(client.Object)
+	if !ok {
+		panic("it's unreachable")
+	}
+
+	hasCreated := true
+	if err := p.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+		if !errors.IsNotFound(err) {
+			return ApplyResultUnchanged, err
+		}
+
+		hasCreated = false
+	}
+
+	if hasCreated {
+		// Extract only the fields owned by our field manager; other managers'
+		// fields must not influence the equality check.
+		lastApplied := newObject(obj)
+
+		if err := p.Extract(obj, DefaultFieldManager, gvks[0], lastApplied, ""); err != nil {
+			return ApplyResultUnchanged, fmt.Errorf("cannot extract last applied patch: %w", err)
+		}
+
+		// ignore name, namespace and gvk
+		lastApplied.SetName(obj.GetName())
+		lastApplied.SetNamespace(obj.GetNamespace())
+		lastApplied.GetObjectKind().SetGroupVersionKind(expected.GetObjectKind().GroupVersionKind())
+
+		if equality.Semantic.DeepEqual(expected, lastApplied) {
+			return ApplyResultUnchanged, nil
+		}
+	}
+
+	if err := p.Patch(ctx, obj, &applyPatch{
+		expected: expected,
+		gvk:      gvks[0],
+	}, &client.PatchOptions{
+		FieldManager: DefaultFieldManager,
+	}); err != nil {
+		return ApplyResultUnchanged, fmt.Errorf("cannot apply patch: %w", err)
+	}
+
+	if hasCreated {
+		return ApplyResultUpdated, nil
+	}
+
+	return ApplyResultCreated, nil
+}
+
+// Extract writes the fields of current owned by fieldManager into patch,
+// using the parser's schema for gvk.
+func (p *applier) Extract(current client.Object, fieldManager string, gvk schema.GroupVersionKind, patch any, subresource string) error {
+	tpd := p.parser.Type(gvk)
+	if tpd == nil {
+		return fmt.Errorf("can't find specified type: %s", gvk)
+	}
+	if err := managedfields.ExtractInto(current, *tpd, fieldManager, patch, subresource); err != nil {
+		return err
+	}
+	return nil
+}
+
+// applyPatch serializes the expected object as the body of a server-side apply patch.
+type applyPatch struct {
+	expected client.Object
+	gvk      schema.GroupVersionKind
+}
+
+func (*applyPatch) Type() types.PatchType {
+	return types.ApplyPatchType
+}
+
+// Data encodes the expected object to YAML for the apply request body.
+func (p *applyPatch) Data(client.Object) ([]byte, error) {
+	encoder := scheme.Codecs.EncoderForVersion(
+		json.NewSerializerWithOptions(
+			json.DefaultMetaFactory,
+			scheme.Scheme,
+			scheme.Scheme,
+			json.SerializerOptions{
+				Yaml:   true,
+				Pretty: false,
+				Strict: true,
+			}),
+		p.gvk.GroupVersion(),
+	)
+	buf := bytes.Buffer{}
+	if err := encoder.Encode(p.expected, &buf); err != nil {
+		return nil, fmt.Errorf("failed to encode patch: %w", err)
+	}
+	return buf.Bytes(), nil
+}
+
+// GVKParser resolves a GroupVersionKind to its structured-merge-diff parse type.
+type GVKParser interface {
+	Type(gvk schema.GroupVersionKind) *typed.ParseableType
+}
+
+// New builds a Client from a rest.Config, fetching the OpenAPI schema to
+// construct the GVK parser used for managed-fields extraction.
+func New(cfg *rest.Config, opts client.Options) (Client, error) {
+	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	doc, err := dc.OpenAPISchema()
+	if err != nil {
+		return nil, err
+	}
+
+	models, err := proto.NewOpenAPIData(doc)
+	if err != nil {
+		return nil, err
+	}
+
+	parser, err := managedfields.NewGVKParser(models, false)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := client.NewWithWatch(cfg, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	return &applier{
+		WithWatch: c,
+		parser:    parser,
+	}, nil
+}
+
+// newObject returns a fresh zero-valued instance of x's concrete type
+// (x must be a non-nil pointer; a nil input yields nil).
+func newObject(x client.Object) client.Object {
+	if x == nil {
+		return nil
+	}
+	res := reflect.ValueOf(x).Elem()
+	n := reflect.New(res.Type())
+	return n.Interface().(client.Object)
+}
diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go
new file mode 100644
index 00000000000..2fe61da90c6
--- /dev/null
+++ b/pkg/client/client_test.go
@@ -0,0 +1,89 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package client + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +func TestApply(t *testing.T) { + cases := []struct { + desc string + objs []client.Object + obj client.Object + expected client.Object + res ApplyResult + hasErr bool + }{ + { + desc: "apply a new obj", + objs: []client.Object{ + fake.FakeObj[corev1.Pod]("aa"), + }, + obj: fake.FakeObj[corev1.Pod]("bb"), + expected: fake.FakeObj("bb", fake.GVK[corev1.Pod](corev1.SchemeGroupVersion)), + res: ApplyResultCreated, + }, + { + desc: "add label for an existing obj", + objs: []client.Object{ + fake.FakeObj[corev1.Pod]("aa"), + }, + obj: fake.FakeObj("aa", fake.Label[corev1.Pod]("test", "test")), + expected: fake.FakeObj("aa", fake.GVK[corev1.Pod](corev1.SchemeGroupVersion), fake.Label[corev1.Pod]("test", "test")), + res: ApplyResultUpdated, + }, + { + desc: "apply again for an existing obj", + objs: []client.Object{ + fake.FakeObj("aa", fake.Label[corev1.Pod]("test", "test")), + }, + obj: fake.FakeObj("aa", fake.Label[corev1.Pod]("test", "test")), + expected: fake.FakeObj("aa", fake.GVK[corev1.Pod](corev1.SchemeGroupVersion), fake.Label[corev1.Pod]("test", "test")), + res: ApplyResultUnchanged, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + p := NewFakeClient() + for _, obj := range c.objs { + err := p.Apply(context.TODO(), obj) + require.NoError(tt, err) + } + res, err := p.ApplyWithResult(context.TODO(), c.obj) + if c.hasErr { + assert.Error(tt, err) + } else { + require.NoError(tt, err) + assert.Equal(tt, c.res, res) + + c.obj.SetManagedFields(nil) + assert.Equal(tt, c.expected, c.obj) + } + }) + } +} diff --git a/pkg/client/fake.go b/pkg/client/fake.go new file mode 100644 index 00000000000..900319fc314 --- /dev/null +++ 
b/pkg/client/fake.go @@ -0,0 +1,596 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta/testrestmapper" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/testing" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/yaml" + + "github.com/pingcap/tidb-operator/pkg/scheme" +) + +type fakeParser struct{} + +func (*fakeParser) Type(schema.GroupVersionKind) *typed.ParseableType { + return &typed.DeducedParseableType +} + +func NewFakeClient(objs ...client.Object) Client { + c := newFakeUnderlayClient(objs...) 
+ + return &applier{ + WithWatch: c, + parser: &fakeParser{}, + } +} + +type fakeUnderlayClient struct { + testing.Fake + tracker testing.ObjectTracker + scheme *runtime.Scheme + restMapper meta.RESTMapper +} + +var _ client.WithWatch = &fakeUnderlayClient{} + +func newFakeUnderlayClient(objs ...client.Object) client.WithWatch { + t := testing.NewObjectTracker(scheme.Scheme, scheme.Codecs.UniversalDecoder()) + for _, obj := range objs { + if err := t.Add(obj); err != nil { + panic(err) + } + } + mapper := testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme) + + c := fakeUnderlayClient{ + tracker: t, + scheme: scheme.Scheme, + restMapper: mapper, + } + c.AddReactor("*", "*", c.ObjectReaction()) + c.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + w, err := t.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, w, nil + }) + + return &c +} + +func (c *fakeUnderlayClient) Scheme() *runtime.Scheme { + return c.scheme +} + +func (c *fakeUnderlayClient) RESTMapper() meta.RESTMapper { + return c.restMapper +} + +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *fakeUnderlayClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. 
+func (c *fakeUnderlayClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.restMapper) +} + +func (c *fakeUnderlayClient) GroupVersionResourceFor(obj runtime.Object) (schema.GroupVersionResource, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return schema.GroupVersionResource{}, err + } + mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return schema.GroupVersionResource{}, err + } + + return mapping.Resource, nil +} + +func (c *fakeUnderlayClient) Create(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + namespaced, err := apiutil.IsGVKNamespaced(gvk, c.restMapper) + if err != nil { + return err + } + + mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + + var action testing.CreateAction + if namespaced { + action = testing.NewCreateAction(mapping.Resource, obj.GetNamespace(), obj) + } else { + action = testing.NewRootCreateAction(mapping.Resource, obj) + } + newObj, err := c.Invokes(action, nil) + if err != nil { + return err + } + if newObj == nil { + return fmt.Errorf("obj is not handled") + } + + nv := reflect.ValueOf(newObj).Elem() + v := reflect.ValueOf(obj).Elem() + v.Set(nv) + + return nil +} + +func (c *fakeUnderlayClient) Delete(_ context.Context, obj client.Object, opts ...client.DeleteOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + namespaced, err := apiutil.IsGVKNamespaced(gvk, c.restMapper) + if err != nil { + return err + } + + mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + + options := client.DeleteOptions{} + options.ApplyOptions(opts) + + var action testing.DeleteAction + if namespaced { + action = 
testing.NewDeleteActionWithOptions(mapping.Resource, obj.GetNamespace(), obj.GetName(), *options.AsDeleteOptions()) + } else { + action = testing.NewRootDeleteActionWithOptions(mapping.Resource, obj.GetName(), *options.AsDeleteOptions()) + } + + if _, err := c.Invokes(action, nil); err != nil { + return err + } + + return nil +} + +// TODO(liubo02): impl it +func (*fakeUnderlayClient) DeleteAllOf(_ context.Context, _ client.Object, _ ...client.DeleteAllOfOption) error { + return nil +} + +func (c *fakeUnderlayClient) Update(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + namespaced, err := apiutil.IsGVKNamespaced(gvk, c.restMapper) + if err != nil { + return err + } + + mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + + var action testing.UpdateAction + if namespaced { + action = testing.NewUpdateAction(mapping.Resource, obj.GetNamespace(), obj) + } else { + action = testing.NewRootUpdateAction(mapping.Resource, obj) + } + newObj, err := c.Invokes(action, nil) + if err != nil { + return err + } + if newObj == nil { + return fmt.Errorf("obj is not handled") + } + + nv := reflect.ValueOf(newObj).Elem() + v := reflect.ValueOf(obj).Elem() + v.Set(nv) + + return nil +} + +func (c *fakeUnderlayClient) Patch(_ context.Context, obj client.Object, patch client.Patch, _ ...client.PatchOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + namespaced, err := apiutil.IsGVKNamespaced(gvk, c.restMapper) + if err != nil { + return err + } + + mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + var action testing.PatchAction + if namespaced { + action = testing.NewPatchAction(mapping.Resource, obj.GetNamespace(), obj.GetName(), 
patch.Type(), data) + } else { + action = testing.NewRootPatchAction(mapping.Resource, obj.GetName(), patch.Type(), data) + } + newObj, err := c.Invokes(action, nil) + if err != nil { + return err + } + if newObj == nil { + return fmt.Errorf("obj is not handled") + } + + nv := reflect.ValueOf(newObj).Elem() + v := reflect.ValueOf(obj).Elem() + v.Set(nv) + + return nil +} + +func (c *fakeUnderlayClient) Get(_ context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + namespaced, err := apiutil.IsGVKNamespaced(gvk, c.restMapper) + if err != nil { + return err + } + + mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + + var action testing.GetAction + if namespaced { + action = testing.NewGetAction(mapping.Resource, key.Namespace, key.Name) + } else { + action = testing.NewRootGetAction(mapping.Resource, key.Name) + } + + newObj, err := c.Invokes(action, nil) + if err != nil { + return err + } + if newObj == nil { + return fmt.Errorf("obj is not handled") + } + + nv := reflect.ValueOf(newObj).Elem() + v := reflect.ValueOf(obj).Elem() + v.Set(nv) + + return nil +} + +func (c *fakeUnderlayClient) Watch(_ context.Context, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + namespaced, err := apiutil.IsGVKNamespaced(gvk, c.restMapper) + if err != nil { + return nil, err + } + + mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + options := client.ListOptions{} + options.ApplyOptions(opts) + + var action testing.WatchAction + if namespaced { + action = testing.NewWatchAction(mapping.Resource, options.Namespace, *options.AsListOptions()) + } else { + action = testing.NewRootWatchAction(mapping.Resource, 
*options.AsListOptions()) + } + + return c.InvokesWatch(action) +} + +func (c *fakeUnderlayClient) List(_ context.Context, obj client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + itemGVK := gvk.GroupVersion().WithKind(strings.TrimSuffix(gvk.Kind, "List")) + + namespaced, err := apiutil.IsGVKNamespaced(itemGVK, c.restMapper) + if err != nil { + return err + } + + mapping, err := c.restMapper.RESTMapping(itemGVK.GroupKind(), itemGVK.Version) + if err != nil { + return err + } + + options := client.ListOptions{} + options.ApplyOptions(opts) + + var action testing.ListAction + if namespaced { + action = testing.NewListAction(mapping.Resource, itemGVK, options.Namespace, *options.AsListOptions()) + } else { + action = testing.NewRootListAction(mapping.Resource, itemGVK, *options.AsListOptions()) + } + + newObj, err := c.Invokes(action, nil) + if err != nil { + return err + } + if newObj == nil { + return fmt.Errorf("obj is not handled") + } + + nv := reflect.ValueOf(newObj).Elem() + v := reflect.ValueOf(obj).Elem() + v.Set(nv) + + return nil +} + +type SubResourceClient struct { + *fakeUnderlayClient + subResource string +} + +func (c *fakeUnderlayClient) Status() client.SubResourceWriter { + return &SubResourceClient{ + fakeUnderlayClient: c, + subResource: "status", + } +} + +func (c *fakeUnderlayClient) SubResource(sub string) client.SubResourceClient { + return &SubResourceClient{ + fakeUnderlayClient: c, + subResource: sub, + } +} + +func (*SubResourceClient) Create(_ context.Context, _, _ client.Object, _ ...client.SubResourceCreateOption) error { + return nil +} + +func (*SubResourceClient) Update(_ context.Context, _ client.Object, _ ...client.SubResourceUpdateOption) error { + return nil +} + +func (*SubResourceClient) Patch(_ context.Context, _ client.Object, _ client.Patch, _ ...client.SubResourcePatchOption) error { + return nil +} + +func (*SubResourceClient) Get(_ 
context.Context, _, _ client.Object, _ ...client.SubResourceGetOption) error { + return nil +} + +func (c *fakeUnderlayClient) ObjectReaction() testing.ReactionFunc { + return func(action testing.Action) (bool, runtime.Object, error) { + switch action := action.(type) { + case testing.ListActionImpl: + return c.ListReactionFunc(&action) + case testing.PatchActionImpl: + return c.PatchReactionFunc(&action) + default: + return testing.ObjectReaction(c.tracker)(action) + } + } +} + +func (c *fakeUnderlayClient) ListReactionFunc(action *testing.ListActionImpl) (bool, runtime.Object, error) { + obj, err := c.tracker.List(action.GetResource(), action.GetKind(), action.GetNamespace()) + if err != nil { + return true, nil, err + } + + items, err := meta.ExtractList(obj) + if err != nil { + return true, nil, err + } + + filtered, err := filterList(items, action.ListRestrictions.Labels, action.ListRestrictions.Fields) + if err != nil { + return true, nil, err + } + + if err := meta.SetList(obj, filtered); err != nil { + return true, nil, err + } + + return true, obj, nil +} + +// TODO: support field selector +func filterList(objs []runtime.Object, ls labels.Selector, _ fields.Selector) ([]runtime.Object, error) { + out := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + if ls != nil { + if !ls.Matches(labels.Set(m.GetLabels())) { + continue + } + } + out = append(out, obj) + } + return out, nil +} + +//nolint:gocyclo // refactor if possible +func (c *fakeUnderlayClient) PatchReactionFunc(action *testing.PatchActionImpl) (bool, runtime.Object, error) { + gvr := action.GetResource() + ns := action.GetNamespace() + name := action.GetName() + + gvk, err := c.restMapper.KindFor(gvr) + if err != nil { + return true, nil, err + } + + manager, err := managedfields.NewDefaultFieldManager( + managedfields.NewDeducedTypeConverter(), + scheme.Scheme, + scheme.Scheme, + scheme.Scheme, + gvk, + 
gvk.GroupVersion(), + action.Subresource, + nil, + ) + if err != nil { + return true, nil, err + } + + exist := true + + obj, err := c.tracker.Get(gvr, ns, name) + if err != nil { + if !errors.IsNotFound(err) { + return true, nil, err + } + + if action.GetPatchType() == types.ApplyPatchType { + exist = false + newObj, err2 := c.scheme.New(gvk) + if err2 != nil { + return true, nil, err2 + } + obj = newObj + } else { + return true, nil, err + } + } + + old, err := json.Marshal(obj) + if err != nil { + return true, nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err2 := jsonpatch.DecodePatch(action.GetPatch()) + if err2 != nil { + return true, nil, err2 + } + modified, err2 := patch.Apply(old) + if err2 != nil { + return true, nil, err2 + } + + //nolint:gocritic // use := shadow err + if err2 = json.Unmarshal(modified, obj); err2 != nil { + return true, nil, err2 + } + case types.MergePatchType: + modified, err2 := jsonpatch.MergePatch(old, action.GetPatch()) + if err2 != nil { + return true, nil, err2 + } + + //nolint:gocritic // use := shadow err + if err2 = json.Unmarshal(modified, obj); err2 != nil { + return true, nil, err2 + } + case types.StrategicMergePatchType: + mergedByte, err2 := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err2 != nil { + return true, nil, err2 + } + //nolint:gocritic // use := shadow err + if err2 = json.Unmarshal(mergedByte, obj); err2 != nil { + return true, nil, err2 + } + case types.ApplyPatchType: + patchObj := &unstructured.Unstructured{Object: map[string]any{}} + if err = yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil { + return true, nil, fmt.Errorf("error decoding YAML: %w", err) + } + obj, err = 
manager.Apply(obj, patchObj, "tidb-operator", true) + if err != nil { + return true, nil, err + } + + default: + return true, nil, fmt.Errorf("PatchType is not supported") + } + + if !exist { + if err := c.tracker.Create(gvr, obj, ns); err != nil { + return true, nil, err + } + } + + if err := c.tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil +} diff --git a/pkg/client/fake_test.go b/pkg/client/fake_test.go new file mode 100644 index 00000000000..6e76348916a --- /dev/null +++ b/pkg/client/fake_test.go @@ -0,0 +1,134 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +func TestGet(t *testing.T) { + cases := []struct { + desc string + objs []client.Object + obj client.Object + expected client.Object + hasErr bool + }{ + { + desc: "get new obj", + objs: []client.Object{ + fake.FakeObj( + "aa", + fake.Label[corev1.Pod]("test", "test"), + ), + }, + // without label + obj: fake.FakeObj[corev1.Pod]("aa"), + expected: fake.FakeObj( + "aa", + fake.Label[corev1.Pod]("test", "test"), + ), + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + p := NewFakeClient(c.objs...) + err := p.Get(context.TODO(), ObjectKeyFromObject(c.obj), c.obj) + if c.hasErr { + assert.Error(tt, err) + } else { + require.NoError(tt, err) + assert.Equal(tt, c.expected, c.obj) + } + }) + } +} + +func TestList(t *testing.T) { + cases := []struct { + desc string + objs []client.Object + opts []client.ListOption + obj client.ObjectList + expected client.ObjectList + hasErr bool + }{ + { + desc: "list objs", + objs: []client.Object{ + fake.FakeObj("aa", fake.Label[corev1.Pod]("test", "test")), + fake.FakeObj("cc", fake.Label[corev1.Pod]("test", "test")), + fake.FakeObj("bb", fake.Label[corev1.Pod]("test", "test")), + }, + obj: &corev1.PodList{}, + // without label + expected: &corev1.PodList{ + Items: []corev1.Pod{ + *fake.FakeObj("aa", fake.Label[corev1.Pod]("test", "test")), + *fake.FakeObj("bb", fake.Label[corev1.Pod]("test", "test")), + *fake.FakeObj("cc", fake.Label[corev1.Pod]("test", "test")), + }, + }, + }, + { + desc: "list objs with label selector", + objs: []client.Object{ + fake.FakeObj("aa", fake.Label[corev1.Pod]("test", "test")), + fake.FakeObj("cc", fake.Label[corev1.Pod]("test", "test")), + fake.FakeObj("bb", 
fake.Label[corev1.Pod]("test", "not-selected")), + }, + opts: []client.ListOption{ + client.MatchingLabels{"test": "test"}, + }, + obj: &corev1.PodList{}, + // without label + expected: &corev1.PodList{ + Items: []corev1.Pod{ + *fake.FakeObj("aa", fake.Label[corev1.Pod]("test", "test")), + *fake.FakeObj("cc", fake.Label[corev1.Pod]("test", "test")), + }, + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + p := NewFakeClient(c.objs...) + + err := p.List(context.TODO(), c.obj, c.opts...) + if c.hasErr { + assert.Error(tt, err) + } else { + require.NoError(tt, err) + assert.Equal(tt, c.expected, c.obj) + } + }) + } +} diff --git a/pkg/configs/pd/config.go b/pkg/configs/pd/config.go new file mode 100644 index 00000000000..bb9136813c4 --- /dev/null +++ b/pkg/configs/pd/config.go @@ -0,0 +1,190 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pd + +import ( + "fmt" + "path" + "strconv" + "strings" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +const ( + InitialClusterStateNew = "new" + InitialClusterStateExisting = "existing" +) + +// Config is a subset config of pd +// Only our managed fields are defined in here +type Config struct { + Name string `toml:"name"` + DataDir string `toml:"data-dir"` + + ClientUrls string `toml:"client-urls"` + PeerUrls string `toml:"peer-urls"` + AdvertiseClientUrls string `toml:"advertise-client-urls"` + AdvertisePeerUrls string `toml:"advertise-peer-urls"` + + InitialCluster string `toml:"initial-cluster"` + InitialClusterState string `toml:"initial-cluster-state"` + InitialClusterToken string `toml:"initial-cluster-token"` + Join string `toml:"join"` + + Security Security `toml:"security"` +} + +type Security struct { + // CAPath is the path of file that contains list of trusted SSL CAs. + CAPath string `toml:"cacert-path"` + // CertPath is the path of file that contains X509 certificate in PEM format. + CertPath string `toml:"cert-path"` + // KeyPath is the path of file that contains X509 key in PEM format. 
+ KeyPath string `toml:"key-path"` +} + +func (c *Config) Overlay(cluster *v1alpha1.Cluster, pd *v1alpha1.PD, peers []*v1alpha1.PD) error { + if err := c.Validate(); err != nil { + return err + } + + scheme := "http" + if cluster.IsTLSClusterEnabled() { + scheme = "https" + c.Security.CAPath = path.Join(v1alpha1.PDClusterTLSMountPath, corev1.ServiceAccountRootCAKey) + c.Security.CertPath = path.Join(v1alpha1.PDClusterTLSMountPath, corev1.TLSCertKey) + c.Security.KeyPath = path.Join(v1alpha1.PDClusterTLSMountPath, corev1.TLSPrivateKeyKey) + } + + c.Name = pd.Name + c.ClientUrls = getClientURLs(pd, scheme) + c.AdvertiseClientUrls = getAdvertiseClientURLs(pd, scheme) + c.PeerUrls = getPeerURLs(pd, scheme) + c.AdvertisePeerUrls = getAdvertisePeerURLs(pd, scheme) + + for i := range pd.Spec.Volumes { + vol := &pd.Spec.Volumes[i] + for _, usage := range vol.For { + if usage.Type == v1alpha1.VolumeUsageTypePDData { + p := string(usage.Type) + if usage.SubPath != "" { + p = usage.SubPath + } + c.DataDir = path.Join(vol.Path, p) + } + } + } + + c.InitialClusterToken = pd.Spec.Cluster.Name + initialClusterNum, ok := pd.Annotations[v1alpha1.AnnoKeyInitialClusterNum] + if !ok { + c.Join = cluster.Status.PD + } + + if c.Join == "" { + num, err := strconv.ParseInt(initialClusterNum, 10, 32) + if err != nil { + return fmt.Errorf("cannot parse initial cluster num %v: %w", initialClusterNum, err) + } + if num != int64(len(peers)) { + return fmt.Errorf("unexpected number of replicas, expected is %v, current is %v", num, len(peers)) + } + c.InitialCluster = getInitialCluster(peers, scheme) + c.InitialClusterState = InitialClusterStateNew + } + + return nil +} + +func (c *Config) Validate() error { + fields := []string{} + + if c.Name != "" { + fields = append(fields, "name") + } + if c.DataDir != "" { + fields = append(fields, "data-dir") + } + if c.ClientUrls != "" { + fields = append(fields, "client-urls") + } + if c.PeerUrls != "" { + fields = append(fields, "peer-urls") + } + if 
c.AdvertiseClientUrls != "" { + fields = append(fields, "advertise-client-urls") + } + if c.AdvertisePeerUrls != "" { + fields = append(fields, "advertise-peer-urls") + } + + if c.InitialCluster != "" { + fields = append(fields, "initial-cluster") + } + if c.InitialClusterState != "" { + fields = append(fields, "initial-cluster-state") + } + if c.InitialClusterToken != "" { + fields = append(fields, "initial-cluster-token") + } + if c.Join != "" { + fields = append(fields, "join") + } + + if len(fields) == 0 { + return nil + } + + return fmt.Errorf("%v: %w", fields, v1alpha1.ErrFieldIsManagedByOperator) +} + +func getClientURLs(pd *v1alpha1.PD, scheme string) string { + return fmt.Sprintf("%s://[::]:%d", scheme, pd.GetClientPort()) +} + +func getAdvertiseClientURLs(pd *v1alpha1.PD, scheme string) string { + ns := pd.Namespace + if ns == "" { + ns = corev1.NamespaceDefault + } + host := pd.Name + "." + pd.Spec.Subdomain + "." + ns + return fmt.Sprintf("%s://%s:%d", scheme, host, pd.GetClientPort()) +} + +func getPeerURLs(pd *v1alpha1.PD, scheme string) string { + return fmt.Sprintf("%s://[::]:%d", scheme, pd.GetPeerPort()) +} + +func getAdvertisePeerURLs(pd *v1alpha1.PD, scheme string) string { + ns := pd.Namespace + if ns == "" { + ns = corev1.NamespaceDefault + } + host := pd.Name + "." + pd.Spec.Subdomain + "." + ns + return fmt.Sprintf("%s://%s:%d", scheme, host, pd.GetPeerPort()) +} + +func getInitialCluster(peers []*v1alpha1.PD, scheme string) string { + urls := []string{} + for _, peer := range peers { + url := getAdvertisePeerURLs(peer, scheme) + urls = append(urls, peer.Name+"="+url) + } + + return strings.Join(urls, ",") +} diff --git a/pkg/configs/tidb/config.go b/pkg/configs/tidb/config.go new file mode 100644 index 00000000000..12f52019c82 --- /dev/null +++ b/pkg/configs/tidb/config.go @@ -0,0 +1,163 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tidb + +import ( + "fmt" + "path" + "strings" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +const ( + // defaultGracefulWaitBeforeShutdownInSeconds is the default value of the tidb config `graceful-wait-before-shutdown`, + // which is set by the operator if not set by the user, for graceful shutdown. + // Note that the default value is zero in tidb-server. + defaultGracefulWaitBeforeShutdownInSeconds = 30 +) + +// Config is a subset config of tidb +// Only our managed fields are defined in here. 
+// ref: https://github.com/pingcap/tidb/blob/master/pkg/config/config.go +type Config struct { + Store string `toml:"store"` + AdvertiseAddress string `toml:"advertise-address"` + Host string `toml:"host"` + Path string `toml:"path"` + + Security Security `toml:"security"` + + Log Log `toml:"log"` + + InitializeSQLFile string `toml:"initialize-sql-file"` + GracefulWaitBeforeShutdown int `toml:"graceful-wait-before-shutdown"` +} + +type Security struct { + // TLS config for communication between TiDB server and MySQL client + SSLCA string `toml:"ssl-ca"` + SSLCert string `toml:"ssl-cert"` + SSLKey string `toml:"ssl-key"` + + // mTLS config + ClusterSSLCA string `toml:"cluster-ssl-ca"` + ClusterSSLCert string `toml:"cluster-ssl-cert"` + ClusterSSLKey string `toml:"cluster-ssl-key"` + + // tidb_auth_token + AuthTokenJwks string `toml:"auth-token-jwks"` +} + +type Log struct { + SlowQueryFile string `toml:"slow-query-file"` +} + +func (c *Config) Overlay(cluster *v1alpha1.Cluster, dbg *v1alpha1.TiDBGroup, tidb *v1alpha1.TiDB) error { + if err := c.Validate(); err != nil { + return err + } + + c.Store = "tikv" // always use tikv + c.AdvertiseAddress = getAdvertiseAddress(tidb) + c.Host = "::" + c.Path = removeHTTPPrefix(cluster.Status.PD) + + if dbg.IsTLSClientEnabled() { + // TODO(csuzhangxc): disable Client Authn + c.Security.SSLCA = path.Join(v1alpha1.TiDBServerTLSMountPath, corev1.ServiceAccountRootCAKey) + c.Security.SSLCert = path.Join(v1alpha1.TiDBServerTLSMountPath, corev1.TLSCertKey) + c.Security.SSLKey = path.Join(v1alpha1.TiDBServerTLSMountPath, corev1.TLSPrivateKeyKey) + } + + if cluster.IsTLSClusterEnabled() { + c.Security.ClusterSSLCA = path.Join(v1alpha1.TiDBClusterTLSMountPath, corev1.ServiceAccountRootCAKey) + c.Security.ClusterSSLCert = path.Join(v1alpha1.TiDBClusterTLSMountPath, corev1.TLSCertKey) + c.Security.ClusterSSLKey = path.Join(v1alpha1.TiDBClusterTLSMountPath, corev1.TLSPrivateKeyKey) + } + + c.Log.SlowQueryFile = getSlowQueryFile(tidb) + + 
if dbg.IsBootstrapSQLEnabled() { + c.InitializeSQLFile = path.Join(v1alpha1.BootstrapSQLFilePath, v1alpha1.BootstrapSQLFileName) + } + + if dbg.IsTokenBasedAuthEnabled() { + c.Security.AuthTokenJwks = path.Join(v1alpha1.TiDBAuthTokenPath, v1alpha1.TiDBAuthTokenJWKS) + } + + // If not set, use default value. + if c.GracefulWaitBeforeShutdown == 0 { + c.GracefulWaitBeforeShutdown = defaultGracefulWaitBeforeShutdownInSeconds + } + + return nil +} + +func (c *Config) Validate() error { + var fields []string + + if c.Store != "" { + fields = append(fields, "store") + } + if c.AdvertiseAddress != "" { + fields = append(fields, "advertise-address") + } + if c.Host != "" { + fields = append(fields, "host") + } + if c.Path != "" { + fields = append(fields, "path") + } + + if len(fields) == 0 { + return nil + } + + return fmt.Errorf("%v: %w", fields, v1alpha1.ErrFieldIsManagedByOperator) +} + +func getAdvertiseAddress(tidb *v1alpha1.TiDB) string { + ns := tidb.Namespace + if ns == "" { + ns = corev1.NamespaceDefault + } + return tidb.Name + "." + tidb.Spec.Subdomain + "." + ns + ".svc" +} + +func removeHTTPPrefix(url string) string { + url = strings.TrimPrefix(url, "http://") + url = strings.TrimPrefix(url, "https://") + return url +} + +func getSlowQueryFile(tidb *v1alpha1.TiDB) string { + if !tidb.IsSeperateSlowLogEnabled() { + return "" + } else if tidb.Spec.SlowLog == nil || tidb.Spec.SlowLog.VolumeName == "" { + return path.Join(v1alpha1.TiDBDefaultSlowLogDir, v1alpha1.TiDBSlowLogFileName) + } + + for i := range tidb.Spec.Volumes { + vol := &tidb.Spec.Volumes[i] + if vol.Name == tidb.Spec.SlowLog.VolumeName { + return path.Join(vol.Path, v1alpha1.TiDBSlowLogFileName) + } + } + + return "" // should not reach here +} diff --git a/pkg/configs/tiflash/config.go b/pkg/configs/tiflash/config.go new file mode 100644 index 00000000000..39e5a276d45 --- /dev/null +++ b/pkg/configs/tiflash/config.go @@ -0,0 +1,243 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tiflash + +import ( + "fmt" + "path" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +// Config is a subset config of tiflash +// Only our managed fields are defined in here. +// ref: https://github.com/pingcap/tiflash/blob/master/etc/config-template.toml +// NOTE: the following config items are set in TiDB Operator v1, but not in TiDB Operator v2 as they are removed from v6.1 +// - flash.tidb_status_addr +// - flash.flash_cluster +type Config struct { + // NOTE: in TiFlash, some fields use "_" instead of "-" + + TmpPath string `toml:"tmp_path"` + Storage Storage `toml:"storage"` + Flash Flash `toml:"flash"` + Raft Raft `toml:"raft"` + Status Status `toml:"status"` + Logger Logger `toml:"logger"` + + Security Security `toml:"security"` +} + +type Storage struct { + Main StorageMain `toml:"main"` + Raft StorageRaft `toml:"raft"` +} + +type StorageMain struct { + Dir []string `toml:"dir"` +} + +type StorageRaft struct { + Dir []string `toml:"dir"` +} + +type Flash struct { + ServiceAddr string `toml:"service_addr"` + + Proxy Proxy `toml:"proxy"` +} + +type Proxy struct { + Config string `toml:"config"` + DataDir string `toml:"data-dir"` + Addr string `toml:"addr"` + AdvertiseAddr string `toml:"advertise-addr"` + AdvertiseStatusAddr string `toml:"advertise-status-addr"` +} + +type Raft struct { + PdAddr string `toml:"pd_addr"` +} + +type Status struct { + 
MetricsPort int `toml:"metrics_port"` +} + +type Logger struct { + Log string `toml:"log"` + Errorlog string `toml:"errorlog"` +} + +type Security struct { + // CAPath is the path of file that contains list of trusted SSL CAs. + CAPath string `toml:"ca_path"` + // CertPath is the path of file that contains X509 certificate in PEM format. + CertPath string `toml:"cert_path"` + // KeyPath is the path of file that contains X509 key in PEM format. + KeyPath string `toml:"key_path"` +} + +func (c *Config) Overlay(cluster *v1alpha1.Cluster, tiflash *v1alpha1.TiFlash) error { + if err := c.Validate(); err != nil { + return err + } + + if cluster.IsTLSClusterEnabled() { + c.Security.CAPath = path.Join(v1alpha1.TiFlashClusterTLSMountPath, corev1.ServiceAccountRootCAKey) + c.Security.CertPath = path.Join(v1alpha1.TiFlashClusterTLSMountPath, corev1.TLSCertKey) + c.Security.KeyPath = path.Join(v1alpha1.TiFlashClusterTLSMountPath, corev1.TLSPrivateKeyKey) + } + + c.TmpPath = getTmpPath(tiflash) + c.Storage.Main.Dir = []string{getMainStorageDir(tiflash)} + c.Storage.Raft.Dir = []string{getRaftStorageDir(tiflash)} + + c.Flash.ServiceAddr = GetServiceAddr(tiflash) + // /etc/tiflash/proxy.toml + c.Flash.Proxy.Config = path.Join(v1alpha1.DirNameConfigTiFlash, v1alpha1.ConfigFileTiFlashProxyName) + c.Flash.Proxy.DataDir = getProxyDataDir(tiflash) + c.Flash.Proxy.Addr = getProxyAddr(tiflash) + c.Flash.Proxy.AdvertiseAddr = getProxyAdvertiseAddr(tiflash) + c.Flash.Proxy.AdvertiseStatusAddr = getProxyAdvertiseStatusAddr(tiflash) + + c.Raft.PdAddr = cluster.Status.PD + + c.Status.MetricsPort = int(tiflash.GetMetricsPort()) + + c.Logger.Log = GetServerLogPath(tiflash) + c.Logger.Errorlog = GetErrorLogPath(tiflash) + + return nil +} + +//nolint:gocyclo // refactor if possible +func (c *Config) Validate() error { + fields := []string{} + + if c.Security.CAPath != "" { + fields = append(fields, "security.ca_path") + } + if c.Security.CertPath != "" { + fields = append(fields, 
"security.cert_path") + } + if c.Security.KeyPath != "" { + fields = append(fields, "security.key_path") + } + + if c.TmpPath != "" { + fields = append(fields, "tmp_path") + } + + if c.Flash.ServiceAddr != "" { + fields = append(fields, "flash.service_addr") + } + if c.Flash.Proxy.Config != "" { + fields = append(fields, "flash.proxy.config") + } + if c.Flash.Proxy.Addr != "" { + fields = append(fields, "flash.proxy.addr") + } + if c.Flash.Proxy.AdvertiseAddr != "" { + fields = append(fields, "flash.proxy.advertise-addr") + } + if c.Flash.Proxy.AdvertiseStatusAddr != "" { + fields = append(fields, "flash.proxy.advertise-status-addr") + } + + if c.Raft.PdAddr != "" { + fields = append(fields, "raft.pd_addr") + } + + if c.Logger.Log != "" { + fields = append(fields, "logger.log") + } + if c.Logger.Errorlog != "" { + fields = append(fields, "logger.errorlog") + } + + if c.Raft.PdAddr != "" { + fields = append(fields, "raft.pd-addr") + } + + if c.Status.MetricsPort != 0 { + fields = append(fields, "status.metrics-port") + } + + if len(fields) == 0 { + return nil + } + return fmt.Errorf("%v: %w", fields, v1alpha1.ErrFieldIsManagedByOperator) +} + +func GetServiceAddr(tiflash *v1alpha1.TiFlash) string { + ns := tiflash.Namespace + if ns == "" { + ns = corev1.NamespaceDefault + } + return fmt.Sprintf("%s.%s.%s:%d", tiflash.Name, tiflash.Spec.Subdomain, ns, tiflash.GetFlashPort()) +} + +func getProxyAddr(tiflash *v1alpha1.TiFlash) string { + return fmt.Sprintf("[::]:%d", tiflash.GetProxyPort()) +} + +func getProxyAdvertiseAddr(tiflash *v1alpha1.TiFlash) string { + ns := tiflash.Namespace + if ns == "" { + ns = corev1.NamespaceDefault + } + return fmt.Sprintf("%s.%s.%s:%d", tiflash.Name, tiflash.Spec.Subdomain, ns, tiflash.GetProxyPort()) +} + +func getProxyAdvertiseStatusAddr(tiflash *v1alpha1.TiFlash) string { + ns := tiflash.Namespace + if ns == "" { + ns = corev1.NamespaceDefault + } + return fmt.Sprintf("%s.%s.%s:%d", tiflash.Name, tiflash.Spec.Subdomain, ns, 
tiflash.GetProxyStatusPort()) +} + +func GetServerLogPath(tiflash *v1alpha1.TiFlash) string { + return fmt.Sprintf("%s/logs/server.log", getDefaultMountPath(tiflash)) +} + +func GetErrorLogPath(tiflash *v1alpha1.TiFlash) string { + return fmt.Sprintf("%s/logs/error.log", getDefaultMountPath(tiflash)) +} + +func getTmpPath(tiflash *v1alpha1.TiFlash) string { + return fmt.Sprintf("%s/tmp", getDefaultMountPath(tiflash)) +} + +func getMainStorageDir(tiflash *v1alpha1.TiFlash) string { + return fmt.Sprintf("%s/db", getDefaultMountPath(tiflash)) +} + +func getRaftStorageDir(tiflash *v1alpha1.TiFlash) string { + return fmt.Sprintf("%s/kvstore", getDefaultMountPath(tiflash)) +} + +func getProxyDataDir(tiflash *v1alpha1.TiFlash) string { + return fmt.Sprintf("%s/proxy", getDefaultMountPath(tiflash)) +} + +// in TiDB Operator v1, we mount the first data volume to /data0, +// so for an existing TiFlash cluster, we should set the first data volume mount path to /data0. +func getDefaultMountPath(tiflash *v1alpha1.TiFlash) string { + vol := tiflash.Spec.Volumes[0] + return vol.Path +} diff --git a/pkg/configs/tiflash/proxy_config.go b/pkg/configs/tiflash/proxy_config.go new file mode 100644 index 00000000000..9ca27a5ef65 --- /dev/null +++ b/pkg/configs/tiflash/proxy_config.go @@ -0,0 +1,98 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tiflash + +import ( + "fmt" + "path" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +const ( + defaultProxyLogLevel = "info" +) + +type ProxyConfig struct { + LogLevel string `toml:"log-level"` + + Server ProxyServer `toml:"server"` + + Security ProxySecurity `toml:"security"` +} + +type ProxyServer struct { + StatusAddr string `toml:"status-addr"` +} + +// ProxySecurity has different fields from Security in TiFlash. +// It uses "-" instead of "_". +type ProxySecurity struct { + // CAPath is the path of file that contains list of trusted SSL CAs. + CAPath string `toml:"ca-path"` + // CertPath is the path of file that contains X509 certificate in PEM format. + CertPath string `toml:"cert-path"` + // KeyPath is the path of file that contains X509 key in PEM format. + KeyPath string `toml:"key-path"` +} + +func (c *ProxyConfig) Overlay(cluster *v1alpha1.Cluster, tiflash *v1alpha1.TiFlash) error { + if err := c.Validate(); err != nil { + return err + } + + if cluster.IsTLSClusterEnabled() { + c.Security.CAPath = path.Join(v1alpha1.TiFlashClusterTLSMountPath, corev1.ServiceAccountRootCAKey) + c.Security.CertPath = path.Join(v1alpha1.TiFlashClusterTLSMountPath, corev1.TLSCertKey) + c.Security.KeyPath = path.Join(v1alpha1.TiFlashClusterTLSMountPath, corev1.TLSPrivateKeyKey) + } + + if c.LogLevel == "" { + c.LogLevel = defaultProxyLogLevel + } + + c.Server.StatusAddr = getProxyStatusAddr(tiflash) + + return nil +} + +func (c *ProxyConfig) Validate() error { + fields := []string{} + + if c.Security.CAPath != "" { + fields = append(fields, "security.ca-path") + } + if c.Security.CertPath != "" { + fields = append(fields, "security.cert-path") + } + if c.Security.KeyPath != "" { + fields = append(fields, "security.key-path") + } + + if c.Server.StatusAddr != "" { + fields = append(fields, "server.status-addr") + } + + if len(fields) == 0 { + return nil + } + return fmt.Errorf("%v: %w", fields, 
v1alpha1.ErrFieldIsManagedByOperator)
+}
+
+func getProxyStatusAddr(tiflash *v1alpha1.TiFlash) string {
+	return fmt.Sprintf("[::]:%d", tiflash.GetProxyStatusPort())
+}
diff --git a/pkg/configs/tikv/config.go b/pkg/configs/tikv/config.go
new file mode 100644
index 00000000000..fe80013256e
--- /dev/null
+++ b/pkg/configs/tikv/config.go
@@ -0,0 +1,176 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tikv
+
+import (
+	"fmt"
+	"path"
+
+	corev1 "k8s.io/api/core/v1"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+)
+
+type Config struct {
+	Server     Server     `toml:"server"`
+	Storage    Storage    `toml:"storage"`
+	PD         PD         `toml:"pd"`
+	RaftEngine RaftEngine `toml:"raft-engine"`
+	RocksDB    RocksDB    `toml:"rocksdb"`
+	Security   Security   `toml:"security"`
+}
+
+type Server struct {
+	Addr                string `toml:"addr"`
+	AdvertiseAddr       string `toml:"advertise-addr"`
+	StatusAddr          string `toml:"status-addr"`
+	AdvertiseStatusAddr string `toml:"advertise-status-addr"`
+}
+
+type Storage struct {
+	DataDir string `toml:"data-dir"`
+}
+
+type PD struct {
+	Endpoints []string `toml:"endpoints"`
+}
+
+type RaftEngine struct {
+	// Only for validation, it cannot be disabled
+	Enable *bool `toml:"enable"`
+	Dir    string `toml:"dir"`
+}
+
+type RocksDB struct {
+	WALDir string `toml:"wal-dir"`
+}
+
+type Security struct {
+	// CAPath is the path of file that contains list of trusted SSL CAs.
+	CAPath string `toml:"ca-path"`
+	// CertPath is the path of file that contains X509 certificate in PEM format.
+	CertPath string `toml:"cert-path"`
+	// KeyPath is the path of file that contains X509 key in PEM format.
+	KeyPath string `toml:"key-path"`
+}
+
+func (c *Config) Overlay(cluster *v1alpha1.Cluster, tikv *v1alpha1.TiKV) error {
+	if err := c.Validate(); err != nil {
+		return err
+	}
+
+	if cluster.IsTLSClusterEnabled() {
+		c.Security.CAPath = path.Join(v1alpha1.TiKVClusterTLSMountPath, corev1.ServiceAccountRootCAKey)
+		c.Security.CertPath = path.Join(v1alpha1.TiKVClusterTLSMountPath, corev1.TLSCertKey)
+		c.Security.KeyPath = path.Join(v1alpha1.TiKVClusterTLSMountPath, corev1.TLSPrivateKeyKey)
+	}
+
+	c.Server.Addr = getClientURLs(tikv)
+	c.Server.AdvertiseAddr = GetAdvertiseClientURLs(tikv)
+	c.Server.StatusAddr = getStatusURLs(tikv)
+	c.Server.AdvertiseStatusAddr = getAdvertiseStatusURLs(tikv)
+	c.PD.Endpoints = []string{cluster.Status.PD}
+
+	for i := range tikv.Spec.Volumes {
+		vol := &tikv.Spec.Volumes[i]
+		for _, usage := range vol.For {
+			switch usage.Type {
+			case v1alpha1.VolumeUsageTypeTiKVData:
+				p := string(usage.Type)
+				if usage.SubPath != "" {
+					p = usage.SubPath
+				}
+				c.Storage.DataDir = path.Join(vol.Path, p)
+			case v1alpha1.VolumeUsageTypeTiKVRaftEngine:
+				p := string(usage.Type)
+				if usage.SubPath != "" {
+					p = usage.SubPath
+				}
+				c.RaftEngine.Dir = path.Join(vol.Path, p)
+			case v1alpha1.VolumeUsageTypeTiKVWAL:
+				p := string(usage.Type)
+				if usage.SubPath != "" {
+					p = usage.SubPath
+				}
+				c.RocksDB.WALDir = path.Join(vol.Path, p)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (c *Config) Validate() error {
+	fields := []string{}
+
+	if c.Server.Addr != "" {
+		fields = append(fields, "server.addr")
+	}
+	if c.Server.AdvertiseAddr != "" {
+		fields = append(fields, "server.advertise-addr")
+	}
+	if c.Server.StatusAddr != "" {
+		fields = append(fields, "server.status-addr")
+	}
+	if c.Server.AdvertiseStatusAddr != "" { // also set unconditionally by Overlay, so user-supplied values must be rejected too
+		fields = append(fields, "server.advertise-status-addr")
+	}
+	if c.Storage.DataDir != "" {
+		fields = append(fields, "storage.data-dir")
+	}
+	if len(c.PD.Endpoints) != 0 {
+		fields = append(fields, "pd.endpoints")
+	}
+
+	if c.RaftEngine.Enable != nil {
+		fields = append(fields, "raft-engine.enable")
+	}
+	if c.RaftEngine.Dir != "" {
+		fields = append(fields, "raft-engine.dir")
+	}
+	if c.RocksDB.WALDir != "" {
+		fields = append(fields, "rocksdb.wal-dir")
+	}
+
+	if len(fields) == 0 {
+		return nil
+	}
+
+	return fmt.Errorf("%v: %w", fields, v1alpha1.ErrFieldIsManagedByOperator)
+}
+
+func getClientURLs(tikv *v1alpha1.TiKV) string {
+	return fmt.Sprintf("[::]:%d", tikv.GetClientPort())
+}
+
+func GetAdvertiseClientURLs(tikv *v1alpha1.TiKV) string {
+	ns := tikv.Namespace
+	if ns == "" {
+		ns = corev1.NamespaceDefault
+	}
+	return fmt.Sprintf("%s.%s.%s:%d", tikv.Name, tikv.Spec.Subdomain, ns, tikv.GetClientPort())
+}
+
+func getStatusURLs(tikv *v1alpha1.TiKV) string {
+	return fmt.Sprintf("[::]:%d", tikv.GetStatusPort())
+}
+
+func getAdvertiseStatusURLs(tikv *v1alpha1.TiKV) string {
+	ns := tikv.Namespace
+	if ns == "" {
+		ns = corev1.NamespaceDefault
+	}
+	return fmt.Sprintf("%s.%s.%s:%d", tikv.Name, tikv.Spec.Subdomain, ns, tikv.GetStatusPort())
+}
diff --git a/pkg/controllers/cluster/controller.go b/pkg/controllers/cluster/controller.go
new file mode 100644
index 00000000000..f7b18c74ac7
--- /dev/null
+++ b/pkg/controllers/cluster/controller.go
@@ -0,0 +1,92 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cluster
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-logr/logr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/controllers/cluster/tasks"
+	"github.com/pingcap/tidb-operator/pkg/utils/k8s"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type Reconciler struct { // reconciles core.pingcap.com Cluster objects
+	Logger logr.Logger
+	Client client.Client
+}
+
+func Setup(mgr manager.Manager, c client.Client) error { // registers the Cluster controller with the manager
+	r := &Reconciler{
+		Logger: mgr.GetLogger().WithName("Cluster"),
+		Client: c,
+	}
+	return ctrl.NewControllerManagedBy(mgr).For(&v1alpha1.Cluster{}).
+		Watches(&v1alpha1.PDGroup{}, handler.EnqueueRequestsFromMapFunc(enqueueForGroup)).
+		Watches(&v1alpha1.TiKVGroup{}, handler.EnqueueRequestsFromMapFunc(enqueueForGroup)).
+		Watches(&v1alpha1.TiDBGroup{}, handler.EnqueueRequestsFromMapFunc(enqueueForGroup)).
+		Watches(&v1alpha1.TiFlashGroup{}, handler.EnqueueRequestsFromMapFunc(enqueueForGroup)).
+		WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}).
+		Complete(r)
+}
+
+func enqueueForGroup(_ context.Context, obj client.Object) []reconcile.Request { // maps a component-group event to its owning Cluster
+	group := obj.(v1alpha1.Group)
+	return []reconcile.Request{
+		{
+			NamespacedName: client.ObjectKey{
+				Namespace: obj.GetNamespace(),
+				Name:      group.GetClusterName(),
+			},
+		},
+	}
+}
+
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := r.Logger.WithValues("cluster", req.NamespacedName)
+	reporter := task.NewTableTaskReporter()
+
+	startTime := time.Now()
+	logger.Info("start reconcile")
+	defer func() {
+		dur := time.Since(startTime)
+		logger.Info("end reconcile", "duration", dur)
+		logger.Info("summary: \n" + reporter.Summary())
+	}()
+
+	rtx := &tasks.ReconcileContext{
+		// some fields will be set in the context task
+		Context: ctx,
+		Key:     req.NamespacedName,
+	}
+
+	runner := task.NewTaskRunner[tasks.ReconcileContext](reporter)
+	runner.AddTasks(
+		tasks.NewTaskContext(logger, r.Client),
+		tasks.NewTaskFinalizer(logger, r.Client),
+		tasks.NewTaskStatus(logger, r.Client),
+	)
+
+	return runner.Run(rtx)
+}
diff --git a/pkg/controllers/cluster/tasks/ctx.go b/pkg/controllers/cluster/tasks/ctx.go
new file mode 100644
index 00000000000..38f541660ca
--- /dev/null
+++ b/pkg/controllers/cluster/tasks/ctx.go
@@ -0,0 +1,118 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"context"
+
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type ReconcileContext struct { // state shared by all tasks of one reconcile round
+	context.Context
+
+	Key types.NamespacedName
+
+	Cluster       *v1alpha1.Cluster
+	PDGroup       *v1alpha1.PDGroup
+	TiKVGroups    []*v1alpha1.TiKVGroup
+	TiFlashGroups []*v1alpha1.TiFlashGroup
+	TiDBGroups    []*v1alpha1.TiDBGroup
+}
+
+func (ctx *ReconcileContext) Self() *ReconcileContext {
+	return ctx
+}
+
+type TaskContext struct { // loads the Cluster and its component groups into the ReconcileContext
+	Logger logr.Logger
+	Client client.Client
+}
+
+func NewTaskContext(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskContext{
+		Logger: logger,
+		Client: c,
+	}
+}
+
+func (*TaskContext) Name() string {
+	return "Context"
+}
+
+func (t *TaskContext) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	var cluster v1alpha1.Cluster
+	if err := t.Client.Get(ctx, rtx.Key, &cluster); err != nil {
+		if !errors.IsNotFound(err) {
+			return task.Fail().With("can't get tidb cluster: %v", err) // %v: With formats like Sprintf; %w is only valid in fmt.Errorf
+		}
+
+		return task.Complete().Break().With("tidb cluster has been deleted")
+	}
+	rtx.Cluster = &cluster
+
+	if cluster.ShouldPauseReconcile() {
+		return task.Complete().Break().With("cluster reconciliation is paused")
+	}
+
+	var pdGroupList v1alpha1.PDGroupList
+	if err := t.Client.List(ctx, &pdGroupList, client.InNamespace(rtx.Key.Namespace),
+		client.MatchingFields{"spec.cluster.name": rtx.Key.Name}); err != nil {
+		return task.Fail().With("can't list pd group: %v", err)
+	}
+	if len(pdGroupList.Items) > 1 {
+		return task.Fail().With("more than one pd group")
+	}
+	if len(pdGroupList.Items) != 0 {
+		rtx.PDGroup = &pdGroupList.Items[0]
+	}
+
+	var tikvGroupList v1alpha1.TiKVGroupList
+	if err := t.Client.List(ctx, &tikvGroupList, client.InNamespace(rtx.Key.Namespace),
+		client.MatchingFields{"spec.cluster.name": rtx.Key.Name}); err != nil {
+		return task.Fail().With("can't list tikv group: %v", err)
+	}
+	for i := range tikvGroupList.Items {
+		rtx.TiKVGroups = append(rtx.TiKVGroups, &tikvGroupList.Items[i])
+	}
+
+	var tiflashGroupList v1alpha1.TiFlashGroupList
+	if err := t.Client.List(ctx, &tiflashGroupList, client.InNamespace(rtx.Key.Namespace),
+		client.MatchingFields{"spec.cluster.name": rtx.Key.Name}); err != nil {
+		return task.Fail().With("can't list tiflash group: %v", err)
+	}
+	for i := range tiflashGroupList.Items {
+		rtx.TiFlashGroups = append(rtx.TiFlashGroups, &tiflashGroupList.Items[i])
+	}
+
+	var tidbGroupList v1alpha1.TiDBGroupList
+	if err := t.Client.List(ctx, &tidbGroupList, client.InNamespace(rtx.Key.Namespace),
+		client.MatchingFields{"spec.cluster.name": rtx.Key.Name}); err != nil {
+		return task.Fail().With("can't list tidb group: %v", err)
+	}
+	for i := range tidbGroupList.Items {
+		rtx.TiDBGroups = append(rtx.TiDBGroups, &tidbGroupList.Items[i])
+	}
+
+	return task.Complete().With("new context completed")
+}
diff --git a/pkg/controllers/cluster/tasks/ctx_test.go b/pkg/controllers/cluster/tasks/ctx_test.go
new file mode 100644
index 00000000000..77aa5649718
--- /dev/null
+++ b/pkg/controllers/cluster/tasks/ctx_test.go
@@ -0,0 +1,86 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package tasks + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +func FakeContext(key types.NamespacedName, changes ...fake.ChangeFunc[ReconcileContext, *ReconcileContext]) *ReconcileContext { + ctx := fake.Fake(changes...) + ctx.Context = context.TODO() + ctx.Key = key + return ctx +} + +func TestContext(t *testing.T) { + cases := []struct { + desc string + key types.NamespacedName + objs []client.Object + expected task.Result + expectedCluster *v1alpha1.Cluster + }{ + { + desc: "cluster has been deleted", + key: types.NamespacedName{ + Name: "test", + }, + objs: []client.Object{}, + expected: task.Complete().Break().With(""), + }, + { + desc: "new context complete", + key: types.NamespacedName{ + Name: "test", + }, + objs: []client.Object{ + fake.FakeObj[v1alpha1.Cluster]("test"), + }, + expected: task.Complete().With(""), + expectedCluster: fake.FakeObj[v1alpha1.Cluster]("test"), + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + ctx := FakeContext(c.key) + fc := client.NewFakeClient(c.objs...) + tk := NewTaskContext(logr.Discard(), fc) + res := tk.Sync(ctx) + + assert.Equal(tt, c.expected.IsFailed(), res.IsFailed()) + assert.Equal(tt, c.expected.ShouldContinue(), res.ShouldContinue()) + assert.Equal(tt, c.expected.RequeueAfter(), res.RequeueAfter()) + // Ignore message assertion + // TODO: maybe assert the message format? 
+
+			assert.Equal(tt, c.expectedCluster, ctx.Self().Cluster)
+		})
+	}
+}
diff --git a/pkg/controllers/cluster/tasks/finalizer.go b/pkg/controllers/cluster/tasks/finalizer.go
new file mode 100644
index 00000000000..d9e0e301ca6
--- /dev/null
+++ b/pkg/controllers/cluster/tasks/finalizer.go
@@ -0,0 +1,87 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"github.com/go-logr/logr"
+
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/k8s"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type TaskFinalizer struct { // manages the Cluster finalizer and cascading deletion of component groups
+	Logger logr.Logger
+	Client client.Client
+}
+
+func NewTaskFinalizer(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskFinalizer{
+		Logger: logger,
+		Client: c,
+	}
+}
+
+func (*TaskFinalizer) Name() string {
+	return "Finalizer"
+}
+
+//nolint:gocyclo // refactor if possible
+func (t *TaskFinalizer) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	if rtx.Cluster.GetDeletionTimestamp().IsZero() {
+		if err := k8s.EnsureFinalizer(ctx, t.Client, rtx.Cluster); err != nil {
+			return task.Fail().With("can't ensure finalizer: %v", err) // %v: With formats like Sprintf; %w is only valid in fmt.Errorf
+		}
+		return task.Complete().With("ensured finalizer")
+	}
+
+	if rtx.PDGroup == nil && len(rtx.TiKVGroups) == 0 && len(rtx.TiDBGroups) == 0 && len(rtx.TiFlashGroups) == 0 {
+		if err := k8s.RemoveFinalizer(ctx, t.Client, rtx.Cluster); err != nil {
+			return task.Fail().With("can't remove finalizer: %v", err)
+		}
+		return task.Complete().Break().With("removed finalizer")
+	}
+
+	// trigger the deletion of the components
+	if rtx.PDGroup != nil {
+		//nolint:gocritic // not a real issue, see https://github.com/go-critic/go-critic/issues/1448
+		if err := t.Client.Delete(ctx, rtx.PDGroup); client.IgnoreNotFound(err) != nil {
+			return task.Fail().With("can't delete pd group: %v", err)
+		}
+	}
+	for _, tikvGroup := range rtx.TiKVGroups {
+		//nolint:gocritic // not a real issue, see https://github.com/go-critic/go-critic/issues/1448
+		if err := t.Client.Delete(ctx, tikvGroup); client.IgnoreNotFound(err) != nil {
+			return task.Fail().With("can't delete tikv group: %v", err)
+		}
+	}
+	for _, tiflashGroup := range rtx.TiFlashGroups {
+		//nolint:gocritic // not a real issue, see https://github.com/go-critic/go-critic/issues/1448
+		if err := t.Client.Delete(ctx, tiflashGroup); client.IgnoreNotFound(err) != nil {
+			return task.Fail().With("can't delete tiflash group: %v", err)
+		}
+	}
+	for _, tidbGroup := range rtx.TiDBGroups {
+		//nolint:gocritic // not a real issue, see https://github.com/go-critic/go-critic/issues/1448
+		if err := t.Client.Delete(ctx, tidbGroup); client.IgnoreNotFound(err) != nil {
+			return task.Fail().With("can't delete tidb group: %v", err)
+		}
+	}
+
+	// wait for the components to be deleted
+	return task.Fail().With("deleting components")
+}
diff --git a/pkg/controllers/cluster/tasks/finalizer_test.go b/pkg/controllers/cluster/tasks/finalizer_test.go
new file mode 100644
index 00000000000..282a36cc7b3
--- /dev/null
+++ b/pkg/controllers/cluster/tasks/finalizer_test.go
@@ -0,0 +1,77 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +func TestFinalizer(t *testing.T) { + cases := []struct { + desc string + cluster *v1alpha1.Cluster + pdGroup *v1alpha1.PDGroup + expected task.Result + hasFinalizer bool + }{ + { + desc: "ensured finalizer", + cluster: fake.FakeObj[v1alpha1.Cluster]("test"), + expected: task.Complete().With("ensured finalizer"), + hasFinalizer: true, + }, + { + desc: "removed finalizer", + cluster: fake.FakeObj[v1alpha1.Cluster]("test", + fake.SetDeleteTimestamp[v1alpha1.Cluster](), fake.AddFinalizer[v1alpha1.Cluster]()), + expected: task.Complete().Break().With("removed finalizer"), + }, + { + desc: "deleting components", + cluster: fake.FakeObj[v1alpha1.Cluster]("test", + fake.SetDeleteTimestamp[v1alpha1.Cluster](), fake.AddFinalizer[v1alpha1.Cluster]()), + pdGroup: fake.FakeObj[v1alpha1.PDGroup]("pd-group"), + expected: task.Fail().With("deleting components"), + hasFinalizer: true, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + ctx := FakeContext(types.NamespacedName{Name: "test"}) + ctx.Cluster = c.cluster + ctx.PDGroup = c.pdGroup + + fc := client.NewFakeClient(c.cluster) + tk := 
NewTaskFinalizer(logr.Discard(), fc) + res := tk.Sync(ctx) + assert.Equal(tt, c.expected, res) + assert.Equal(tt, c.hasFinalizer, controllerutil.ContainsFinalizer(c.cluster, v1alpha1.Finalizer)) + }) + } +} diff --git a/pkg/controllers/cluster/tasks/status.go b/pkg/controllers/cluster/tasks/status.go new file mode 100644 index 00000000000..ab9f08969cf --- /dev/null +++ b/pkg/controllers/cluster/tasks/status.go @@ -0,0 +1,195 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "fmt" + "reflect" + "sort" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskStatus struct { + Logger logr.Logger + Client client.Client +} + +func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskStatus{ + Logger: logger, + Client: c, + } +} + +func (*TaskStatus) Name() string { + return "Status" +} + +func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + var needUpdate bool + if rtx.Cluster.Status.ObservedGeneration != rtx.Cluster.Generation { + rtx.Cluster.Status.ObservedGeneration = rtx.Cluster.Generation + needUpdate = true + } + if rtx.PDGroup != nil { + // TODO: extract into a common util + scheme := "http" + if rtx.Cluster.IsTLSClusterEnabled() { + scheme = "https" + } + pdAddr := fmt.Sprintf("%s://%s-%s.%s:%d", scheme, rtx.Cluster.Name, + rtx.PDGroup.Name, rtx.PDGroup.Namespace, rtx.PDGroup.GetClientPort()) + if rtx.Cluster.Status.PD != pdAddr { // TODO(csuzhangxc): verify switch between TLS and non-TLS + rtx.Cluster.Status.PD = pdAddr + needUpdate = true + } + } + needUpdate = t.syncComponentStatus(rtx) || needUpdate + needUpdate = t.syncConditions(rtx) || needUpdate + + if needUpdate { + if err := t.Client.Status().Update(ctx, rtx.Cluster); err != nil { + return task.Fail().With(fmt.Sprintf("can't update cluster status: %v", err)) + } + } + + return task.Complete().With("updated status") +} + +func (*TaskStatus) syncComponentStatus(rtx *ReconcileContext) bool { + components := make([]v1alpha1.ComponentStatus, 0) + if rtx.PDGroup != nil { + pd := v1alpha1.ComponentStatus{Kind: v1alpha1.ComponentKindPD} + if rtx.PDGroup.Spec.Replicas != nil { + // TODO: use real replicas + pd.Replicas += 
*rtx.PDGroup.Spec.Replicas + } + components = append(components, pd) + } + + if len(rtx.TiKVGroups) > 0 { + tikv := v1alpha1.ComponentStatus{Kind: v1alpha1.ComponentKindTiKV} + for _, tikvGroup := range rtx.TiKVGroups { + if tikvGroup.Spec.Replicas != nil { + // TODO: use real replicas + tikv.Replicas += *tikvGroup.Spec.Replicas + } + } + components = append(components, tikv) + } + + if len(rtx.TiFlashGroups) > 0 { + tiflash := v1alpha1.ComponentStatus{Kind: v1alpha1.ComponentKindTiFlash} + for _, tiflashGroup := range rtx.TiFlashGroups { + if tiflashGroup.Spec.Replicas != nil { + tiflash.Replicas += *tiflashGroup.Spec.Replicas + } + } + components = append(components, tiflash) + } + + if len(rtx.TiDBGroups) > 0 { + tidb := v1alpha1.ComponentStatus{Kind: v1alpha1.ComponentKindTiDB} + for _, tidbGroup := range rtx.TiDBGroups { + if tidbGroup.Spec.Replicas != nil { + tidb.Replicas += *tidbGroup.Spec.Replicas + } + } + components = append(components, tidb) + } + + sort.Slice(components, func(i, j int) bool { + return components[i].Kind < components[j].Kind + }) + + if reflect.DeepEqual(rtx.Cluster.Status.Components, components) { + return false + } + rtx.Cluster.Status.Components = components + return true +} + +func (*TaskStatus) syncConditions(rtx *ReconcileContext) bool { + // TODO(csuzhangxc): calculate progressing condition based on components' observed generation? 
+ prgCond := metav1.Condition{ + Type: v1alpha1.ClusterCondProgressing, + Status: metav1.ConditionTrue, + ObservedGeneration: rtx.Cluster.Generation, + Reason: v1alpha1.ClusterCreationReason, + Message: "Cluster is being created", + } + if rtx.Cluster.DeletionTimestamp != nil { + prgCond.Reason = v1alpha1.ClusterDeletionReason + prgCond.Message = "Cluster is being deleted" + } + changed := meta.SetStatusCondition(&rtx.Cluster.Status.Conditions, prgCond) + + availCond := metav1.Condition{ + Type: v1alpha1.ClusterCondAvailable, + Status: metav1.ConditionFalse, + ObservedGeneration: rtx.Cluster.Generation, + Reason: v1alpha1.ClusterAvailableReason, + Message: "Cluster is not available", + } + suspended := rtx.PDGroup != nil && meta.IsStatusConditionTrue(rtx.PDGroup.Status.Conditions, v1alpha1.PDGroupCondSuspended) + for _, tidbg := range rtx.TiDBGroups { + if meta.IsStatusConditionTrue(tidbg.Status.Conditions, v1alpha1.TiDBGroupCondAvailable) { + // if any tidb group is available, the cluster is available + availCond.Status = metav1.ConditionTrue + availCond.Message = "Cluster is available" + break + } + if !meta.IsStatusConditionTrue(tidbg.Status.Conditions, v1alpha1.TiDBGroupCondSuspended) { + // if any group is not suspended, the cluster is not suspended + suspended = false + } + } + changed = meta.SetStatusCondition(&rtx.Cluster.Status.Conditions, availCond) || changed + + if suspended { + for _, tikvGroup := range rtx.TiKVGroups { + if !meta.IsStatusConditionTrue(tikvGroup.Status.Conditions, v1alpha1.TiKVGroupCondSuspended) { + suspended = false + break + } + } + } + var ( + suspendStatus = metav1.ConditionFalse + suspendMessage = "Cluster is not suspended" + ) + if suspended { + suspendStatus = metav1.ConditionTrue + suspendMessage = "Cluster is suspended" + } else if rtx.Cluster.ShouldSuspendCompute() { + suspendMessage = "Cluster is suspending" + } + return meta.SetStatusCondition(&rtx.Cluster.Status.Conditions, metav1.Condition{ + Type: 
v1alpha1.ClusterCondSuspended, + Status: suspendStatus, + ObservedGeneration: rtx.Cluster.Generation, + Reason: v1alpha1.ClusterSuspendReason, + Message: suspendMessage, + }) || changed +} diff --git a/pkg/controllers/cluster/tasks/status_test.go b/pkg/controllers/cluster/tasks/status_test.go new file mode 100644 index 00000000000..983c5268256 --- /dev/null +++ b/pkg/controllers/cluster/tasks/status_test.go @@ -0,0 +1,104 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +func TestStatusUpdater(t *testing.T) { + cases := []struct { + desc string + cluster *v1alpha1.Cluster + pdGroup *v1alpha1.PDGroup + expected task.Result + components []v1alpha1.ComponentStatus + conditions []metav1.Condition + }{ + { + desc: "creating cluster", + cluster: fake.FakeObj( + "test", + fake.SetGeneration[v1alpha1.Cluster](1), + ), + pdGroup: fake.FakeObj( + "pd-group", + func(obj *v1alpha1.PDGroup) *v1alpha1.PDGroup { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 3 + return obj + }, + ), + expected: task.Complete().With("updated status"), + components: []v1alpha1.ComponentStatus{ + { + Kind: v1alpha1.ComponentKindPD, + Replicas: 3, + }, + }, + conditions: []metav1.Condition{ + { + Type: v1alpha1.ClusterCondProgressing, + Status: metav1.ConditionTrue, + }, + { + Type: v1alpha1.ClusterCondAvailable, + Status: metav1.ConditionFalse, + }, + { + Type: v1alpha1.ClusterCondSuspended, + Status: metav1.ConditionFalse, + }, + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + ctx := FakeContext(types.NamespacedName{Name: "test"}) + ctx.Cluster = c.cluster + ctx.PDGroup = c.pdGroup + + fc := client.NewFakeClient(c.cluster) + tk := NewTaskStatus(logr.Discard(), fc) + res := tk.Sync(ctx) + assert.Equal(tt, c.expected, res) + assert.Equal(tt, c.cluster.Generation, c.cluster.Status.ObservedGeneration) + assert.Equal(tt, c.components, c.cluster.Status.Components) + + conditions := make([]metav1.Condition, 0) + for _, condition := range c.cluster.Status.Conditions { + conditions = 
append(conditions, metav1.Condition{ + Type: condition.Type, + Status: condition.Status, + }) + } + assert.Equal(tt, c.conditions, conditions) + }) + } +} diff --git a/pkg/controllers/common/cond.go b/pkg/controllers/common/cond.go new file mode 100644 index 00000000000..f397fa26e4a --- /dev/null +++ b/pkg/controllers/common/cond.go @@ -0,0 +1,41 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import "github.com/pingcap/tidb-operator/pkg/utils/task/v3" + +func CondPDHasBeenDeleted(ctx PDGetter) task.Condition { + return task.CondFunc(func() bool { + return ctx.GetPD() == nil + }) +} + +func CondPDIsDeleting(ctx PDGetter) task.Condition { + return task.CondFunc(func() bool { + return !ctx.GetPD().GetDeletionTimestamp().IsZero() + }) +} + +func CondClusterIsSuspending(ctx ClusterGetter) task.Condition { + return task.CondFunc(func() bool { + return ctx.GetCluster().ShouldSuspendCompute() + }) +} + +func CondClusterIsPaused(ctx ClusterGetter) task.Condition { + return task.CondFunc(func() bool { + return ctx.GetCluster().ShouldPauseReconcile() + }) +} diff --git a/pkg/controllers/common/task.go b/pkg/controllers/common/task.go new file mode 100644 index 00000000000..2b906e0a222 --- /dev/null +++ b/pkg/controllers/common/task.go @@ -0,0 +1,156 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "cmp" + "context" + "slices" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +type PDContextSetter interface { + context.Context + PDKey() types.NamespacedName + SetPD(pd *v1alpha1.PD) +} + +type PDGetter interface { + GetPD() *v1alpha1.PD +} + +func TaskContextPD(ctx PDContextSetter, c client.Client) task.Task { + return task.NameTaskFunc("ContextPD", func() task.Result { + var pd v1alpha1.PD + if err := c.Get(ctx, ctx.PDKey(), &pd); err != nil { + if !errors.IsNotFound(err) { + return task.Fail().With("can't get pd instance %s: %v", ctx.PDKey(), err) + } + + return task.Complete().With("pd instance has been deleted") + } + ctx.SetPD(&pd) + return task.Complete().With("pd is set") + }) +} + +type ClusterContextSetter interface { + context.Context + ClusterKey() types.NamespacedName + SetCluster(cluster *v1alpha1.Cluster) +} + +type ClusterGetter interface { + GetCluster() *v1alpha1.Cluster +} + +func TaskContextCluster(ctx ClusterContextSetter, c client.Client) task.Task { + return task.NameTaskFunc("ContextCluster", func() task.Result { + var cluster v1alpha1.Cluster + if err := c.Get(ctx, ctx.ClusterKey(), &cluster); err != nil { + return 
task.Fail().With("cannot find cluster %s: %v", ctx.ClusterKey(), err) + } + ctx.SetCluster(&cluster) + return task.Complete().With("cluster is set") + }) +} + +type PodContextSetter interface { + context.Context + PodKey() types.NamespacedName + SetPod(pod *corev1.Pod) +} + +type PodGetter interface { + GetPod() *corev1.Pod +} + +func TaskContextPod(ctx PodContextSetter, c client.Client) task.Task { + return task.NameTaskFunc("ContextPod", func() task.Result { + var pod corev1.Pod + if err := c.Get(ctx, ctx.PodKey(), &pod); err != nil { + if errors.IsNotFound(err) { + return task.Complete().With("pod is not created") + } + return task.Fail().With("failed to get pod %s: %v", ctx.PodKey(), err) + } + + ctx.SetPod(&pod) + + return task.Complete().With("pod is set") + }) +} + +type PDSliceContextSetter interface { + context.Context + ClusterKey() types.NamespacedName + SetPDSlice(pds []*v1alpha1.PD) +} + +// TODO: combine with pd slice context in PDGroup controller +func TaskContextPDSlice(ctx PDSliceContextSetter, c client.Client) task.Task { + return task.NameTaskFunc("ContextPDSlice", func() task.Result { + var pdl v1alpha1.PDList + ck := ctx.ClusterKey() + if err := c.List(ctx, &pdl, client.InNamespace(ck.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: ck.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + }); err != nil { + return task.Fail().With("cannot list pd peers: %v", err) + } + + peers := []*v1alpha1.PD{} + for i := range pdl.Items { + peers = append(peers, &pdl.Items[i]) + } + slices.SortFunc(peers, func(a, b *v1alpha1.PD) int { + return cmp.Compare(a.Name, b.Name) + }) + + ctx.SetPDSlice(peers) + + return task.Complete().With("peers is set") + }) +} + +type PodContext interface { + context.Context + PodGetter +} + +func TaskSuspendPod(ctx PodContext, c client.Client) task.Task { + return task.NameTaskFunc("SuspendPod", func() task.Result { + pod := 
ctx.GetPod() + if pod == nil { + return task.Complete().With("pod has been deleted") + } + if !pod.GetDeletionTimestamp().IsZero() { + return task.Complete().With("pod has been terminating") + } + if err := c.Delete(ctx, pod); err != nil { + return task.Fail().With("can't delete pod %s/%s: %v", pod.Namespace, pod.Name, err) + } + + return task.Wait().With("pod is deleting") + }) +} diff --git a/pkg/controllers/pd/builder.go b/pkg/controllers/pd/builder.go new file mode 100644 index 00000000000..9f88830f406 --- /dev/null +++ b/pkg/controllers/pd/builder.go @@ -0,0 +1,65 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pd + +import ( + "github.com/pingcap/tidb-operator/pkg/controllers/common" + "github.com/pingcap/tidb-operator/pkg/controllers/pd/tasks" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +func (r *Reconciler) NewRunner(ctx *tasks.ReconcileContext, reporter task.TaskReporter) task.TaskRunner { + runner := task.NewTaskRunner(reporter, + // get pd + common.TaskContextPD(ctx, r.Client), + // if it's deleted just return + task.IfBreak(common.CondPDHasBeenDeleted(ctx)), + + // get info from pd + tasks.TaskContextInfoFromPD(ctx, r.PDClientManager), + task.IfBreak(common.CondPDIsDeleting(ctx), + tasks.TaskFinalizerDel(ctx, r.Client), + ), + + // get cluster and check whether it's paused + common.TaskContextCluster(ctx, r.Client), + task.IfBreak( + common.CondClusterIsPaused(ctx), + ), + + // get pod and check whether the cluster is suspending + common.TaskContextPod(ctx, r.Client), + task.IfBreak( + common.CondClusterIsSuspending(ctx), + tasks.TaskFinalizerAdd(ctx, r.Client), + common.TaskSuspendPod(ctx, r.Client), + // TODO: extract as a common task + tasks.TaskStatusSuspend(ctx, r.Client), + ), + + tasks.TaskContextPeers(ctx, r.Client), + tasks.TaskFinalizerAdd(ctx, r.Client), + tasks.TaskConfigMap(ctx, r.Logger, r.Client), + tasks.TaskPVC(ctx, r.Logger, r.Client, r.VolumeModifier), + tasks.TaskPod(ctx, r.Logger, r.Client), + // If pd client has not been registered yet, do not update status of the pd + task.IfBreak(tasks.CondPDClientIsNotRegisterred(ctx), + tasks.TaskStatusUnknown(), + ), + tasks.TaskStatus(ctx, r.Logger, r.Client), + ) + + return runner +} diff --git a/pkg/controllers/pd/controller.go b/pkg/controllers/pd/controller.go new file mode 100644 index 00000000000..d41237708fd --- /dev/null +++ b/pkg/controllers/pd/controller.go @@ -0,0 +1,86 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pd + +import ( + "context" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/pd/tasks" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" + "github.com/pingcap/tidb-operator/pkg/volumes" +) + +type Reconciler struct { + Logger logr.Logger + Client client.Client + PDClientManager pdm.PDClientManager + VolumeModifier volumes.Modifier +} + +func Setup(mgr manager.Manager, c client.Client, pdcm pdm.PDClientManager, vm volumes.Modifier) error { + r := &Reconciler{ + Logger: mgr.GetLogger().WithName("PD"), + Client: c, + PDClientManager: pdcm, + VolumeModifier: vm, + } + return ctrl.NewControllerManagedBy(mgr).For(&v1alpha1.PD{}). + Owns(&corev1.Pod{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.PersistentVolumeClaim{}). + // for PD instance controller, we only need to care about the cluster spec change event now + Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler(), + builder.WithPredicates(predicate.GenerationChangedPredicate{})). 
+ WatchesRawSource(pdcm.Source(&pdv1.Member{}, r.MemberEventHandler())). + WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}). + Complete(r) +} + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger.WithValues("pd", req.NamespacedName) + reporter := task.NewTableTaskReporter() + + startTime := time.Now() + logger.Info("start reconcile") + defer func() { + dur := time.Since(startTime) + logger.Info("end reconcile", "duration", dur) + logger.Info("summay: \n" + reporter.Summary()) + }() + + rtx := &tasks.ReconcileContext{ + // some fields will be set in the context task + Context: ctx, + Key: req.NamespacedName, + } + + runner := r.NewRunner(rtx, reporter) + + return runner.Run() +} diff --git a/pkg/controllers/pd/handler.go b/pkg/controllers/pd/handler.go new file mode 100644 index 00000000000..3432ba68640 --- /dev/null +++ b/pkg/controllers/pd/handler.go @@ -0,0 +1,140 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pd + +import ( + "context" + "reflect" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" +) + +func (r *Reconciler) EventLogger() predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.TypedCreateEvent[client.Object]) bool { + r.Logger.Info("create event", "type", reflect.TypeOf(e.Object), "name", e.Object.GetName()) + return false + }, + UpdateFunc: func(e event.TypedUpdateEvent[client.Object]) bool { + r.Logger.Info("update event", "type", reflect.TypeOf(e.ObjectNew), "name", e.ObjectNew.GetName()) + return false + }, + DeleteFunc: func(e event.TypedDeleteEvent[client.Object]) bool { + r.Logger.Info("delete event", "type", reflect.TypeOf(e.Object), "name", e.Object.GetName()) + return false + }, + GenericFunc: func(e event.TypedGenericEvent[client.Object]) bool { + r.Logger.Info("generic event", "type", reflect.TypeOf(e.Object), "name", e.Object.GetName()) + return false + }, + } +} + +func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + oldObj := event.ObjectOld.(*v1alpha1.Cluster) + newObj := event.ObjectNew.(*v1alpha1.Cluster) + + if !reflect.DeepEqual(oldObj.Spec.SuspendAction, newObj.Spec.SuspendAction) { + r.Logger.Info("suspend action is updating", "from", oldObj.Spec.SuspendAction, "to", newObj.Spec.SuspendAction) + } else if 
oldObj.Spec.Paused != newObj.Spec.Paused { + r.Logger.Info("cluster paused is updating", "from", oldObj.Spec.Paused, "to", newObj.Spec.Paused) + } else { + return + } + + var pdl v1alpha1.PDList + if err := r.Client.List(ctx, &pdl, client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: newObj.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + }, client.InNamespace(newObj.Namespace)); err != nil { + r.Logger.Error(err, "cannot list all pd instances", "ns", newObj.Namespace, "cluster", newObj.Name) + return + } + + for i := range pdl.Items { + pd := &pdl.Items[i] + r.Logger.Info("queue add", "reason", "cluster change", "namespace", pd.Namespace, "cluster", pd.Spec.Cluster, "name", pd.Name) + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: pd.Name, + Namespace: pd.Namespace, + }, + }) + } + }, + } +} + +func (r *Reconciler) MemberEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(_ context.Context, event event.TypedCreateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m := event.Object.(*pdv1.Member) + ns, cluster := pdm.SplitPrimaryKey(m.Namespace) + + r.Logger.Info("add member", "namespace", ns, "cluster", cluster, "name", m.Name) + + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: m.Name, + Namespace: ns, + }, + }) + }, + + UpdateFunc: func(_ context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m := event.ObjectNew.(*pdv1.Member) + ns, cluster := pdm.SplitPrimaryKey(m.Namespace) + + r.Logger.Info("update member", "namespace", ns, "cluster", cluster, "name", m.Name) + + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: m.Name, + Namespace: ns, + }, + }) + }, + DeleteFunc: func(_ 
context.Context, event event.TypedDeleteEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + m := event.Object.(*pdv1.Member) + ns, cluster := pdm.SplitPrimaryKey(m.Namespace) + + r.Logger.Info("delete member", "namespace", ns, "cluster", cluster, "name", m.Name) + + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: m.Name, + Namespace: ns, + }, + }) + }, + } +} diff --git a/pkg/controllers/pd/tasks/cm.go b/pkg/controllers/pd/tasks/cm.go new file mode 100644 index 00000000000..a18ec1b1cd2 --- /dev/null +++ b/pkg/controllers/pd/tasks/cm.go @@ -0,0 +1,79 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + pdcfg "github.com/pingcap/tidb-operator/pkg/configs/pd" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" + "github.com/pingcap/tidb-operator/pkg/utils/toml" +) + +func TaskConfigMap(ctx *ReconcileContext, _ logr.Logger, c client.Client) task.Task { + return task.NameTaskFunc("ConfigMap", func() task.Result { + // TODO: DON'T add bootstrap config back + // We need to check current config and forbid adding bootstrap cfg back + + cfg := pdcfg.Config{} + decoder, encoder := toml.Codec[pdcfg.Config]() + if err := decoder.Decode([]byte(ctx.PD.Spec.Config), &cfg); err != nil { + return task.Fail().With("pd config cannot be decoded: %v", err) + } + if err := cfg.Overlay(ctx.Cluster, ctx.PD, ctx.Peers); err != nil { + return task.Fail().With("cannot generate pd config: %v", err) + } + + data, err := encoder.Encode(&cfg) + if err != nil { + return task.Fail().With("pd config cannot be encoded: %v", err) + } + + hash, err := toml.GenerateHash(ctx.PD.Spec.Config) + if err != nil { + return task.Fail().With("failed to generate hash for `pd.spec.config`: %v", err) + } + ctx.ConfigHash = hash + expected := newConfigMap(ctx.PD, data, ctx.ConfigHash) + if err := c.Apply(ctx, expected); err != nil { + return task.Fail().With("can't create/update the cm of pd: %v", err) + } + return task.Complete().With("cm is synced") + }) +} + +func newConfigMap(pd *v1alpha1.PD, data []byte, hash string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(pd.Name), + Namespace: pd.Namespace, + Labels: maputil.Merge(pd.Labels, map[string]string{ + v1alpha1.LabelKeyInstance: pd.Name, + v1alpha1.LabelKeyConfigHash: hash, + }), + OwnerReferences: 
[]metav1.OwnerReference{ + *metav1.NewControllerRef(pd, v1alpha1.SchemeGroupVersion.WithKind("PD")), + }, + }, + Data: map[string]string{ + v1alpha1.ConfigFileName: string(data), + }, + } +} diff --git a/pkg/controllers/pd/tasks/ctx.go b/pkg/controllers/pd/tasks/ctx.go new file mode 100644 index 00000000000..63f0f7f63ca --- /dev/null +++ b/pkg/controllers/pd/tasks/ctx.go @@ -0,0 +1,182 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "cmp" + "context" + "slices" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +type ReconcileContext struct { + context.Context + Key types.NamespacedName + + PDClient pdm.PDClient + // this means whether pd is available + IsAvailable bool + // This is single truth whether pd is initialized + Initialized bool + Healthy bool + MemberID string + IsLeader bool + + PD *v1alpha1.PD + PDGroup *v1alpha1.PDGroup + Peers []*v1alpha1.PD + Cluster *v1alpha1.Cluster + Pod *corev1.Pod + + // ConfigHash stores the hash of **user-specified** config (i.e.`.Spec.Config`), + // which will be used to determine whether the config has changed. 
+ // This ensures that our config overlay logic will not restart the tidb cluster unexpectedly. + ConfigHash string + + // Pod cannot be updated when call DELETE API, so we have to set this field to indicate + // the underlay pod has been deleting + PodIsTerminating bool +} + +func (ctx *ReconcileContext) PDKey() types.NamespacedName { + return ctx.Key +} + +func (ctx *ReconcileContext) SetPD(pd *v1alpha1.PD) { + ctx.PD = pd +} + +func (ctx *ReconcileContext) GetPD() *v1alpha1.PD { + return ctx.PD +} + +func (ctx *ReconcileContext) ClusterKey() types.NamespacedName { + return types.NamespacedName{ + Namespace: ctx.PD.Namespace, + Name: ctx.PD.Spec.Cluster.Name, + } +} + +func (ctx *ReconcileContext) GetCluster() *v1alpha1.Cluster { + return ctx.Cluster +} + +func (ctx *ReconcileContext) SetCluster(c *v1alpha1.Cluster) { + ctx.Cluster = c +} + +// Pod always uses same namespace and name of PD +func (ctx *ReconcileContext) PodKey() types.NamespacedName { + return ctx.Key +} + +func (ctx *ReconcileContext) GetPod() *corev1.Pod { + return ctx.Pod +} + +func (ctx *ReconcileContext) SetPod(pod *corev1.Pod) { + ctx.Pod = pod + if !pod.DeletionTimestamp.IsZero() { + ctx.PodIsTerminating = true + } +} + +func TaskContextInfoFromPD(ctx *ReconcileContext, cm pdm.PDClientManager) task.Task { + return task.NameTaskFunc("ContextInfoFromPD", func() task.Result { + ck := ctx.ClusterKey() + pc, ok := cm.Get(pdm.PrimaryKey(ck.Namespace, ck.Name)) + if !ok { + return task.Wait().With("pd client has not been registered yet") + } + + ctx.PDClient = pc + + if !pc.HasSynced() { + return task.Complete().With("context without member info is completed, cache of pd info is not synced") + } + + ctx.Initialized = true + + m, err := pc.Members().Get(ctx.PD.Name) + if err != nil { + if errors.IsNotFound(err) { + return task.Complete().With("context without member info is completed, pd is not initialized") + } + return task.Fail().With("cannot get member: %w", err) + } + + ctx.MemberID = m.ID + 
ctx.IsLeader = m.IsLeader + + // set available and trust health info only when member info is valid + if !m.Invalid { + ctx.IsAvailable = true + ctx.Healthy = m.Health + } + + return task.Complete().With("pd is ready") + }) +} + +func TaskContextPeers(ctx *ReconcileContext, c client.Client) task.Task { + return task.NameTaskFunc("ContextPeers", func() task.Result { + // TODO: don't get pdg in pd task, move MountClusterClientSecret opt to pd spec + if len(ctx.PD.OwnerReferences) == 0 { + return task.Fail().With("pd instance has no owner, this should not happen") + } + var pdg v1alpha1.PDGroup + if err := c.Get(ctx, client.ObjectKey{ + Name: ctx.PD.OwnerReferences[0].Name, // only one owner now + Namespace: ctx.PD.Namespace, + }, &pdg); err != nil { + return task.Fail().With("cannot find pd group %s: %v", ctx.PD.OwnerReferences[0].Name, err) + } + ctx.PDGroup = &pdg + + var pdl v1alpha1.PDList + if err := c.List(ctx, &pdl, client.InNamespace(ctx.PD.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: ctx.Cluster.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + }); err != nil { + return task.Fail().With("cannot list pd peers: %w", err) + } + + peers := []*v1alpha1.PD{} + for i := range pdl.Items { + peers = append(peers, &pdl.Items[i]) + } + slices.SortFunc(peers, func(a, b *v1alpha1.PD) int { + return cmp.Compare(a.Name, b.Name) + }) + ctx.Peers = peers + return task.Complete().With("peers is set") + }) +} + +func CondPDClientIsNotRegisterred(ctx *ReconcileContext) task.Condition { + return task.CondFunc(func() bool { + // TODO: do not use HasSynced twice, it may return different results + return ctx.PDClient == nil || !ctx.PDClient.HasSynced() + }) +} diff --git a/pkg/controllers/pd/tasks/finalizer.go b/pkg/controllers/pd/tasks/finalizer.go new file mode 100644 index 00000000000..8acc62752b7 --- /dev/null +++ b/pkg/controllers/pd/tasks/finalizer.go @@ -0,0 +1,66 @@ +// 
Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +func TaskFinalizerDel(ctx *ReconcileContext, c client.Client) task.Task { + return task.NameTaskFunc("FinalizerDel", func() task.Result { + switch { + // get member info successfully and the member still exists + case ctx.IsAvailable && ctx.MemberID != "": + // TODO: check whether quorum will be lost? 
+ if err := ctx.PDClient.Underlay().DeleteMember(ctx, ctx.PD.Name); err != nil { + return task.Fail().With("cannot delete member: %v", err) + } + + if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, + ctx.PD.Namespace, ctx.PD.Name); err != nil { + return task.Fail().With("cannot delete subresources: %v", err) + } + + if err := k8s.RemoveFinalizer(ctx, c, ctx.PD); err != nil { + return task.Fail().With("cannot remove finalizer: %v", err) + } + case ctx.IsAvailable: + if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, + ctx.PD.Namespace, ctx.PD.Name); err != nil { + return task.Fail().With("cannot delete subresources: %v", err) + } + + if err := k8s.RemoveFinalizer(ctx, c, ctx.PD); err != nil { + return task.Fail().With("cannot remove finalizer: %v", err) + } + case !ctx.IsAvailable: + // it may block some unsafe operations + return task.Fail().With("pd cluster is not available") + } + + return task.Complete().With("finalizer is removed") + }) +} + +func TaskFinalizerAdd(ctx *ReconcileContext, c client.Client) task.Task { + return task.NameTaskFunc("FinalizerAdd", func() task.Result { + if err := k8s.EnsureFinalizer(ctx, c, ctx.PD); err != nil { + return task.Fail().With("failed to ensure finalizer has been added: %v", err) + } + return task.Complete().With("finalizer is added") + }) +} diff --git a/pkg/controllers/pd/tasks/pod.go b/pkg/controllers/pd/tasks/pod.go new file mode 100644 index 00000000000..fa4bc72e1b1 --- /dev/null +++ b/pkg/controllers/pd/tasks/pod.go @@ -0,0 +1,270 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/image" + "github.com/pingcap/tidb-operator/pkg/overlay" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/statefulset" +) + +const ( + defaultReadinessProbeInitialDelaySeconds = 5 +) + +func TaskPod(ctx *ReconcileContext, logger logr.Logger, c client.Client) task.Task { + return task.NameTaskFunc("ConfigMap", func() task.Result { + expected := newPod(ctx.Cluster, ctx.PDGroup, ctx.PD, ctx.ConfigHash) + if ctx.Pod == nil { + // We have to refresh cache of members to make sure a pd without pod is unhealthy. + // If the healthy info is out of date, the operator may mark this pd up-to-date unexpectedly + // and begin to update the next PD. 
+ if ctx.Healthy { + ctx.PDClient.Members().Refresh() + return task.Wait().With("wait until pd's status becomes unhealthy") + } + if err := c.Apply(ctx, expected); err != nil { + return task.Fail().With("can't create pod of pd: %v", err) + } + ctx.SetPod(expected) + return task.Complete().With("pod is synced") + } + + res := k8s.ComparePods(ctx.Pod, expected) + curHash, expectHash := ctx.Pod.Labels[v1alpha1.LabelKeyConfigHash], expected.Labels[v1alpha1.LabelKeyConfigHash] + configChanged := curHash != expectHash + logger.Info("compare pod", "result", res, "configChanged", configChanged, "currentConfigHash", curHash, "expectConfigHash", expectHash) + + if res == k8s.CompareResultRecreate || + (configChanged && ctx.PDGroup.Spec.ConfigUpdateStrategy == v1alpha1.ConfigUpdateStrategyRollingUpdate) { + // NOTE: both rtx.Healthy and rtx.Pod are not always newest + // So pre delete check may also be skipped in some cases, for example, + // the PD is just started. + if ctx.Healthy || statefulset.IsPodReady(ctx.Pod) { + wait, err := preDeleteCheck(ctx, logger, ctx.PDClient, ctx.PD, ctx.Peers, ctx.IsLeader) + if err != nil { + return task.Fail().With("can't delete pod of pd: %v", err) + } + + if wait { + return task.Wait().With("wait for pd leader being transferred") + } + } + + logger.Info("will delete the pod to recreate", "name", ctx.Pod.Name, "namespace", ctx.Pod.Namespace, "UID", ctx.Pod.UID) + + if err := c.Delete(ctx, ctx.Pod); err != nil { + return task.Fail().With("can't delete pod of pd: %v", err) + } + + ctx.PodIsTerminating = true + + return task.Complete().With("pod is deleting") + } else if res == k8s.CompareResultUpdate { + logger.Info("will update the pod in place") + if err := c.Apply(ctx, expected); err != nil { + return task.Fail().With("can't apply pod of pd: %v", err) + } + ctx.SetPod(expected) + } + + return task.Complete().With("pod is synced") + }) +} + +func preDeleteCheck( + ctx context.Context, + logger logr.Logger, + pdc pdm.PDClient, + pd 
*v1alpha1.PD,
	peers []*v1alpha1.PD,
	isLeader bool,
) (bool, error) {
	// TODO: add quorum check. After stopping this pd, quorum should not be lost

	if isLeader {
		// Pick the peer that has been healthy the longest as the transfer
		// target; an empty result means no peer is currently healthy.
		peer := LongestHealthPeer(pd, peers)
		if peer == "" {
			return false, fmt.Errorf("no healthy transferee available")
		}

		logger.Info("try to transfer leader", "from", pd.Name, "to", peer)

		if err := pdc.Underlay().TransferPDLeader(ctx, peer); err != nil {
			return false, fmt.Errorf("transfer leader failed: %w", err)
		}

		// true: a leader transfer was actually issued.
		return true, nil
	}

	return false, nil
}

// newPod builds the desired Pod for a single PD instance: config volume from
// the instance ConfigMap, one PVC-backed volume per spec volume, optional TLS
// volumes, and a single pd-server container. The pod spec hash label is set
// at the end so the updater can detect drift.
func newPod(cluster *v1alpha1.Cluster, pdg *v1alpha1.PDGroup, pd *v1alpha1.PD, configHash string) *corev1.Pod {
	vols := []corev1.Volume{
		{
			Name: v1alpha1.VolumeNameConfig,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: ConfigMapName(pd.Name),
					},
				},
			},
		},
	}

	mounts := []corev1.VolumeMount{
		{
			Name:      v1alpha1.VolumeNameConfig,
			MountPath: v1alpha1.DirNameConfigPD,
		},
	}

	// One PersistentVolumeClaim-backed volume per user-declared volume.
	// Volume names are prefixed so they cannot collide with built-in volumes.
	for i := range pd.Spec.Volumes {
		vol := &pd.Spec.Volumes[i]
		name := v1alpha1.NamePrefix + "pd"
		if vol.Name != "" {
			name = name + "-" + vol.Name
		}
		vols = append(vols, corev1.Volume{
			Name: name,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: PersistentVolumeClaimName(pd.Name, vol.Name),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      name,
			MountPath: vol.Path,
		})
	}

	if cluster.IsTLSClusterEnabled() {
		groupName := pd.Labels[v1alpha1.LabelKeyGroup]
		vols = append(vols, corev1.Volume{
			Name: v1alpha1.PDClusterTLSVolumeName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: cluster.TLSClusterSecretName(groupName),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      v1alpha1.PDClusterTLSVolumeName,
			MountPath: v1alpha1.PDClusterTLSMountPath,
			ReadOnly:  true,
		})

		// Optionally mount the cluster client TLS secret as well (used by
		// tools inside the pod that talk to other components as a client).
		if pdg.MountClusterClientSecret() {
			vols = append(vols, corev1.Volume{
				Name: v1alpha1.ClusterTLSClientVolumeName,
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: cluster.ClusterClientTLSSecretName(),
					},
				},
			})
			mounts = append(mounts, corev1.VolumeMount{
				Name:      v1alpha1.ClusterTLSClientVolumeName,
				MountPath: v1alpha1.ClusterTLSClientMountPath,
				ReadOnly:  true,
			})
		}
	}

	anno := maputil.Copy(pd.GetAnnotations())
	// TODO: should not inherit all labels and annotations into pod
	delete(anno, v1alpha1.AnnoKeyInitialClusterNum)
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: pd.Namespace,
			Name:      pd.Name,
			Labels: maputil.Merge(pd.Labels, map[string]string{
				v1alpha1.LabelKeyInstance:   pd.Name,
				v1alpha1.LabelKeyConfigHash: configHash,
			}),
			Annotations: anno,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(pd, v1alpha1.SchemeGroupVersion.WithKind("PD")),
			},
		},
		Spec: corev1.PodSpec{
			// Hostname+Subdomain give the pod a stable DNS name under the
			// headless service.
			Hostname:     pd.Name,
			Subdomain:    pd.Spec.Subdomain,
			NodeSelector: pd.Spec.Topology,
			Containers: []corev1.Container{
				{
					Name:            v1alpha1.ContainerNamePD,
					Image:           image.PD.Image(pd.Spec.Image, pd.Spec.Version),
					ImagePullPolicy: corev1.PullIfNotPresent,
					Command: []string{
						"/pd-server",
						"--config",
						filepath.Join(v1alpha1.DirNameConfigPD, v1alpha1.ConfigFileName),
					},
					Ports: []corev1.ContainerPort{
						{
							Name:          v1alpha1.PDPortNameClient,
							ContainerPort: pd.GetClientPort(),
						},
						{
							Name:          v1alpha1.PDPortNamePeer,
							ContainerPort: pd.GetPeerPort(),
						},
					},
					VolumeMounts:   mounts,
					Resources:      k8s.GetResourceRequirements(pd.Spec.Resources),
					ReadinessProbe: buildPDReadinessProbe(pd.GetClientPort()),
				},
			},
			Volumes: vols,
		},
	}

	// User-supplied overlay is applied last so it can override anything above.
	if pd.Spec.Overlay != nil {
		overlay.OverlayPod(pod, pd.Spec.Overlay.Pod)
	}

	k8s.CalculateHashAndSetLabels(pod)
	return pod
}

// buildPDReadinessProbe returns a TCP readiness probe against the PD client
// port.
func buildPDReadinessProbe(port int32) *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			TCPSocket: &corev1.TCPSocketAction{
				Port: intstr.FromInt32(port),
			},
		},
		InitialDelaySeconds: defaultReadinessProbeInitialDelaySeconds,
	}
}

// ---- patch metadata (reconstructed) ----
// diff --git a/pkg/controllers/pd/tasks/pvc.go b/pkg/controllers/pd/tasks/pvc.go
// new file mode 100644
// index 00000000000..c8262d7893a
// --- /dev/null
// +++ b/pkg/controllers/pd/tasks/pvc.go
// @@ -0,0 +1,75 @@

// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tasks

import (
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	maputil "github.com/pingcap/tidb-operator/pkg/utils/map"
	"github.com/pingcap/tidb-operator/pkg/utils/task/v3"
	"github.com/pingcap/tidb-operator/pkg/volumes"
)

// TaskPVC reconciles the PersistentVolumeClaims of a PD instance: it builds
// the desired PVCs from the spec and delegates create/resize to
// volumes.SyncPVCs, waiting when modification is still in progress.
func TaskPVC(ctx *ReconcileContext, logger logr.Logger, c client.Client, vm volumes.Modifier) task.Task {
	return task.NameTaskFunc("PVC", func() task.Result {
		pvcs := newPVCs(ctx.PD)
		if wait, err := volumes.SyncPVCs(ctx, c, pvcs, vm, logger); err != nil {
			return task.Fail().With("failed to sync pvcs: %v", err)
		} else if wait {
			return task.Wait().With("waiting for pvcs to be synced")
		}

		// TODO: check config updation

		return task.Complete().With("pvcs are synced")
	})
}

// newPVCs builds one desired PVC per volume declared in the PD spec. Each PVC
// is owned by the PD object so it is garbage-collected with the instance.
func newPVCs(pd *v1alpha1.PD) []*corev1.PersistentVolumeClaim {
	pvcs := make([]*corev1.PersistentVolumeClaim, 0, len(pd.Spec.Volumes))
	for i := range pd.Spec.Volumes {
		vol := &pd.Spec.Volumes[i]
		pvcs = append(pvcs, &corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{
				Name:      PersistentVolumeClaimName(pd.Name, vol.Name),
				Namespace: pd.Namespace,
				Labels: maputil.Merge(pd.Labels, map[string]string{
					v1alpha1.LabelKeyInstance: pd.Name,
				}),
				OwnerReferences: []metav1.OwnerReference{
					*metav1.NewControllerRef(pd, v1alpha1.SchemeGroupVersion.WithKind("PD")),
				},
			},
			Spec: corev1.PersistentVolumeClaimSpec{
				AccessModes: []corev1.PersistentVolumeAccessMode{
					corev1.ReadWriteOnce,
				},
				Resources: corev1.VolumeResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceStorage: vol.Storage,
					},
				},
				StorageClassName:          vol.StorageClassName,
				VolumeAttributesClassName: vol.VolumeAttributesClassName,
			},
		})
	}

	return pvcs
}

// ---- patch metadata (reconstructed) ----
// diff --git a/pkg/controllers/pd/tasks/status.go b/pkg/controllers/pd/tasks/status.go
// new file mode 100644
// index 00000000000..96b301f29b5
// --- /dev/null
// +++ b/pkg/controllers/pd/tasks/status.go
// @@ -0,0 +1,180 @@

// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks

import (
	"time"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/utils/task/v3"
	"github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/statefulset"
)

// TaskStatusSuspend maintains the PD status while the instance is suspending
// or suspended: the Suspended condition becomes true only once the pod is
// gone, and the Health condition is forced to false throughout.
func TaskStatusSuspend(ctx *ReconcileContext, c client.Client) task.Task {
	return task.NameTaskFunc("StatusSuspend", func() task.Result {
		ctx.PD.Status.ObservedGeneration = ctx.PD.Generation
		var (
			suspendStatus  = metav1.ConditionFalse
			suspendMessage = "pd is suspending"

			// when suspending, the health status should be false
			healthStatus  = metav1.ConditionFalse
			healthMessage = "pd is not healthy"
		)

		// No pod means the suspension has completed.
		if ctx.Pod == nil {
			suspendStatus = metav1.ConditionTrue
			suspendMessage = "pd is suspended"
		}
		needUpdate := meta.SetStatusCondition(&ctx.PD.Status.Conditions, metav1.Condition{
			Type:               v1alpha1.PDCondSuspended,
			Status:             suspendStatus,
			ObservedGeneration: ctx.PD.Generation,
			// TODO: use different reason for suspending and suspended
			Reason:  v1alpha1.PDSuspendReason,
			Message: suspendMessage,
		})

		// NOTE: SetStatusCondition must run before the || so it is never
		// short-circuited away.
		needUpdate = meta.SetStatusCondition(&ctx.PD.Status.Conditions, metav1.Condition{
			Type:               v1alpha1.PDCondHealth,
			Status:             healthStatus,
			ObservedGeneration: ctx.PD.Generation,
			Reason:             v1alpha1.PDHealthReason,
			Message:            healthMessage,
		}) || needUpdate
		if needUpdate {
			if err := c.Status().Update(ctx, ctx.PD); err != nil {
				return task.Fail().With("cannot update status: %v", err)
			}
		}

		return task.Complete().With("status of suspend pd is updated")
	})
}

// TaskStatusUnknown is used when the instance state cannot be determined; it
// simply waits for the next event.
func TaskStatusUnknown() task.Task {
	return task.NameTaskFunc("StatusUnknown", func() task.Result {
		return task.Wait().With("status of the pd is unknown")
	})
}

// TaskStatus synchronizes the PD status (member id, leadership, initialized /
// suspended / health conditions, revisions) with what was observed earlier in
// the reconcile. It retries while the pod is terminating and waits while the
// instance is not yet initialized or healthy.
//
//nolint:gocyclo // refactor if possible
func TaskStatus(ctx *ReconcileContext, _ logr.Logger, c client.Client) task.Task {
	return task.NameTaskFunc("Status", func() task.Result {
		var (
			healthStatus  = metav1.ConditionFalse
			healthMessage = "pd is not healthy"

			suspendStatus  = metav1.ConditionFalse
			suspendMessage = "pd is not suspended"

			needUpdate = false
		)

		if ctx.MemberID != "" {
			needUpdate = SetIfChanged(&ctx.PD.Status.ID, ctx.MemberID) || needUpdate
		}

		needUpdate = SetIfChanged(&ctx.PD.Status.IsLeader, ctx.IsLeader) || needUpdate
		needUpdate = syncInitializedCond(ctx.PD, ctx.Initialized) || needUpdate

		needUpdate = meta.SetStatusCondition(&ctx.PD.Status.Conditions, metav1.Condition{
			Type:               v1alpha1.PDCondSuspended,
			Status:             suspendStatus,
			ObservedGeneration: ctx.PD.Generation,
			Reason:             v1alpha1.PDSuspendReason,
			Message:            suspendMessage,
		}) || needUpdate

		needUpdate = SetIfChanged(&ctx.PD.Status.ObservedGeneration, ctx.PD.Generation) || needUpdate
		needUpdate = SetIfChanged(&ctx.PD.Status.UpdateRevision, ctx.PD.Labels[v1alpha1.LabelKeyInstanceRevisionHash]) || needUpdate

		// CurrentRevision only advances while the pod is running, ready and
		// reported healthy; otherwise the instance is downgraded to unhealthy.
		if ctx.Pod == nil || ctx.PodIsTerminating {
			ctx.Healthy = false
		} else if statefulset.IsPodRunningAndReady(ctx.Pod) && ctx.Healthy {
			if ctx.PD.Status.CurrentRevision != ctx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash] {
				ctx.PD.Status.CurrentRevision = ctx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash]
				needUpdate = true
			}
		} else {
			ctx.Healthy = false
		}

		if ctx.Healthy {
			healthStatus = metav1.ConditionTrue
			healthMessage = "pd is healthy"
		}
		needUpdate = meta.SetStatusCondition(&ctx.PD.Status.Conditions, metav1.Condition{
			Type:               v1alpha1.PDCondHealth,
			Status:             healthStatus,
			ObservedGeneration: ctx.PD.Generation,
			Reason:             v1alpha1.PDHealthReason,
			Message:            healthMessage,
		}) || needUpdate

		if needUpdate {
			if err := c.Status().Update(ctx, ctx.PD); err != nil {
				return task.Fail().With("cannot update status: %v", err)
			}
		}
		if ctx.PodIsTerminating {
			//nolint:mnd // refactor to use a constant
			return task.Retry(5 * time.Second).With("pod is terminating, retry after it's terminated")
		}

		if !ctx.Initialized || !ctx.Healthy {
			return task.Wait().With("pd may not be initialized or healthy, wait for next event")
		}

		return task.Complete().With("status is synced")
	})
}

// syncInitializedCond updates the Initialized condition and reports whether
// anything changed.
//
// Status of this condition can only transfer as the below
// 1. false => true
// 2. true <=> unknown
func syncInitializedCond(pd *v1alpha1.PD, initialized bool) bool {
	cond := meta.FindStatusCondition(pd.Status.Conditions, v1alpha1.PDCondInitialized)
	status := metav1.ConditionUnknown
	switch {
	case initialized:
		status = metav1.ConditionTrue
	case !initialized && (cond == nil || cond.Status == metav1.ConditionFalse):
		status = metav1.ConditionFalse
	}

	return meta.SetStatusCondition(&pd.Status.Conditions, metav1.Condition{
		Type:               v1alpha1.PDCondInitialized,
		Status:             status,
		ObservedGeneration: pd.Generation,
		Reason:             "initialized",
		Message:            "instance has joined the cluster",
	})
}

// SetIfChanged assigns src to *dst and returns true only when the value
// actually differs.
//
// TODO: move to utils
func SetIfChanged[T comparable](dst *T, src T) bool {
	if *dst != src {
		*dst = src
		return true
	}

	return false
}

// ---- patch metadata (reconstructed) ----
// diff --git a/pkg/controllers/pd/tasks/util.go b/pkg/controllers/pd/tasks/util.go
// new file mode 100644
// index 00000000000..3a358268110
// --- /dev/null
// +++ b/pkg/controllers/pd/tasks/util.go
// @@ -0,0 +1,56 @@

// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package tasks + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +func ConfigMapName(pdName string) string { + return pdName +} + +func PersistentVolumeClaimName(pdName, volName string) string { + // ref: https://github.com/pingcap/tidb-operator/blob/v1.6.0/pkg/apis/pingcap/v1alpha1/helpers.go#L92 + if volName == "" { + return "pd-" + pdName + } + return "pd-" + pdName + "-" + volName +} + +func LongestHealthPeer(pd *v1alpha1.PD, peers []*v1alpha1.PD) string { + var p string + lastTime := time.Now() + for _, peer := range peers { + if peer.Name == pd.Name { + continue + } + cond := meta.FindStatusCondition(peer.Status.Conditions, v1alpha1.PDCondHealth) + if cond == nil || cond.Status != metav1.ConditionTrue { + continue + } + if cond.LastTransitionTime.Time.Before(lastTime) { + lastTime = cond.LastTransitionTime.Time + p = peer.Name + } + } + + return p +} diff --git a/pkg/controllers/pdgroup/controller.go b/pkg/controllers/pdgroup/controller.go new file mode 100644 index 00000000000..1297fe02d4d --- /dev/null +++ b/pkg/controllers/pdgroup/controller.go @@ -0,0 +1,120 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pdgroup + +import ( + "context" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/pdgroup/tasks" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type Reconciler struct { + Logger logr.Logger + Client client.Client + PDClientManager pdm.PDClientManager +} + +func Setup(mgr manager.Manager, c client.Client, pdcm pdm.PDClientManager) error { + r := &Reconciler{ + Logger: mgr.GetLogger().WithName("PDGroup"), + Client: c, + PDClientManager: pdcm, + } + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.PDGroup{}). + Owns(&v1alpha1.PD{}). + // Only care about the generation change (i.e. spec update) + Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler(), builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}). 
+ Complete(r) +} + +func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + cluster := event.ObjectNew.(*v1alpha1.Cluster) + + var list v1alpha1.PDGroupList + if err := r.Client.List(ctx, &list, client.InNamespace(cluster.Namespace), + client.MatchingFields{"spec.cluster.name": cluster.Name}); err != nil { + if !errors.IsNotFound(err) { + r.Logger.Error(err, "cannot list all pd groups", "ns", cluster.Namespace, "cluster", cluster.Name) + } + return + } + + for i := range list.Items { + pdg := &list.Items[i] + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: pdg.Name, + Namespace: pdg.Namespace, + }, + }) + } + }, + } +} + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger.WithValues("pdgroup", req.NamespacedName) + reporter := task.NewTableTaskReporter() + + startTime := time.Now() + logger.Info("start reconcile") + defer func() { + dur := time.Since(startTime) + logger.Info("end reconcile", "duration", dur) + logger.Info("summary: \n" + reporter.Summary()) + }() + + rtx := &tasks.ReconcileContext{ + // some fields will be set in the context task + Context: ctx, + Key: req.NamespacedName, + } + + runner := task.NewTaskRunner[tasks.ReconcileContext](reporter) + runner.AddTasks( + tasks.NewTaskContext(logger, r.Client, r.PDClientManager), + tasks.NewTaskFinalizer(logger, r.Client, r.PDClientManager), + tasks.NewTaskBoot(logger, r.Client), + tasks.NewTaskService(logger, r.Client), + tasks.NewTaskUpdater(logger, r.Client), + tasks.NewTaskStatus(logger, r.Client), + ) + + return runner.Run(rtx) +} diff --git a/pkg/controllers/pdgroup/tasks/boot.go b/pkg/controllers/pdgroup/tasks/boot.go new file mode 100644 index 
00000000000..131b82e0cdb --- /dev/null +++ b/pkg/controllers/pdgroup/tasks/boot.go @@ -0,0 +1,56 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "github.com/go-logr/logr" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskBoot struct { + Logger logr.Logger + Client client.Client +} + +func NewTaskBoot(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskBoot{ + Logger: logger, + Client: c, + } +} + +func (*TaskBoot) Name() string { + return "Boot" +} + +func (t *TaskBoot) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if rtx.IsAvailable && !rtx.PDGroup.Spec.Bootstrapped { + rtx.PDGroup.Spec.Bootstrapped = true + if err := t.Client.Update(ctx, rtx.PDGroup); err != nil { + return task.Fail().With("pd cluster is available but not marked as bootstrapped: %w", err) + } + } + + if !rtx.PDGroup.Spec.Bootstrapped { + // TODO: use task.Retry? + return task.Fail().Continue().With("pd cluster is not bootstrapped") + } + + return task.Complete().With("pd cluster is bootstrapped") +} diff --git a/pkg/controllers/pdgroup/tasks/ctx.go b/pkg/controllers/pdgroup/tasks/ctx.go new file mode 100644 index 00000000000..6731e056252 --- /dev/null +++ b/pkg/controllers/pdgroup/tasks/ctx.go @@ -0,0 +1,176 @@ +// Copyright 2024 PingCAP, Inc. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tasks

import (
	"cmp"
	"context"
	"slices"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/action"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/pdapi/v1"
	pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd"
	"github.com/pingcap/tidb-operator/pkg/utils/task"
)

// ReconcileContext carries the state shared by all PDGroup tasks within a
// single reconcile: the objects fetched from the API server, the PD client,
// the observed member list, and availability/suspension flags.
type ReconcileContext struct {
	context.Context

	// Key identifies the PDGroup being reconciled.
	Key types.NamespacedName

	PDClient pdapi.PDClient
	Members  []Member

	// check whether pd is available
	IsAvailable bool

	Suspended bool

	PDGroup        *v1alpha1.PDGroup
	Peers          []*v1alpha1.PD
	Cluster        *v1alpha1.Cluster
	UpgradeChecker action.UpgradeChecker

	// Status fields
	v1alpha1.CommonStatus
}

// Member is a PD cluster member as reported by the PD API.
//
// TODO: move to pdapi
type Member struct {
	ID   string
	Name string
}

// Self lets ReconcileContext satisfy the task.Context interface.
func (ctx *ReconcileContext) Self() *ReconcileContext {
	return ctx
}

// TaskContext populates the ReconcileContext at the start of the pipeline.
type TaskContext struct {
	Logger          logr.Logger
	Client          client.Client
	PDClientManager pdm.PDClientManager
}

// NewTaskContext constructs the context task.
func NewTaskContext(logger logr.Logger, c client.Client, pdcm pdm.PDClientManager) task.Task[ReconcileContext] {
	return &TaskContext{
		Logger:          logger,
		Client:          c,
		PDClientManager: pdcm,
	}
}

func (*TaskContext) Name() string {
	return "Context"
}

// Sync fetches the PDGroup, its Cluster and PD peers, registers the PD client
// for a live group, and (when the PD cluster can be reached and its cache is
// synced) records availability and the sorted member list.
//
//nolint:gocyclo // refactor if possible
func (t *TaskContext) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	var pdg v1alpha1.PDGroup
	if err := t.Client.Get(ctx, rtx.Key, &pdg); err != nil {
		if !errors.IsNotFound(err) {
			return task.Fail().With("can't get pd group: %w", err)
		}

		return task.Complete().Break().With("pd group has been deleted")
	}
	rtx.PDGroup = &pdg

	var cluster v1alpha1.Cluster
	if err := t.Client.Get(ctx, client.ObjectKey{
		Name:      pdg.Spec.Cluster.Name,
		Namespace: pdg.Namespace,
	}, &cluster); err != nil {
		return task.Fail().With("cannot find cluster %s: %w", pdg.Spec.Cluster.Name, err)
	}
	rtx.Cluster = &cluster

	if cluster.ShouldPauseReconcile() {
		return task.Complete().Break().With("cluster reconciliation is paused")
	}

	var pdList v1alpha1.PDList
	if err := t.Client.List(ctx, &pdList, client.InNamespace(pdg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator,
		v1alpha1.LabelKeyCluster:   cluster.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD,
		v1alpha1.LabelKeyGroup:     pdg.Name,
	}); err != nil {
		return task.Fail().With("cannot list pd peers: %w", err)
	}

	// The group counts as suspended only when it has members and every member
	// carries a true Suspended condition.
	rtx.Peers = make([]*v1alpha1.PD, len(pdList.Items))
	rtx.Suspended = len(pdList.Items) > 0
	for i := range pdList.Items {
		rtx.Peers[i] = &pdList.Items[i]
		// NOTE(review): this checks PDGroupCondSuspended on PD-instance
		// conditions, while pd/tasks/status.go sets PDCondSuspended on those
		// objects — verify the two constants share the same condition type
		// string, otherwise Suspended can never become true here.
		if !meta.IsStatusConditionTrue(rtx.Peers[i].Status.Conditions, v1alpha1.PDGroupCondSuspended) {
			// PD Group is not suspended if any of its members is not suspended
			rtx.Suspended = false
		}
	}
	slices.SortFunc(rtx.Peers, func(a, b *v1alpha1.PD) int {
		return cmp.Compare(a.Name, b.Name)
	})

	if rtx.PDGroup.GetDeletionTimestamp().IsZero() && len(rtx.Peers) > 0 {
		// TODO: register pd client after it is ready
		if err := t.PDClientManager.Register(rtx.PDGroup); err != nil {
			return task.Fail().With("cannot register pd client: %v", err)
		}
	}

	if rtx.Suspended {
		return task.Complete().With("context without member info is completed, pd is suspended")
	}

	c, ok := t.PDClientManager.Get(pdm.PrimaryKey(pdg.Namespace, pdg.Spec.Cluster.Name))
	if !ok {
		return task.Complete().With("context without pd client is completed, pd cannot be visited")
	}
	rtx.PDClient = c.Underlay()

	if !c.HasSynced() {
		return task.Complete().With("context without pd client is completed, cache of pd info is not synced")
	}

	rtx.IsAvailable = true

	ms, err := c.Members().List(labels.Everything())
	if err != nil {
		return task.Fail().With("cannot list members: %w", err)
	}

	for _, m := range ms {
		rtx.Members = append(rtx.Members, Member{
			Name: m.Name,
			ID:   m.ID,
		})
	}
	// Sort for deterministic downstream diffs.
	slices.SortFunc(rtx.Members, func(a, b Member) int {
		return cmp.Compare(a.Name, b.Name)
	})

	rtx.UpgradeChecker = action.NewUpgradeChecker(t.Client, rtx.Cluster, t.Logger)
	return task.Complete().With("context is fully completed")
}

// ---- patch metadata (reconstructed) ----
// diff --git a/pkg/controllers/pdgroup/tasks/finalizer.go b/pkg/controllers/pdgroup/tasks/finalizer.go
// new file mode 100644
// index 00000000000..d1ea14ee337
// --- /dev/null
// +++ b/pkg/controllers/pdgroup/tasks/finalizer.go
// @@ -0,0 +1,94 @@

// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package tasks + +import ( + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + utilerr "k8s.io/apimachinery/pkg/util/errors" + + "github.com/pingcap/tidb-operator/pkg/client" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskFinalizer struct { + Client client.Client + Logger logr.Logger + PDClientManager pdm.PDClientManager +} + +func NewTaskFinalizer(logger logr.Logger, c client.Client, pdcm pdm.PDClientManager) task.Task[ReconcileContext] { + return &TaskFinalizer{ + Client: c, + Logger: logger, + PDClientManager: pdcm, + } +} + +func (*TaskFinalizer) Name() string { + return "Finalizer" +} + +func (t *TaskFinalizer) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if rtx.PDGroup.GetDeletionTimestamp().IsZero() { + if err := k8s.EnsureFinalizer(ctx, t.Client, rtx.PDGroup); err != nil { + return task.Fail().With("failed to ensure finalizer has been added: %w", err) + } + return task.Complete().With("finalizer is synced") + } + + errList := []error{} + for _, peer := range rtx.Peers { + if err := t.Client.Delete(ctx, peer); err != nil { + if errors.IsNotFound(err) { + continue + } + + errList = append(errList, err) + continue + } + + // PD controller cannot clean up finalizer after quorum is lost + // Forcely clean up all finalizers of pd instances + if err := k8s.RemoveFinalizer(ctx, t.Client, peer); err != nil { + errList = append(errList, err) + } + } + + if len(errList) != 0 { + return task.Fail().With("failed to delete all pd instances: %v", utilerr.NewAggregate(errList)) + } + + if len(rtx.Peers) != 0 { + return task.Fail().With("wait for all pd instances being removed, %v still exists", rtx.Peers) + } + + if err := k8s.EnsureGroupSubResourceDeleted(ctx, t.Client, + rtx.PDGroup.Namespace, rtx.PDGroup.Name); err != nil { + return task.Fail().With("cannot delete subresources: %w", err) + } 
+ + if err := k8s.RemoveFinalizer(ctx, t.Client, rtx.PDGroup); err != nil { + return task.Fail().With("failed to ensure finalizer has been removed: %w", err) + } + + t.PDClientManager.Deregister(pdm.PrimaryKey(rtx.PDGroup.Namespace, rtx.PDGroup.Spec.Cluster.Name)) + + return task.Complete().With("finalizer has been removed") +} diff --git a/pkg/controllers/pdgroup/tasks/status.go b/pkg/controllers/pdgroup/tasks/status.go new file mode 100644 index 00000000000..b22a40bf885 --- /dev/null +++ b/pkg/controllers/pdgroup/tasks/status.go @@ -0,0 +1,96 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package tasks

import (
	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/utils/task"
)

// TaskStatus aggregates the member states into the PDGroup status: the
// Suspended condition, replica counts, and revision/version bookkeeping.
type TaskStatus struct {
	Client client.Client
	Logger logr.Logger
}

// NewTaskStatus constructs the status task.
func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
	return &TaskStatus{
		Client: c,
		Logger: logger,
	}
}

func (*TaskStatus) Name() string {
	return "Status"
}

// Sync updates PDGroup.Status and fails (to requeue) while the PD cluster is
// neither available nor suspended.
func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	suspendStatus := metav1.ConditionFalse
	suspendMessage := "pd group is not suspended"
	if rtx.Suspended {
		suspendStatus = metav1.ConditionTrue
		suspendMessage = "pd group is suspended"
	} else if rtx.Cluster.ShouldSuspendCompute() {
		// Suspension requested but members are still shutting down.
		suspendMessage = "pd group is suspending"
	}
	conditionChanged := meta.SetStatusCondition(&rtx.PDGroup.Status.Conditions, metav1.Condition{
		Type:               v1alpha1.PDGroupCondSuspended,
		Status:             suspendStatus,
		ObservedGeneration: rtx.PDGroup.Generation,
		Reason:             v1alpha1.PDGroupSuspendReason,
		Message:            suspendMessage,
	})

	// Update the current revision if all instances are synced.
	// Note: conditionChanged is set unconditionally here, which forces the
	// status update below whenever the group is fully rolled out.
	if int(rtx.PDGroup.GetDesiredReplicas()) == len(rtx.Peers) && v1alpha1.AllInstancesSynced(rtx.Peers, rtx.UpdateRevision) {
		conditionChanged = true
		rtx.CurrentRevision = rtx.UpdateRevision
		rtx.PDGroup.Status.Version = rtx.PDGroup.Spec.Version
	}
	var readyReplicas int32
	for _, peer := range rtx.Peers {
		if peer.IsHealthy() {
			readyReplicas++
		}
	}

	if conditionChanged || rtx.PDGroup.Status.ReadyReplicas != readyReplicas ||
		rtx.PDGroup.Status.Replicas != int32(len(rtx.Peers)) || //nolint:gosec // expected type conversion
		!v1alpha1.IsReconciled(rtx.PDGroup) ||
		v1alpha1.StatusChanged(rtx.PDGroup, rtx.CommonStatus) {
		rtx.PDGroup.Status.ReadyReplicas = readyReplicas
		rtx.PDGroup.Status.Replicas = int32(len(rtx.Peers)) //nolint:gosec // expected type conversion
		rtx.PDGroup.Status.ObservedGeneration = rtx.PDGroup.Generation
		rtx.PDGroup.Status.CurrentRevision = rtx.CurrentRevision
		rtx.PDGroup.Status.UpdateRevision = rtx.UpdateRevision
		rtx.PDGroup.Status.CollisionCount = rtx.CollisionCount

		if err := t.Client.Status().Update(ctx, rtx.PDGroup); err != nil {
			return task.Fail().With("cannot update status: %w", err)
		}
	}

	if !rtx.IsAvailable && !rtx.Suspended {
		return task.Fail().With("pd group may not be available, requeue to retry")
	}

	return task.Complete().With("status is synced")
}

// ---- patch metadata (reconstructed) ----
// diff --git a/pkg/controllers/pdgroup/tasks/svc.go b/pkg/controllers/pdgroup/tasks/svc.go
// new file mode 100644
// index 00000000000..fafea1b2510
// --- /dev/null
// +++ b/pkg/controllers/pdgroup/tasks/svc.go
// @@ -0,0 +1,154 @@

// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskService struct { + Logger logr.Logger + Client client.Client +} + +func NewTaskService(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskService{ + Logger: logger, + Client: c, + } +} + +func (*TaskService) Name() string { + return "Service" +} + +func (t *TaskService) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if rtx.Cluster.ShouldSuspendCompute() { + return task.Complete().With("skip service for suspension") + } + + pdg := rtx.PDGroup + + headless := newHeadlessService(pdg) + if err := t.Client.Apply(ctx, headless); err != nil { + return task.Fail().With(fmt.Sprintf("can't create headless service of pd: %v", err)) + } + + svc := newInternalService(pdg) + if err := t.Client.Apply(ctx, svc); err != nil { + return task.Fail().With(fmt.Sprintf("can't create internal service of pd: %v", err)) + } + + return task.Complete().With("services of pd have been applied") +} + +func newHeadlessService(pdg *v1alpha1.PDGroup) *corev1.Service { + ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: HeadlessServiceName(pdg.Spec.Cluster.Name, pdg.Name), + Namespace: pdg.Namespace, 
+ Labels: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyCluster: pdg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: pdg.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(pdg, v1alpha1.SchemeGroupVersion.WithKind("PDGroup")), + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: corev1.ClusterIPNone, + IPFamilyPolicy: &ipFamilyPolicy, + Selector: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyCluster: pdg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: pdg.Name, + }, + Ports: []corev1.ServicePort{ + { + Name: v1alpha1.PDPortNameClient, + Port: pdg.GetClientPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.PDPortNameClient), + }, + { + Name: v1alpha1.PDPortNamePeer, + Port: pdg.GetPeerPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.PDPortNamePeer), + }, + }, + PublishNotReadyAddresses: true, + }, + } +} + +func newInternalService(pdg *v1alpha1.PDGroup) *corev1.Service { + ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", pdg.Spec.Cluster.Name, pdg.Name), + Namespace: pdg.Namespace, + Labels: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyCluster: pdg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: pdg.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(pdg, v1alpha1.SchemeGroupVersion.WithKind("PDGroup")), + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + IPFamilyPolicy: &ipFamilyPolicy, + Selector: map[string]string{ + v1alpha1.LabelKeyManagedBy: 
v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyCluster: pdg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: pdg.Name, + }, + Ports: []corev1.ServicePort{ + { + Name: v1alpha1.PDPortNameClient, + Port: pdg.GetClientPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.PDPortNameClient), + }, + { + Name: v1alpha1.PDPortNamePeer, + Port: pdg.GetPeerPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.PDPortNamePeer), + }, + }, + }, + } +} diff --git a/pkg/controllers/pdgroup/tasks/updater.go b/pkg/controllers/pdgroup/tasks/updater.go new file mode 100644 index 00000000000..233fbd015da --- /dev/null +++ b/pkg/controllers/pdgroup/tasks/updater.go @@ -0,0 +1,212 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "fmt" + "strconv" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/updater" + "github.com/pingcap/tidb-operator/pkg/updater/policy" + "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/random" + "github.com/pingcap/tidb-operator/pkg/utils/task" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" +) + +// TaskUpdater is a task to scale or update PD when spec of PDGroup is changed. +type TaskUpdater struct { + Logger logr.Logger + Client client.Client + CRCli history.Interface +} + +func NewTaskUpdater(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskUpdater{ + Logger: logger, + Client: c, + CRCli: history.NewClient(c), + } +} + +func (*TaskUpdater) Name() string { + return "Updater" +} + +func (t *TaskUpdater) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + // TODO: move to task v2 + if !rtx.PDGroup.GetDeletionTimestamp().IsZero() { + return task.Complete().With("pd group has been deleted") + } + + if rtx.Cluster.ShouldSuspendCompute() { + return task.Complete().With("skip updating PDGroup for suspension") + } + + // List all controller revisions for the PDGroup + selector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + v1alpha1.LabelKeyCluster: rtx.Cluster.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyGroup: rtx.PDGroup.Name, + }, + }) + revisions, err := t.CRCli.ListControllerRevisions(rtx.PDGroup, selector) + if err != nil { + return task.Fail().With("cannot list controller revisions: %w", err) + 
} + history.SortControllerRevisions(revisions) + + // Get the current(old) and update(new) ControllerRevisions + currentRevision, updateRevision, collisionCount, err := func() (*appsv1.ControllerRevision, *appsv1.ControllerRevision, int32, error) { + // always ignore bootstrapped field in spec + bootstrapped := rtx.PDGroup.Spec.Bootstrapped + rtx.PDGroup.Spec.Bootstrapped = false + defer func() { + rtx.PDGroup.Spec.Bootstrapped = bootstrapped + }() + return revision.GetCurrentAndUpdate(rtx.PDGroup, revisions, t.CRCli, rtx.PDGroup) + }() + if err != nil { + return task.Fail().With("cannot get revisions: %w", err) + } + rtx.CurrentRevision = currentRevision.Name + rtx.UpdateRevision = updateRevision.Name + rtx.CollisionCount = &collisionCount + + if err = revision.TruncateHistory(t.CRCli, rtx.Peers, revisions, + currentRevision, updateRevision, rtx.Cluster.Spec.RevisionHistoryLimit); err != nil { + t.Logger.Error(err, "failed to truncate history") + } + + if needVersionUpgrade(rtx.PDGroup) && !rtx.UpgradeChecker.CanUpgrade(ctx, rtx.PDGroup) { + return task.Fail().Continue().With("preconditions of upgrading the pd group %s/%s are not met", rtx.PDGroup.Namespace, rtx.PDGroup.Name) + } + + desired := 1 + if rtx.PDGroup.Spec.Replicas != nil { + desired = int(*rtx.PDGroup.Spec.Replicas) + } + + var topos []v1alpha1.ScheduleTopology + for _, p := range rtx.PDGroup.Spec.SchedulePolicies { + switch p.Type { + case v1alpha1.SchedulePolicyTypeEvenlySpread: + topos = p.EvenlySpread.Topologies + default: + // do nothing + } + } + + topoPolicy, err := policy.NewTopologyPolicy[*runtime.PD](topos) + if err != nil { + return task.Fail().With("invalid topo policy, it should be validated: %w", err) + } + + for _, pd := range rtx.Peers { + topoPolicy.Add(runtime.FromPD(pd)) + } + + wait, err := updater.New[*runtime.PD](). + WithInstances(runtime.FromPDSlice(rtx.Peers)...). + WithDesired(desired). + WithClient(t.Client). + WithMaxSurge(0). + WithMaxUnavailable(1). 
+ WithRevision(rtx.UpdateRevision). + WithNewFactory(PDNewer(rtx.PDGroup, rtx.UpdateRevision)). + WithAddHooks(topoPolicy). + WithUpdateHooks( + policy.KeepName[*runtime.PD](), + policy.KeepTopology[*runtime.PD](), + ). + WithDelHooks(topoPolicy). + WithScaleInPreferPolicy( + NotLeaderPolicy(), + topoPolicy, + ). + WithUpdatePreferPolicy( + NotLeaderPolicy(), + ). + Build(). + Do(ctx) + if err != nil { + return task.Fail().With("cannot update instances: %w", err) + } + if wait { + return task.Complete().With("wait for all instances ready") + } + return task.Complete().With("all instances are synced") +} + +func needVersionUpgrade(pdg *v1alpha1.PDGroup) bool { + return pdg.Spec.Version != pdg.Status.Version && pdg.Status.Version != "" +} + +func PDNewer(pdg *v1alpha1.PDGroup, rev string) updater.NewFactory[*runtime.PD] { + return updater.NewFunc[*runtime.PD](func() *runtime.PD { + //nolint:mnd // refactor to use a constant + name := fmt.Sprintf("%s-%s-%s", pdg.Spec.Cluster.Name, pdg.Name, random.Random(6)) + spec := pdg.Spec.Template.Spec.DeepCopy() + + var bootAnno map[string]string + if !pdg.Spec.Bootstrapped { + replicas := int64(1) + if pdg.Spec.Replicas != nil { + replicas = int64(*pdg.Spec.Replicas) + } + initialNum := strconv.FormatInt(replicas, 10) + bootAnno = map[string]string{ + v1alpha1.AnnoKeyInitialClusterNum: initialNum, + } + } + + peer := &v1alpha1.PD{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: pdg.Namespace, + Name: name, + Labels: maputil.Merge(pdg.Spec.Template.Labels, map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyCluster: pdg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: pdg.Name, + v1alpha1.LabelKeyInstanceRevisionHash: rev, + }), + Annotations: maputil.Merge(pdg.Spec.Template.Annotations, bootAnno), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(pdg, v1alpha1.SchemeGroupVersion.WithKind("PDGroup")), + 
}, + }, + Spec: v1alpha1.PDSpec{ + Cluster: pdg.Spec.Cluster, + Version: pdg.Spec.Version, + Subdomain: HeadlessServiceName(pdg.Spec.Cluster.Name, pdg.Name), + PDTemplateSpec: *spec, + }, + } + + return runtime.FromPD(peer) + }) +} diff --git a/pkg/controllers/pdgroup/tasks/util.go b/pkg/controllers/pdgroup/tasks/util.go new file mode 100644 index 00000000000..9e0cd0c29c2 --- /dev/null +++ b/pkg/controllers/pdgroup/tasks/util.go @@ -0,0 +1,39 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "fmt" + + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/updater" +) + +// TODO: fix length issue +func HeadlessServiceName(clusterName, groupName string) string { + return fmt.Sprintf("%s-%s-peer", clusterName, groupName) +} + +func NotLeaderPolicy() updater.PreferPolicy[*runtime.PD] { + return updater.PreferPolicyFunc[*runtime.PD](func(pds []*runtime.PD) []*runtime.PD { + notLeader := []*runtime.PD{} + for _, pd := range pds { + if !pd.Status.IsLeader { + notLeader = append(notLeader, pd) + } + } + return notLeader + }) +} diff --git a/pkg/controllers/tidb/builder.go b/pkg/controllers/tidb/builder.go new file mode 100644 index 00000000000..92ac3df9542 --- /dev/null +++ b/pkg/controllers/tidb/builder.go @@ -0,0 +1,60 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tidb + +import ( + "github.com/pingcap/tidb-operator/pkg/controllers/tidb/tasks" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +func (r *Reconciler) NewRunner(reporter task.TaskReporter) task.TaskRunner[tasks.ReconcileContext] { + runner := task.NewTaskRunner(reporter, + // get tidb + tasks.TaskContextTiDB(r.Client), + // if it's deleted just return + task.NewSwitchTask(tasks.CondTiDBHasBeenDeleted()), + + // get cluster info, FinalizerDel will use it + tasks.TaskContextCluster(r.Client), + task.NewSwitchTask(tasks.CondPDIsNotInitialized()), + + task.NewSwitchTask(tasks.CondTiDBIsDeleting(), + tasks.TaskFinalizerDel(r.Client), + ), + + // check whether it's paused + task.NewSwitchTask(tasks.CondClusterIsPaused()), + + // get pod and check whether the cluster is suspending + tasks.TaskContextPod(r.Client), + task.NewSwitchTask(tasks.CondClusterIsSuspending(), + tasks.TaskFinalizerAdd(r.Client), + tasks.TaskPodSuspend(r.Client), + tasks.TaskStatusSuspend(r.Client), + ), + + // normal process + tasks.TaskContextTiDBGroup(r.Client), + tasks.TaskContextInfoFromPDAndTiDB(r.Client), + tasks.TaskFinalizerAdd(r.Client), + tasks.NewTaskConfigMap(r.Logger, r.Client), + tasks.NewTaskPVC(r.Logger, r.Client, r.VolumeModifier), + tasks.NewTaskPod(r.Logger, r.Client), + tasks.NewTaskServerLabels(r.Logger, r.Client), + tasks.NewTaskStatus(r.Logger, r.Client), + ) + + return runner +} diff --git 
a/pkg/controllers/tidb/controller.go b/pkg/controllers/tidb/controller.go new file mode 100644 index 00000000000..58e6e6407a0 --- /dev/null +++ b/pkg/controllers/tidb/controller.go @@ -0,0 +1,76 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tidb + +import ( + "context" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/tidb/tasks" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/volumes" +) + +type Reconciler struct { + Logger logr.Logger + Client client.Client + VolumeModifier volumes.Modifier +} + +func Setup(mgr manager.Manager, c client.Client, vm volumes.Modifier) error { + r := &Reconciler{ + Logger: mgr.GetLogger().WithName("TiDB"), + Client: c, + VolumeModifier: vm, + } + return ctrl.NewControllerManagedBy(mgr).For(&v1alpha1.TiDB{}). + Owns(&corev1.Pod{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.PersistentVolumeClaim{}). + Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler()). + WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}). 
+ Complete(r) +} + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger.WithValues("tidb", req.NamespacedName) + reporter := task.NewTableTaskReporter() + + startTime := time.Now() + logger.Info("start reconcile") + defer func() { + dur := time.Since(startTime) + logger.Info("end reconcile", "duration", dur) + logger.Info("summary: \n" + reporter.Summary()) + }() + + rtx := &tasks.ReconcileContext{ + // some fields will be set in the context task + Context: ctx, + Key: req.NamespacedName, + } + + runner := r.NewRunner(reporter) + return runner.Run(rtx) +} diff --git a/pkg/controllers/tidb/handler.go b/pkg/controllers/tidb/handler.go new file mode 100644 index 00000000000..043d4d82390 --- /dev/null +++ b/pkg/controllers/tidb/handler.go @@ -0,0 +1,69 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package tidb + +import ( + "context" + "reflect" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" +) + +func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + oldObj := event.ObjectOld.(*v1alpha1.Cluster) + newObj := event.ObjectNew.(*v1alpha1.Cluster) + + if newObj.Status.PD != oldObj.Status.PD { + r.Logger.Info("pd url is updating", "from", oldObj.Status.PD, "to", newObj.Status.PD) + } else if !reflect.DeepEqual(oldObj.Spec.SuspendAction, newObj.Spec.SuspendAction) { + r.Logger.Info("suspend action is updating", "from", oldObj.Spec.SuspendAction, "to", newObj.Spec.SuspendAction) + } else if oldObj.Spec.Paused != newObj.Spec.Paused { + r.Logger.Info("cluster paused is updating", "from", oldObj.Spec.Paused, "to", newObj.Spec.Paused) + } else { + return + } + + var kvl v1alpha1.TiDBList + if err := r.Client.List(ctx, &kvl, client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: newObj.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + }, client.InNamespace(newObj.Namespace)); err != nil { + r.Logger.Error(err, "cannot list all tidb instances", "ns", newObj.Namespace, "cluster", newObj.Name) + return + } + + for i := range kvl.Items { + tidb := &kvl.Items[i] + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tidb.Name, + Namespace: tidb.Namespace, + }, + }) + } + }, + } +} diff --git a/pkg/controllers/tidb/tasks/cm.go 
b/pkg/controllers/tidb/tasks/cm.go new file mode 100644 index 00000000000..8512a231dc9 --- /dev/null +++ b/pkg/controllers/tidb/tasks/cm.go @@ -0,0 +1,92 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + tidbcfg "github.com/pingcap/tidb-operator/pkg/configs/tidb" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/utils/toml" +) + +type TaskConfigMap struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskConfigMap(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskConfigMap{ + Client: c, + Logger: logger, + } +} + +func (*TaskConfigMap) Name() string { + return "ConfigMap" +} + +func (t *TaskConfigMap) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + c := tidbcfg.Config{} + decoder, encoder := toml.Codec[tidbcfg.Config]() + if err := decoder.Decode([]byte(rtx.TiDB.Spec.Config), &c); err != nil { + return task.Fail().With("tidb config cannot be decoded: %w", err) + } + if err := c.Overlay(rtx.Cluster, rtx.TiDBGroup, rtx.TiDB); err != nil { + return task.Fail().With("cannot generate tidb config: %w", err) + } + 
rtx.GracefulWaitTimeInSeconds = int64(c.GracefulWaitBeforeShutdown) + + data, err := encoder.Encode(&c) + if err != nil { + return task.Fail().With("tidb config cannot be encoded: %w", err) + } + + rtx.ConfigHash, err = toml.GenerateHash(rtx.TiDB.Spec.Config) + if err != nil { + return task.Fail().With("failed to generate hash for `tidb.spec.config`: %w", err) + } + expected := newConfigMap(rtx.TiDB, data, rtx.ConfigHash) + if e := t.Client.Apply(rtx, expected); e != nil { + return task.Fail().With("can't create/update cm of tidb: %w", e) + } + return task.Complete().With("cm is synced") +} + +func newConfigMap(tidb *v1alpha1.TiDB, data []byte, hash string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(tidb.Name), + Namespace: tidb.Namespace, + Labels: maputil.Merge(tidb.Labels, map[string]string{ + v1alpha1.LabelKeyInstance: tidb.Name, + v1alpha1.LabelKeyConfigHash: hash, + }), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tidb, v1alpha1.SchemeGroupVersion.WithKind("TiDB")), + }, + }, + Data: map[string]string{ + v1alpha1.ConfigFileName: string(data), + }, + } +} diff --git a/pkg/controllers/tidb/tasks/cm_test.go b/pkg/controllers/tidb/tasks/cm_test.go new file mode 100644 index 00000000000..10ee3b1ec47 --- /dev/null +++ b/pkg/controllers/tidb/tasks/cm_test.go @@ -0,0 +1,246 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +func FakeContext(key types.NamespacedName, changes ...fake.ChangeFunc[ReconcileContext, *ReconcileContext]) *ReconcileContext { + ctx := fake.Fake(changes...) + ctx.Context = context.TODO() + ctx.Key = key + return ctx +} + +func WithTiDB(tidb *v1alpha1.TiDB) fake.ChangeFunc[ReconcileContext, *ReconcileContext] { + return func(obj *ReconcileContext) *ReconcileContext { + obj.TiDB = tidb + return obj + } +} + +func WithCluster(name string) fake.ChangeFunc[v1alpha1.TiDB, *v1alpha1.TiDB] { + return func(tidb *v1alpha1.TiDB) *v1alpha1.TiDB { + tidb.Spec.Cluster.Name = name + return tidb + } +} + +func withStatusPDURL(pdURL string) fake.ChangeFunc[v1alpha1.Cluster, *v1alpha1.Cluster] { + return func(cluster *v1alpha1.Cluster) *v1alpha1.Cluster { + cluster.Status.PD = pdURL + return cluster + } +} + +func withConfigForTiDB(config v1alpha1.ConfigFile) fake.ChangeFunc[v1alpha1.TiDB, *v1alpha1.TiDB] { + return func(tidb *v1alpha1.TiDB) *v1alpha1.TiDB { + tidb.Spec.Config = config + return tidb + } +} + +func withSubdomain(subdomain string) fake.ChangeFunc[v1alpha1.TiDB, *v1alpha1.TiDB] { + return func(tidb *v1alpha1.TiDB) *v1alpha1.TiDB { + tidb.Spec.Subdomain = subdomain + return tidb + } +} + +func TestConfigMap(t *testing.T) { + cases := []struct { + desc string + key types.NamespacedName + objs []client.Object + tidb *v1alpha1.TiDB + tidbGroup *v1alpha1.TiDBGroup + cluster *v1alpha1.Cluster + expected task.Result + expectedCM *corev1.ConfigMap + }{ + { + desc: "cm 
doesn't exist", + key: types.NamespacedName{ + Name: "test-tidb", + }, + objs: []client.Object{}, + tidb: fake.FakeObj( + "test-tidb", + WithCluster("test-cluster"), + withConfigForTiDB(v1alpha1.ConfigFile("")), + withSubdomain("subdomain"), + fake.Label[v1alpha1.TiDB]("aaa", "bbb"), + fake.UID[v1alpha1.TiDB]("test-uid"), + fake.Label[v1alpha1.TiDB](v1alpha1.LabelKeyInstanceRevisionHash, "foo"), + ), + tidbGroup: fake.FakeObj[v1alpha1.TiDBGroup]("test-tidb-group"), + cluster: fake.FakeObj("test-cluster", + withStatusPDURL("http://test-pd.default:2379"), + ), + expected: task.Complete().With(""), + expectedCM: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-tidb", + Labels: map[string]string{ + "aaa": "bbb", + v1alpha1.LabelKeyInstance: "test-tidb", + v1alpha1.LabelKeyInstanceRevisionHash: "foo", + v1alpha1.LabelKeyConfigHash: "7d6fc488b7", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "TiDB", + Name: "test-tidb", + UID: "test-uid", + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + }, + }, + }, + Data: map[string]string{ + v1alpha1.ConfigFileName: `advertise-address = 'test-tidb.subdomain.default.svc' +graceful-wait-before-shutdown = 30 +host = '::' +path = 'test-pd.default:2379' +store = 'tikv' + +[log] +slow-query-file = '/var/log/tidb/slowlog' +`, + }, + }, + }, + { + desc: "cm exists, update cm", + key: types.NamespacedName{ + Name: "test-tidb", + }, + objs: []client.Object{ + fake.FakeObj[corev1.ConfigMap]( + "test-tidb", + ), + }, + tidb: fake.FakeObj( + "test-tidb", + WithCluster("test-cluster"), + withConfigForTiDB(`zzz = 'zzz' +graceful-wait-before-shutdown = 60`), + withSubdomain("subdomain"), + fake.Label[v1alpha1.TiDB]("aaa", "bbb"), + fake.UID[v1alpha1.TiDB]("test-uid"), + fake.Label[v1alpha1.TiDB](v1alpha1.LabelKeyInstanceRevisionHash, "foo"), + ), + tidbGroup: fake.FakeObj[v1alpha1.TiDBGroup]("test-tidb-group"), + cluster: fake.FakeObj("test-cluster", 
+ withStatusPDURL("http://test-pd.default:2379"), + ), + expected: task.Complete().With(""), + expectedCM: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-tidb", + Labels: map[string]string{ + "aaa": "bbb", + v1alpha1.LabelKeyInstance: "test-tidb", + v1alpha1.LabelKeyInstanceRevisionHash: "foo", + v1alpha1.LabelKeyConfigHash: "7bd44dcc66", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "TiDB", + Name: "test-tidb", + UID: "test-uid", + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + }, + }, + }, + Data: map[string]string{ + v1alpha1.ConfigFileName: `advertise-address = 'test-tidb.subdomain.default.svc' +graceful-wait-before-shutdown = 60 +host = '::' +path = 'test-pd.default:2379' +store = 'tikv' +zzz = 'zzz' + +[log] +slow-query-file = '/var/log/tidb/slowlog' +`, + }, + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + c.tidb.OwnerReferences = []metav1.OwnerReference{ + *metav1.NewControllerRef(c.tidbGroup, v1alpha1.SchemeGroupVersion.WithKind("TiDBGroup")), + } + + // append TiDB into store + objs := c.objs + objs = append(objs, c.tidb) + + ctx := FakeContext(c.key, WithTiDB(c.tidb)) + ctx.TiDBGroup = c.tidbGroup + ctx.Cluster = c.cluster + fc := client.NewFakeClient(objs...) + tk := NewTaskConfigMap(logr.Discard(), fc) + res := tk.Sync(ctx) + + assert.Equal(tt, c.expected.Status(), res.Status(), res.Message()) + assert.Equal(tt, c.expected.RequeueAfter(), res.RequeueAfter(), res.Message()) + // Ignore message assertion + // TODO: maybe assert the message format? 
+ + if res.Status() == task.SFail { + return + } + + cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(ctx.TiDB.Name), + }, + } + err := fc.Get(ctx, client.ObjectKeyFromObject(&cm), &cm) + require.NoError(tt, err) + + // reset cm gvk and managed fields + cm.APIVersion = "" + cm.Kind = "" + cm.SetManagedFields(nil) + assert.Equal(tt, c.expectedCM, &cm) + }) + } +} diff --git a/pkg/controllers/tidb/tasks/ctx.go b/pkg/controllers/tidb/tasks/ctx.go new file mode 100644 index 00000000000..67c3022a247 --- /dev/null +++ b/pkg/controllers/tidb/tasks/ctx.go @@ -0,0 +1,204 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "context" + "crypto/tls" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/pdapi/v1" + "github.com/pingcap/tidb-operator/pkg/tidbapi/v1" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + tlsutil "github.com/pingcap/tidb-operator/pkg/utils/tls" +) + +const ( + pdRequestTimeout = 10 * time.Second + tidbRequestTimeout = 10 * time.Second +) + +type ReconcileContext struct { + context.Context + + Key types.NamespacedName + + TiDBClient tidbapi.TiDBClient + PDClient pdapi.PDClient + + Healthy bool + Suspended bool + + Cluster *v1alpha1.Cluster + TiDB *v1alpha1.TiDB + TiDBGroup *v1alpha1.TiDBGroup // the owner of the tidb instance + Pod *corev1.Pod + + GracefulWaitTimeInSeconds int64 + + // ConfigHash stores the hash of **user-specified** config (i.e.`.Spec.Config`), + // which will be used to determine whether the config has changed. + // This ensures that our config overlay logic will not restart the tidb cluster unexpectedly. 
+ ConfigHash string + + // Pod cannot be updated when call DELETE API, so we have to set this field to indicate + // the underlay pod has been deleting + PodIsTerminating bool +} + +func (ctx *ReconcileContext) Self() *ReconcileContext { + return ctx +} + +func TaskContextTiDB(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextTiDB", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + var tidb v1alpha1.TiDB + if err := c.Get(ctx, rtx.Key, &tidb); err != nil { + if !errors.IsNotFound(err) { + return task.Fail().With("can't get tidb instance: %w", err) + } + return task.Complete().With("tidb instance has been deleted") + } + rtx.TiDB = &tidb + return task.Complete().With("tidb is set") + }) +} + +func TaskContextCluster(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextCluster", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + var cluster v1alpha1.Cluster + if err := c.Get(ctx, client.ObjectKey{ + Name: rtx.TiDB.Spec.Cluster.Name, + Namespace: rtx.TiDB.Namespace, + }, &cluster); err != nil { + return task.Fail().With("cannot find cluster %s: %w", rtx.TiDB.Spec.Cluster.Name, err) + } + rtx.Cluster = &cluster + return task.Complete().With("cluster is set") + }) +} + +func TaskContextPod(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextPod", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + var pod corev1.Pod + if err := c.Get(ctx, client.ObjectKey{ + Name: rtx.TiDB.Name, + Namespace: rtx.TiDB.Namespace, + }, &pod); err != nil { + if errors.IsNotFound(err) { + return task.Complete().With("pod is not created") + } + return task.Fail().With("failed to get pod of tidb: %w", err) + } + + rtx.Pod = &pod + if !rtx.Pod.GetDeletionTimestamp().IsZero() { + rtx.PodIsTerminating = true + } + return task.Complete().With("pod is set") + }) +} + +func TaskContextInfoFromPDAndTiDB(c client.Client) 
task.Task[ReconcileContext] {
	return task.NameTaskFunc("ContextInfoFromPDAndTiDB", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		// PD must have published its URL into the cluster status before we can build clients.
		if rtx.Cluster.Status.PD == "" {
			return task.Fail().With("pd url is not initialized")
		}
		var (
			scheme    = "http"
			tlsConfig *tls.Config
		)
		if rtx.Cluster.IsTLSClusterEnabled() {
			scheme = "https"
			var err error
			tlsConfig, err = tlsutil.GetTLSConfigFromSecret(ctx, c,
				rtx.Cluster.Namespace, v1alpha1.TLSClusterClientSecretName(rtx.Cluster.Name))
			if err != nil {
				return task.Fail().With("cannot get tls config from secret: %w", err)
			}
		}
		rtx.TiDBClient = tidbapi.NewTiDBClient(TiDBServiceURL(rtx.TiDB, scheme), tidbRequestTimeout, tlsConfig)
		health, err := rtx.TiDBClient.GetHealth(ctx)
		if err != nil {
			// An unreachable tidb is not treated as a task failure: later tasks run
			// with rtx.Healthy left at its zero value (false) and no PD client set.
			return task.Complete().With(
				fmt.Sprintf("context without health info is completed, tidb can't be reached: %v", err))
		}
		rtx.Healthy = health
		rtx.PDClient = pdapi.NewPDClient(rtx.Cluster.Status.PD, pdRequestTimeout, tlsConfig)

		return task.Complete().With("get info from tidb")
	})
}

// TaskContextTiDBGroup resolves the TiDBGroup that owns the TiDB instance
// (via its first owner reference) and stores it in the reconcile context.
func TaskContextTiDBGroup(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("ContextTiDBGroup", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()

		if len(rtx.TiDB.OwnerReferences) == 0 {
			return task.Fail().With("tidb instance has no owner, this should not happen")
		}

		var tidbGroup v1alpha1.TiDBGroup
		if err := c.Get(ctx, client.ObjectKey{
			Name:      rtx.TiDB.OwnerReferences[0].Name, // only one owner now
			Namespace: rtx.TiDB.Namespace,
		}, &tidbGroup); err != nil {
			return task.Fail().With("cannot find tidb group %s: %w", rtx.TiDB.OwnerReferences[0].Name, err)
		}
		rtx.TiDBGroup = &tidbGroup
		return task.Complete().With("tidb group is set")
	})
}

// CondTiDBHasBeenDeleted reports whether the TiDB object no longer exists in the cluster.
func CondTiDBHasBeenDeleted() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return ctx.Self().TiDB == nil
	})
}

// CondTiDBIsDeleting reports whether the TiDB object is marked for deletion
// (deletion timestamp set) but still present.
func CondTiDBIsDeleting() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return !ctx.Self().TiDB.GetDeletionTimestamp().IsZero()
	})
}

// CondClusterIsPaused reports whether reconciliation of the owning cluster is paused.
func CondClusterIsPaused() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return ctx.Self().Cluster.ShouldPauseReconcile()
	})
}

// CondClusterIsSuspending reports whether the owning cluster requests compute suspension.
func CondClusterIsSuspending() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return ctx.Self().Cluster.ShouldSuspendCompute()
	})
}

// CondPDIsNotInitialized reports whether PD has not yet published its URL
// into the cluster status.
func CondPDIsNotInitialized() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return ctx.Self().Cluster.Status.PD == ""
	})
}
diff --git a/pkg/controllers/tidb/tasks/finalizer.go b/pkg/controllers/tidb/tasks/finalizer.go
new file mode 100644
index 00000000000..0ce32277254
--- /dev/null
+++ b/pkg/controllers/tidb/tasks/finalizer.go
@@ -0,0 +1,47 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package tasks + +import ( + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("FinalizerDel", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, + rtx.TiDB.Namespace, rtx.TiDB.Name); err != nil { + return task.Fail().With("cannot delete subresources: %w", err) + } + if err := k8s.RemoveFinalizer(ctx, c, rtx.TiDB); err != nil { + return task.Fail().With("cannot remove finalizer: %w", err) + } + + return task.Complete().With("finalizer is removed") + }) +} + +func TaskFinalizerAdd(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("FinalizerAdd", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + if err := k8s.EnsureFinalizer(ctx, c, rtx.TiDB); err != nil { + return task.Fail().With("failed to ensure finalizer has been added: %w", err) + } + + return task.Complete().With("finalizer is added") + }) +} diff --git a/pkg/controllers/tidb/tasks/pod.go b/pkg/controllers/tidb/tasks/pod.go new file mode 100644 index 00000000000..e9536c63698 --- /dev/null +++ b/pkg/controllers/tidb/tasks/pod.go @@ -0,0 +1,416 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package tasks

import (
	"fmt"
	"path"
	"path/filepath"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/image"
	"github.com/pingcap/tidb-operator/pkg/overlay"
	"github.com/pingcap/tidb-operator/pkg/utils/k8s"
	maputil "github.com/pingcap/tidb-operator/pkg/utils/map"
	"github.com/pingcap/tidb-operator/pkg/utils/task/v2"
)

const (
	// bufferSeconds is the extra seconds to wait for the pod to be deleted.
	bufferSeconds = 3
	// preStopSleepSeconds is the seconds to sleep before the pod is deleted.
	preStopSleepSeconds = 10

	// defaultReadinessProbeInitialDelaySeconds is the default initial delay seconds for readiness probe.
	// This is the same value as TiDB Operator v1.
	defaultReadinessProbeInitialDelaySeconds = 10
)

// TaskPodSuspend deletes the tidb pod (if it still exists) as part of
// suspending compute, and waits until the deletion finishes.
func TaskPodSuspend(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("PodSuspend", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		if rtx.Pod == nil {
			return task.Complete().With("pod has been deleted")
		}
		if err := c.Delete(rtx, rtx.Pod); err != nil {
			return task.Fail().With("can't delete pod of tidb: %w", err)
		}
		rtx.PodIsTerminating = true
		return task.Wait().With("pod is deleting")
	})
}

// TaskPod reconciles the tidb pod: it creates the pod when missing, recreates
// it when the spec or (with RollingUpdate strategy) the config hash changed,
// and patches it in place when only in-place-updatable fields differ.
type TaskPod struct {
	Client client.Client
	Logger logr.Logger
}

// NewTaskPod returns the pod reconcile task.
func NewTaskPod(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
	return &TaskPod{
		Client: c,
		Logger: logger,
	}
}

func (*TaskPod) Name() string {
	return "Pod"
}

func (t *TaskPod) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	expected := t.newPod(rtx.Cluster, rtx.TiDBGroup, rtx.TiDB, rtx.GracefulWaitTimeInSeconds, rtx.ConfigHash)
	if rtx.Pod == nil {
		if err := t.Client.Apply(rtx, expected); err != nil {
			return task.Fail().With("can't create pod of tidb: %w", err)
		}

		rtx.Pod = expected
		return task.Complete().With("pod is created")
	}

	// Compare the live pod with the desired one; the config hash label decides
	// whether a config change has to be rolled out.
	res := k8s.ComparePods(rtx.Pod, expected)
	curHash, expectHash := rtx.Pod.Labels[v1alpha1.LabelKeyConfigHash], expected.Labels[v1alpha1.LabelKeyConfigHash]
	configChanged := curHash != expectHash
	t.Logger.Info("compare pod", "result", res, "configChanged", configChanged, "currentConfigHash", curHash, "expectConfigHash", expectHash)

	if res == k8s.CompareResultRecreate || (configChanged &&
		rtx.TiDBGroup.Spec.ConfigUpdateStrategy == v1alpha1.ConfigUpdateStrategyRollingUpdate) {
		t.Logger.Info("will recreate the pod")
		if err := t.Client.Delete(rtx, rtx.Pod); err != nil {
			return task.Fail().With("can't delete pod of tidb: %w", err)
		}

		rtx.PodIsTerminating = true
		// NOTE(review): TaskPodSuspend returns Wait() after a delete but this
		// returns Complete() — presumably the next reconcile recreates the pod;
		// confirm this asymmetry is intended.
		return task.Complete().With("pod is deleting")
	} else if res == k8s.CompareResultUpdate {
		t.Logger.Info("will update the pod in place")
		if err := t.Client.Apply(rtx, expected); err != nil {
			return task.Fail().With("can't apply pod of tidb: %w", err)
		}

		rtx.Pod = expected
	}

	return task.Complete().With("pod is synced")
}

// newPod builds the desired tidb pod: config/data/TLS/bootstrap-SQL/auth-token
// volumes, the tidb-server container with readiness probe and pre-stop sleep,
// an optional slow-log sidecar, and the user-supplied overlay applied last.
func (*TaskPod) newPod(cluster *v1alpha1.Cluster, dbg *v1alpha1.TiDBGroup,
	tidb *v1alpha1.TiDB, gracePeriod int64, configHash string) *corev1.Pod {
	vols := []corev1.Volume{
		{
			Name: v1alpha1.VolumeNameConfig,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: ConfigMapName(tidb.Name),
					},
				},
			},
		},
	}

	mounts := []corev1.VolumeMount{
		{
			Name:      v1alpha1.VolumeNameConfig,
			MountPath: v1alpha1.DirNameConfigTiDB,
		},
	}

	// One PVC-backed volume per entry in spec.volumes.
	for i := range tidb.Spec.Volumes {
		vol := &tidb.Spec.Volumes[i]
		name := genVolumeNameFromVolume(vol)
		vols = append(vols, corev1.Volume{
			Name: name,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: PersistentVolumeClaimName(tidb.Name, vol.Name),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      name,
			MountPath: vol.Path,
		})
	}

	// TLS for MySQL clients.
	if dbg.IsTLSClientEnabled() {
		vols = append(vols, corev1.Volume{
			Name: v1alpha1.TiDBServerTLSVolumeName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: dbg.TiDBServerTLSSecretName(),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      v1alpha1.TiDBServerTLSVolumeName,
			MountPath: v1alpha1.TiDBServerTLSMountPath,
			ReadOnly:  true,
		})
	}

	// TLS for intra-cluster communication.
	if cluster.IsTLSClusterEnabled() {
		vols = append(vols, corev1.Volume{
			Name: v1alpha1.TiDBClusterTLSVolumeName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: cluster.TLSClusterSecretName(dbg.Name),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      v1alpha1.TiDBClusterTLSVolumeName,
			MountPath: v1alpha1.TiDBClusterTLSMountPath,
			ReadOnly:  true,
		})
	}

	// Optional bootstrap SQL, projected from a user-provided ConfigMap key.
	if dbg.IsBootstrapSQLEnabled() {
		vols = append(vols, corev1.Volume{
			Name: v1alpha1.BootstrapSQLVolumeName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: *dbg.Spec.BootstrapSQLConfigMapName,
					},
					Items: []corev1.KeyToPath{
						{
							Key:  v1alpha1.BootstrapSQLConfigMapKey,
							Path: v1alpha1.BootstrapSQLFileName,
						},
					},
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      v1alpha1.BootstrapSQLVolumeName,
			MountPath: v1alpha1.BootstrapSQLFilePath,
			ReadOnly:  true,
		})
	}

	// Optional JWKS secret for token-based auth.
	if dbg.IsTokenBasedAuthEnabled() {
		vols = append(vols, corev1.Volume{
			Name: v1alpha1.TiDBAuthTokenVolumeName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: dbg.TiDBAuthTokenJWKSSecretName(),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      v1alpha1.TiDBAuthTokenVolumeName,
			MountPath: v1alpha1.TiDBAuthTokenPath,
			ReadOnly:  true,
		})
	}

	var slowLogContainer *corev1.Container
	if tidb.IsSeperateSlowLogEnabled() {
		vol, mount := buildSlowLogVolumeAndMount(tidb)
		if vol != nil {
			vols = append(vols, *vol)
		}
		if mount != nil {
			mounts = append(mounts, *mount)
		}
		slowLogContainer = buildSlowLogContainer(tidb, mount)
	}

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: tidb.Namespace,
			Name:      tidb.Name,
			Labels: maputil.Merge(tidb.Labels, map[string]string{
				v1alpha1.LabelKeyInstance:   tidb.Name,
				v1alpha1.LabelKeyConfigHash: configHash,
			}),
			Annotations: maputil.Copy(tidb.GetAnnotations()),
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(tidb, v1alpha1.SchemeGroupVersion.WithKind("TiDB")),
			},
		},
		Spec: corev1.PodSpec{
			Hostname:     tidb.Name,
			Subdomain:    tidb.Spec.Subdomain,
			NodeSelector: tidb.Spec.Topology,
			Containers: []corev1.Container{
				{
					Name:            v1alpha1.ContainerNameTiDB,
					Image:           image.TiDB.Image(tidb.Spec.Image, tidb.Spec.Version),
					ImagePullPolicy: corev1.PullIfNotPresent,
					Command: []string{
						"/tidb-server",
						"--config",
						filepath.Join(v1alpha1.DirNameConfigTiDB, v1alpha1.ConfigFileName),
					},
					Ports: []corev1.ContainerPort{
						{
							Name:          v1alpha1.TiDBPortNameClient,
							ContainerPort: tidb.GetClientPort(),
						},
						{
							Name:          v1alpha1.TiDBPortNameStatus,
							ContainerPort: tidb.GetStatusPort(),
						},
					},
					VolumeMounts: mounts,
					Resources:    k8s.GetResourceRequirements(tidb.Spec.Resources),
					Lifecycle: &corev1.Lifecycle{
						// Sleep before SIGTERM so in-flight connections can drain.
						PreStop: &corev1.LifecycleHandler{
							Sleep: &corev1.SleepAction{
								Seconds: preStopSleepSeconds,
							},
						},
					},
					ReadinessProbe: &corev1.Probe{
						ProbeHandler:        buildTiDBReadinessProbHandler(cluster, tidb, tidb.GetClientPort(), tidb.GetStatusPort()),
						InitialDelaySeconds: defaultReadinessProbeInitialDelaySeconds,
					},
				},
			},
			Volumes: vols,
			// Grace period covers the caller-requested wait plus the pre-stop
			// sleep plus a small safety buffer.
			TerminationGracePeriodSeconds: ptr.To(gracePeriod + preStopSleepSeconds + bufferSeconds),
		},
	}

	if slowLogContainer != nil {
		pod.Spec.InitContainers = append(pod.Spec.InitContainers, *slowLogContainer)
	}

	// The user overlay is applied last so it can override anything above.
	if tidb.Spec.Overlay != nil {
		overlay.OverlayPod(pod, tidb.Spec.Overlay.Pod)
	}

	k8s.CalculateHashAndSetLabels(pod)
	return pod
}

// genVolumeNameFromVolume derives the in-pod volume name for a spec.volumes entry.
func genVolumeNameFromVolume(vol *v1alpha1.Volume) string {
	name := v1alpha1.NamePrefix + "tidb"
	if vol.Name != "" {
		name = name + "-" + vol.Name
	}
	return name
}

// buildTiDBReadinessProbHandler returns either a TCP probe on the client port
// (the default) or a curl-based command probe against the status endpoint.
func buildTiDBReadinessProbHandler(cluster *v1alpha1.Cluster, tidb *v1alpha1.TiDB, clientPort, statusPort int32) corev1.ProbeHandler {
	probeType := v1alpha1.TCPProbeType // default to TCP probe
	if tidb.Spec.Probes.Readiness != nil && tidb.Spec.Probes.Readiness.Type != nil {
		probeType = *tidb.Spec.Probes.Readiness.Type
	}

	if probeType == v1alpha1.CommandProbeType {
		return corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				Command: buildTiDBProbeCommand(cluster, statusPort),
			},
		}
	}

	return corev1.ProbeHandler{
		TCPSocket: &corev1.TCPSocketAction{
			Port: intstr.FromInt(int(clientPort)),
		},
	}
}

// buildTiDBProbeCommand builds a curl command against the /status endpoint,
// adding client certificates when cluster TLS is on.
func buildTiDBProbeCommand(cluster *v1alpha1.Cluster, statusPort int32) (command []string) {
	scheme := "http"
	if cluster.IsTLSClusterEnabled() {
		scheme = "https"
	}
	host := "127.0.0.1"

	readinessURL := fmt.Sprintf("%s://%s:%d/status", scheme, host, statusPort)
	command = append(command, "curl", readinessURL,
		// Fail silently (no output at all) on server errors
		// without this if the server return 500, the exist code will be 0
		// and probe is success.
		"--fail",
		// follow 301 or 302 redirect
		"--location")

	if cluster.IsTLSClusterEnabled() {
		cacert := path.Join(v1alpha1.TiDBClusterTLSMountPath, corev1.ServiceAccountRootCAKey)
		cert := path.Join(v1alpha1.TiDBClusterTLSMountPath, corev1.TLSCertKey)
		key := path.Join(v1alpha1.TiDBClusterTLSMountPath, corev1.TLSPrivateKeyKey)
		command = append(command, "--cacert", cacert, "--cert", cert, "--key", key)
	}
	return
}

// buildSlowLogVolumeAndMount returns an EmptyDir volume/mount for the slow log
// when no custom volume is configured; otherwise it returns nil, nil because
// the custom volume is already declared in spec.volumes.
func buildSlowLogVolumeAndMount(tidb *v1alpha1.TiDB) (*corev1.Volume, *corev1.VolumeMount) {
	if tidb.Spec.SlowLog == nil || tidb.Spec.SlowLog.VolumeName == "" {
		return &corev1.Volume{
				Name: v1alpha1.TiDBDefaultSlowLogVolumeName,
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{},
				},
			}, &corev1.VolumeMount{
				Name:      v1alpha1.TiDBDefaultSlowLogVolumeName,
				MountPath: v1alpha1.TiDBDefaultSlowLogDir,
			}
	}

	// if using a custom volume, the volume and mount should already be defined
	return nil, nil
}

// buildSlowLogContainer builds the sidecar that tails the slow-log file.
// When mount is nil, it resolves the mount from the custom volume named in
// spec.slowLog.volumeName.
func buildSlowLogContainer(tidb *v1alpha1.TiDB, mount *corev1.VolumeMount) *corev1.Container {
	if mount == nil {
		// no temporary volume for slow log, find the volume defined in the spec
		for i := range tidb.Spec.Volumes {
			vol := &tidb.Spec.Volumes[i]
			if vol.Name == tidb.Spec.SlowLog.VolumeName {
				mount = &corev1.VolumeMount{
					Name:      genVolumeNameFromVolume(vol),
					MountPath: vol.Path,
				}
				break // should always find the volume
			}
		}
	}

	// NOTE(review): if the named volume is absent from spec.volumes, mount stays
	// nil and the dereference below panics — presumably webhook validation
	// guarantees the volume exists; confirm.
	slowlogFile := path.Join(mount.MountPath, v1alpha1.TiDBSlowLogFileName)
	img := v1alpha1.DefaultHelperImage
	if tidb.Spec.SlowLog != nil && tidb.Spec.SlowLog.Image != nil && *tidb.Spec.SlowLog.Image != "" {
		img = *tidb.Spec.SlowLog.Image
	}
	restartPolicy := corev1.ContainerRestartPolicyAlways // sidecar container in `initContainers`
	c := &corev1.Container{
		Name:          v1alpha1.TiDBSlowLogContainerName,
		Image:         img,
		RestartPolicy: &restartPolicy,
		VolumeMounts:  []corev1.VolumeMount{*mount},
		Command: []string{
			"sh",
			"-c",
			fmt.Sprintf("touch %s; tail -n0 -F %s;", slowlogFile, slowlogFile),
		},
	}
	if tidb.Spec.SlowLog != nil {
		c.Resources = k8s.GetResourceRequirements(tidb.Spec.SlowLog.Resources)
	}
	return c
}
diff --git a/pkg/controllers/tidb/tasks/pvc.go b/pkg/controllers/tidb/tasks/pvc.go
new file mode 100644
index 00000000000..a5468452018
--- /dev/null
+++ b/pkg/controllers/tidb/tasks/pvc.go
@@ -0,0 +1,91 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks

import (
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	maputil "github.com/pingcap/tidb-operator/pkg/utils/map"
	"github.com/pingcap/tidb-operator/pkg/utils/task/v2"
	"github.com/pingcap/tidb-operator/pkg/volumes"
)

// TaskPVC reconciles the PersistentVolumeClaims declared in the TiDB
// instance's spec.volumes, delegating creation/modification to volumes.SyncPVCs.
type TaskPVC struct {
	Client         client.Client
	Logger         logr.Logger
	VolumeModifier volumes.Modifier
}

// NewTaskPVC returns the PVC reconcile task.
func NewTaskPVC(logger logr.Logger, c client.Client, vm volumes.Modifier) task.Task[ReconcileContext] {
	return &TaskPVC{
		Client:         c,
		Logger:         logger,
		VolumeModifier: vm,
	}
}

func (*TaskPVC) Name() string {
	return "PVC"
}

func (t *TaskPVC) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	pvcs := newPVCs(rtx.TiDB)
	if wait, err := volumes.SyncPVCs(rtx, t.Client, pvcs, t.VolumeModifier, t.Logger); err != nil {
		return task.Fail().With("failed to sync pvcs: %w", err)
	} else if wait {
		return task.Complete().With("waiting for pvcs to be synced")
	}

	return task.Complete().With("pvcs are synced")
}

// newPVCs builds one desired ReadWriteOnce PVC per spec.volumes entry, owned
// by the TiDB instance so they are garbage-collected with it.
func newPVCs(tidb *v1alpha1.TiDB) []*corev1.PersistentVolumeClaim {
	pvcs := make([]*corev1.PersistentVolumeClaim, 0, len(tidb.Spec.Volumes))
	for i := range tidb.Spec.Volumes {
		vol := &tidb.Spec.Volumes[i]
		pvcs = append(pvcs, &corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{
				Name:      PersistentVolumeClaimName(tidb.Name, vol.Name),
				Namespace: tidb.Namespace,
				Labels: maputil.Merge(tidb.Labels, map[string]string{
					v1alpha1.LabelKeyInstance: tidb.Name,
				}),
				OwnerReferences: []metav1.OwnerReference{
					*metav1.NewControllerRef(tidb, v1alpha1.SchemeGroupVersion.WithKind("TiDB")),
				},
			},
			Spec: corev1.PersistentVolumeClaimSpec{
				AccessModes: []corev1.PersistentVolumeAccessMode{
					corev1.ReadWriteOnce,
				},
				Resources: corev1.VolumeResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceStorage: vol.Storage,
					},
				},
				StorageClassName:          vol.StorageClassName,
				VolumeAttributesClassName: vol.VolumeAttributesClassName,
			},
		})
	}

	return pvcs
}
diff --git a/pkg/controllers/tidb/tasks/server_labels.go b/pkg/controllers/tidb/tasks/server_labels.go
new file mode 100644
index 00000000000..073e82596cb
--- /dev/null
+++ b/pkg/controllers/tidb/tasks/server_labels.go
@@ -0,0 +1,98 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks

import (
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"

	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/utils/k8s"
	"github.com/pingcap/tidb-operator/pkg/utils/task/v2"
)

var (
	// node labels that can be used as tidb DC label Name
	topologyZoneLabels = []string{"zone", corev1.LabelTopologyZone}

	// tidb DC label Name
	tidbDCLabel = "zone"
)

// TaskServerLabels propagates the scheduled node's topology labels (as listed
// in PD's replication.location-labels) to the tidb server via its labels API.
type TaskServerLabels struct {
	Client client.Client
	Logger logr.Logger
}

// NewTaskServerLabels returns the server-labels sync task.
func NewTaskServerLabels(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
	return &TaskServerLabels{
		Client: c,
		Logger: logger,
	}
}

func (*TaskServerLabels) Name() string {
	return "ServerLabels"
}

func (t *TaskServerLabels) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	// Only meaningful for a healthy, running pod; otherwise silently skip.
	if !rtx.Healthy || rtx.Pod == nil || rtx.PodIsTerminating {
		return task.Complete().With("skip sync server labels as the instance is not healthy")
	}

	nodeName := rtx.Pod.Spec.NodeName
	if nodeName == "" {
		return task.Fail().With("pod %s/%s has not been scheduled", rtx.TiDB.Namespace, rtx.TiDB.Name)
	}
	var node corev1.Node
	if err := t.Client.Get(ctx, client.ObjectKey{Name: nodeName}, &node); err != nil {
		return task.Fail().With("failed to get node %s: %s", nodeName, err)
	}

	// TODO: too many API calls to PD?
	pdCfg, err := rtx.PDClient.GetConfig(ctx)
	if err != nil {
		return task.Fail().With("failed to get pd config: %s", err)
	}

	// Pick the first candidate zone label that PD itself uses as a location label.
	var zoneLabel string
outer:
	for _, zl := range topologyZoneLabels {
		for _, ll := range pdCfg.Replication.LocationLabels {
			if ll == zl {
				zoneLabel = zl
				break outer
			}
		}
	}
	if zoneLabel == "" {
		return task.Complete().With("zone labels not found in pd location-label, skip sync server labels")
	}

	serverLabels := k8s.GetNodeLabelsForKeys(&node, pdCfg.Replication.LocationLabels)
	if len(serverLabels) == 0 {
		return task.Complete().With("no server labels from node %s to sync", nodeName)
	}
	serverLabels[tidbDCLabel] = serverLabels[zoneLabel]

	// TODO: is there any way to avoid unnecessary update?
	if err := rtx.TiDBClient.SetServerLabels(ctx, serverLabels); err != nil {
		return task.Fail().With("failed to set server labels: %s", err)
	}

	return task.Complete().With("server labels synced")
}
diff --git a/pkg/controllers/tidb/tasks/status.go b/pkg/controllers/tidb/tasks/status.go
new file mode 100644
index 00000000000..a6c8ba75481
--- /dev/null
+++ b/pkg/controllers/tidb/tasks/status.go
@@ -0,0 +1,158 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks

import (
	"time"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/utils/task/v2"
	"github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/statefulset"
)

const (
	defaultTaskWaitDuration = 5 * time.Second
)

// TaskStatusSuspend updates the TiDB status while the instance is being
// suspended: Suspended becomes True only once the pod is gone, and Health is
// forced to False.
func TaskStatusSuspend(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("StatusSuspend", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		rtx.TiDB.Status.ObservedGeneration = rtx.TiDB.Generation

		var (
			suspendStatus  = metav1.ConditionFalse
			suspendMessage = "tidb is suspending"

			// when suspending, the health status should be false
			healthStatus  = metav1.ConditionFalse
			healthMessage = "tidb is not healthy"
		)

		if rtx.Pod == nil {
			suspendStatus = metav1.ConditionTrue
			suspendMessage = "tidb is suspended"
		}
		needUpdate := meta.SetStatusCondition(&rtx.TiDB.Status.Conditions, metav1.Condition{
			Type:               v1alpha1.TiDBCondSuspended,
			Status:             suspendStatus,
			ObservedGeneration: rtx.TiDB.Generation,
			// TODO: use different reason for suspending and suspended
			Reason:  v1alpha1.TiDBSuspendReason,
			Message: suspendMessage,
		})

		// SetStatusCondition is called on the left of || so it always runs.
		needUpdate = meta.SetStatusCondition(&rtx.TiDB.Status.Conditions, metav1.Condition{
			Type:               v1alpha1.TiDBCondHealth,
			Status:             healthStatus,
			ObservedGeneration: rtx.TiDB.Generation,
			Reason:             v1alpha1.TiDBHealthReason,
			Message:            healthMessage,
		}) || needUpdate

		if needUpdate {
			if err := c.Status().Update(ctx, rtx.TiDB); err != nil {
				return task.Fail().With("cannot update status: %w", err)
			}
		}

		// NOTE(review): message reads awkwardly ("status is suspend tidb is
		// updated") — likely meant "suspend status of tidb is updated".
		return task.Complete().With("status is suspend tidb is updated")
	})
}

// TaskStatus reconciles the TiDB status in the normal (non-suspend) path:
// revision bookkeeping, Health/Suspended conditions, and a retry when the
// instance is not yet healthy or up to date.
type TaskStatus struct {
	Client client.Client
	Logger logr.Logger
}

// NewTaskStatus returns the status reconcile task.
func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
	return &TaskStatus{
		Client: c,
		Logger: logger,
	}
}

func (*TaskStatus) Name() string {
	return "Status"
}

func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	var (
		healthStatus  = metav1.ConditionFalse
		healthMessage = "tidb is not healthy"

		suspendStatus  = metav1.ConditionFalse
		suspendMessage = "tidb is not suspended"

		needUpdate = false
	)

	conditionChanged := meta.SetStatusCondition(&rtx.TiDB.Status.Conditions, metav1.Condition{
		Type:               v1alpha1.TiDBCondSuspended,
		Status:             suspendStatus,
		ObservedGeneration: rtx.TiDB.Generation,
		Reason:             v1alpha1.TiDBSuspendReason,
		Message:            suspendMessage,
	})

	// Track generation / update-revision drift.
	if !v1alpha1.IsReconciled(rtx.TiDB) || rtx.TiDB.Status.UpdateRevision != rtx.TiDB.Labels[v1alpha1.LabelKeyInstanceRevisionHash] {
		rtx.TiDB.Status.ObservedGeneration = rtx.TiDB.Generation
		rtx.TiDB.Status.UpdateRevision = rtx.TiDB.Labels[v1alpha1.LabelKeyInstanceRevisionHash]
		needUpdate = true
	}

	// The pod must be running & ready AND tidb must have reported healthy
	// (rtx.Healthy set by the context task) for CurrentRevision to advance.
	if rtx.Pod == nil || rtx.PodIsTerminating {
		rtx.Healthy = false
	} else if statefulset.IsPodRunningAndReady(rtx.Pod) && rtx.Healthy {
		if rtx.TiDB.Status.CurrentRevision != rtx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash] {
			rtx.TiDB.Status.CurrentRevision = rtx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash]
			needUpdate = true
		}
	} else {
		rtx.Healthy = false
	}

	if rtx.Healthy {
		healthStatus = metav1.ConditionTrue
		healthMessage = "tidb is healthy"
	}
	updateCond := metav1.Condition{
		Type:               v1alpha1.TiDBCondHealth,
		Status:             healthStatus,
		ObservedGeneration: rtx.TiDB.Generation,
		Reason:             v1alpha1.TiDBHealthReason,
		Message:            healthMessage,
	}
	conditionChanged = meta.SetStatusCondition(&rtx.TiDB.Status.Conditions, updateCond) || conditionChanged

	if needUpdate || conditionChanged {
		if err := t.Client.Status().Update(ctx, rtx.TiDB); err != nil {
			return task.Fail().With("cannot update status: %w", err)
		}
	}

	if !rtx.Healthy || !v1alpha1.IsUpToDate(rtx.TiDB) {
		// can we only rely on Pod status events to trigger the retry?
		return task.Retry(defaultTaskWaitDuration).With("tidb may not be healthy, requeue to retry")
	}

	return task.Complete().With("status is synced")
}
diff --git a/pkg/controllers/tidb/tasks/util.go b/pkg/controllers/tidb/tasks/util.go
new file mode 100644
index 00000000000..90a536eb4a0
--- /dev/null
+++ b/pkg/controllers/tidb/tasks/util.go
@@ -0,0 +1,38 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tasks

import (
	"fmt"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
)

// ConfigMapName returns the name of the ConfigMap for a tidb instance
// (currently the instance name itself).
func ConfigMapName(tidbName string) string {
	return tidbName
}

// PersistentVolumeClaimName returns the PVC name for a tidb volume; an empty
// volName yields the legacy "tidb-<name>" form.
func PersistentVolumeClaimName(tidbName, volName string) string {
	// ref: https://github.com/pingcap/tidb-operator/blob/v1.6.0/pkg/apis/pingcap/v1alpha1/helpers.go#L92
	if volName == "" {
		return "tidb-" + tidbName
	}
	return "tidb-" + tidbName + "-" + volName
}

// TiDBServiceURL returns the service URL of a tidb member.
func TiDBServiceURL(tidb *v1alpha1.TiDB, scheme string) string {
	return fmt.Sprintf("%s://%s.%s.%s.svc:%d", scheme, tidb.Name, tidb.Spec.Subdomain, tidb.Namespace, tidb.GetStatusPort())
}
diff --git a/pkg/controllers/tidbgroup/controller.go b/pkg/controllers/tidbgroup/controller.go
new file mode 100644
index 00000000000..16b52f2852b
--- /dev/null
+++ b/pkg/controllers/tidbgroup/controller.go
@@ -0,0 +1,116 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package tidbgroup + +import ( + "context" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/tidbgroup/tasks" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type Reconciler struct { + Logger logr.Logger + Client client.Client +} + +func Setup(mgr manager.Manager, c client.Client) error { + r := &Reconciler{ + Logger: mgr.GetLogger().WithName("TiDBGroup"), + Client: c, + } + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.TiDBGroup{}). + Owns(&v1alpha1.TiDB{}). + // Only care about the generation change (i.e. spec update) + Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler(), builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}). 
+ Complete(r) +} + +func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + cluster := event.ObjectNew.(*v1alpha1.Cluster) + + var list v1alpha1.TiDBGroupList + if err := r.Client.List(ctx, &list, client.InNamespace(cluster.Namespace), + client.MatchingFields{"spec.cluster.name": cluster.Name}); err != nil { + if !errors.IsNotFound(err) { + r.Logger.Error(err, "cannot list all tidb groups", "ns", cluster.Namespace, "cluster", cluster.Name) + } + return + } + + for i := range list.Items { + dbg := &list.Items[i] + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: dbg.Name, + Namespace: dbg.Namespace, + }, + }) + } + }, + } +} + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger.WithValues("tidbgroup", req.NamespacedName) + reporter := task.NewTableTaskReporter() + + startTime := time.Now() + logger.Info("start reconcile") + defer func() { + dur := time.Since(startTime) + logger.Info("end reconcile", "duration", dur) + logger.Info("summay: \n" + reporter.Summary()) + }() + + rtx := &tasks.ReconcileContext{ + // some fields will be set in the context task + Context: ctx, + Key: req.NamespacedName, + } + + runner := task.NewTaskRunner[tasks.ReconcileContext](reporter) + runner.AddTasks( + tasks.NewTaskContext(logger, r.Client), + tasks.NewTaskFinalizer(logger, r.Client), + tasks.NewTaskService(logger, r.Client), + tasks.NewTaskUpdater(logger, r.Client), + tasks.NewTaskStatus(logger, r.Client), + ) + + return runner.Run(rtx) +} diff --git a/pkg/controllers/tidbgroup/tasks/ctx.go b/pkg/controllers/tidbgroup/tasks/ctx.go new file mode 100644 index 00000000000..a6a8c086c2f --- /dev/null +++ 
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tasks

import (
	"cmp"
	"context"
	"slices"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/types"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/action"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/tidbapi/v1"
	"github.com/pingcap/tidb-operator/pkg/utils/task"
)

// ReconcileContext carries the state shared by every task in one reconcile
// round of a TiDBGroup. The context task (TaskContext) populates it; later
// tasks read from and write status fields back into it.
type ReconcileContext struct {
	context.Context

	// Key is the namespaced name of the TiDBGroup being reconciled.
	Key types.NamespacedName

	TiDBClient tidbapi.TiDBClient

	// IsAvailable is true when at least one member TiDB reports the
	// Health condition as true.
	IsAvailable bool
	// Suspended is true only when the group has at least one member and
	// every member reports the Suspended condition as true.
	Suspended bool

	TiDBGroup *v1alpha1.TiDBGroup
	TiDBs     []*v1alpha1.TiDB
	Cluster   *v1alpha1.Cluster

	UpgradeChecker action.UpgradeChecker

	// Status fields
	v1alpha1.CommonStatus
}

// Self returns the context itself; it satisfies the task framework's
// Context[ReconcileContext] interface.
func (ctx *ReconcileContext) Self() *ReconcileContext {
	return ctx
}

// TaskContext is the first task of the chain: it loads the TiDBGroup, its
// Cluster, and all member TiDB instances into the ReconcileContext.
type TaskContext struct {
	Logger logr.Logger
	Client client.Client
}

// NewTaskContext constructs the context-loading task.
func NewTaskContext(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
	return &TaskContext{
		Logger: logger,
		Client: c,
	}
}

func (*TaskContext) Name() string {
	return "Context"
}

// Sync fetches the TiDBGroup and its Cluster, short-circuits if either is
// gone or reconciliation is paused, then lists and sorts the member TiDBs
// and derives the aggregate IsAvailable/Suspended flags.
func (t *TaskContext) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	var tidbg v1alpha1.TiDBGroup
	if err := t.Client.Get(ctx, rtx.Key, &tidbg); err != nil {
		if !errors.IsNotFound(err) {
			return task.Fail().With("can't get tidb group: %w", err)
		}

		// Not found: nothing to do, stop the task chain.
		return task.Complete().Break().With("tidb group has been deleted")
	}
	rtx.TiDBGroup = &tidbg

	var cluster v1alpha1.Cluster
	if err := t.Client.Get(ctx, client.ObjectKey{
		Name:      tidbg.Spec.Cluster.Name,
		Namespace: tidbg.Namespace,
	}, &cluster); err != nil {
		return task.Fail().With("cannot find cluster %s: %w", tidbg.Spec.Cluster.Name, err)
	}
	rtx.Cluster = &cluster

	if cluster.ShouldPauseReconcile() {
		return task.Complete().Break().With("cluster reconciliation is paused")
	}

	// Members are matched by the operator-managed labels, not owner refs.
	var tidbList v1alpha1.TiDBList
	if err := t.Client.List(ctx, &tidbList, client.InNamespace(tidbg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator,
		v1alpha1.LabelKeyCluster:   cluster.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB,
		v1alpha1.LabelKeyGroup:     tidbg.Name,
	}); err != nil {
		return task.Fail().With("cannot list tidb instances: %w", err)
	}

	rtx.TiDBs = make([]*v1alpha1.TiDB, len(tidbList.Items))
	// Start from "suspended" when the group has members; any member that is
	// not suspended flips it back to false in the loop below.
	rtx.Suspended = len(tidbList.Items) > 0
	for i := range tidbList.Items {
		rtx.TiDBs[i] = &tidbList.Items[i]
		if meta.IsStatusConditionTrue(tidbList.Items[i].Status.Conditions, v1alpha1.TiDBCondHealth) {
			// TiDB Group is available if any of its members is available
			rtx.IsAvailable = true
		}
		if !meta.IsStatusConditionTrue(tidbList.Items[i].Status.Conditions, v1alpha1.TiDBCondSuspended) {
			// TiDB Group is not suspended if any of its members is not suspended
			rtx.Suspended = false
		}
	}

	// Sort by name so later tasks process members deterministically.
	slices.SortFunc(rtx.TiDBs, func(a, b *v1alpha1.TiDB) int {
		return cmp.Compare(a.Name, b.Name)
	})

	rtx.UpgradeChecker = action.NewUpgradeChecker(t.Client, rtx.Cluster, t.Logger)

	return task.Complete().With("new context completed")
}
00000000000..dc103a65c65 --- /dev/null +++ b/pkg/controllers/tidbgroup/tasks/finalizer.go @@ -0,0 +1,81 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "fmt" + + "github.com/go-logr/logr" + utilerr "k8s.io/apimachinery/pkg/util/errors" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskFinalizer struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskFinalizer(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskFinalizer{ + Client: c, + Logger: logger, + } +} + +func (*TaskFinalizer) Name() string { + return "Finalizer" +} + +func (t *TaskFinalizer) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if !rtx.TiDBGroup.GetDeletionTimestamp().IsZero() { + errList := []error{} + names := []string{} + for _, tidb := range rtx.TiDBs { + names = append(names, tidb.Name) + if tidb.GetDeletionTimestamp().IsZero() { + if err := t.Client.Delete(ctx, tidb); err != nil { + errList = append(errList, fmt.Errorf("try to delete the tidb instance %v failed: %w", tidb.Name, err)) + } + } + } + + if len(errList) != 0 { + return task.Fail().With("failed to delete all tidb instances: %v", utilerr.NewAggregate(errList)) + } + + if len(rtx.TiDBs) != 0 { + return task.Fail().With("wait for all tidb instances being removed, %v 
still exists", names) + } + + if err := k8s.EnsureGroupSubResourceDeleted(ctx, t.Client, + rtx.TiDBGroup.Namespace, rtx.TiDBGroup.Name); err != nil { + return task.Fail().With("cannot delete subresources: %w", err) + } + if err := k8s.RemoveFinalizer(ctx, t.Client, rtx.TiDBGroup); err != nil { + return task.Fail().With("failed to ensure finalizer has been removed: %w", err) + } + } else { + if err := k8s.EnsureFinalizer(ctx, t.Client, rtx.TiDBGroup); err != nil { + return task.Fail().With("failed to ensure finalizer has been added: %w", err) + } + } + + return task.Complete().With("finalizer is synced") +} diff --git a/pkg/controllers/tidbgroup/tasks/status.go b/pkg/controllers/tidbgroup/tasks/status.go new file mode 100644 index 00000000000..2a2c1b4bb1d --- /dev/null +++ b/pkg/controllers/tidbgroup/tasks/status.go @@ -0,0 +1,111 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package tasks

import (
	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/utils/task"
)

// TaskStatus is the final task of the chain: it derives the Available and
// Suspended conditions plus replica/revision counters from the
// ReconcileContext and writes them to the TiDBGroup status subresource.
type TaskStatus struct {
	Client client.Client
	Logger logr.Logger
}

// NewTaskStatus constructs the status-sync task.
func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
	return &TaskStatus{
		Client: c,
		Logger: logger,
	}
}

func (*TaskStatus) Name() string {
	return "Status"
}

//nolint:gocyclo // refactor if possible
func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	// Available condition, from the aggregate computed by the context task.
	availStatus := metav1.ConditionFalse
	availMessage := "tidb group is not available"
	if rtx.IsAvailable {
		availStatus = metav1.ConditionTrue
		availMessage = "tidb group is available"
	}
	conditionChanged := meta.SetStatusCondition(&rtx.TiDBGroup.Status.Conditions, metav1.Condition{
		Type:               v1alpha1.TiDBGroupCondAvailable,
		Status:             availStatus,
		ObservedGeneration: rtx.TiDBGroup.Generation,
		Reason:             v1alpha1.TiDBGroupAvailableReason,
		Message:            availMessage,
	})

	// Suspended condition; "suspending" means suspension was requested but
	// not all members report it yet.
	suspendStatus := metav1.ConditionFalse
	suspendMessage := "tidb group is not suspended"
	if rtx.Suspended {
		suspendStatus = metav1.ConditionTrue
		suspendMessage = "tidb group is suspended"
	} else if rtx.Cluster.ShouldSuspendCompute() {
		suspendMessage = "tidb group is suspending"
	}
	// SetStatusCondition is deliberately the left operand of || so it always
	// runs even when conditionChanged is already true (no short-circuit skip).
	conditionChanged = meta.SetStatusCondition(&rtx.TiDBGroup.Status.Conditions, metav1.Condition{
		Type:               v1alpha1.TiDBGroupCondSuspended,
		Status:             suspendStatus,
		ObservedGeneration: rtx.TiDBGroup.Generation,
		Reason:             v1alpha1.TiDBGroupSuspendReason,
		Message:            suspendMessage,
	}) || conditionChanged

	// Update the current revision if all instances are synced.
	if int(rtx.TiDBGroup.GetDesiredReplicas()) == len(rtx.TiDBs) && v1alpha1.AllInstancesSynced(rtx.TiDBs, rtx.UpdateRevision) {
		conditionChanged = true
		rtx.CurrentRevision = rtx.UpdateRevision
		rtx.TiDBGroup.Status.Version = rtx.TiDBGroup.Spec.Version
	}
	// Count members reporting healthy.
	var readyReplicas int32
	for _, tidb := range rtx.TiDBs {
		if tidb.IsHealthy() {
			readyReplicas++
		}
	}

	// Only issue a status update when something actually changed, to avoid
	// hot-looping on no-op writes.
	if conditionChanged || rtx.TiDBGroup.Status.ReadyReplicas != readyReplicas ||
		rtx.TiDBGroup.Status.Replicas != int32(len(rtx.TiDBs)) || //nolint:gosec // expected type conversion
		!v1alpha1.IsReconciled(rtx.TiDBGroup) ||
		v1alpha1.StatusChanged(rtx.TiDBGroup, rtx.CommonStatus) {
		rtx.TiDBGroup.Status.ReadyReplicas = readyReplicas
		rtx.TiDBGroup.Status.Replicas = int32(len(rtx.TiDBs)) //nolint:gosec// expected type conversion
		rtx.TiDBGroup.Status.ObservedGeneration = rtx.TiDBGroup.Generation
		rtx.TiDBGroup.Status.CurrentRevision = rtx.CurrentRevision
		rtx.TiDBGroup.Status.UpdateRevision = rtx.UpdateRevision
		rtx.TiDBGroup.Status.CollisionCount = rtx.CollisionCount

		if err := t.Client.Status().Update(ctx, rtx.TiDBGroup); err != nil {
			return task.Fail().With("cannot update status: %w", err)
		}
	}

	// Returning Fail here requeues the group until it becomes available
	// (unless it is suspended, in which case unavailability is expected).
	if !rtx.IsAvailable && !rtx.Suspended {
		return task.Fail().With("tidb group may not be available, requeue to retry")
	}

	return task.Complete().With("status is synced")
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskService struct { + Logger logr.Logger + Client client.Client +} + +func NewTaskService(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskService{ + Logger: logger, + Client: c, + } +} + +func (*TaskService) Name() string { + return "Service" +} + +func (t *TaskService) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if rtx.Cluster.ShouldSuspendCompute() { + return task.Complete().With("skip service for suspension") + } + + tidbg := rtx.TiDBGroup + + svcHeadless := newHeadlessService(tidbg) + if err := t.Client.Apply(ctx, svcHeadless); err != nil { + return task.Fail().With(fmt.Sprintf("can't create headless service of tidb: %v", err)) + } + + svc := newService(tidbg) + if err := t.Client.Apply(ctx, svc); err != nil { + return task.Fail().With(fmt.Sprintf("can't create service of tidb: %v", err)) + } + + return task.Complete().With("service of tidb has been applied") +} + +func newHeadlessService(tidbg *v1alpha1.TiDBGroup) *corev1.Service { + ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: HeadlessServiceName(tidbg.Spec.Cluster.Name, tidbg.Name), + Namespace: 
tidbg.Namespace, + Labels: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyCluster: tidbg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: tidbg.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tidbg, v1alpha1.SchemeGroupVersion.WithKind("TiDBGroup")), + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyCluster: tidbg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: tidbg.Name, + }, + Ports: []corev1.ServicePort{ + { + Name: v1alpha1.TiDBPortNameStatus, + Port: tidbg.GetStatusPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.TiDBPortNameStatus), + }, + }, + ClusterIP: corev1.ClusterIPNone, + IPFamilyPolicy: &ipFamilyPolicy, + PublishNotReadyAddresses: true, + }, + } +} + +func newService(tidbg *v1alpha1.TiDBGroup) *corev1.Service { + svcType := corev1.ServiceTypeClusterIP + if tidbg.Spec.Service != nil && tidbg.Spec.Service.Type != "" { + svcType = tidbg.Spec.Service.Type + } + ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack + + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", tidbg.Spec.Cluster.Name, tidbg.Name), + Namespace: tidbg.Namespace, + Labels: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyCluster: tidbg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: tidbg.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tidbg, v1alpha1.SchemeGroupVersion.WithKind("TiDBGroup")), + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: 
v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyCluster: tidbg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: tidbg.Name, + }, + Ports: []corev1.ServicePort{ + { + Name: v1alpha1.TiDBPortNameClient, + Port: tidbg.GetClientPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.TiDBPortNameClient), + }, + { + Name: v1alpha1.TiDBPortNameStatus, + Port: tidbg.GetStatusPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.TiDBPortNameStatus), + }, + }, + Type: svcType, + IPFamilyPolicy: &ipFamilyPolicy, + }, + } +} diff --git a/pkg/controllers/tidbgroup/tasks/updater.go b/pkg/controllers/tidbgroup/tasks/updater.go new file mode 100644 index 00000000000..46b5d2bfac0 --- /dev/null +++ b/pkg/controllers/tidbgroup/tasks/updater.go @@ -0,0 +1,188 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "fmt" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/updater" + "github.com/pingcap/tidb-operator/pkg/updater/policy" + "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/random" + "github.com/pingcap/tidb-operator/pkg/utils/task" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" +) + +// TaskUpdater is a task for updating TiDBGroup when its spec is changed. +type TaskUpdater struct { + Logger logr.Logger + Client client.Client + CRCli history.Interface +} + +func NewTaskUpdater(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskUpdater{ + Logger: logger, + Client: c, + CRCli: history.NewClient(c), + } +} + +func (*TaskUpdater) Name() string { + return "Updater" +} + +func (t *TaskUpdater) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + // TODO: move to task v2 + if !rtx.TiDBGroup.GetDeletionTimestamp().IsZero() { + return task.Complete().With("tidb group has been deleted") + } + + if rtx.Cluster.ShouldSuspendCompute() { + return task.Complete().With("skip updating TiDBGroup for suspension") + } + + // List all controller revisions for the TiDBGroup + selector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + v1alpha1.LabelKeyCluster: rtx.Cluster.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyGroup: rtx.TiDBGroup.Name, + }, + }) + revisions, err := t.CRCli.ListControllerRevisions(rtx.TiDBGroup, selector) + if err != nil { + return task.Fail().With("cannot list controller revisions: %w", err) + } + 
history.SortControllerRevisions(revisions) + + // Get the current(old) and update(new) ControllerRevisions for TiDBGroup + currentRevision, updateRevision, collisionCount, err := revision.GetCurrentAndUpdate(rtx.TiDBGroup, revisions, t.CRCli, rtx.TiDBGroup) + if err != nil { + return task.Fail().With("cannot get revisions: %w", err) + } + rtx.CurrentRevision = currentRevision.Name + rtx.UpdateRevision = updateRevision.Name + rtx.CollisionCount = &collisionCount + + if err = revision.TruncateHistory(t.CRCli, rtx.TiDBs, revisions, + currentRevision, updateRevision, rtx.Cluster.Spec.RevisionHistoryLimit); err != nil { + t.Logger.Error(err, "failed to truncate history") + } + + if needVersionUpgrade(rtx.TiDBGroup) && !rtx.UpgradeChecker.CanUpgrade(ctx, rtx.TiDBGroup) { + return task.Fail().Continue().With( + "preconditions of upgrading the tidb group %s/%s are not met", + rtx.TiDBGroup.Namespace, rtx.TiDBGroup.Name) + } + + desired := 1 + if rtx.TiDBGroup.Spec.Replicas != nil { + desired = int(*rtx.TiDBGroup.Spec.Replicas) + } + + var topos []v1alpha1.ScheduleTopology + for _, p := range rtx.TiDBGroup.Spec.SchedulePolicies { + switch p.Type { + case v1alpha1.SchedulePolicyTypeEvenlySpread: + topos = p.EvenlySpread.Topologies + default: + // do nothing + } + } + + topoPolicy, err := policy.NewTopologyPolicy[*runtime.TiDB](topos) + if err != nil { + return task.Fail().With("invalid topo policy, it should be validated: %w", err) + } + + for _, tidb := range rtx.TiDBs { + topoPolicy.Add(runtime.FromTiDB(tidb)) + } + + wait, err := updater.New[*runtime.TiDB](). + WithInstances(runtime.FromTiDBSlice(rtx.TiDBs)...). + WithDesired(desired). + WithClient(t.Client). + WithMaxSurge(0). + WithMaxUnavailable(1). + WithRevision(rtx.UpdateRevision). + WithNewFactory(TiDBNewer(rtx.TiDBGroup, rtx.UpdateRevision)). + WithAddHooks(topoPolicy). + WithUpdateHooks( + policy.KeepName[*runtime.TiDB](), + policy.KeepTopology[*runtime.TiDB](), + ). + WithDelHooks(topoPolicy). 
+ WithScaleInPreferPolicy( + topoPolicy, + ). + Build(). + Do(ctx) + if err != nil { + return task.Fail().With("cannot update instances: %w", err) + } + if wait { + return task.Complete().With("wait for all instances ready") + } + return task.Complete().With("all instances are synced") +} + +func needVersionUpgrade(dbg *v1alpha1.TiDBGroup) bool { + return dbg.Spec.Version != dbg.Status.Version && dbg.Status.Version != "" +} + +func TiDBNewer(dbg *v1alpha1.TiDBGroup, rev string) updater.NewFactory[*runtime.TiDB] { + return updater.NewFunc[*runtime.TiDB](func() *runtime.TiDB { + //nolint:mnd // refactor to use a constant + name := fmt.Sprintf("%s-%s-%s", dbg.Spec.Cluster.Name, dbg.Name, random.Random(6)) + spec := dbg.Spec.Template.Spec.DeepCopy() + + tidb := &v1alpha1.TiDB{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: dbg.Namespace, + Name: name, + Labels: maputil.Merge(dbg.Spec.Template.Labels, map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyCluster: dbg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: dbg.Name, + v1alpha1.LabelKeyInstanceRevisionHash: rev, + }), + Annotations: maputil.Copy(dbg.Spec.Template.Annotations), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(dbg, v1alpha1.SchemeGroupVersion.WithKind("TiDBGroup")), + }, + }, + Spec: v1alpha1.TiDBSpec{ + Cluster: dbg.Spec.Cluster, + Version: dbg.Spec.Version, + Subdomain: HeadlessServiceName(dbg.Spec.Cluster.Name, dbg.Name), // same as headless service + TiDBTemplateSpec: *spec, + }, + } + + return runtime.FromTiDB(tidb) + }) +} diff --git a/pkg/controllers/tidbgroup/tasks/updater_test.go b/pkg/controllers/tidbgroup/tasks/updater_test.go new file mode 100644 index 00000000000..12eca42fc68 --- /dev/null +++ b/pkg/controllers/tidbgroup/tasks/updater_test.go @@ -0,0 +1,95 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +func FakeContext(changes ...fake.ChangeFunc[ReconcileContext, *ReconcileContext]) *ReconcileContext { + ctx := fake.Fake(changes...) 
+ ctx.Context = context.TODO() + return ctx +} + +func WithCluster(cluster *v1alpha1.Cluster) fake.ChangeFunc[ReconcileContext, *ReconcileContext] { + return func(obj *ReconcileContext) *ReconcileContext { + obj.Cluster = cluster + return obj + } +} + +func WithTiDBGroup(dbg *v1alpha1.TiDBGroup) fake.ChangeFunc[ReconcileContext, *ReconcileContext] { + return func(obj *ReconcileContext) *ReconcileContext { + obj.TiDBGroup = dbg + return obj + } +} + +func TestUpdater(t *testing.T) { + tests := []struct { + name string + ctx *ReconcileContext + objs []client.Object + expected task.Result + expectFunc func(t *testing.T, ctx *ReconcileContext, cli client.Client) + }{ + { + name: "first time to sync", + ctx: FakeContext( + WithCluster(fake.FakeObj[v1alpha1.Cluster]("test")), + WithTiDBGroup(fake.FakeObj("test-tidbgroup", + func(dbg *v1alpha1.TiDBGroup) *v1alpha1.TiDBGroup { + dbg.Spec.Cluster = v1alpha1.ClusterReference{Name: "test"} + dbg.Spec.Replicas = ptr.To(int32(1)) + return dbg + }, + )), + ), + expected: task.Complete().With(""), + expectFunc: func(t *testing.T, _ *ReconcileContext, cli client.Client) { + var crList appsv1.ControllerRevisionList + require.NoError(t, cli.List(context.TODO(), &crList)) + assert.Len(t, crList.Items, 1) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fc := client.NewFakeClient(tt.objs...) + updaterTask := NewTaskUpdater(logr.Discard(), fc) + got := updaterTask.Sync(tt.ctx) + assert.Equal(t, tt.expected.IsFailed(), got.IsFailed()) + assert.Equal(t, tt.expected.ShouldContinue(), got.ShouldContinue()) + assert.Equal(t, tt.expected.RequeueAfter(), got.RequeueAfter()) + + if tt.expectFunc != nil { + tt.expectFunc(t, tt.ctx, fc) + } + }) + } +} diff --git a/pkg/controllers/tidbgroup/tasks/util.go b/pkg/controllers/tidbgroup/tasks/util.go new file mode 100644 index 00000000000..cb1ec897804 --- /dev/null +++ b/pkg/controllers/tidbgroup/tasks/util.go @@ -0,0 +1,23 @@ +// Copyright 2024 PingCAP, Inc. 
// HeadlessServiceName returns the name of the headless peer service for a
// group: "<cluster>-<group>-peer".
func HeadlessServiceName(clusterName, groupName string) string {
	return clusterName + "-" + groupName + "-peer"
}
package tiflash

import (
	"github.com/pingcap/tidb-operator/pkg/controllers/tiflash/tasks"
	"github.com/pingcap/tidb-operator/pkg/utils/task/v2"
)

// NewRunner assembles the ordered task chain for one TiFlash reconcile round.
// The order is significant: context tasks must run before the tasks that
// consume their data, and the switch tasks short-circuit the remainder of
// the chain when their condition holds.
func (r *Reconciler) NewRunner(reporter task.TaskReporter) task.TaskRunner[tasks.ReconcileContext] {
	runner := task.NewTaskRunner(reporter,
		// Get tiflash
		tasks.TaskContextTiFlash(r.Client),
		// If it's deleted just return
		task.NewSwitchTask(tasks.CondTiFlashHasBeenDeleted()),

		// get cluster info, FinalizerDel will use it
		tasks.TaskContextCluster(r.Client),
		// get info from pd
		tasks.TaskContextInfoFromPD(r.PDClientManager),

		// Deleting: run finalizer removal and stop the normal flow.
		task.NewSwitchTask(tasks.CondTiFlashIsDeleting(),
			tasks.TaskFinalizerDel(r.Client),
		),

		// check whether it's paused
		task.NewSwitchTask(tasks.CondClusterIsPaused()),

		// get pod and check whether the cluster is suspending
		task.NewSwitchTask(tasks.CondClusterIsSuspending(),
			tasks.TaskFinalizerAdd(r.Client),
			tasks.TaskPodSuspend(r.Client),
			tasks.TaskStatusSuspend(r.Client),
		),

		// normal process
		tasks.TaskContextTiFlashGroup(r.Client),
		tasks.TaskFinalizerAdd(r.Client),
		tasks.NewTaskConfigMap(r.Logger, r.Client),
		tasks.NewTaskPVC(r.Logger, r.Client, r.VolumeModifier),
		tasks.NewTaskPod(r.Logger, r.Client),
		tasks.NewTaskStoreLabels(r.Logger, r.Client),
		tasks.NewTaskStatus(r.Logger, r.Client),
	)

	return runner
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tiflash + +import ( + "context" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/tiflash/tasks" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/volumes" +) + +type Reconciler struct { + Logger logr.Logger + Client client.Client + VolumeModifier volumes.Modifier + PDClientManager pdm.PDClientManager +} + +func Setup(mgr manager.Manager, c client.Client, pdcm pdm.PDClientManager, vm volumes.Modifier) error { + r := &Reconciler{ + Logger: mgr.GetLogger().WithName("TiFlash"), + Client: c, + VolumeModifier: vm, + PDClientManager: pdcm, + } + return ctrl.NewControllerManagedBy(mgr).For(&v1alpha1.TiFlash{}). + Owns(&corev1.Pod{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.PersistentVolumeClaim{}). + Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler()). + WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}). + WatchesRawSource(pdcm.Source(&pdv1.Store{}, r.StoreEventHandler())). 
+ Complete(r) +} + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger.WithValues("tiflash", req.NamespacedName) + reporter := task.NewTableTaskReporter() + + startTime := time.Now() + logger.Info("start reconcile") + defer func() { + dur := time.Since(startTime) + logger.Info("end reconcile", "duration", dur) + logger.Info("summay: \n" + reporter.Summary()) + }() + + rtx := &tasks.ReconcileContext{ + // some fields will be set in the context task + Context: ctx, + Key: req.NamespacedName, + } + + runner := r.NewRunner(reporter) + return runner.Run(rtx) +} diff --git a/pkg/controllers/tiflash/handler.go b/pkg/controllers/tiflash/handler.go new file mode 100644 index 00000000000..4df2c483c8f --- /dev/null +++ b/pkg/controllers/tiflash/handler.go @@ -0,0 +1,140 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tiflash + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + tiflashconfig "github.com/pingcap/tidb-operator/pkg/configs/tiflash" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" +) + +func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + oldObj := event.ObjectOld.(*v1alpha1.Cluster) + newObj := event.ObjectNew.(*v1alpha1.Cluster) + + if newObj.Status.PD != oldObj.Status.PD { + r.Logger.Info("pd url is updating", "from", oldObj.Status.PD, "to", newObj.Status.PD) + } else if !reflect.DeepEqual(oldObj.Spec.SuspendAction, newObj.Spec.SuspendAction) { + r.Logger.Info("suspend action is updating", "from", oldObj.Spec.SuspendAction, "to", newObj.Spec.SuspendAction) + } else if oldObj.Spec.Paused != newObj.Spec.Paused { + r.Logger.Info("cluster paused is updating", "from", oldObj.Spec.Paused, "to", newObj.Spec.Paused) + } else { + return + } + + var flashl v1alpha1.TiFlashList + if err := r.Client.List(ctx, &flashl, client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: newObj.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, + }, client.InNamespace(newObj.Namespace)); err != nil { + r.Logger.Error(err, "cannot list all tiflash instances", "ns", newObj.Namespace, "cluster", newObj.Name) + return + } + + for i := range flashl.Items { + 
tiflash := &flashl.Items[i] + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tiflash.Name, + Namespace: tiflash.Namespace, + }, + }) + } + }, + } +} + +func (r *Reconciler) StoreEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(ctx context.Context, event event.TypedCreateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + s := event.Object.(*pdv1.Store) + req, err := r.getRequestOfTiFlashStore(ctx, s) + if err != nil { + return + } + queue.Add(req) + }, + + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + s := event.ObjectNew.(*pdv1.Store) + req, err := r.getRequestOfTiFlashStore(ctx, s) + if err != nil { + return + } + queue.Add(req) + }, + + DeleteFunc: func(ctx context.Context, event event.TypedDeleteEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + s := event.Object.(*pdv1.Store) + req, err := r.getRequestOfTiFlashStore(ctx, s) + if err != nil { + return + } + queue.Add(req) + }, + } +} + +func (r *Reconciler) getRequestOfTiFlashStore(ctx context.Context, s *pdv1.Store) (reconcile.Request, error) { + if s.Engine() != pdv1.StoreEngineTiFlash { + return reconcile.Request{}, fmt.Errorf("store is not tiflash") + } + + ns, cluster := pdm.SplitPrimaryKey(s.Namespace) + var tiflashList v1alpha1.TiFlashList + if err := r.Client.List(ctx, &tiflashList, client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: cluster, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, + }, client.InNamespace(ns)); err != nil { + r.Logger.Error(err, "cannot list all tiflash instances", "ns", ns, "cluster", cluster) + return reconcile.Request{}, err + } + + for i := range tiflashList.Items { + tiflash 
:= &tiflashList.Items[i] + if s.Name == tiflashconfig.GetServiceAddr(tiflash) { + return reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tiflash.Name, + Namespace: tiflash.Namespace, + }, + }, nil + } + } + + err := fmt.Errorf("store: %v/%v, addr: %v", s.Namespace, s.Name, s.Address) + r.Logger.Error(err, "failed to find tiflash of store") + return reconcile.Request{}, err +} diff --git a/pkg/controllers/tiflash/tasks/cm.go b/pkg/controllers/tiflash/tasks/cm.go new file mode 100644 index 00000000000..4b2fb74d7e2 --- /dev/null +++ b/pkg/controllers/tiflash/tasks/cm.go @@ -0,0 +1,104 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + tiflashcfg "github.com/pingcap/tidb-operator/pkg/configs/tiflash" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/utils/toml" +) + +type TaskConfigMap struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskConfigMap(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskConfigMap{ + Client: c, + Logger: logger, + } +} + +func (*TaskConfigMap) Name() string { + return "ConfigMap" +} + +func (t *TaskConfigMap) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + flashConfig := tiflashcfg.Config{} + decoder, encoder := toml.Codec[tiflashcfg.Config]() + if err := decoder.Decode([]byte(rtx.TiFlash.Spec.Config), &flashConfig); err != nil { + return task.Fail().With("tiflash config cannot be decoded: %w", err) + } + if err := flashConfig.Overlay(rtx.Cluster, rtx.TiFlash); err != nil { + return task.Fail().With("cannot generate tiflash config: %w", err) + } + flashData, err := encoder.Encode(&flashConfig) + if err != nil { + return task.Fail().With("tiflash config cannot be encoded: %w", err) + } + + proxyConfig := tiflashcfg.ProxyConfig{} + decoderProxy, encoderProxy := toml.Codec[tiflashcfg.ProxyConfig]() + if err = decoderProxy.Decode([]byte(rtx.TiFlash.Spec.ProxyConfig), &proxyConfig); err != nil { + return task.Fail().With("tiflash proxy config cannot be decoded: %w", err) + } + if err = proxyConfig.Overlay(rtx.Cluster, rtx.TiFlash); err != nil { + return task.Fail().With("cannot generate tiflash proxy config: %w", err) + } + proxyData, err := encoderProxy.Encode(&proxyConfig) + if err != nil { + return task.Fail().With("tiflash proxy config cannot be 
encoded: %w", err) + } + + rtx.ConfigHash, err = toml.GenerateHash(rtx.TiFlash.Spec.Config) + if err != nil { + return task.Fail().With("failed to generate hash for `tiflash.spec.config`: %w", err) + } + expected := newConfigMap(rtx.TiFlash, flashData, proxyData, rtx.ConfigHash) + if e := t.Client.Apply(rtx, expected); e != nil { + return task.Fail().With("can't create/update cm of tiflash: %w", e) + } + return task.Complete().With("cm is synced") +} + +func newConfigMap(tiflash *v1alpha1.TiFlash, flashData, proxyData []byte, hash string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(tiflash.Name), + Namespace: tiflash.Namespace, + Labels: maputil.Merge(tiflash.Labels, map[string]string{ + v1alpha1.LabelKeyInstance: tiflash.Name, + v1alpha1.LabelKeyConfigHash: hash, + }), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tiflash, v1alpha1.SchemeGroupVersion.WithKind("TiFlash")), + }, + }, + Data: map[string]string{ + v1alpha1.ConfigFileName: string(flashData), + v1alpha1.ConfigFileTiFlashProxyName: string(proxyData), + }, + } +} diff --git a/pkg/controllers/tiflash/tasks/ctx.go b/pkg/controllers/tiflash/tasks/ctx.go new file mode 100644 index 00000000000..ad6eaf05df2 --- /dev/null +++ b/pkg/controllers/tiflash/tasks/ctx.go @@ -0,0 +1,194 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/kvproto/pkg/metapb" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + tiflashconfig "github.com/pingcap/tidb-operator/pkg/configs/tiflash" + "github.com/pingcap/tidb-operator/pkg/pdapi/v1" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +type ReconcileContext struct { + context.Context + + Key types.NamespacedName + + PDClient pdapi.PDClient + + Healthy bool + + Store *pdv1.Store + StoreID string + StoreState string + StoreLabels []*metapb.StoreLabel + + Cluster *v1alpha1.Cluster + TiFlash *v1alpha1.TiFlash + TiFlashGroup *v1alpha1.TiFlashGroup + Pod *corev1.Pod + + // ConfigHash stores the hash of **user-specified** config (i.e.`.Spec.Config`), + // which will be used to determine whether the config has changed. + // This ensures that our config overlay logic will not restart the tidb cluster unexpectedly. 
+ ConfigHash string + + // Pod cannot be updated when call DELETE API, so we have to set this field to indicate + // the underlay pod has been deleting + PodIsTerminating bool +} + +func (ctx *ReconcileContext) Self() *ReconcileContext { + return ctx +} + +func TaskContextTiFlash(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextTiFlash", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + var tiflash v1alpha1.TiFlash + if err := c.Get(ctx, rtx.Key, &tiflash); err != nil { + if !errors.IsNotFound(err) { + return task.Fail().With("can't get tiflash instance: %w", err) + } + + return task.Complete().With("tiflash instance has been deleted") + } + rtx.TiFlash = &tiflash + return task.Complete().With("tiflash is set") + }) +} + +func TaskContextTiFlashGroup(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextTiFlashGroup", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if len(rtx.TiFlash.OwnerReferences) == 0 { + return task.Fail().With("tiflash instance has no owner, this should not happen") + } + + var tiflashGroup v1alpha1.TiFlashGroup + if err := c.Get(ctx, client.ObjectKey{ + Name: rtx.TiFlash.OwnerReferences[0].Name, // only one owner now + Namespace: rtx.TiFlash.Namespace, + }, &tiflashGroup); err != nil { + return task.Fail().With("cannot find tiflash group %s: %w", rtx.TiFlash.OwnerReferences[0].Name, err) + } + rtx.TiFlashGroup = &tiflashGroup + return task.Complete().With("tiflash group is set") + }) +} + +func CondTiFlashHasBeenDeleted() task.Condition[ReconcileContext] { + return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool { + return ctx.Self().TiFlash == nil + }) +} + +func CondTiFlashIsDeleting() task.Condition[ReconcileContext] { + return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool { + return !ctx.Self().TiFlash.GetDeletionTimestamp().IsZero() + }) +} + +func 
TaskContextCluster(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextCluster", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + var cluster v1alpha1.Cluster + if err := c.Get(ctx, client.ObjectKey{ + Name: rtx.TiFlash.Spec.Cluster.Name, + Namespace: rtx.TiFlash.Namespace, + }, &cluster); err != nil { + return task.Fail().With("cannot find cluster %s: %w", rtx.TiFlash.Spec.Cluster.Name, err) + } + rtx.Cluster = &cluster + return task.Complete().With("cluster is set") + }) +} + +func CondClusterIsSuspending() task.Condition[ReconcileContext] { + return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool { + return ctx.Self().Cluster.ShouldSuspendCompute() + }) +} + +func TaskContextPod(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextPod", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + var pod corev1.Pod + if err := c.Get(ctx, client.ObjectKey{ + Name: rtx.TiFlash.Name, + Namespace: rtx.TiFlash.Namespace, + }, &pod); err != nil { + if errors.IsNotFound(err) { + return task.Complete().With("pod is not created") + } + return task.Fail().With("failed to get pod of pd: %w", err) + } + + rtx.Pod = &pod + if !rtx.Pod.GetDeletionTimestamp().IsZero() { + rtx.PodIsTerminating = true + } + return task.Complete().With("pod is set") + }) +} + +func CondClusterIsPaused() task.Condition[ReconcileContext] { + return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool { + return ctx.Self().Cluster.ShouldPauseReconcile() + }) +} + +func TaskContextInfoFromPD(cm pdm.PDClientManager) task.Task[ReconcileContext] { + return task.NameTaskFunc("ContextInfoFromPD", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + c, ok := cm.Get(pdm.PrimaryKey(rtx.TiFlash.Namespace, rtx.TiFlash.Spec.Cluster.Name)) + if !ok { + return task.Complete().With("pd client is not registered") + } + rtx.PDClient = 
c.Underlay() + + if !c.HasSynced() { + return task.Complete().With("store info is not synced, just wait for next sync") + } + + s, err := c.Stores().Get(tiflashconfig.GetServiceAddr(rtx.TiFlash)) + if err != nil { + if !errors.IsNotFound(err) { + return task.Fail().With("failed to get store info: %w", err) + } + return task.Complete().With("store does not exist") + } + + rtx.Store, rtx.StoreID, rtx.StoreState = s, s.ID, string(s.NodeState) + rtx.StoreLabels = make([]*metapb.StoreLabel, len(s.Labels)) + for k, v := range s.Labels { + rtx.StoreLabels = append(rtx.StoreLabels, &metapb.StoreLabel{Key: k, Value: v}) + } + return task.Complete().With("got store info") + }) +} diff --git a/pkg/controllers/tiflash/tasks/finalizer.go b/pkg/controllers/tiflash/tasks/finalizer.go new file mode 100644 index 00000000000..55048a04979 --- /dev/null +++ b/pkg/controllers/tiflash/tasks/finalizer.go @@ -0,0 +1,78 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "time" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +const ( + removingWaitInterval = 10 * time.Second +) + +func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("FinalizerDel", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + switch { + case !rtx.Cluster.GetDeletionTimestamp().IsZero(): + if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, + rtx.TiFlash.Namespace, rtx.TiFlash.Name, client.GracePeriodSeconds(1)); err != nil { + return task.Fail().With("cannot delete sub resources: %w", err) + } + // whole cluster is deleting + if err := k8s.RemoveFinalizer(ctx, c, rtx.TiFlash); err != nil { + return task.Fail().With("cannot remove finalizer: %w", err) + } + + case rtx.StoreState == v1alpha1.StoreStateRemoving: + // TODO: Complete task and retrigger reconciliation by polling PD + return task.Retry(removingWaitInterval).With("wait until the store is removed") + + case rtx.StoreState == v1alpha1.StoreStateRemoved || rtx.StoreID == "": + if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, + rtx.TiFlash.Namespace, rtx.TiFlash.Name, client.GracePeriodSeconds(1)); err != nil { + return task.Fail().With("cannot delete subresources: %w", err) + } + // Store ID is empty may because of tiflash is not initialized + // TODO: check whether tiflash is initialized + if err := k8s.RemoveFinalizer(ctx, c, rtx.TiFlash); err != nil { + return task.Fail().With("cannot remove finalizer: %w", err) + } + default: + // get store info successfully and the store still exists + if err := rtx.PDClient.DeleteStore(ctx, rtx.StoreID); err != nil { + return task.Fail().With("cannot delete store %s: %v", rtx.StoreID, err) + } + + return task.Retry(removingWaitInterval).With("the store is removing") + } + return 
task.Complete().With("finalizer is removed") + }) +} + +func TaskFinalizerAdd(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("FinalizerAdd", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + if err := k8s.EnsureFinalizer(ctx, c, rtx.TiFlash); err != nil { + return task.Fail().With("failed to ensure finalizer has been added: %w", err) + } + return task.Complete().With("finalizer is added") + }) +} diff --git a/pkg/controllers/tiflash/tasks/pod.go b/pkg/controllers/tiflash/tasks/pod.go new file mode 100644 index 00000000000..c0f94ece768 --- /dev/null +++ b/pkg/controllers/tiflash/tasks/pod.go @@ -0,0 +1,258 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "fmt" + "path/filepath" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + tiflashcfg "github.com/pingcap/tidb-operator/pkg/configs/tiflash" + "github.com/pingcap/tidb-operator/pkg/image" + "github.com/pingcap/tidb-operator/pkg/overlay" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +func TaskPodSuspend(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("PodSuspend", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + if rtx.Pod == nil { + return task.Complete().With("pod has been deleted") + } + if err := c.Delete(rtx, rtx.Pod); err != nil { + return task.Fail().With("can't delete pod of pd: %w", err) + } + rtx.PodIsTerminating = true + return task.Wait().With("pod is deleting") + }) +} + +type TaskPod struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskPod(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskPod{ + Client: c, + Logger: logger, + } +} + +func (*TaskPod) Name() string { + return "Pod" +} + +func (t *TaskPod) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + expected := t.newPod(rtx.Cluster, rtx.TiFlashGroup, rtx.TiFlash, rtx.ConfigHash) + if rtx.Pod == nil { + if err := t.Client.Apply(rtx, expected); err != nil { + return task.Fail().With("can't apply pod of tiflash: %w", err) + } + + rtx.Pod = expected + return task.Complete().With("pod is created") + } + + res := k8s.ComparePods(rtx.Pod, expected) + curHash, expectHash := rtx.Pod.Labels[v1alpha1.LabelKeyConfigHash], expected.Labels[v1alpha1.LabelKeyConfigHash] + configChanged := curHash != expectHash + t.Logger.Info("compare pod", "result", res, "configChanged", 
configChanged, "currentConfigHash", curHash, "expectConfigHash", expectHash) + + if res == k8s.CompareResultRecreate || (configChanged && + rtx.TiFlashGroup.Spec.ConfigUpdateStrategy == v1alpha1.ConfigUpdateStrategyRollingUpdate) { + t.Logger.Info("will recreate the pod") + if err := t.Client.Delete(rtx, rtx.Pod); err != nil { + return task.Fail().With("can't delete pod of tiflash: %w", err) + } + + rtx.PodIsTerminating = true + return task.Complete().With("pod is deleting") + } else if res == k8s.CompareResultUpdate { + t.Logger.Info("will update the pod in place") + if err := t.Client.Apply(rtx, expected); err != nil { + return task.Fail().With("can't apply pod of tiflash: %w", err) + } + rtx.Pod = expected + } + + return task.Complete().With("pod is synced") +} + +func (*TaskPod) newPod(cluster *v1alpha1.Cluster, _ *v1alpha1.TiFlashGroup, tiflash *v1alpha1.TiFlash, configHash string) *corev1.Pod { + vols := []corev1.Volume{ + { + Name: v1alpha1.VolumeNameConfig, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ConfigMapName(tiflash.Name), + }, + }, + }, + }, + } + + mounts := []corev1.VolumeMount{ + { + Name: v1alpha1.VolumeNameConfig, + MountPath: v1alpha1.DirNameConfigTiFlash, + }, + } + + var firstMount *corev1.VolumeMount + for i := range tiflash.Spec.Volumes { + vol := &tiflash.Spec.Volumes[i] + name := v1alpha1.NamePrefix + "tiflash" + if vol.Name != "" { + name = name + "-" + vol.Name + } + vols = append(vols, corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + // the format is "data{i}-tiflash-xxx" to compatible with TiDB Operator v1 + ClaimName: PersistentVolumeClaimName(tiflash.Name, i), + }, + }, + }) + mount := corev1.VolumeMount{ + Name: name, + MountPath: vol.Path, + } + mounts = append(mounts, mount) + if i == 0 { + firstMount = &mount + } + } + + if 
cluster.IsTLSClusterEnabled() { + groupName := tiflash.Labels[v1alpha1.LabelKeyGroup] + vols = append(vols, corev1.Volume{ + Name: v1alpha1.TiFlashClusterTLSVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: cluster.TLSClusterSecretName(groupName), + }, + }, + }) + mounts = append(mounts, corev1.VolumeMount{ + Name: v1alpha1.TiFlashClusterTLSVolumeName, + MountPath: v1alpha1.TiFlashClusterTLSMountPath, + ReadOnly: true, + }) + } + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: tiflash.Namespace, + Name: tiflash.Name, + Labels: maputil.Merge(tiflash.Labels, map[string]string{ + v1alpha1.LabelKeyInstance: tiflash.Name, + v1alpha1.LabelKeyConfigHash: configHash, + }), + Annotations: maputil.Copy(tiflash.GetAnnotations()), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tiflash, v1alpha1.SchemeGroupVersion.WithKind("TiFlash")), + }, + }, + Spec: corev1.PodSpec{ + Hostname: tiflash.Name, + Subdomain: tiflash.Spec.Subdomain, + NodeSelector: tiflash.Spec.Topology, + InitContainers: []corev1.Container{ + *buildLogTailerContainer(tiflash, v1alpha1.TiFlashServerLogContainerName, tiflashcfg.GetServerLogPath(tiflash), firstMount), + *buildLogTailerContainer(tiflash, v1alpha1.TiFlashErrorLogContainerName, tiflashcfg.GetErrorLogPath(tiflash), firstMount), + }, + Containers: []corev1.Container{ + { + Name: v1alpha1.ContainerNameTiFlash, + Image: image.TiFlash.Image(tiflash.Spec.Image, tiflash.Spec.Version), + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "/tiflash/tiflash", + "server", + "--config-file", + filepath.Join(v1alpha1.DirNameConfigTiFlash, v1alpha1.ConfigFileName), + }, + Ports: []corev1.ContainerPort{ + // no `tcp_port` and `http_port` as they are are deprecated in tiflash since v7.1.0. 
+ // ref: https://github.com/pingcap/tidb-operator/pull/5075 + // and also no `interserver_http_port` + { + Name: v1alpha1.TiFlashPortNameFlash, + ContainerPort: tiflash.GetFlashPort(), + }, + { + Name: v1alpha1.TiFlashPortNameMetrics, + ContainerPort: tiflash.GetMetricsPort(), + }, + { + Name: v1alpha1.TiFlashPortNameProxy, + ContainerPort: tiflash.GetProxyPort(), + }, + { + // no this port in v1 + Name: v1alpha1.TiFlashPortNameProxyStatus, + ContainerPort: tiflash.GetProxyStatusPort(), + }, + }, + VolumeMounts: mounts, + Resources: k8s.GetResourceRequirements(tiflash.Spec.Resources), + }, + }, + Volumes: vols, + }, + } + + if tiflash.Spec.Overlay != nil { + overlay.OverlayPod(pod, tiflash.Spec.Overlay.Pod) + } + + k8s.CalculateHashAndSetLabels(pod) + return pod +} + +func buildLogTailerContainer(tiflash *v1alpha1.TiFlash, containerName, logFile string, mount *corev1.VolumeMount) *corev1.Container { + img := v1alpha1.DefaultHelperImage + if tiflash.Spec.LogTailer != nil && tiflash.Spec.LogTailer.Image != nil && *tiflash.Spec.LogTailer.Image != "" { + img = *tiflash.Spec.LogTailer.Image + } + restartPolicy := corev1.ContainerRestartPolicyAlways // sidecar container in `initContainers` + c := &corev1.Container{ + Name: containerName, + Image: img, + RestartPolicy: &restartPolicy, + VolumeMounts: []corev1.VolumeMount{*mount}, + Command: []string{ + "sh", + "-c", + fmt.Sprintf("touch %s; tail -n0 -F %s;", logFile, logFile), + }, + } + if tiflash.Spec.LogTailer != nil { + c.Resources = k8s.GetResourceRequirements(tiflash.Spec.LogTailer.Resources) + } + return c +} diff --git a/pkg/controllers/tiflash/tasks/pvc.go b/pkg/controllers/tiflash/tasks/pvc.go new file mode 100644 index 00000000000..ddbfcb4f5eb --- /dev/null +++ b/pkg/controllers/tiflash/tasks/pvc.go @@ -0,0 +1,92 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/volumes" +) + +type TaskPVC struct { + Client client.Client + Logger logr.Logger + VolumeModifier volumes.Modifier +} + +func NewTaskPVC(logger logr.Logger, c client.Client, vm volumes.Modifier) task.Task[ReconcileContext] { + return &TaskPVC{ + Client: c, + Logger: logger, + VolumeModifier: vm, + } +} + +func (*TaskPVC) Name() string { + return "PVC" +} + +func (t *TaskPVC) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + pvcs := newPVCs(rtx.TiFlash) + if wait, err := volumes.SyncPVCs(rtx, t.Client, pvcs, t.VolumeModifier, t.Logger); err != nil { + return task.Fail().With("failed to sync pvcs: %w", err) + } else if wait { + return task.Wait().With("waiting for pvcs to be synced") + } + + return task.Complete().With("pvcs are synced") +} + +func newPVCs(tiflash *v1alpha1.TiFlash) []*corev1.PersistentVolumeClaim { + pvcs := make([]*corev1.PersistentVolumeClaim, 0, len(tiflash.Spec.Volumes)) + for i := range tiflash.Spec.Volumes { + vol := &tiflash.Spec.Volumes[i] + pvcs = append(pvcs, &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + // the format is "data{i}-tiflash-xxx" to compatible with TiDB Operator v1 + Name: 
PersistentVolumeClaimName(tiflash.Name, i), + Namespace: tiflash.Namespace, + Labels: maputil.Merge(tiflash.Labels, map[string]string{ + v1alpha1.LabelKeyInstance: tiflash.Name, + }), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tiflash, v1alpha1.SchemeGroupVersion.WithKind("TiFlash")), + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: vol.Storage, + }, + }, + StorageClassName: vol.StorageClassName, + VolumeAttributesClassName: vol.VolumeAttributesClassName, + }, + }) + } + + return pvcs +} diff --git a/pkg/controllers/tiflash/tasks/status.go b/pkg/controllers/tiflash/tasks/status.go new file mode 100644 index 00000000000..4af2829316a --- /dev/null +++ b/pkg/controllers/tiflash/tasks/status.go @@ -0,0 +1,171 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package tasks
+
+import (
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/task/v2"
+	"github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/statefulset"
+)
+
+func TaskStatusSuspend(c client.Client) task.Task[ReconcileContext] { // updates Suspended/Health conditions while the instance is suspending
+	return task.NameTaskFunc("StatusSuspend", func(ctx task.Context[ReconcileContext]) task.Result {
+		rtx := ctx.Self()
+		rtx.TiFlash.Status.ObservedGeneration = rtx.TiFlash.Generation
+
+		var (
+			suspendStatus  = metav1.ConditionFalse
+			suspendMessage = "tiflash is suspending"
+
+			// when suspending, the health status should be false
+			healthStatus  = metav1.ConditionFalse
+			healthMessage = "tiflash is not healthy"
+		)
+
+		if rtx.Pod == nil { // pod gone == suspension finished
+			suspendStatus = metav1.ConditionTrue
+			suspendMessage = "tiflash is suspended"
+		}
+		needUpdate := meta.SetStatusCondition(&rtx.TiFlash.Status.Conditions, metav1.Condition{
+			Type:               v1alpha1.TiFlashCondSuspended,
+			Status:             suspendStatus,
+			ObservedGeneration: rtx.TiFlash.Generation,
+			// TODO: use different reason for suspending and suspended
+			Reason:  v1alpha1.TiFlashSuspendReason,
+			Message: suspendMessage,
+		})
+
+		needUpdate = meta.SetStatusCondition(&rtx.TiFlash.Status.Conditions, metav1.Condition{
+			Type:               v1alpha1.TiFlashCondHealth,
+			Status:             healthStatus,
+			ObservedGeneration: rtx.TiFlash.Generation,
+			Reason:             v1alpha1.TiFlashHealthReason,
+			Message:            healthMessage,
+		}) || needUpdate // keep ordering: SetStatusCondition must run even if needUpdate is already true
+
+		if needUpdate {
+			if err := c.Status().Update(ctx, rtx.TiFlash); err != nil {
+				return task.Fail().With("cannot update status: %w", err)
+			}
+		}
+
+		return task.Complete().With("status of suspend tiflash is updated")
+	})
+}
+
+type TaskStatus struct {
+	Client client.Client
+	Logger logr.Logger
+}
+
+func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { // syncs TiFlash status from pod + PD store state
+	return &TaskStatus{
+		Client: c,
+		Logger: logger,
+	}
+}
+
+func (*TaskStatus) Name() string {
+	return "Status"
+}
+
+//nolint:gocyclo // refactor is possible
+func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	var (
+		healthStatus  = metav1.ConditionFalse
+		healthMessage = "tiflash is not healthy"
+
+		suspendStatus  = metav1.ConditionFalse
+		suspendMessage = "tiflash is not suspended"
+
+		needUpdate = false
+	)
+
+	if rtx.StoreID != "" {
+		if rtx.TiFlash.Status.ID != rtx.StoreID {
+			rtx.TiFlash.Status.ID = rtx.StoreID
+			needUpdate = true
+		}
+
+		info, err := rtx.PDClient.GetStore(ctx, rtx.StoreID)
+		if err == nil && info != nil && info.Store != nil {
+			rtx.StoreState = info.Store.NodeState.String()
+		} else if err != nil { // only log a real error; a nil/absent store is not worth an Error(nil, ...) entry
+			t.Logger.Error(err, "failed to get tiflash store info", "store", rtx.StoreID)
+		}
+	}
+	if rtx.StoreState != "" && rtx.TiFlash.Status.State != rtx.StoreState {
+		rtx.TiFlash.Status.State = rtx.StoreState
+		needUpdate = true
+	}
+
+	needUpdate = meta.SetStatusCondition(&rtx.TiFlash.Status.Conditions, metav1.Condition{
+		Type:               v1alpha1.TiFlashCondSuspended,
+		Status:             suspendStatus,
+		ObservedGeneration: rtx.TiFlash.Generation,
+		Reason:             v1alpha1.TiFlashSuspendReason,
+		Message:            suspendMessage,
+	}) || needUpdate
+
+	if needUpdate || !v1alpha1.IsReconciled(rtx.TiFlash) ||
+		rtx.TiFlash.Status.UpdateRevision != rtx.TiFlash.Labels[v1alpha1.LabelKeyInstanceRevisionHash] {
+		rtx.TiFlash.Status.ObservedGeneration = rtx.TiFlash.Generation
+		rtx.TiFlash.Status.UpdateRevision = rtx.TiFlash.Labels[v1alpha1.LabelKeyInstanceRevisionHash]
+		needUpdate = true
+	}
+
+	if rtx.Pod == nil || rtx.PodIsTerminating { // leave rtx.Healthy untouched while the pod is absent/terminating
+	} else if statefulset.IsPodRunningAndReady(rtx.Pod) && rtx.StoreState == v1alpha1.StoreStateServing {
+		rtx.Healthy = true
+		if rtx.TiFlash.Status.CurrentRevision != rtx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash] {
+			rtx.TiFlash.Status.CurrentRevision = rtx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash]
+			needUpdate = true
+		}
+	} else {
+		rtx.Healthy = false
+	}
+
+	if rtx.Healthy {
+		healthStatus = metav1.ConditionTrue
+		healthMessage = "tiflash is healthy"
+	}
+	needUpdate = meta.SetStatusCondition(&rtx.TiFlash.Status.Conditions, metav1.Condition{
+		Type:               v1alpha1.TiFlashCondHealth,
+		Status:             healthStatus,
+		ObservedGeneration: rtx.TiFlash.Generation,
+		Reason:             v1alpha1.TiFlashHealthReason,
+		Message:            healthMessage,
+	}) || needUpdate
+
+	if needUpdate {
+		if err := t.Client.Status().Update(ctx, rtx.TiFlash); err != nil {
+			return task.Fail().With("cannot update status: %w", err)
+		}
+	}
+
+	// TODO: use a condition to refactor it
+	if rtx.TiFlash.Status.ID == "" || rtx.TiFlash.Status.State != v1alpha1.StoreStateServing || !v1alpha1.IsUpToDate(rtx.TiFlash) {
+		return task.Fail().With("tiflash may not be initialized, retry")
+	}
+
+	return task.Complete().With("status is synced")
+}
diff --git a/pkg/controllers/tiflash/tasks/store_labels.go b/pkg/controllers/tiflash/tasks/store_labels.go
new file mode 100644
index 00000000000..93c037e34dc
--- /dev/null
+++ b/pkg/controllers/tiflash/tasks/store_labels.go
@@ -0,0 +1,105 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"reflect"
+	"strconv"
+
+	"github.com/go-logr/logr"
+	"github.com/pingcap/kvproto/pkg/metapb"
+	corev1 "k8s.io/api/core/v1"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/k8s"
+	"github.com/pingcap/tidb-operator/pkg/utils/task/v2"
+)
+
+type TaskStoreLabels struct { // pushes node topology labels to the PD store record
+	Client client.Client
+	Logger logr.Logger
+}
+
+func NewTaskStoreLabels(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskStoreLabels{
+		Client: c,
+		Logger: logger,
+	}
+}
+
+func (*TaskStoreLabels) Name() string {
+	return "StoreLabels"
+}
+
+func (t *TaskStoreLabels) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	if rtx.StoreState != v1alpha1.StoreStateServing || rtx.PodIsTerminating || rtx.Pod == nil { // labels only meaningful for a live, serving store
+		return task.Complete().With("skip sync store labels as the store is not serving")
+	}
+
+	nodeName := rtx.Pod.Spec.NodeName
+	if nodeName == "" { // retried on a later reconcile once scheduled
+		return task.Fail().With("pod %s/%s has not been scheduled", rtx.TiFlash.Namespace, rtx.TiFlash.Name)
+	}
+
+	var node corev1.Node
+	if err := t.Client.Get(ctx, client.ObjectKey{Name: nodeName}, &node); err != nil {
+		return task.Fail().With("failed to get node %s: %s", nodeName, err)
+	}
+
+	// TODO: too many API calls to PD?
+	pdCfg, err := rtx.PDClient.GetConfig(ctx)
+	if err != nil {
+		return task.Fail().With("failed to get pd config: %s", err)
+	}
+	keys := pdCfg.Replication.LocationLabels // PD's configured topology label keys
+	if len(keys) == 0 {
+		return task.Complete().With("no store labels need to sync")
+	}
+
+	storeLabels := k8s.GetNodeLabelsForKeys(&node, keys) // node labels restricted to PD's location-label keys
+	if len(storeLabels) == 0 {
+		return task.Complete().With("no store labels from node %s to sync", nodeName)
+	}
+
+	if !storeLabelsEqualNodeLabels(rtx.StoreLabels, storeLabels) { // only call PD when something actually changed
+		storeID, err := strconv.ParseUint(rtx.StoreID, 10, 64)
+		if err != nil {
+			return task.Fail().With("failed to parse store id %s: %s", rtx.StoreID, err)
+		}
+		set, err := rtx.PDClient.SetStoreLabels(ctx, storeID, storeLabels)
+		if err != nil {
+			return task.Fail().With("failed to set store labels: %s", err)
+		} else if set {
+			t.Logger.Info("store labels synced", "storeID", rtx.StoreID, "storeLabels", storeLabels)
+		}
+	}
+
+	return task.Complete().With("store labels synced")
+}
+
+func storeLabelsEqualNodeLabels(storeLabels []*metapb.StoreLabel, nodeLabels map[string]string) bool { // true iff store labels, restricted to node-label keys, match exactly
+	ls := map[string]string{}
+	for _, label := range storeLabels {
+		key := label.GetKey()
+		if _, ok := nodeLabels[key]; ok { // ignore store labels for keys the node does not carry
+			val := label.GetValue()
+			ls[key] = val
+		}
+	}
+	return reflect.DeepEqual(ls, nodeLabels)
+}
diff --git a/pkg/controllers/tiflash/tasks/util.go b/pkg/controllers/tiflash/tasks/util.go
new file mode 100644
index 00000000000..d4e6d3d3576
--- /dev/null
+++ b/pkg/controllers/tiflash/tasks/util.go
@@ -0,0 +1,26 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import "fmt"
+
+func ConfigMapName(tiflashName string) string { // ConfigMap shares the instance name
+	return tiflashName
+}
+
+func PersistentVolumeClaimName(tiflashName string, volIndex int) string {
+	// ref: https://github.com/pingcap/tidb-operator/blob/486cc85c8380efc4f36b3125a1abba9e3146a2c8/pkg/apis/pingcap/v1alpha1/helpers.go#L105
+	return fmt.Sprintf("data%d-%s", volIndex, tiflashName)
+}
diff --git a/pkg/controllers/tiflashgroup/controller.go b/pkg/controllers/tiflashgroup/controller.go
new file mode 100644
index 00000000000..b8e8d4bf071
--- /dev/null
+++ b/pkg/controllers/tiflashgroup/controller.go
@@ -0,0 +1,116 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tiflashgroup
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/workqueue"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/controllers/tiflashgroup/tasks"
+	"github.com/pingcap/tidb-operator/pkg/utils/k8s"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type Reconciler struct { // reconciles TiFlashGroup objects
+	Logger logr.Logger
+	Client client.Client
+}
+
+func Setup(mgr manager.Manager, c client.Client) error { // registers the controller with the manager
+	r := &Reconciler{
+		Logger: mgr.GetLogger().WithName("TiFlashGroup"),
+		Client: c,
+	}
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&v1alpha1.TiFlashGroup{}).
+		Owns(&v1alpha1.TiFlash{}).
+		// Only care about the generation change (i.e. spec update)
+		Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler(), builder.WithPredicates(predicate.GenerationChangedPredicate{})).
+		WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}).
+		Complete(r)
+}
+
+func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { // re-queues every TiFlashGroup of an updated Cluster
+	return handler.TypedFuncs[client.Object, reconcile.Request]{
+		UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object],
+			queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+			cluster := event.ObjectNew.(*v1alpha1.Cluster)
+
+			var list v1alpha1.TiFlashGroupList
+			if err := r.Client.List(ctx, &list, client.InNamespace(cluster.Namespace),
+				client.MatchingFields{"spec.cluster.name": cluster.Name}); err != nil { // relies on a field index registered elsewhere
+				if !errors.IsNotFound(err) {
+					r.Logger.Error(err, "cannot list all tiflash groups", "ns", cluster.Namespace, "cluster", cluster.Name)
+				}
+				return
+			}
+
+			for i := range list.Items {
+				flashGroup := &list.Items[i]
+				queue.Add(reconcile.Request{
+					NamespacedName: types.NamespacedName{
+						Name:      flashGroup.Name,
+						Namespace: flashGroup.Namespace,
+					},
+				})
+			}
+		},
+	}
+}
+
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { // runs the task pipeline for one TiFlashGroup
+	logger := r.Logger.WithValues("tiflashgroup", req.NamespacedName)
+	reporter := task.NewTableTaskReporter()
+
+	startTime := time.Now()
+	logger.Info("start reconcile")
+	defer func() {
+		dur := time.Since(startTime)
+		logger.Info("end reconcile", "duration", dur)
+		logger.Info("summary: \n" + reporter.Summary()) // fixed typo: was "summay"
+	}()
+
+	rtx := &tasks.ReconcileContext{
+		// some fields will be set in the context task
+		Context: ctx,
+		Key:     req.NamespacedName,
+	}
+
+	runner := task.NewTaskRunner[tasks.ReconcileContext](reporter)
+	runner.AddTasks(
+		tasks.NewTaskContext(logger, r.Client),
+		tasks.NewTaskFinalizer(logger, r.Client),
+		tasks.NewTaskService(logger, r.Client),
+		tasks.NewTaskUpdater(logger, r.Client),
+		tasks.NewTaskStatus(logger, r.Client),
+	)
+
+	return runner.Run(rtx)
+}
diff --git a/pkg/controllers/tiflashgroup/tasks/ctx.go b/pkg/controllers/tiflashgroup/tasks/ctx.go
new file mode 100644
index 00000000000..8442d347f50
--- /dev/null
+++ b/pkg/controllers/tiflashgroup/tasks/ctx.go
@@ -0,0 +1,121 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"cmp"
+	"context"
+	"slices"
+
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/types"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/action"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type ReconcileContext struct { // shared state threaded through the task pipeline
+	context.Context
+
+	Key types.NamespacedName
+
+	Suspended bool
+
+	Cluster *v1alpha1.Cluster
+
+	TiFlashGroup   *v1alpha1.TiFlashGroup
+	Peers          []*v1alpha1.TiFlash
+	UpgradeChecker action.UpgradeChecker
+
+	// Status fields
+	v1alpha1.CommonStatus
+}
+
+func (ctx *ReconcileContext) Self() *ReconcileContext {
+	return ctx
+}
+
+type TaskContext struct { // populates ReconcileContext from the API server
+	Logger logr.Logger
+	Client client.Client
+}
+
+func NewTaskContext(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskContext{
+		Logger: logger,
+		Client: c,
+	}
+}
+
+func (*TaskContext) Name() string {
+	return "Context"
+}
+
+func (t *TaskContext) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	var flashg v1alpha1.TiFlashGroup
+	if err := t.Client.Get(ctx, rtx.Key, &flashg); err != nil {
+		if !errors.IsNotFound(err) {
+			return task.Fail().With("can't get tiflash group: %w", err)
+		}
+
+		return task.Complete().Break().With("tiflash group has been deleted")
+	}
+	rtx.TiFlashGroup = &flashg
+
+	var cluster v1alpha1.Cluster
+	if err := t.Client.Get(ctx, client.ObjectKey{
+		Name:      flashg.Spec.Cluster.Name,
+		Namespace: flashg.Namespace,
+	}, &cluster); err != nil {
+		return task.Fail().With("cannot find cluster %s: %w", flashg.Spec.Cluster.Name, err)
+	}
+	rtx.Cluster = &cluster
+
+	if cluster.ShouldPauseReconcile() {
+		return task.Complete().Break().With("cluster reconciliation is paused")
+	}
+
+	var flashList v1alpha1.TiFlashList
+	if err := t.Client.List(ctx, &flashList, client.InNamespace(flashg.Namespace), client.MatchingLabels{
+		v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator,
+		v1alpha1.LabelKeyCluster:   cluster.Name,
+		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash,
+		v1alpha1.LabelKeyGroup:     flashg.Name,
+	}); err != nil {
+		return task.Fail().With("cannot list tiflash peers: %w", err)
+	}
+
+	rtx.Peers = make([]*v1alpha1.TiFlash, len(flashList.Items))
+	rtx.Suspended = len(flashList.Items) > 0
+	for i := range flashList.Items {
+		rtx.Peers[i] = &flashList.Items[i]
+		if !meta.IsStatusConditionTrue(flashList.Items[i].Status.Conditions, v1alpha1.TiFlashCondSuspended) { // was TiKVCondSuspended: copy-paste bug, check the TiFlash condition set by the tiflash status task
+			// TiFlash Group is not suspended if any of its members is not suspended
+			rtx.Suspended = false
+		}
+	}
+	slices.SortFunc(rtx.Peers, func(a, b *v1alpha1.TiFlash) int {
+		return cmp.Compare(a.Name, b.Name)
+	})
+
+	rtx.UpgradeChecker = action.NewUpgradeChecker(t.Client, rtx.Cluster, t.Logger)
+	return task.Complete().With("new context completed")
+}
diff --git a/pkg/controllers/tiflashgroup/tasks/finalizer.go b/pkg/controllers/tiflashgroup/tasks/finalizer.go
new file mode 100644
index 00000000000..8e52d4b30bf
--- /dev/null
+++ b/pkg/controllers/tiflashgroup/tasks/finalizer.go
@@ -0,0 +1,81 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"fmt"
+
+	"github.com/go-logr/logr"
+	utilerr "k8s.io/apimachinery/pkg/util/errors"
+
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/k8s"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type TaskFinalizer struct { // manages the group finalizer and cascading deletion of member instances
+	Client client.Client
+	Logger logr.Logger
+}
+
+func NewTaskFinalizer(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskFinalizer{
+		Client: c,
+		Logger: logger,
+	}
+}
+
+func (*TaskFinalizer) Name() string {
+	return "Finalizer"
+}
+
+func (t *TaskFinalizer) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	if !rtx.TiFlashGroup.GetDeletionTimestamp().IsZero() { // group is being deleted: tear down members first
+		errList := []error{}
+		names := []string{}
+		for _, peer := range rtx.Peers {
+			names = append(names, peer.Name)
+			if peer.GetDeletionTimestamp().IsZero() { // only issue Delete once per instance
+				if err := t.Client.Delete(ctx, peer); err != nil {
+					errList = append(errList, fmt.Errorf("try to delete the tiflash instance %v failed: %w", peer.Name, err))
+				}
+			}
+		}
+
+		if len(errList) != 0 {
+			return task.Fail().With("failed to delete all tiflash instances: %v", utilerr.NewAggregate(errList))
+		}
+
+		if len(rtx.Peers) != 0 { // fail to requeue until all members are actually gone
+			return task.Fail().With("wait for all tiflash instances being removed, %v still exists", names)
+		}
+
+		if err := k8s.EnsureGroupSubResourceDeleted(ctx, t.Client,
+			rtx.TiFlashGroup.Namespace, rtx.TiFlashGroup.Name); err != nil {
+			return task.Fail().With("cannot delete subresources: %w", err)
+		}
+		if err := k8s.RemoveFinalizer(ctx, t.Client, rtx.TiFlashGroup); err != nil { // unblocks actual object removal
+			return task.Fail().With("failed to ensure finalizer has been removed: %w", err)
+		}
+	} else {
+		if err := k8s.EnsureFinalizer(ctx, t.Client, rtx.TiFlashGroup); err != nil {
+			return task.Fail().With("failed to ensure finalizer has been added: %w", err)
+		}
+	}
+
+	return task.Complete().With("finalizer is synced")
+}
diff --git a/pkg/controllers/tiflashgroup/tasks/status.go b/pkg/controllers/tiflashgroup/tasks/status.go
new file mode 100644
index 00000000000..fab63a43fd1
--- /dev/null
+++ b/pkg/controllers/tiflashgroup/tasks/status.go
@@ -0,0 +1,94 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type TaskStatus struct { // aggregates member state into TiFlashGroup status
+	Client client.Client
+	Logger logr.Logger
+}
+
+func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskStatus{
+		Client: c,
+		Logger: logger,
+	}
+}
+
+func (*TaskStatus) Name() string {
+	return "Status"
+}
+
+func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	suspendStatus := metav1.ConditionFalse
+	suspendMessage := "tiflash group is not suspended"
+	if rtx.Suspended { // all members reported suspended by the context task
+		suspendStatus = metav1.ConditionTrue
+		suspendMessage = "tiflash group is suspended"
+	} else if rtx.Cluster.ShouldSuspendCompute() { // suspension requested but not yet complete
+		suspendMessage = "tiflash group is suspending"
+	}
+	conditionChanged := meta.SetStatusCondition(&rtx.TiFlashGroup.Status.Conditions, metav1.Condition{
+		Type:               v1alpha1.TiFlashGroupCondSuspended,
+		Status:             suspendStatus,
+		ObservedGeneration: rtx.TiFlashGroup.Generation,
+		Reason:             v1alpha1.TiFlashGroupSuspendReason,
+		Message:            suspendMessage,
+	})
+
+	// Update the current revision if all instances are synced.
+	if int(rtx.TiFlashGroup.GetDesiredReplicas()) == len(rtx.Peers) && v1alpha1.AllInstancesSynced(rtx.Peers, rtx.UpdateRevision) {
+		if rtx.CurrentRevision != rtx.UpdateRevision || rtx.TiFlashGroup.Status.Version != rtx.TiFlashGroup.Spec.Version {
+			rtx.CurrentRevision = rtx.UpdateRevision
+			rtx.TiFlashGroup.Status.Version = rtx.TiFlashGroup.Spec.Version
+			conditionChanged = true // reused as a generic "status needs update" flag here
+		}
+	}
+	var readyReplicas int32
+	for _, peer := range rtx.Peers {
+		if peer.IsHealthy() {
+			readyReplicas++
+		}
+	}
+
+	if conditionChanged || rtx.TiFlashGroup.Status.ReadyReplicas != readyReplicas ||
+		rtx.TiFlashGroup.Status.Replicas != int32(len(rtx.Peers)) || //nolint:gosec // expected type conversion
+		!v1alpha1.IsReconciled(rtx.TiFlashGroup) ||
+		v1alpha1.StatusChanged(rtx.TiFlashGroup, rtx.CommonStatus) {
+		rtx.TiFlashGroup.Status.ReadyReplicas = readyReplicas
+		rtx.TiFlashGroup.Status.Replicas = int32(len(rtx.Peers)) //nolint:gosec // expected type conversion
+		rtx.TiFlashGroup.Status.ObservedGeneration = rtx.TiFlashGroup.Generation
+		rtx.TiFlashGroup.Status.CurrentRevision = rtx.CurrentRevision
+		rtx.TiFlashGroup.Status.UpdateRevision = rtx.UpdateRevision
+		rtx.TiFlashGroup.Status.CollisionCount = rtx.CollisionCount
+
+		if err := t.Client.Status().Update(ctx, rtx.TiFlashGroup); err != nil {
+			return task.Fail().With("cannot update status: %w", err)
+		}
+	}
+
+	return task.Complete().With("status is synced")
+}
diff --git a/pkg/controllers/tiflashgroup/tasks/svc.go b/pkg/controllers/tiflashgroup/tasks/svc.go
new file mode 100644
index 00000000000..ebe4305c416
--- /dev/null
+++ b/pkg/controllers/tiflashgroup/tasks/svc.go
@@ -0,0 +1,118 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"fmt"
+
+	"github.com/go-logr/logr"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+)
+
+type TaskService struct { // maintains the headless peer Service for the group
+	Logger logr.Logger
+	Client client.Client
+}
+
+func NewTaskService(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskService{
+		Logger: logger,
+		Client: c,
+	}
+}
+
+func (*TaskService) Name() string {
+	return "Service"
+}
+
+func (t *TaskService) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	if rtx.Cluster.ShouldSuspendCompute() { // no service management while compute is suspended
+		return task.Complete().With("skip service for suspension")
+	}
+
+	flashg := rtx.TiFlashGroup
+
+	svc := newHeadlessService(flashg)
+	if err := t.Client.Apply(ctx, svc); err != nil { // server-side apply keeps this idempotent
+		return task.Fail().With(fmt.Sprintf("can't create headless service of tiflash: %v", err))
+	}
+
+	return task.Complete().With("headless service of tiflash has been applied")
+}
+
+func newHeadlessService(flashg *v1alpha1.TiFlashGroup) *corev1.Service { // builds the per-group headless Service exposing all tiflash ports
+	ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack
+	return &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      HeadlessServiceName(flashg.Spec.Cluster.Name, flashg.Name),
+			Namespace: flashg.Namespace,
+			Labels: map[string]string{
+				v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator,
+				v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash,
+				v1alpha1.LabelKeyCluster:   flashg.Spec.Cluster.Name,
+				v1alpha1.LabelKeyGroup:     flashg.Name,
+			},
+			OwnerReferences: []metav1.OwnerReference{
+				*metav1.NewControllerRef(flashg, v1alpha1.SchemeGroupVersion.WithKind("TiFlashGroup")),
+			},
+		},
+		Spec: corev1.ServiceSpec{
+			Type:           corev1.ServiceTypeClusterIP,
+			ClusterIP:      corev1.ClusterIPNone, // headless: per-pod DNS for peer discovery
+			IPFamilyPolicy: &ipFamilyPolicy,
+			Selector: map[string]string{
+				v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator,
+				v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash,
+				v1alpha1.LabelKeyCluster:   flashg.Spec.Cluster.Name,
+				v1alpha1.LabelKeyGroup:     flashg.Name,
+			},
+			Ports: []corev1.ServicePort{
+				{
+					Name:       v1alpha1.TiFlashPortNameFlash,
+					Port:       flashg.GetFlashPort(),
+					Protocol:   corev1.ProtocolTCP,
+					TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameFlash),
+				},
+				{
+					Name:       v1alpha1.TiFlashPortNameProxy,
+					Port:       flashg.GetProxyPort(),
+					Protocol:   corev1.ProtocolTCP,
+					TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameProxy),
+				},
+				{
+					Name:       v1alpha1.TiFlashPortNameMetrics,
+					Port:       flashg.GetMetricsPort(),
+					Protocol:   corev1.ProtocolTCP,
+					TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameMetrics),
+				},
+				{
+					Name:       v1alpha1.TiFlashPortNameProxyStatus,
+					Port:       flashg.GetProxyStatusPort(),
+					Protocol:   corev1.ProtocolTCP,
+					TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameProxyStatus),
+				},
+			},
+			PublishNotReadyAddresses: true, // peers must resolve before readiness
+		},
+	}
+}
diff --git a/pkg/controllers/tiflashgroup/tasks/updater.go b/pkg/controllers/tiflashgroup/tasks/updater.go
new file mode 100644
index 00000000000..6fe7396f77f
--- /dev/null
+++ b/pkg/controllers/tiflashgroup/tasks/updater.go
@@ -0,0 +1,185 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"fmt"
+
+	"github.com/go-logr/logr"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/runtime"
+	"github.com/pingcap/tidb-operator/pkg/updater"
+	"github.com/pingcap/tidb-operator/pkg/updater/policy"
+	"github.com/pingcap/tidb-operator/pkg/utils/k8s/revision"
+	maputil "github.com/pingcap/tidb-operator/pkg/utils/map"
+	"github.com/pingcap/tidb-operator/pkg/utils/random"
+	"github.com/pingcap/tidb-operator/pkg/utils/task"
+	"github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history"
+)
+
+// TaskUpdater is a task for updating TiFlashGroup when its spec is changed.
+type TaskUpdater struct {
+	Logger logr.Logger
+	Client client.Client
+	CRCli  history.Interface
+}
+
+func NewTaskUpdater(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
+	return &TaskUpdater{
+		Logger: logger,
+		Client: c,
+		CRCli:  history.NewClient(c),
+	}
+}
+
+func (*TaskUpdater) Name() string {
+	return "Updater"
+}
+
+func (t *TaskUpdater) Sync(ctx task.Context[ReconcileContext]) task.Result {
+	rtx := ctx.Self()
+
+	// TODO: move to task v2
+	if !rtx.TiFlashGroup.GetDeletionTimestamp().IsZero() { // deletion is handled by the finalizer task
+		return task.Complete().With("tiflash group has been deleted")
+	}
+
+	// List all controller revisions for the TiFlashGroup.
+	selector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ // NOTE(review): error deliberately ignored — presumably a static MatchLabels selector can't fail; confirm
+		MatchLabels: map[string]string{
+			v1alpha1.LabelKeyCluster:   rtx.Cluster.Name,
+			v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash,
+			v1alpha1.LabelKeyGroup:     rtx.TiFlashGroup.Name,
+		},
+	})
+	revisions, err := t.CRCli.ListControllerRevisions(rtx.TiFlashGroup, selector)
+	if err != nil {
+		return task.Fail().With("cannot list controller revisions: %w", err)
+	}
+	history.SortControllerRevisions(revisions)
+
+	// Get the current(old) and update(new) ControllerRevisions.
+	currentRevision, updateRevision, collisionCount, err := revision.GetCurrentAndUpdate(
+		rtx.TiFlashGroup, revisions, t.CRCli, rtx.TiFlashGroup)
+	if err != nil {
+		return task.Fail().With("cannot get revisions: %w", err)
+	}
+	rtx.CurrentRevision = currentRevision.Name
+	rtx.UpdateRevision = updateRevision.Name
+	rtx.CollisionCount = &collisionCount
+
+	if err = revision.TruncateHistory(t.CRCli, rtx.Peers, revisions,
+		currentRevision, updateRevision, rtx.Cluster.Spec.RevisionHistoryLimit); err != nil {
+		t.Logger.Error(err, "failed to truncate history") // best-effort: truncation failure does not block the rollout
+	}
+
+	if needVersionUpgrade(rtx.TiFlashGroup) && !rtx.UpgradeChecker.CanUpgrade(ctx, rtx.TiFlashGroup) {
+		return task.Fail().Continue().With(
+			"preconditions of upgrading the tiflash group %s/%s are not met",
+			rtx.TiFlashGroup.Namespace, rtx.TiFlashGroup.Name)
+	}
+
+	desired := 1 // default replica count when Spec.Replicas is unset
+	if rtx.TiFlashGroup.Spec.Replicas != nil {
+		desired = int(*rtx.TiFlashGroup.Spec.Replicas)
+	}
+
+	var topos []v1alpha1.ScheduleTopology
+	for _, p := range rtx.TiFlashGroup.Spec.SchedulePolicies {
+		switch p.Type {
+		case v1alpha1.SchedulePolicyTypeEvenlySpread:
+			topos = p.EvenlySpread.Topologies
+		default:
+			// do nothing
+		}
+	}
+
+	topoPolicy, err := policy.NewTopologyPolicy[*runtime.TiFlash](topos)
+	if err != nil {
+		return task.Fail().With("invalid topo policy, it should be validated: %w", err)
+	}
+
+	for _, tiflash := range rtx.Peers {
+		topoPolicy.Add(runtime.FromTiFlash(tiflash)) // seed the policy with the existing topology distribution
+	}
+
+	wait, err := updater.New[*runtime.TiFlash]().
+		WithInstances(runtime.FromTiFlashSlice(rtx.Peers)...).
+		WithDesired(desired).
+		WithClient(t.Client).
+		WithMaxSurge(0).
+		WithMaxUnavailable(1).
+		WithRevision(rtx.UpdateRevision).
+		WithNewFactory(TiFlashNewer(rtx.TiFlashGroup, rtx.UpdateRevision)).
+		WithAddHooks(topoPolicy).
+		WithUpdateHooks(
+			policy.KeepName[*runtime.TiFlash](),
+			policy.KeepTopology[*runtime.TiFlash](),
+		).
+		WithDelHooks(topoPolicy).
+		WithScaleInPreferPolicy(
+			topoPolicy,
+		).
+		Build().
+		Do(ctx)
+	if err != nil {
+		return task.Fail().With("cannot update instances: %w", err)
+	}
+	if wait {
+		return task.Complete().With("wait for all instances ready")
+	}
+	return task.Complete().With("all instances are synced")
+}
+
+func needVersionUpgrade(flashg *v1alpha1.TiFlashGroup) bool { // empty Status.Version means first deploy, not an upgrade
+	return flashg.Spec.Version != flashg.Status.Version && flashg.Status.Version != ""
+}
+
+func TiFlashNewer(fg *v1alpha1.TiFlashGroup, rev string) updater.NewFactory[*runtime.TiFlash] { // factory producing new TiFlash instances stamped with revision rev
+	return updater.NewFunc[*runtime.TiFlash](func() *runtime.TiFlash {
+		//nolint:mnd // refactor to use a constant
+		name := fmt.Sprintf("%s-%s-%s", fg.Spec.Cluster.Name, fg.Name, random.Random(6))
+		spec := fg.Spec.Template.Spec.DeepCopy()
+
+		tiflash := &v1alpha1.TiFlash{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: fg.Namespace,
+				Name:      name,
+				Labels: maputil.Merge(fg.Spec.Template.Labels, map[string]string{
+					v1alpha1.LabelKeyManagedBy:            v1alpha1.LabelValManagedByOperator,
+					v1alpha1.LabelKeyComponent:            v1alpha1.LabelValComponentTiFlash,
+					v1alpha1.LabelKeyCluster:              fg.Spec.Cluster.Name,
+					v1alpha1.LabelKeyGroup:                fg.Name,
+					v1alpha1.LabelKeyInstanceRevisionHash: rev,
+				}),
+				Annotations: maputil.Copy(fg.Spec.Template.Annotations),
+				OwnerReferences: []metav1.OwnerReference{
+					*metav1.NewControllerRef(fg, v1alpha1.SchemeGroupVersion.WithKind("TiFlashGroup")),
+				},
+			},
+			Spec: v1alpha1.TiFlashSpec{
+				Cluster:             fg.Spec.Cluster,
+				Version:             fg.Spec.Version,
+				Subdomain:           HeadlessServiceName(fg.Spec.Cluster.Name, fg.Name),
+				TiFlashTemplateSpec: *spec,
+			},
+		}
+
+		return runtime.FromTiFlash(tiflash)
+	})
+}
diff --git a/pkg/controllers/tiflashgroup/tasks/util.go b/pkg/controllers/tiflashgroup/tasks/util.go
new file mode 100644
index 00000000000..2b10ada1e26
--- /dev/null
+++ b/pkg/controllers/tiflashgroup/tasks/util.go
@@ -0,0 +1,24 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tasks
+
+import (
+	"fmt"
+)
+
+// TODO: fix length issue
+func HeadlessServiceName(clusterName, groupName string) string { // "<cluster>-<group>-peer"; also used as the instance Subdomain
+	return fmt.Sprintf("%s-%s-peer", clusterName, groupName)
+}
diff --git a/pkg/controllers/tikv/builder.go b/pkg/controllers/tikv/builder.go
new file mode 100644
index 00000000000..49a5b00f329
--- /dev/null
+++ b/pkg/controllers/tikv/builder.go
@@ -0,0 +1,61 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "github.com/pingcap/tidb-operator/pkg/controllers/tikv/tasks" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +func (r *Reconciler) NewRunner(reporter task.TaskReporter) task.TaskRunner[tasks.ReconcileContext] { + runner := task.NewTaskRunner(reporter, + // get tikv + tasks.TaskContextTiKV(r.Client), + // if it's deleted just return + task.NewSwitchTask(tasks.CondTiKVHasBeenDeleted()), + + // get cluster info, FinalizerDel will use it + tasks.TaskContextCluster(r.Client), + // get info from pd + tasks.TaskContextInfoFromPD(r.PDClientManager), + + task.NewSwitchTask(tasks.CondTiKVIsDeleting(), + tasks.TaskFinalizerDel(r.Client), + ), + + // check whether it's paused + task.NewSwitchTask(tasks.CondClusterIsPaused()), + + // get pod and check whether the cluster is suspending + tasks.TaskContextPod(r.Client), + task.NewSwitchTask(tasks.CondClusterIsSuspending(), + tasks.TaskFinalizerAdd(r.Client), + tasks.TaskPodSuspend(r.Client), + tasks.TaskStatusSuspend(r.Client), + ), + + // normal process + tasks.TaskContextTiKVGroup(r.Client), + tasks.TaskFinalizerAdd(r.Client), + tasks.NewTaskConfigMap(r.Logger, r.Client), + tasks.NewTaskPVC(r.Logger, r.Client, r.VolumeModifier), + tasks.NewTaskPod(r.Logger, r.Client), + tasks.NewTaskStoreLabels(r.Logger, r.Client), + tasks.NewTaskEvictLeader(r.Logger, r.Client), + tasks.NewTaskStatus(r.Logger, r.Client), + ) + + return runner +} diff --git a/pkg/controllers/tikv/controller.go b/pkg/controllers/tikv/controller.go new file mode 100644 index 00000000000..08c85783886 --- /dev/null +++ b/pkg/controllers/tikv/controller.go @@ -0,0 +1,82 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/tikv/tasks" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/volumes" +) + +type Reconciler struct { + Logger logr.Logger + Client client.Client + PDClientManager pdm.PDClientManager + VolumeModifier volumes.Modifier +} + +func Setup(mgr manager.Manager, c client.Client, pdcm pdm.PDClientManager, vm volumes.Modifier) error { + r := &Reconciler{ + Logger: mgr.GetLogger().WithName("TiKV"), + Client: c, + PDClientManager: pdcm, + VolumeModifier: vm, + } + + return ctrl.NewControllerManagedBy(mgr).For(&v1alpha1.TiKV{}). + Owns(&corev1.Pod{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.PersistentVolumeClaim{}). + Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler()). + WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}). + WatchesRawSource(pdcm.Source(&pdv1.Store{}, r.StoreEventHandler())). 
+ Complete(r) +} + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger.WithValues("tikv", req.NamespacedName) + reporter := task.NewTableTaskReporter() + + startTime := time.Now() + logger.Info("start reconcile") + defer func() { + dur := time.Since(startTime) + logger.Info("end reconcile", "duration", dur) + logger.Info("summay: \n" + reporter.Summary()) + }() + + rtx := &tasks.ReconcileContext{ + // some fields will be set in the context task + Context: ctx, + Key: req.NamespacedName, + } + + runner := r.NewRunner(reporter) + return runner.Run(rtx) +} diff --git a/pkg/controllers/tikv/handler.go b/pkg/controllers/tikv/handler.go new file mode 100644 index 00000000000..6f7e4a2bb94 --- /dev/null +++ b/pkg/controllers/tikv/handler.go @@ -0,0 +1,140 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tikv + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + kvcfg "github.com/pingcap/tidb-operator/pkg/configs/tikv" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + pdm "github.com/pingcap/tidb-operator/pkg/timanager/pd" +) + +func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + oldObj := event.ObjectOld.(*v1alpha1.Cluster) + newObj := event.ObjectNew.(*v1alpha1.Cluster) + + if newObj.Status.PD != oldObj.Status.PD { + r.Logger.Info("pd url is updating", "from", oldObj.Status.PD, "to", newObj.Status.PD) + } else if !reflect.DeepEqual(oldObj.Spec.SuspendAction, newObj.Spec.SuspendAction) { + r.Logger.Info("suspend action is updating", "from", oldObj.Spec.SuspendAction, "to", newObj.Spec.SuspendAction) + } else if oldObj.Spec.Paused != newObj.Spec.Paused { + r.Logger.Info("cluster paused is updating", "from", oldObj.Spec.Paused, "to", newObj.Spec.Paused) + } else { + return + } + + var kvl v1alpha1.TiKVList + if err := r.Client.List(ctx, &kvl, client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: newObj.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + }, client.InNamespace(newObj.Namespace)); err != nil { + r.Logger.Error(err, "cannot list all tikv instances", "ns", newObj.Namespace, "cluster", newObj.Name) + return + } + + for i := range kvl.Items { + tikv := &kvl.Items[i] + 
queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tikv.Name, + Namespace: tikv.Namespace, + }, + }) + } + }, + } +} + +func (r *Reconciler) StoreEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(ctx context.Context, event event.TypedCreateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + s := event.Object.(*pdv1.Store) + req, err := r.getRequestOfTiKVStore(ctx, s) + if err != nil { + return + } + queue.Add(req) + }, + + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + s := event.ObjectNew.(*pdv1.Store) + req, err := r.getRequestOfTiKVStore(ctx, s) + if err != nil { + return + } + queue.Add(req) + }, + + DeleteFunc: func(ctx context.Context, event event.TypedDeleteEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + s := event.Object.(*pdv1.Store) + req, err := r.getRequestOfTiKVStore(ctx, s) + if err != nil { + return + } + queue.Add(req) + }, + } +} + +func (r *Reconciler) getRequestOfTiKVStore(ctx context.Context, s *pdv1.Store) (reconcile.Request, error) { + if s.Engine() != pdv1.StoreEngineTiKV { + return reconcile.Request{}, fmt.Errorf("store is not tikv") + } + + ns, cluster := pdm.SplitPrimaryKey(s.Namespace) + var kvl v1alpha1.TiKVList + if err := r.Client.List(ctx, &kvl, client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: cluster, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + }, client.InNamespace(ns)); err != nil { + r.Logger.Error(err, "cannot list all tikv instances", "ns", ns, "cluster", cluster) + return reconcile.Request{}, err + } + + for i := range kvl.Items { + tikv := &kvl.Items[i] + if s.Name == kvcfg.GetAdvertiseClientURLs(tikv) { + return 
reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tikv.Name, + Namespace: tikv.Namespace, + }, + }, nil + } + } + + err := fmt.Errorf("store: %v/%v, addr: %v", s.Namespace, s.Name, s.Address) + r.Logger.Error(err, "failed to find tikv of store") + return reconcile.Request{}, err +} diff --git a/pkg/controllers/tikv/tasks/cm.go b/pkg/controllers/tikv/tasks/cm.go new file mode 100644 index 00000000000..e2cef72b2ac --- /dev/null +++ b/pkg/controllers/tikv/tasks/cm.go @@ -0,0 +1,91 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + tikvcfg "github.com/pingcap/tidb-operator/pkg/configs/tikv" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/utils/toml" +) + +type TaskConfigMap struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskConfigMap(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskConfigMap{ + Client: c, + Logger: logger, + } +} + +func (*TaskConfigMap) Name() string { + return "ConfigMap" +} + +func (t *TaskConfigMap) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + c := tikvcfg.Config{} + decoder, encoder := toml.Codec[tikvcfg.Config]() + if err := decoder.Decode([]byte(rtx.TiKV.Spec.Config), &c); err != nil { + return task.Fail().With("tikv config cannot be decoded: %w", err) + } + if err := c.Overlay(rtx.Cluster, rtx.TiKV); err != nil { + return task.Fail().With("cannot generate tikv config: %w", err) + } + + data, err := encoder.Encode(&c) + if err != nil { + return task.Fail().With("tikv config cannot be encoded: %w", err) + } + + rtx.ConfigHash, err = toml.GenerateHash(rtx.TiKV.Spec.Config) + if err != nil { + return task.Fail().With("failed to generate hash for `tikv.spec.config`: %w", err) + } + expected := newConfigMap(rtx.TiKV, data, rtx.ConfigHash) + if e := t.Client.Apply(rtx, expected); e != nil { + return task.Fail().With("can't create/update cm of tikv: %w", e) + } + return task.Complete().With("cm is synced") +} + +func newConfigMap(tikv *v1alpha1.TiKV, data []byte, hash string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(tikv.Name), + Namespace: tikv.Namespace, + Labels: 
maputil.Merge(tikv.Labels, map[string]string{ + v1alpha1.LabelKeyInstance: tikv.Name, + v1alpha1.LabelKeyConfigHash: hash, + }), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tikv, v1alpha1.SchemeGroupVersion.WithKind("TiKV")), + }, + }, + Data: map[string]string{ + v1alpha1.ConfigFileName: string(data), + }, + } +} diff --git a/pkg/controllers/tikv/tasks/ctx.go b/pkg/controllers/tikv/tasks/ctx.go new file mode 100644 index 00000000000..b61c79e3df4 --- /dev/null +++ b/pkg/controllers/tikv/tasks/ctx.go @@ -0,0 +1,203 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// ReconcileContext carries all state shared between the TiKV reconcile tasks.
// The TaskContext* tasks populate it; later tasks consume it.
type ReconcileContext struct {
	context.Context

	// Key identifies the TiKV object being reconciled.
	Key types.NamespacedName

	// PDClient talks to the PD cluster this TiKV belongs to; set by
	// TaskContextInfoFromPD when a PD client is registered.
	PDClient pdapi.PDClient

	Healthy bool

	StoreExists    bool
	StoreID        string
	StoreState     string
	LeaderEvicting bool

	Suspended bool

	Cluster   *v1alpha1.Cluster
	TiKV      *v1alpha1.TiKV
	TiKVGroup *v1alpha1.TiKVGroup
	Pod       *corev1.Pod

	// Store is the PD-side store record matched by advertise client URL.
	Store *pdv1.Store

	// ConfigHash stores the hash of **user-specified** config (i.e.`.Spec.Config`),
	// which will be used to determine whether the config has changed.
	// This ensures that our config overlay logic will not restart the tidb cluster unexpectedly.
	ConfigHash string

	// Pod cannot be updated when calling the DELETE API, so this field
	// indicates that the underlying pod is being deleted.
	PodIsTerminating bool
}

// Self returns the context itself; it satisfies the task framework's
// Context[ReconcileContext] contract.
func (ctx *ReconcileContext) Self() *ReconcileContext {
	return ctx
}

// TaskContextTiKV loads the TiKV object named by Key into the context.
// A missing object is not an error: TiKV stays nil and later conditions
// short-circuit the pipeline.
func TaskContextTiKV(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("ContextTiKV", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		var tikv v1alpha1.TiKV
		if err := c.Get(ctx, rtx.Key, &tikv); err != nil {
			if !errors.IsNotFound(err) {
				return task.Fail().With("can't get tikv instance: %w", err)
			}

			return task.Complete().With("tikv instance has been deleted")
		}
		rtx.TiKV = &tikv
		return task.Complete().With("tikv is set")
	})
}

// TaskContextInfoFromPD fetches store and evict-leader-scheduler state for
// this TiKV from the cached PD client. All "not ready yet" situations
// (no client registered, cache not synced, store absent) complete without
// failing so the pipeline can proceed or retry later.
func TaskContextInfoFromPD(cm pdm.PDClientManager) task.Task[ReconcileContext] {
	return task.NameTaskFunc("ContextInfoFromPD", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		c, ok := cm.Get(pdm.PrimaryKey(rtx.TiKV.Namespace, rtx.TiKV.Spec.Cluster.Name))
		if !ok {
			return task.Complete().With("pd client is not registered")
		}
		rtx.PDClient = c.Underlay()

		if !c.HasSynced() {
			return task.Complete().With("store info is not synced, just wait for next sync")
		}

		// Stores are keyed by the instance's advertised client URL.
		s, err := c.Stores().Get(kvcfg.GetAdvertiseClientURLs(rtx.TiKV))
		if err != nil {
			if !errors.IsNotFound(err) {
				return task.Fail().With("failed to get store info: %w", err)
			}
			return task.Complete().With("store does not exist")
		}
		rtx.Store, rtx.StoreID, rtx.StoreState = s, s.ID, string(s.NodeState)

		// TODO: cache evict leader scheduler info, then we don't need to check suspend here
		if rtx.Cluster.ShouldSuspendCompute() {
			return task.Complete().With("cluster is suspending")
		}
		scheduler, err := rtx.PDClient.GetEvictLeaderScheduler(ctx, rtx.StoreID)
		if err != nil {
			return task.Fail().With("pd is unexpectedly crashed: %w", err)
		}
		if scheduler != "" {
			rtx.LeaderEvicting = true
		}

		return task.Complete().With("get store info")
	})
}

// TaskContextCluster loads the owning Cluster object into the context;
// unlike the TiKV lookup, a missing Cluster is a hard failure.
func TaskContextCluster(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("ContextCluster", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		var cluster v1alpha1.Cluster
		if err := c.Get(ctx, client.ObjectKey{
			Name:      rtx.TiKV.Spec.Cluster.Name,
			Namespace: rtx.TiKV.Namespace,
		}, &cluster); err != nil {
			return task.Fail().With("cannot find cluster %s: %w", rtx.TiKV.Spec.Cluster.Name, err)
		}
		rtx.Cluster = &cluster
		return task.Complete().With("cluster is set")
	})
}

// TaskContextPod loads the pod with the same name as the TiKV instance and
// records whether it is already terminating. A missing pod completes cleanly.
func TaskContextPod(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("ContextPod", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		var pod corev1.Pod
		if err := c.Get(ctx, client.ObjectKey{
			Name:      rtx.TiKV.Name,
			Namespace: rtx.TiKV.Namespace,
		}, &pod); err != nil {
			if errors.IsNotFound(err) {
				return task.Complete().With("pod is not created")
			}
			return task.Fail().With("failed to get pod of tikv: %w", err)
		}

		rtx.Pod = &pod
		if !rtx.Pod.GetDeletionTimestamp().IsZero() {
			rtx.PodIsTerminating = true
		}
		return task.Complete().With("pod is set")
	})
}

// TaskContextTiKVGroup loads the TiKVGroup referenced by the instance's
// first owner reference. An owner-less instance is a hard failure, since
// the operator always creates instances with a controlling group.
func TaskContextTiKVGroup(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("ContextTiKVGroup", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()

		if len(rtx.TiKV.OwnerReferences) == 0 {
			return task.Fail().With("tikv instance has no owner, this should not happen")
		}

		var tikvGroup v1alpha1.TiKVGroup
		if err := c.Get(ctx, client.ObjectKey{
			Name:      rtx.TiKV.OwnerReferences[0].Name, // only one owner now
			Namespace: rtx.TiKV.Namespace,
		}, &tikvGroup); err != nil {
			return task.Fail().With("cannot find tikv group %s: %w", rtx.TiKV.OwnerReferences[0].Name, err)
		}
		rtx.TiKVGroup = &tikvGroup
		return task.Complete().With("tikv group is set")
	})
}

// CondTiKVHasBeenDeleted is true when the TiKV object no longer exists.
func CondTiKVHasBeenDeleted() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return ctx.Self().TiKV == nil
	})
}

// CondTiKVIsDeleting is true when the TiKV object has a deletion timestamp.
func CondTiKVIsDeleting() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return !ctx.Self().TiKV.GetDeletionTimestamp().IsZero()
	})
}

// CondClusterIsPaused is true when the cluster requests reconcile pause.
func CondClusterIsPaused() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return ctx.Self().Cluster.ShouldPauseReconcile()
	})
}

// CondClusterIsSuspending is true when the cluster requests compute suspension.
func CondClusterIsSuspending() task.Condition[ReconcileContext] {
	return task.CondFunc[ReconcileContext](func(ctx task.Context[ReconcileContext]) bool {
		return ctx.Self().Cluster.ShouldSuspendCompute()
	})
}
+ +package tasks + +import ( + "github.com/go-logr/logr" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +type TaskEvictLeader struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskEvictLeader(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskEvictLeader{ + Client: c, + Logger: logger, + } +} + +func (*TaskEvictLeader) Name() string { + return "EvictLeader" +} + +func (*TaskEvictLeader) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + switch { + case rtx.Store == nil: + return task.Complete().With("store has been deleted or not created") + case rtx.PodIsTerminating: + if !rtx.LeaderEvicting { + if err := rtx.PDClient.BeginEvictLeader(ctx, rtx.StoreID); err != nil { + return task.Fail().With("cannot add evict leader scheduler: %v", err) + } + } + return task.Complete().With("ensure evict leader scheduler exists") + default: + if rtx.LeaderEvicting { + if err := rtx.PDClient.EndEvictLeader(ctx, rtx.StoreID); err != nil { + return task.Fail().With("cannot remove evict leader scheduler: %v", err) + } + } + return task.Complete().With("ensure evict leader scheduler doesn't exist") + } +} diff --git a/pkg/controllers/tikv/tasks/finalizer.go b/pkg/controllers/tikv/tasks/finalizer.go new file mode 100644 index 00000000000..c1f984f90d0 --- /dev/null +++ b/pkg/controllers/tikv/tasks/finalizer.go @@ -0,0 +1,80 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
const (
	// removingWaitInterval is how long to wait before re-checking a store
	// that is still being removed from PD.
	removingWaitInterval = 10 * time.Second
)

// TaskFinalizerDel drives the teardown state machine for a deleting TiKV:
//   - whole cluster deleting: delete subresources and drop the finalizer;
//   - store removing in PD: retry until removal finishes;
//   - store removed (or never registered): delete subresources, drop finalizer;
//   - store still live: ask PD to delete it, then retry.
func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("FinalizerDel", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		switch {
		case !rtx.Cluster.GetDeletionTimestamp().IsZero():
			if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c,
				rtx.TiKV.Namespace, rtx.TiKV.Name, client.GracePeriodSeconds(1)); err != nil {
				return task.Fail().With("cannot delete subresources: %w", err)
			}

			// whole cluster is deleting
			if err := k8s.RemoveFinalizer(ctx, c, rtx.TiKV); err != nil {
				return task.Fail().With("cannot remove finalizer: %w", err)
			}
		case rtx.StoreState == v1alpha1.StoreStateRemoving:
			// TODO: Complete task and retrigger reconciliation by polling PD
			return task.Retry(removingWaitInterval).With("wait until the store is removed")

		case rtx.StoreState == v1alpha1.StoreStateRemoved || rtx.StoreID == "":
			if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c,
				rtx.TiKV.Namespace, rtx.TiKV.Name, client.GracePeriodSeconds(1)); err != nil {
				return task.Fail().With("cannot delete subresources: %w", err)
			}
			// An empty store ID may mean this tikv was never initialized.
			// TODO: check whether tikv is initialized
			if err := k8s.RemoveFinalizer(ctx, c, rtx.TiKV); err != nil {
				return task.Fail().With("cannot remove finalizer: %w", err)
			}
		default:
			// got store info successfully and the store still exists,
			// so request its deletion from PD first
			if err := rtx.PDClient.DeleteStore(ctx, rtx.StoreID); err != nil {
				return task.Fail().With("cannot delete store %s: %v", rtx.StoreID, err)
			}

			return task.Retry(removingWaitInterval).With("the store is removing")
		}

		return task.Complete().With("finalizer is removed")
	})
}

// TaskFinalizerAdd makes sure the operator finalizer is present on the TiKV
// so teardown above always gets a chance to run.
func TaskFinalizerAdd(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("FinalizerAdd", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		if err := k8s.EnsureFinalizer(ctx, c, rtx.TiKV); err != nil {
			return task.Fail().With("failed to ensure finalizer has been added: %w", err)
		}

		return task.Complete().With("finalizer is added")
	})
}
const (
	// MinGracePeriodSeconds is the lower bound for the computed shutdown
	// grace period of a terminating TiKV pod.
	MinGracePeriodSeconds = 30
	// Assume that approximately 200 regions are transferred for 1s
	RegionsPerSecond = 200
)

// TaskPodSuspend deletes the TiKV pod when the cluster is suspending compute
// and marks the context so later tasks know the pod is going away.
func TaskPodSuspend(c client.Client) task.Task[ReconcileContext] {
	return task.NameTaskFunc("PodSuspend", func(ctx task.Context[ReconcileContext]) task.Result {
		rtx := ctx.Self()
		if rtx.Pod == nil {
			return task.Complete().With("pod has been deleted")
		}
		if err := c.Delete(rtx, rtx.Pod); err != nil {
			return task.Fail().With("can't delete pod of tikv: %w", err)
		}
		rtx.PodIsTerminating = true
		return task.Wait().With("pod is deleting")
	})
}

// TaskPod reconciles the TiKV pod: create, in-place update, or recreate.
type TaskPod struct {
	Client client.Client
	Logger logr.Logger
}

// NewTaskPod builds the Pod task.
func NewTaskPod(logger logr.Logger, c client.Client) task.Task[ReconcileContext] {
	return &TaskPod{
		Client: c,
		Logger: logger,
	}
}

// Name implements task.Task.
func (*TaskPod) Name() string {
	return "Pod"
}

// Sync compares the existing pod against the desired pod and either creates
// it, shortens the grace period of a terminating pod, recreates it (on
// incompatible changes or rolling config updates), or updates it in place.
//
//nolint:gocyclo // refactor if possible
func (t *TaskPod) Sync(ctx task.Context[ReconcileContext]) task.Result {
	rtx := ctx.Self()

	expected := t.newPod(rtx.Cluster, rtx.TiKVGroup, rtx.TiKV, rtx.ConfigHash)
	if rtx.Pod == nil {
		if err := t.Client.Apply(rtx, expected); err != nil {
			return task.Fail().With("can't apply pod of tikv: %w", err)
		}

		rtx.Pod = expected
		return task.Complete().With("pod is created")
	}

	// minimize the deletion grace period seconds
	if !rtx.Pod.GetDeletionTimestamp().IsZero() {
		sec := rtx.Pod.GetDeletionGracePeriodSeconds()

		// Scale the grace period with the number of regions the store has
		// to hand off (RegionsPerSecond), never below the minimum.
		regionCount := 0
		if rtx.Store != nil {
			regionCount = rtx.Store.RegionCount
		}
		gracePeriod := int64(regionCount/RegionsPerSecond + 1)
		if gracePeriod < MinGracePeriodSeconds {
			gracePeriod = MinGracePeriodSeconds
		}

		if sec != nil && rtx.Store != nil && *sec > gracePeriod {
			if err := t.Client.Delete(ctx, rtx.Pod, client.GracePeriodSeconds(gracePeriod)); err != nil {
				return task.Fail().With("cannot minimize the shutdown timeout: %w", err)
			}
		}

		// key will be requeued after the pod is changed
		return task.Complete().With("pod is deleting")
	}

	res := k8s.ComparePods(rtx.Pod, expected)
	curHash, expectHash := rtx.Pod.Labels[v1alpha1.LabelKeyConfigHash], expected.Labels[v1alpha1.LabelKeyConfigHash]
	configChanged := curHash != expectHash
	t.Logger.Info("compare pod", "result", res, "configChanged", configChanged, "currentConfigHash", curHash, "expectConfigHash", expectHash)

	if res == k8s.CompareResultRecreate || (configChanged &&
		rtx.TiKVGroup.Spec.ConfigUpdateStrategy == v1alpha1.ConfigUpdateStrategyRollingUpdate) {
		t.Logger.Info("will recreate the pod")
		if err := t.Client.Delete(rtx, rtx.Pod); err != nil {
			return task.Fail().With("can't delete pod of tikv: %w", err)
		}

		rtx.PodIsTerminating = true
		return task.Complete().With("pod is deleting")
	} else if res == k8s.CompareResultUpdate {
		t.Logger.Info("will update the pod in place")
		if err := t.Client.Apply(rtx, expected); err != nil {
			return task.Fail().With("can't apply pod of tikv: %w", err)
		}

		// write apply result back to ctx
		rtx.Pod = expected
	}

	return task.Complete().With("pod is synced")
}

// newPod renders the desired TiKV pod: config and prestop-checker volumes,
// user data volumes, optional TLS mounts, the prestop-checker init container,
// the tikv-server container, user overlay, and the pod-spec hash label.
func (t *TaskPod) newPod(cluster *v1alpha1.Cluster, kvg *v1alpha1.TiKVGroup, tikv *v1alpha1.TiKV, configHash string) *corev1.Pod {
	vols := []corev1.Volume{
		{
			Name: v1alpha1.VolumeNameConfig,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: ConfigMapName(tikv.Name),
					},
				},
			},
		},
		{
			// scratch space shared with the init container that copies the
			// prestop-checker binary in
			Name: v1alpha1.VolumeNamePrestopChecker,
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{},
			},
		},
	}

	mounts := []corev1.VolumeMount{
		{
			Name:      v1alpha1.VolumeNameConfig,
			MountPath: v1alpha1.DirNameConfigTiKV,
		},
		{
			Name:      v1alpha1.VolumeNamePrestopChecker,
			MountPath: v1alpha1.DirNamePrestop,
		},
	}

	// user-declared data volumes become PVC-backed volumes/mounts
	for i := range tikv.Spec.Volumes {
		vol := &tikv.Spec.Volumes[i]
		name := v1alpha1.NamePrefix + "tikv"
		if vol.Name != "" {
			name = name + "-" + vol.Name
		}
		vols = append(vols, corev1.Volume{
			Name: name,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: PersistentVolumeClaimName(tikv.Name, vol.Name),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      name,
			MountPath: vol.Path,
		})
	}

	if cluster.IsTLSClusterEnabled() {
		groupName := tikv.Labels[v1alpha1.LabelKeyGroup]
		vols = append(vols, corev1.Volume{
			Name: v1alpha1.TiKVClusterTLSVolumeName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: cluster.TLSClusterSecretName(groupName),
				},
			},
		})
		mounts = append(mounts, corev1.VolumeMount{
			Name:      v1alpha1.TiKVClusterTLSVolumeName,
			MountPath: v1alpha1.TiKVClusterTLSMountPath,
			ReadOnly:  true,
		})

		if kvg.MountClusterClientSecret() {
			vols = append(vols, corev1.Volume{
				Name: v1alpha1.ClusterTLSClientVolumeName,
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: cluster.ClusterClientTLSSecretName(),
					},
				},
			})
			mounts = append(mounts, corev1.VolumeMount{
				Name:      v1alpha1.ClusterTLSClientVolumeName,
				MountPath: v1alpha1.ClusterTLSClientMountPath,
				ReadOnly:  true,
			})
		}
	}

	var preStopImage *string
	if tikv.Spec.PreStop != nil {
		preStopImage = tikv.Spec.PreStop.Image
	}

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: tikv.Namespace,
			Name:      tikv.Name,
			Labels: maputil.Merge(tikv.Labels, map[string]string{
				v1alpha1.LabelKeyInstance:   tikv.Name,
				v1alpha1.LabelKeyConfigHash: configHash,
			}),
			Annotations: maputil.Copy(tikv.GetAnnotations()),
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(tikv, v1alpha1.SchemeGroupVersion.WithKind("TiKV")),
			},
		},
		Spec: corev1.PodSpec{
			// TODO: make the max grace period seconds configurable
			//nolint:mnd // refactor to use a constant
			TerminationGracePeriodSeconds: ptr.To[int64](65535),
			Hostname:                      tikv.Name,
			Subdomain:                     tikv.Spec.Subdomain,
			NodeSelector:                  tikv.Spec.Topology,
			InitContainers: []corev1.Container{
				{
					// TODO: support hot reload checker
					// NOTE: now sidecar cannot be restarted because of this https://github.com/kubernetes/kubernetes/pull/126525.
					Name:            v1alpha1.ContainerNamePrestopChecker,
					Image:           image.PrestopChecker.Image(preStopImage),
					ImagePullPolicy: corev1.PullIfNotPresent,
					// RestartPolicy:   ptr.To(corev1.ContainerRestartPolicyAlways),
					Command: []string{
						"/bin/sh",
						"-c",
						"cp /prestop-checker " + v1alpha1.DirNamePrestop + "/;" +
							"sleep infinity",
					},
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      v1alpha1.VolumeNamePrestopChecker,
							MountPath: v1alpha1.DirNamePrestop,
						},
					},
				},
			},
			Containers: []corev1.Container{
				{
					Name:            v1alpha1.ContainerNameTiKV,
					Image:           image.TiKV.Image(tikv.Spec.Image, tikv.Spec.Version),
					ImagePullPolicy: corev1.PullIfNotPresent,
					Command: []string{
						"/tikv-server",
						"--config",
						filepath.Join(v1alpha1.DirNameConfigTiKV, v1alpha1.ConfigFileName),
					},
					Ports: []corev1.ContainerPort{
						{
							Name:          v1alpha1.TiKVPortNameClient,
							ContainerPort: tikv.GetClientPort(),
						},
						{
							Name:          v1alpha1.TiKVPortNameStatus,
							ContainerPort: tikv.GetStatusPort(),
						},
					},
					VolumeMounts: mounts,
					Resources:    k8s.GetResourceRequirements(tikv.Spec.Resources),
					Lifecycle: &corev1.Lifecycle{
						// TODO: change to a real pre stop action
						PreStop: &corev1.LifecycleHandler{
							Exec: &corev1.ExecAction{
								Command: []string{
									"/bin/sh",
									"-c",
									t.buildPrestopCheckScript(cluster, tikv),
								},
							},
						},
					},
				},
			},
			Volumes: vols,
		},
	}

	if tikv.Spec.Overlay != nil {
		overlay.OverlayPod(pod, tikv.Spec.Overlay.Pod)
	}

	k8s.CalculateHashAndSetLabels(pod)
	return pod
}

// buildPrestopCheckScript assembles the shell command that runs the
// prestop-checker binary against PD before the container stops, including
// TLS flags when cluster TLS is enabled. Output goes to PID 1's stdout so
// it shows up in the container log.
func (*TaskPod) buildPrestopCheckScript(cluster *v1alpha1.Cluster, tikv *v1alpha1.TiKV) string {
	sb := strings.Builder{}
	sb.WriteString(v1alpha1.DirNamePrestop)
	sb.WriteString("/prestop-checker")
	sb.WriteString(" -pd ")
	sb.WriteString(cluster.Status.PD)
	sb.WriteString(" -addr ")
	sb.WriteString(kvcfg.GetAdvertiseClientURLs(tikv))

	if cluster.IsTLSClusterEnabled() {
		sb.WriteString(" -ca ")
		sb.WriteString(v1alpha1.TiKVClusterTLSMountPath)
		sb.WriteString("/ca.crt")
		sb.WriteString(" -tls ")
		sb.WriteString(v1alpha1.TiKVClusterTLSMountPath)
		sb.WriteString("/tls.crt")
		sb.WriteString(" -key ")
		sb.WriteString(v1alpha1.TiKVClusterTLSMountPath)
		sb.WriteString("/tls.key")
	}

	sb.WriteString(" > /proc/1/fd/1")

	return sb.String()
}
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/pkg/volumes" +) + +type TaskPVC struct { + Client client.Client + Logger logr.Logger + VolumeModifier volumes.Modifier +} + +func NewTaskPVC(logger logr.Logger, c client.Client, vm volumes.Modifier) task.Task[ReconcileContext] { + return &TaskPVC{ + Client: c, + Logger: logger, + VolumeModifier: vm, + } +} + +func (*TaskPVC) Name() string { + return "PVC" +} + +func (t *TaskPVC) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + pvcs := newPVCs(rtx.TiKV) + if wait, err := volumes.SyncPVCs(rtx, t.Client, pvcs, t.VolumeModifier, t.Logger); err != nil { + return task.Fail().With("failed to sync pvcs: %w", err) + } else if wait { + return task.Complete().With("waiting for pvcs to be synced") + } + + return task.Complete().With("pvcs are synced") +} + +func newPVCs(tikv *v1alpha1.TiKV) []*corev1.PersistentVolumeClaim { + pvcs := make([]*corev1.PersistentVolumeClaim, 0, len(tikv.Spec.Volumes)) + for i := range tikv.Spec.Volumes { + vol := tikv.Spec.Volumes[i] + pvcs = append(pvcs, &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: PersistentVolumeClaimName(tikv.Name, vol.Name), + Namespace: tikv.Namespace, + Labels: maputil.Merge(tikv.Labels, map[string]string{ + v1alpha1.LabelKeyInstance: tikv.Name, + }), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(tikv, v1alpha1.SchemeGroupVersion.WithKind("TiKV")), + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + 
corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: vol.Storage, + }, + }, + StorageClassName: vol.StorageClassName, + VolumeAttributesClassName: vol.VolumeAttributesClassName, + }, + }) + } + + return pvcs +} diff --git a/pkg/controllers/tikv/tasks/status.go b/pkg/controllers/tikv/tasks/status.go new file mode 100644 index 00000000000..2b63d59b2e3 --- /dev/null +++ b/pkg/controllers/tikv/tasks/status.go @@ -0,0 +1,206 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/statefulset" +) + +func TaskStatusSuspend(c client.Client) task.Task[ReconcileContext] { + return task.NameTaskFunc("StatusSuspend", func(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + rtx.TiKV.Status.ObservedGeneration = rtx.TiKV.Generation + + var ( + suspendStatus = metav1.ConditionFalse + suspendMessage = "tikv is suspending" + + // when suspending, the health status should be false + healthStatus = metav1.ConditionFalse + healthMessage = "tikv is not healthy" + ) + + if rtx.Pod == nil { + suspendStatus = metav1.ConditionTrue + suspendMessage = "tikv is suspended" + } + needUpdate := meta.SetStatusCondition(&rtx.TiKV.Status.Conditions, metav1.Condition{ + Type: v1alpha1.TiKVCondSuspended, + Status: suspendStatus, + ObservedGeneration: rtx.TiKV.Generation, + // TODO: use different reason for suspending and suspended + Reason: v1alpha1.TiKVSuspendReason, + Message: suspendMessage, + }) + + needUpdate = meta.SetStatusCondition(&rtx.TiKV.Status.Conditions, metav1.Condition{ + Type: v1alpha1.TiKVCondHealth, + Status: healthStatus, + ObservedGeneration: rtx.TiKV.Generation, + Reason: v1alpha1.TiKVHealthReason, + Message: healthMessage, + }) || needUpdate + + if needUpdate { + if err := c.Status().Update(ctx, rtx.TiKV); err != nil { + return task.Fail().With("cannot update status: %w", err) + } + } + + return task.Complete().With("status of suspend tikv is updated") + }) +} + +type TaskStatus struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskStatus(logger logr.Logger, c 
client.Client) task.Task[ReconcileContext] { + return &TaskStatus{ + Client: c, + Logger: logger, + } +} + +func (*TaskStatus) Name() string { + return "Status" +} + +//nolint:gocyclo // refactor is possible +func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + var ( + healthStatus = metav1.ConditionFalse + healthMessage = "tikv is not healthy" + + suspendStatus = metav1.ConditionFalse + suspendMessage = "tikv is not suspended" + + needUpdate = false + ) + + if rtx.StoreID != "" { + if rtx.TiKV.Status.ID != rtx.StoreID { + rtx.TiKV.Status.ID = rtx.StoreID + needUpdate = true + } + + info, err := rtx.PDClient.GetStore(ctx, rtx.StoreID) + if err == nil && info != nil && info.Store != nil { + rtx.StoreState = info.Store.NodeState.String() + } else { + t.Logger.Error(err, "failed to get tikv store info", "store", rtx.StoreID) + } + } + if rtx.StoreState != "" && rtx.TiKV.Status.State != rtx.StoreState { + rtx.TiKV.Status.State = rtx.StoreState + needUpdate = true + } + + needUpdate = meta.SetStatusCondition(&rtx.TiKV.Status.Conditions, metav1.Condition{ + Type: v1alpha1.TiKVCondSuspended, + Status: suspendStatus, + ObservedGeneration: rtx.TiKV.Generation, + Reason: v1alpha1.TiKVSuspendReason, + Message: suspendMessage, + }) || needUpdate + + if needUpdate || !v1alpha1.IsReconciled(rtx.TiKV) || + rtx.TiKV.Status.UpdateRevision != rtx.TiKV.Labels[v1alpha1.LabelKeyInstanceRevisionHash] { + rtx.TiKV.Status.ObservedGeneration = rtx.TiKV.Generation + rtx.TiKV.Status.UpdateRevision = rtx.TiKV.Labels[v1alpha1.LabelKeyInstanceRevisionHash] + needUpdate = true + } + + if rtx.Pod == nil || rtx.PodIsTerminating { + rtx.Healthy = false + } else if statefulset.IsPodRunningAndReady(rtx.Pod) && rtx.StoreState == v1alpha1.StoreStateServing { + rtx.Healthy = true + if rtx.TiKV.Status.CurrentRevision != rtx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash] { + rtx.TiKV.Status.CurrentRevision = 
rtx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash] + needUpdate = true + } + } else { + rtx.Healthy = false + } + + if rtx.Healthy { + healthStatus = metav1.ConditionTrue + healthMessage = "tikv is healthy" + } + needUpdate = meta.SetStatusCondition(&rtx.TiKV.Status.Conditions, metav1.Condition{ + Type: v1alpha1.TiKVCondHealth, + Status: healthStatus, + ObservedGeneration: rtx.TiKV.Generation, + Reason: v1alpha1.TiKVHealthReason, + Message: healthMessage, + }) || needUpdate + + if t.syncLeadersEvictedCond(rtx.TiKV, rtx.Store, rtx.LeaderEvicting) { + needUpdate = true + } + + if needUpdate { + if err := t.Client.Status().Update(ctx, rtx.TiKV); err != nil { + return task.Fail().With("cannot update status: %w", err) + } + } + + // TODO: use a condition to refactor it + if rtx.TiKV.Status.ID == "" || rtx.TiKV.Status.State != v1alpha1.StoreStateServing || !v1alpha1.IsUpToDate(rtx.TiKV) { + // can we only rely on the PD member events for this condition? + //nolint:mnd // refactor to use a constant + return task.Retry(5 * time.Second).With("tikv may not be initialized, retry") + } + + return task.Complete().With("status is synced") +} + +// Status of this condition can only transfer as the below +func (*TaskStatus) syncLeadersEvictedCond(tikv *v1alpha1.TiKV, store *pdv1.Store, isEvicting bool) bool { + status := metav1.ConditionFalse + reason := "NotEvicted" + msg := "leaders are not all evicted" + switch { + case store == nil: + status = metav1.ConditionTrue + reason = "StoreIsRemoved" + msg = "store does not exist" + case isEvicting && store.LeaderCount == 0: + status = metav1.ConditionTrue + reason = "Evicted" + msg = "all leaders are evicted" + } + + return meta.SetStatusCondition(&tikv.Status.Conditions, metav1.Condition{ + Type: v1alpha1.TiKVCondLeadersEvicted, + Status: status, + ObservedGeneration: tikv.Generation, + Reason: reason, + Message: msg, + }) +} diff --git a/pkg/controllers/tikv/tasks/store_labels.go b/pkg/controllers/tikv/tasks/store_labels.go new 
file mode 100644 index 00000000000..733ccc8a4d6 --- /dev/null +++ b/pkg/controllers/tikv/tasks/store_labels.go @@ -0,0 +1,91 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "reflect" + "strconv" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v2" +) + +type TaskStoreLabels struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskStoreLabels(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskStoreLabels{ + Client: c, + Logger: logger, + } +} + +func (*TaskStoreLabels) Name() string { + return "StoreLabels" +} + +func (t *TaskStoreLabels) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if rtx.StoreState != v1alpha1.StoreStateServing || rtx.PodIsTerminating || rtx.Pod == nil { + return task.Complete().With("skip sync store labels as the store is not serving") + } + + nodeName := rtx.Pod.Spec.NodeName + if nodeName == "" { + return task.Fail().With("pod %s/%s has not been scheduled", rtx.TiKV.Namespace, rtx.TiKV.Name) + } + var node corev1.Node + if err := t.Client.Get(ctx, client.ObjectKey{Name: nodeName}, &node); err != nil { + return task.Fail().With("failed to get node %s: %s", nodeName, err) + } + + 
// TODO: too many API calls to PD? + pdCfg, err := rtx.PDClient.GetConfig(ctx) + if err != nil { + return task.Fail().With("failed to get pd config: %s", err) + } + keys := pdCfg.Replication.LocationLabels + if len(keys) == 0 { + return task.Complete().With("no store labels need to sync") + } + + storeLabels := k8s.GetNodeLabelsForKeys(&node, keys) + if len(storeLabels) == 0 { + return task.Complete().With("no store labels from node %s to sync", nodeName) + } + + if !reflect.DeepEqual(rtx.Store.Labels, storeLabels) { + storeID, err := strconv.ParseUint(rtx.StoreID, 10, 64) + if err != nil { + return task.Fail().With("failed to parse store id %s: %s", rtx.StoreID, err) + } + set, err := rtx.PDClient.SetStoreLabels(ctx, storeID, storeLabels) + if err != nil { + return task.Fail().With("failed to set store labels: %s", err) + } else if set { + t.Logger.Info("store labels synced", "storeID", rtx.StoreID, "storeLabels", storeLabels) + } + } + + return task.Complete().With("store labels synced") +} diff --git a/pkg/controllers/tikv/tasks/util.go b/pkg/controllers/tikv/tasks/util.go new file mode 100644 index 00000000000..ed51a327fac --- /dev/null +++ b/pkg/controllers/tikv/tasks/util.go @@ -0,0 +1,27 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +func ConfigMapName(tikvName string) string { + return tikvName +} + +func PersistentVolumeClaimName(tikvName, volName string) string { + // ref: https://github.com/pingcap/tidb-operator/blob/v1.6.0/pkg/apis/pingcap/v1alpha1/helpers.go#L92 + if volName == "" { + return "tikv-" + tikvName + } + return "tikv-" + tikvName + "-" + volName +} diff --git a/pkg/controllers/tikvgroup/controller.go b/pkg/controllers/tikvgroup/controller.go new file mode 100644 index 00000000000..76a5cb32ed6 --- /dev/null +++ b/pkg/controllers/tikvgroup/controller.go @@ -0,0 +1,116 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tikvgroup + +import ( + "context" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/tikvgroup/tasks" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type Reconciler struct { + Logger logr.Logger + Client client.Client +} + +func Setup(mgr manager.Manager, c client.Client) error { + r := &Reconciler{ + Logger: mgr.GetLogger().WithName("TiKVGroup"), + Client: c, + } + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.TiKVGroup{}). + Owns(&v1alpha1.TiKV{}). + // Only care about the generation change (i.e. spec update) + Watches(&v1alpha1.Cluster{}, r.ClusterEventHandler(), builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithOptions(controller.Options{RateLimiter: k8s.RateLimiter}). 
+ Complete(r) +} + +func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], + queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + cluster := event.ObjectNew.(*v1alpha1.Cluster) + + var list v1alpha1.TiKVGroupList + if err := r.Client.List(ctx, &list, client.InNamespace(cluster.Namespace), + client.MatchingFields{"spec.cluster.name": cluster.Name}); err != nil { + if !errors.IsNotFound(err) { + r.Logger.Error(err, "cannot list all tikv groups", "ns", cluster.Namespace, "cluster", cluster.Name) + } + return + } + + for i := range list.Items { + kvg := &list.Items[i] + queue.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: kvg.Name, + Namespace: kvg.Namespace, + }, + }) + } + }, + } +} + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger.WithValues("tikvgroup", req.NamespacedName) + reporter := task.NewTableTaskReporter() + + startTime := time.Now() + logger.Info("start reconcile") + defer func() { + dur := time.Since(startTime) + logger.Info("end reconcile", "duration", dur) + logger.Info("summay: \n" + reporter.Summary()) + }() + + rtx := &tasks.ReconcileContext{ + // some fields will be set in the context task + Context: ctx, + Key: req.NamespacedName, + } + + runner := task.NewTaskRunner[tasks.ReconcileContext](reporter) + runner.AddTasks( + tasks.NewTaskContext(logger, r.Client), + tasks.NewTaskFinalizer(logger, r.Client), + tasks.NewTaskService(logger, r.Client), + tasks.NewTaskUpdater(logger, r.Client), + tasks.NewTaskStatus(logger, r.Client), + ) + + return runner.Run(rtx) +} diff --git a/pkg/controllers/tikvgroup/tasks/ctx.go b/pkg/controllers/tikvgroup/tasks/ctx.go new file mode 100644 index 00000000000..0a834527a15 --- /dev/null +++ 
b/pkg/controllers/tikvgroup/tasks/ctx.go @@ -0,0 +1,121 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "cmp" + "context" + "slices" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/action" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type ReconcileContext struct { + context.Context + + Key types.NamespacedName + + Suspended bool + + Cluster *v1alpha1.Cluster + + TiKVGroup *v1alpha1.TiKVGroup + Peers []*v1alpha1.TiKV + UpgradeChecker action.UpgradeChecker + + // Status fields + v1alpha1.CommonStatus +} + +func (ctx *ReconcileContext) Self() *ReconcileContext { + return ctx +} + +type TaskContext struct { + Logger logr.Logger + Client client.Client +} + +func NewTaskContext(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskContext{ + Logger: logger, + Client: c, + } +} + +func (*TaskContext) Name() string { + return "Context" +} + +func (t *TaskContext) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + var kvg v1alpha1.TiKVGroup + if err := t.Client.Get(ctx, rtx.Key, &kvg); err != nil { + if !errors.IsNotFound(err) { + return task.Fail().With("can't get tikv group: %w", err) + } + + 
return task.Complete().Break().With("tikv group has been deleted") + } + rtx.TiKVGroup = &kvg + + var cluster v1alpha1.Cluster + if err := t.Client.Get(ctx, client.ObjectKey{ + Name: kvg.Spec.Cluster.Name, + Namespace: kvg.Namespace, + }, &cluster); err != nil { + return task.Fail().With("cannot find cluster %s: %w", kvg.Spec.Cluster.Name, err) + } + rtx.Cluster = &cluster + + if cluster.ShouldPauseReconcile() { + return task.Complete().Break().With("cluster reconciliation is paused") + } + + var kvList v1alpha1.TiKVList + if err := t.Client.List(ctx, &kvList, client.InNamespace(kvg.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: cluster.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + v1alpha1.LabelKeyGroup: kvg.Name, + }); err != nil { + return task.Fail().With("cannot list tikv peers: %w", err) + } + + rtx.Peers = make([]*v1alpha1.TiKV, len(kvList.Items)) + rtx.Suspended = len(kvList.Items) > 0 + for i := range kvList.Items { + rtx.Peers[i] = &kvList.Items[i] + if !meta.IsStatusConditionTrue(kvList.Items[i].Status.Conditions, v1alpha1.TiKVCondSuspended) { + // TiKV Group is not suspended if any of its members is not suspended + rtx.Suspended = false + } + } + slices.SortFunc(rtx.Peers, func(a, b *v1alpha1.TiKV) int { + return cmp.Compare(a.Name, b.Name) + }) + + rtx.UpgradeChecker = action.NewUpgradeChecker(t.Client, rtx.Cluster, t.Logger) + return task.Complete().With("new context completed") +} diff --git a/pkg/controllers/tikvgroup/tasks/finalizer.go b/pkg/controllers/tikvgroup/tasks/finalizer.go new file mode 100644 index 00000000000..303b12e3943 --- /dev/null +++ b/pkg/controllers/tikvgroup/tasks/finalizer.go @@ -0,0 +1,82 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "fmt" + + "github.com/go-logr/logr" + utilerr "k8s.io/apimachinery/pkg/util/errors" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskFinalizer struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskFinalizer(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskFinalizer{ + Client: c, + Logger: logger, + } +} + +func (*TaskFinalizer) Name() string { + return "Finalizer" +} + +func (t *TaskFinalizer) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if !rtx.TiKVGroup.GetDeletionTimestamp().IsZero() { + errList := []error{} + names := []string{} + for _, peer := range rtx.Peers { + names = append(names, peer.Name) + if peer.GetDeletionTimestamp().IsZero() { + if err := t.Client.Delete(ctx, peer); err != nil { + errList = append(errList, fmt.Errorf("try to delete the tikv instance %v failed: %w", peer.Name, err)) + } + } + } + + if len(errList) != 0 { + return task.Fail().With("failed to delete all tikv instances: %v", utilerr.NewAggregate(errList)) + } + + if len(rtx.Peers) != 0 { + return task.Fail().With("wait for all tikv instances being removed, %v still exists", names) + } + + if err := k8s.EnsureGroupSubResourceDeleted(ctx, t.Client, + rtx.TiKVGroup.Namespace, rtx.TiKVGroup.Name); err != nil { + return task.Fail().With("cannot delete subresources: %w", err) + } + + if err := k8s.RemoveFinalizer(ctx, t.Client, rtx.TiKVGroup); 
err != nil { + return task.Fail().With("failed to ensure finalizer has been removed: %w", err) + } + } else { + if err := k8s.EnsureFinalizer(ctx, t.Client, rtx.TiKVGroup); err != nil { + return task.Fail().With("failed to ensure finalizer has been added: %w", err) + } + } + + return task.Complete().With("finalizer is synced") +} diff --git a/pkg/controllers/tikvgroup/tasks/status.go b/pkg/controllers/tikvgroup/tasks/status.go new file mode 100644 index 00000000000..a25ac0aa341 --- /dev/null +++ b/pkg/controllers/tikvgroup/tasks/status.go @@ -0,0 +1,94 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskStatus struct { + Client client.Client + Logger logr.Logger +} + +func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskStatus{ + Client: c, + Logger: logger, + } +} + +func (*TaskStatus) Name() string { + return "Status" +} + +func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + suspendStatus := metav1.ConditionFalse + suspendMessage := "tikv group is not suspended" + if rtx.Suspended { + suspendStatus = metav1.ConditionTrue + suspendMessage = "tikv group is suspended" + } else if rtx.Cluster.ShouldSuspendCompute() { + suspendMessage = "tikv group is suspending" + } + conditionChanged := meta.SetStatusCondition(&rtx.TiKVGroup.Status.Conditions, metav1.Condition{ + Type: v1alpha1.TiKVGroupCondSuspended, + Status: suspendStatus, + ObservedGeneration: rtx.TiKVGroup.Generation, + Reason: v1alpha1.TiKVGroupSuspendReason, + Message: suspendMessage, + }) + + // Update the current revision if all instances are synced. 
+ if int(rtx.TiKVGroup.GetDesiredReplicas()) == len(rtx.Peers) && v1alpha1.AllInstancesSynced(rtx.Peers, rtx.UpdateRevision) { + if rtx.CurrentRevision != rtx.UpdateRevision || rtx.TiKVGroup.Status.Version != rtx.TiKVGroup.Spec.Version { + rtx.CurrentRevision = rtx.UpdateRevision + rtx.TiKVGroup.Status.Version = rtx.TiKVGroup.Spec.Version + conditionChanged = true + } + } + var readyReplicas int32 + for _, peer := range rtx.Peers { + if peer.IsHealthy() { + readyReplicas++ + } + } + + if conditionChanged || rtx.TiKVGroup.Status.ReadyReplicas != readyReplicas || + rtx.TiKVGroup.Status.Replicas != int32(len(rtx.Peers)) || //nolint:gosec // expected type conversion + !v1alpha1.IsReconciled(rtx.TiKVGroup) || + v1alpha1.StatusChanged(rtx.TiKVGroup, rtx.CommonStatus) { + rtx.TiKVGroup.Status.ReadyReplicas = readyReplicas + rtx.TiKVGroup.Status.Replicas = int32(len(rtx.Peers)) //nolint:gosec // expected type conversion + rtx.TiKVGroup.Status.ObservedGeneration = rtx.TiKVGroup.Generation + rtx.TiKVGroup.Status.CurrentRevision = rtx.CurrentRevision + rtx.TiKVGroup.Status.UpdateRevision = rtx.UpdateRevision + rtx.TiKVGroup.Status.CollisionCount = rtx.CollisionCount + + if err := t.Client.Status().Update(ctx, rtx.TiKVGroup); err != nil { + return task.Fail().With("cannot update status: %w", err) + } + } + + return task.Complete().With("status is synced") +} diff --git a/pkg/controllers/tikvgroup/tasks/svc.go b/pkg/controllers/tikvgroup/tasks/svc.go new file mode 100644 index 00000000000..46f60221151 --- /dev/null +++ b/pkg/controllers/tikvgroup/tasks/svc.go @@ -0,0 +1,106 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/task" +) + +type TaskService struct { + Logger logr.Logger + Client client.Client +} + +func NewTaskService(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskService{ + Logger: logger, + Client: c, + } +} + +func (*TaskService) Name() string { + return "Service" +} + +func (t *TaskService) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + if rtx.Cluster.ShouldSuspendCompute() { + return task.Complete().With("skip service for suspension") + } + + kvg := rtx.TiKVGroup + + svc := newHeadlessService(kvg) + if err := t.Client.Apply(ctx, svc); err != nil { + return task.Fail().With(fmt.Sprintf("can't create headless service of tikv: %v", err)) + } + + return task.Complete().With("headless service of tikv has been applied") +} + +func newHeadlessService(kvg *v1alpha1.TiKVGroup) *corev1.Service { + ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: HeadlessServiceName(kvg.Spec.Cluster.Name, kvg.Name), + Namespace: kvg.Namespace, + Labels: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + 
v1alpha1.LabelKeyCluster: kvg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: kvg.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(kvg, v1alpha1.SchemeGroupVersion.WithKind("TiKVGroup")), + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: corev1.ClusterIPNone, + IPFamilyPolicy: &ipFamilyPolicy, + Selector: map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + v1alpha1.LabelKeyCluster: kvg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: kvg.Name, + }, + Ports: []corev1.ServicePort{ + { + Name: v1alpha1.TiKVPortNameClient, + Port: kvg.GetClientPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.TiKVPortNameClient), + }, + { + Name: v1alpha1.TiKVPortNameStatus, + Port: kvg.GetStatusPort(), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(v1alpha1.TiKVPortNameStatus), + }, + }, + PublishNotReadyAddresses: true, + }, + } +} diff --git a/pkg/controllers/tikvgroup/tasks/updater.go b/pkg/controllers/tikvgroup/tasks/updater.go new file mode 100644 index 00000000000..04fd7ed3243 --- /dev/null +++ b/pkg/controllers/tikvgroup/tasks/updater.go @@ -0,0 +1,184 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "fmt" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/updater" + "github.com/pingcap/tidb-operator/pkg/updater/policy" + "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + "github.com/pingcap/tidb-operator/pkg/utils/random" + "github.com/pingcap/tidb-operator/pkg/utils/task" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" +) + +// TaskUpdater is a task for updating TikVGroup when its spec is changed. +type TaskUpdater struct { + Logger logr.Logger + Client client.Client + CRCli history.Interface +} + +func NewTaskUpdater(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { + return &TaskUpdater{ + Logger: logger, + Client: c, + CRCli: history.NewClient(c), + } +} + +func (*TaskUpdater) Name() string { + return "Updater" +} + +func (t *TaskUpdater) Sync(ctx task.Context[ReconcileContext]) task.Result { + rtx := ctx.Self() + + // TODO: move to task v2 + if !rtx.TiKVGroup.GetDeletionTimestamp().IsZero() { + return task.Complete().With("tikv group has been deleted") + } + + // List all controller revisions for the TiKVGroup. + selector := labels.SelectorFromSet(labels.Set{ + v1alpha1.LabelKeyCluster: rtx.Cluster.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + v1alpha1.LabelKeyGroup: rtx.TiKVGroup.Name, + }) + revisions, err := t.CRCli.ListControllerRevisions(rtx.TiKVGroup, selector) + if err != nil { + return task.Fail().With("cannot list controller revisions: %w", err) + } + history.SortControllerRevisions(revisions) + + // Get the current(old) and update(new) ControllerRevisions. 
+ currentRevision, updateRevision, collisionCount, err := revision.GetCurrentAndUpdate(rtx.TiKVGroup, revisions, t.CRCli, rtx.TiKVGroup) + if err != nil { + return task.Fail().With("cannot get revisions: %w", err) + } + rtx.CurrentRevision = currentRevision.Name + rtx.UpdateRevision = updateRevision.Name + rtx.CollisionCount = &collisionCount + + if err = revision.TruncateHistory(t.CRCli, rtx.Peers, revisions, + currentRevision, updateRevision, rtx.Cluster.Spec.RevisionHistoryLimit); err != nil { + t.Logger.Error(err, "failed to truncate history") + } + + if needVersionUpgrade(rtx.TiKVGroup) && !rtx.UpgradeChecker.CanUpgrade(ctx, rtx.TiKVGroup) { + return task.Fail().Continue().With( + "preconditions of upgrading the tikv group %s/%s are not met", + rtx.TiKVGroup.Namespace, rtx.TiKVGroup.Name) + } + + desired := 1 + if rtx.TiKVGroup.Spec.Replicas != nil { + desired = int(*rtx.TiKVGroup.Spec.Replicas) + } + + var topos []v1alpha1.ScheduleTopology + for _, p := range rtx.TiKVGroup.Spec.SchedulePolicies { + switch p.Type { + case v1alpha1.SchedulePolicyTypeEvenlySpread: + topos = p.EvenlySpread.Topologies + default: + // do nothing + } + } + + topoPolicy, err := policy.NewTopologyPolicy[*runtime.TiKV](topos) + if err != nil { + return task.Fail().With("invalid topo policy, it should be validated: %w", err) + } + + for _, tikv := range rtx.Peers { + topoPolicy.Add(runtime.FromTiKV(tikv)) + } + + wait, err := updater.New[*runtime.TiKV](). + WithInstances(runtime.FromTiKVSlice(rtx.Peers)...). + WithDesired(desired). + WithClient(t.Client). + WithMaxSurge(0). + WithMaxUnavailable(1). + WithRevision(rtx.UpdateRevision). + WithNewFactory(TiKVNewer(rtx.TiKVGroup, rtx.UpdateRevision)). + WithAddHooks(topoPolicy). + WithUpdateHooks( + policy.KeepName[*runtime.TiKV](), + policy.KeepTopology[*runtime.TiKV](), + ). + WithDelHooks(topoPolicy). + WithScaleInPreferPolicy( + topoPolicy, + ). + Build(). 
+ Do(ctx) + if err != nil { + return task.Fail().With("cannot update instances: %w", err) + } + if wait { + return task.Complete().With("wait for all instances ready") + } + return task.Complete().With("all instances are synced") +} + +func needVersionUpgrade(kvg *v1alpha1.TiKVGroup) bool { + return kvg.Spec.Version != kvg.Status.Version && kvg.Status.Version != "" +} + +func TiKVNewer(kvg *v1alpha1.TiKVGroup, rev string) updater.NewFactory[*runtime.TiKV] { + return updater.NewFunc[*runtime.TiKV](func() *runtime.TiKV { + //nolint:mnd // refactor to use a constant + name := fmt.Sprintf("%s-%s-%s", kvg.Spec.Cluster.Name, kvg.Name, random.Random(6)) + + spec := kvg.Spec.Template.Spec.DeepCopy() + + tikv := &v1alpha1.TiKV{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: kvg.Namespace, + Name: name, + Labels: maputil.Merge(kvg.Spec.Template.Labels, map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + v1alpha1.LabelKeyCluster: kvg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: kvg.Name, + v1alpha1.LabelKeyInstanceRevisionHash: rev, + }), + Annotations: maputil.Copy(kvg.Spec.Template.Annotations), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(kvg, v1alpha1.SchemeGroupVersion.WithKind("TiKVGroup")), + }, + }, + Spec: v1alpha1.TiKVSpec{ + Cluster: kvg.Spec.Cluster, + Version: kvg.Spec.Version, + Subdomain: HeadlessServiceName(kvg.Spec.Cluster.Name, kvg.Name), + TiKVTemplateSpec: *spec, + }, + } + + return runtime.FromTiKV(tikv) + }) +} diff --git a/pkg/controllers/tikvgroup/tasks/util.go b/pkg/controllers/tikvgroup/tasks/util.go new file mode 100644 index 00000000000..2b10ada1e26 --- /dev/null +++ b/pkg/controllers/tikvgroup/tasks/util.go @@ -0,0 +1,24 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "fmt" +) + +// TODO: fix length issue +func HeadlessServiceName(clusterName, groupName string) string { + return fmt.Sprintf("%s-%s-peer", clusterName, groupName) +} diff --git a/pkg/image/image.go b/pkg/image/image.go new file mode 100644 index 00000000000..beafc1cdb96 --- /dev/null +++ b/pkg/image/image.go @@ -0,0 +1,105 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This package is defined to return image of components +package image + +import ( + "fmt" + + "github.com/distribution/reference" +) + +const ( + PD Untagged = "pingcap/pd" + TiDB Untagged = "pingcap/tidb" + TiKV Untagged = "pingcap/tikv" + TiFlash Untagged = "pingcap/tiflash" + + // TODO: use versioned image + PrestopChecker Tagged = "pingcap/prestop-checker:latest" +) + +// Tagged is image with image tag +type Tagged string + +// Untagged is image without image tag +type Untagged string + +// Note: img must be validated before calling Version +func (t Tagged) Image(img *string) string { + image := string(t) + if img != nil { + image = *img + } + + return image +} + +// Note: img must be validated before calling Version +func (t Untagged) Image(img *string, version string) string { + image := string(t) + if img != nil { + image = *img + } + s, err := withVersion(image, version) + if err != nil { + panic(err) + } + + return s +} + +func withVersion(img, version string) (string, error) { + named, exist, err := validate(img) + if err != nil { + return "", err + } + if !exist { + return img, nil + } + + tagged, err := reference.WithTag(named, version) + if err != nil { + return "", fmt.Errorf("cannot override version: %w", err) + } + return tagged.String(), nil +} + +func Validate(img string) error { + _, _, err := validate(img) + return err +} + +func validate(img string) (_ reference.Named, isNamed bool, _ error) { + repo, err := reference.Parse(img) + if err != nil { + return nil, false, err + } + + if _, ok := repo.(reference.Tagged); ok { + return nil, false, nil + } + + if _, ok := repo.(reference.Digested); ok { + return nil, false, nil + } + + named, ok := repo.(reference.Named) + if !ok { + return nil, false, fmt.Errorf("reference is not named") + } + + return named, true, nil +} diff --git a/pkg/image/image_test.go b/pkg/image/image_test.go new file mode 100644 index 00000000000..9cdde0aef6b --- /dev/null +++ b/pkg/image/image_test.go @@ -0,0 +1,116 @@ 
+// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package image + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWithVersion(t *testing.T) { + cases := []struct { + desc string + image string + version string + expected string + hasErr bool + }{ + { + desc: "image is empty", + hasErr: true, + }, + { + desc: "invalid image", + image: "test:test:test", + hasErr: true, + }, + { + desc: "image with tag", + image: "test:test", + expected: "test:test", + }, + { + desc: "image with digest", + image: "test@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expected: "test@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "image with tag and digest", + image: "test:test@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expected: "test:test@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "image without tag and digest", + image: "test", + version: "test", + expected: "test:test", + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + s, err := withVersion(c.image, c.version) + if c.hasErr { + require.Error(tt, err, c.desc) + } + assert.Equal(tt, c.expected, s, c.desc) + }) + } +} + +func TestUntagged(t *testing.T) { + cases := []struct { + desc string + m Untagged + version 
string + expected string + }{ + { + desc: "pd default", + m: PD, + version: "test", + expected: "pingcap/pd:test", + }, + { + desc: "tidb default", + m: TiDB, + version: "test", + expected: "pingcap/tidb:test", + }, + { + desc: "tikv default", + m: TiKV, + version: "test", + expected: "pingcap/tikv:test", + }, + { + desc: "tiflash default", + m: TiFlash, + version: "test", + expected: "pingcap/tiflash:test", + }, + } + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + assert.Equal(tt, c.expected, c.m.Image(nil, c.version), c.desc) + }) + } +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go new file mode 100644 index 00000000000..2ac0b573ca6 --- /dev/null +++ b/pkg/metrics/metrics.go @@ -0,0 +1,38 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + + // ControllerPanic is a counter to record the number of panics in the controller. 
+ ControllerPanic = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "tidb_operator", + Subsystem: "controller", + Name: "panic_total", + Help: "The total number of panics in the controller", + }, []string{}, + ) +) + +func init() { + // Register custom metrics with the global prometheus registry + metrics.Registry.MustRegister(ControllerPanic) +} diff --git a/pkg/overlay/overlay.go b/pkg/overlay/overlay.go new file mode 100644 index 00000000000..efc7c02c284 --- /dev/null +++ b/pkg/overlay/overlay.go @@ -0,0 +1,75 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package overlay + +import ( + corev1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +func OverlayPod(pod *corev1.Pod, overlay *v1alpha1.PodOverlay) { + if overlay == nil { + return + } + src := overlay.DeepCopy() + // Field spec.nodeSelector is an atomic map + // But we hope to overlay it as a granular map + // Because we may inject topology selector into node selector + // + // TODO: validate that conflict keys cannot be added into overlay + // But now, just overwrite the conflict keys. 
+ for k, v := range pod.Spec.NodeSelector { + src.Spec.NodeSelector[k] = v + } + + overlayObjectMeta(&pod.ObjectMeta, convertObjectMeta(&src.ObjectMeta)) + overlayPodSpec(&pod.Spec, src.Spec) +} + +func convertObjectMeta(meta *v1alpha1.ObjectMeta) *metav1.ObjectMeta { + return &metav1.ObjectMeta{ + Annotations: meta.Annotations, + Labels: meta.Labels, + } +} + +// Predefined overlay quantity function +func overlayQuantity(dst, src *resource.Quantity) { + *dst = *src +} + +// Predefined overlay objectmeta function +func overlayObjectMeta(dst, src *metav1.ObjectMeta) { + if src.Labels != nil { + if dst.Labels == nil { + dst.Labels = map[string]string{} + } + for k, v := range src.Labels { + dst.Labels[k] = v + } + } + + if src.Annotations != nil { + if dst.Annotations == nil { + dst.Annotations = map[string]string{} + } + for k, v := range src.Annotations { + dst.Annotations[k] = v + } + } +} diff --git a/pkg/overlay/overlay_test.go b/pkg/overlay/overlay_test.go new file mode 100644 index 00000000000..5bcc06e4071 --- /dev/null +++ b/pkg/overlay/overlay_test.go @@ -0,0 +1,139 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// Case is a single overlay test fixture: applying src onto dst must yield
// expected. It is also consumed by the generated construct* helpers (e.g.
// constructPodSpec) that live outside this file.
type Case[T any] struct {
	expected T
	dst      T
	src      T
}

// Policy is a bitmask that narrows which generated cases are returned.
type Policy uint

const (
	// NoLimit means all cases will be returned
	NoLimit Policy = 0
	// NoZero means cases contain no zero value
	// NOTE(review): iota is 1 on this line, so NoZero == 2 (not 1); the
	// flags are still distinct powers of two, so masking works — confirm
	// the value itself is not relied upon anywhere.
	NoZero Policy = 1 << iota
	// NoNil means cases contain no nil value
	NoNil
	// NoNotEqual means cases only have same src and dst
	NoNotEqual
)

// randString returns a 10-character random string for fixture values.
func randString() string {
	return random.Random(10)
}

// TestOverlayPodSpec checks that overlaying src onto dst produces the
// expected PodSpec for every generated case.
func TestOverlayPodSpec(t *testing.T) {
	cases := constructPodSpec(NoLimit)
	for _, c := range cases {
		overlayPodSpec(&c.dst, &c.src)
		assert.Equal(t, &c.expected, &c.dst)
	}
}

// constructQuantity returns fixtures for resource.Quantity overlays:
// src replaces dst (see overlayQuantity).
func constructQuantity(_ Policy) []Case[resource.Quantity] {
	return []Case[resource.Quantity]{
		{
			expected: resource.Quantity{},
			dst:      resource.Quantity{},
			src:      resource.Quantity{},
		},
		{
			expected: resource.MustParse("10"),
			dst:      resource.MustParse("20"),
			src:      resource.MustParse("10"),
		},
		{
			expected: resource.MustParse("20"),
			dst:      resource.MustParse("20"),
			src:      resource.MustParse("20"),
		},
	}
}

// TODO: add more cases
// constructObjectMeta returns fixtures for ObjectMeta overlays.
func constructObjectMeta(_ Policy) []Case[metav1.ObjectMeta] {
	return []Case[metav1.ObjectMeta]{
		{
			expected: metav1.ObjectMeta{},
			dst:      metav1.ObjectMeta{},
			src:      metav1.ObjectMeta{},
		},
	}
}

// constructMapStringToString returns fixtures for granular string-map
// overlays: keys are merged and src wins on conflicts; a nil dst with a
// non-nil src yields an (empty or populated) non-nil map.
func constructMapStringToString(_ Policy) []Case[map[string]string] {
	cases := []Case[map[string]string]{
		{
			expected: nil,
			dst:      nil,
			src:      nil,
		},
		{
			expected: map[string]string{},
			dst:      map[string]string{},
			src:      map[string]string{},
		},
		{
			expected: map[string]string{},
			dst:      map[string]string{},
			src:      nil,
		},
		{
			expected: map[string]string{},
			dst:      nil,
			src:      map[string]string{},
		},
		{
			expected: map[string]string{"aa": "aa"},
			dst:      map[string]string{"aa": "aa"},
			src:      map[string]string{"aa": "aa"},
		},
		{
			expected: map[string]string{"aa": "aa"},
			dst:      map[string]string{"aa": "aa"},
			src:      nil,
		},
		{
			expected: map[string]string{"aa": "aa"},
			dst:      nil,
			src:      map[string]string{"aa": "aa"},
		},
		{
			expected: map[string]string{"aa": "aa", "bb": "bb"},
			dst:      map[string]string{"aa": "aa"},
			src:      map[string]string{"bb": "bb"},
		},
		{
			expected: map[string]string{"aa": "bb"},
			dst:      map[string]string{"aa": "aa"},
			src:      map[string]string{"aa": "bb"},
		},
	}

	return cases
}
+ +package overlay + +import ( + strconv "strconv" + strings "strings" + + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +func overlayPodSpec(dst, src *v1.PodSpec) { + if dst.Volumes != nil && src.Volumes != nil { + overlayMapListSliceVolume(&(dst.Volumes), &(src.Volumes)) + } else if dst.Volumes == nil { + dst.Volumes = src.Volumes + } + if dst.InitContainers != nil && src.InitContainers != nil { + overlayMapListSliceContainer(&(dst.InitContainers), &(src.InitContainers)) + } else if dst.InitContainers == nil { + dst.InitContainers = src.InitContainers + } + if dst.Containers != nil && src.Containers != nil { + overlayMapListSliceContainer(&(dst.Containers), &(src.Containers)) + } else if dst.Containers == nil { + dst.Containers = src.Containers + } + if dst.EphemeralContainers != nil && src.EphemeralContainers != nil { + overlayMapListSliceEphemeralContainer(&(dst.EphemeralContainers), &(src.EphemeralContainers)) + } else if dst.EphemeralContainers == nil { + dst.EphemeralContainers = src.EphemeralContainers + } + overlayRestartPolicy(&(dst.RestartPolicy), &(src.RestartPolicy)) + if dst.TerminationGracePeriodSeconds != nil && src.TerminationGracePeriodSeconds != nil { + if *src.TerminationGracePeriodSeconds != 0 { + *dst.TerminationGracePeriodSeconds = *src.TerminationGracePeriodSeconds + } + } else if dst.TerminationGracePeriodSeconds == nil { + dst.TerminationGracePeriodSeconds = src.TerminationGracePeriodSeconds + } + if dst.ActiveDeadlineSeconds != nil && src.ActiveDeadlineSeconds != nil { + if *src.ActiveDeadlineSeconds != 0 { + *dst.ActiveDeadlineSeconds = *src.ActiveDeadlineSeconds + } + } else if dst.ActiveDeadlineSeconds == nil { + dst.ActiveDeadlineSeconds = src.ActiveDeadlineSeconds + } + overlayDNSPolicy(&(dst.DNSPolicy), &(src.DNSPolicy)) + if dst.NodeSelector != nil && src.NodeSelector != nil { + 
overlayAtomicMapStringToString(&(dst.NodeSelector), &(src.NodeSelector)) + } else if dst.NodeSelector == nil { + dst.NodeSelector = src.NodeSelector + } + if src.ServiceAccountName != "" { + dst.ServiceAccountName = src.ServiceAccountName + } + if src.DeprecatedServiceAccount != "" { + dst.DeprecatedServiceAccount = src.DeprecatedServiceAccount + } + if dst.AutomountServiceAccountToken != nil && src.AutomountServiceAccountToken != nil { + if *src.AutomountServiceAccountToken { + *dst.AutomountServiceAccountToken = *src.AutomountServiceAccountToken + } + } else if dst.AutomountServiceAccountToken == nil { + dst.AutomountServiceAccountToken = src.AutomountServiceAccountToken + } + if src.NodeName != "" { + dst.NodeName = src.NodeName + } + if src.HostNetwork { + dst.HostNetwork = src.HostNetwork + } + if src.HostPID { + dst.HostPID = src.HostPID + } + if src.HostIPC { + dst.HostIPC = src.HostIPC + } + if dst.ShareProcessNamespace != nil && src.ShareProcessNamespace != nil { + if *src.ShareProcessNamespace { + *dst.ShareProcessNamespace = *src.ShareProcessNamespace + } + } else if dst.ShareProcessNamespace == nil { + dst.ShareProcessNamespace = src.ShareProcessNamespace + } + if dst.SecurityContext != nil && src.SecurityContext != nil { + overlayPodSecurityContext(dst.SecurityContext, src.SecurityContext) + } else if dst.SecurityContext == nil { + dst.SecurityContext = src.SecurityContext + } + if dst.ImagePullSecrets != nil && src.ImagePullSecrets != nil { + overlayMapListSliceLocalObjectReference(&(dst.ImagePullSecrets), &(src.ImagePullSecrets)) + } else if dst.ImagePullSecrets == nil { + dst.ImagePullSecrets = src.ImagePullSecrets + } + if src.Hostname != "" { + dst.Hostname = src.Hostname + } + if src.Subdomain != "" { + dst.Subdomain = src.Subdomain + } + if dst.Affinity != nil && src.Affinity != nil { + overlayAffinity(dst.Affinity, src.Affinity) + } else if dst.Affinity == nil { + dst.Affinity = src.Affinity + } + if src.SchedulerName != "" { + 
dst.SchedulerName = src.SchedulerName + } + if dst.Tolerations != nil && src.Tolerations != nil { + overlayAtomicListSliceToleration(&(dst.Tolerations), &(src.Tolerations)) + } else if dst.Tolerations == nil { + dst.Tolerations = src.Tolerations + } + if dst.HostAliases != nil && src.HostAliases != nil { + overlayMapListSliceHostAlias(&(dst.HostAliases), &(src.HostAliases)) + } else if dst.HostAliases == nil { + dst.HostAliases = src.HostAliases + } + if src.PriorityClassName != "" { + dst.PriorityClassName = src.PriorityClassName + } + if dst.Priority != nil && src.Priority != nil { + if *src.Priority != 0 { + *dst.Priority = *src.Priority + } + } else if dst.Priority == nil { + dst.Priority = src.Priority + } + if dst.DNSConfig != nil && src.DNSConfig != nil { + overlayPodDNSConfig(dst.DNSConfig, src.DNSConfig) + } else if dst.DNSConfig == nil { + dst.DNSConfig = src.DNSConfig + } + if dst.ReadinessGates != nil && src.ReadinessGates != nil { + overlayAtomicListSlicePodReadinessGate(&(dst.ReadinessGates), &(src.ReadinessGates)) + } else if dst.ReadinessGates == nil { + dst.ReadinessGates = src.ReadinessGates + } + if dst.RuntimeClassName != nil && src.RuntimeClassName != nil { + if *src.RuntimeClassName != "" { + *dst.RuntimeClassName = *src.RuntimeClassName + } + } else if dst.RuntimeClassName == nil { + dst.RuntimeClassName = src.RuntimeClassName + } + if dst.EnableServiceLinks != nil && src.EnableServiceLinks != nil { + if *src.EnableServiceLinks { + *dst.EnableServiceLinks = *src.EnableServiceLinks + } + } else if dst.EnableServiceLinks == nil { + dst.EnableServiceLinks = src.EnableServiceLinks + } + if dst.PreemptionPolicy != nil && src.PreemptionPolicy != nil { + overlayPreemptionPolicy(dst.PreemptionPolicy, src.PreemptionPolicy) + } else if dst.PreemptionPolicy == nil { + dst.PreemptionPolicy = src.PreemptionPolicy + } + if dst.Overhead != nil && src.Overhead != nil { + overlayResourceList(&(dst.Overhead), &(src.Overhead)) + } else if dst.Overhead == nil { 
+ dst.Overhead = src.Overhead + } + if dst.TopologySpreadConstraints != nil && src.TopologySpreadConstraints != nil { + overlayMapListSliceTopologySpreadConstraint(&(dst.TopologySpreadConstraints), &(src.TopologySpreadConstraints)) + } else if dst.TopologySpreadConstraints == nil { + dst.TopologySpreadConstraints = src.TopologySpreadConstraints + } + if dst.SetHostnameAsFQDN != nil && src.SetHostnameAsFQDN != nil { + if *src.SetHostnameAsFQDN { + *dst.SetHostnameAsFQDN = *src.SetHostnameAsFQDN + } + } else if dst.SetHostnameAsFQDN == nil { + dst.SetHostnameAsFQDN = src.SetHostnameAsFQDN + } + if dst.OS != nil && src.OS != nil { + overlayPodOS(dst.OS, src.OS) + } else if dst.OS == nil { + dst.OS = src.OS + } + if dst.HostUsers != nil && src.HostUsers != nil { + if *src.HostUsers { + *dst.HostUsers = *src.HostUsers + } + } else if dst.HostUsers == nil { + dst.HostUsers = src.HostUsers + } + if dst.SchedulingGates != nil && src.SchedulingGates != nil { + overlayMapListSlicePodSchedulingGate(&(dst.SchedulingGates), &(src.SchedulingGates)) + } else if dst.SchedulingGates == nil { + dst.SchedulingGates = src.SchedulingGates + } + if dst.ResourceClaims != nil && src.ResourceClaims != nil { + overlayMapListSlicePodResourceClaim(&(dst.ResourceClaims), &(src.ResourceClaims)) + } else if dst.ResourceClaims == nil { + dst.ResourceClaims = src.ResourceClaims + } +} +func overlayMapListSliceVolume(dst, src *[]v1.Volume) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayVolume(&((*dst)[di]), &((*src)[si])) + } +} +func StringToString(val string) string { + return val +} +func overlayVolume(dst, src *v1.Volume) { + 
if src.Name != "" { + dst.Name = src.Name + } + overlayVolumeSource(&(dst.VolumeSource), &(src.VolumeSource)) +} +func overlayVolumeSource(dst, src *v1.VolumeSource) { + if dst.HostPath != nil && src.HostPath != nil { + overlayHostPathVolumeSource(dst.HostPath, src.HostPath) + } else if dst.HostPath == nil { + dst.HostPath = src.HostPath + } + if dst.EmptyDir != nil && src.EmptyDir != nil { + overlayEmptyDirVolumeSource(dst.EmptyDir, src.EmptyDir) + } else if dst.EmptyDir == nil { + dst.EmptyDir = src.EmptyDir + } + if dst.GCEPersistentDisk != nil && src.GCEPersistentDisk != nil { + overlayGCEPersistentDiskVolumeSource(dst.GCEPersistentDisk, src.GCEPersistentDisk) + } else if dst.GCEPersistentDisk == nil { + dst.GCEPersistentDisk = src.GCEPersistentDisk + } + if dst.AWSElasticBlockStore != nil && src.AWSElasticBlockStore != nil { + overlayAWSElasticBlockStoreVolumeSource(dst.AWSElasticBlockStore, src.AWSElasticBlockStore) + } else if dst.AWSElasticBlockStore == nil { + dst.AWSElasticBlockStore = src.AWSElasticBlockStore + } + if dst.GitRepo != nil && src.GitRepo != nil { + overlayGitRepoVolumeSource(dst.GitRepo, src.GitRepo) + } else if dst.GitRepo == nil { + dst.GitRepo = src.GitRepo + } + if dst.Secret != nil && src.Secret != nil { + overlaySecretVolumeSource(dst.Secret, src.Secret) + } else if dst.Secret == nil { + dst.Secret = src.Secret + } + if dst.NFS != nil && src.NFS != nil { + overlayNFSVolumeSource(dst.NFS, src.NFS) + } else if dst.NFS == nil { + dst.NFS = src.NFS + } + if dst.ISCSI != nil && src.ISCSI != nil { + overlayISCSIVolumeSource(dst.ISCSI, src.ISCSI) + } else if dst.ISCSI == nil { + dst.ISCSI = src.ISCSI + } + if dst.Glusterfs != nil && src.Glusterfs != nil { + overlayGlusterfsVolumeSource(dst.Glusterfs, src.Glusterfs) + } else if dst.Glusterfs == nil { + dst.Glusterfs = src.Glusterfs + } + if dst.PersistentVolumeClaim != nil && src.PersistentVolumeClaim != nil { + overlayPersistentVolumeClaimVolumeSource(dst.PersistentVolumeClaim, 
src.PersistentVolumeClaim) + } else if dst.PersistentVolumeClaim == nil { + dst.PersistentVolumeClaim = src.PersistentVolumeClaim + } + if dst.RBD != nil && src.RBD != nil { + overlayRBDVolumeSource(dst.RBD, src.RBD) + } else if dst.RBD == nil { + dst.RBD = src.RBD + } + if dst.FlexVolume != nil && src.FlexVolume != nil { + overlayFlexVolumeSource(dst.FlexVolume, src.FlexVolume) + } else if dst.FlexVolume == nil { + dst.FlexVolume = src.FlexVolume + } + if dst.Cinder != nil && src.Cinder != nil { + overlayCinderVolumeSource(dst.Cinder, src.Cinder) + } else if dst.Cinder == nil { + dst.Cinder = src.Cinder + } + if dst.CephFS != nil && src.CephFS != nil { + overlayCephFSVolumeSource(dst.CephFS, src.CephFS) + } else if dst.CephFS == nil { + dst.CephFS = src.CephFS + } + if dst.Flocker != nil && src.Flocker != nil { + overlayFlockerVolumeSource(dst.Flocker, src.Flocker) + } else if dst.Flocker == nil { + dst.Flocker = src.Flocker + } + if dst.DownwardAPI != nil && src.DownwardAPI != nil { + overlayDownwardAPIVolumeSource(dst.DownwardAPI, src.DownwardAPI) + } else if dst.DownwardAPI == nil { + dst.DownwardAPI = src.DownwardAPI + } + if dst.FC != nil && src.FC != nil { + overlayFCVolumeSource(dst.FC, src.FC) + } else if dst.FC == nil { + dst.FC = src.FC + } + if dst.AzureFile != nil && src.AzureFile != nil { + overlayAzureFileVolumeSource(dst.AzureFile, src.AzureFile) + } else if dst.AzureFile == nil { + dst.AzureFile = src.AzureFile + } + if dst.ConfigMap != nil && src.ConfigMap != nil { + overlayConfigMapVolumeSource(dst.ConfigMap, src.ConfigMap) + } else if dst.ConfigMap == nil { + dst.ConfigMap = src.ConfigMap + } + if dst.VsphereVolume != nil && src.VsphereVolume != nil { + overlayVsphereVirtualDiskVolumeSource(dst.VsphereVolume, src.VsphereVolume) + } else if dst.VsphereVolume == nil { + dst.VsphereVolume = src.VsphereVolume + } + if dst.Quobyte != nil && src.Quobyte != nil { + overlayQuobyteVolumeSource(dst.Quobyte, src.Quobyte) + } else if dst.Quobyte == nil { + 
dst.Quobyte = src.Quobyte + } + if dst.AzureDisk != nil && src.AzureDisk != nil { + overlayAzureDiskVolumeSource(dst.AzureDisk, src.AzureDisk) + } else if dst.AzureDisk == nil { + dst.AzureDisk = src.AzureDisk + } + if dst.PhotonPersistentDisk != nil && src.PhotonPersistentDisk != nil { + overlayPhotonPersistentDiskVolumeSource(dst.PhotonPersistentDisk, src.PhotonPersistentDisk) + } else if dst.PhotonPersistentDisk == nil { + dst.PhotonPersistentDisk = src.PhotonPersistentDisk + } + if dst.Projected != nil && src.Projected != nil { + overlayProjectedVolumeSource(dst.Projected, src.Projected) + } else if dst.Projected == nil { + dst.Projected = src.Projected + } + if dst.PortworxVolume != nil && src.PortworxVolume != nil { + overlayPortworxVolumeSource(dst.PortworxVolume, src.PortworxVolume) + } else if dst.PortworxVolume == nil { + dst.PortworxVolume = src.PortworxVolume + } + if dst.ScaleIO != nil && src.ScaleIO != nil { + overlayScaleIOVolumeSource(dst.ScaleIO, src.ScaleIO) + } else if dst.ScaleIO == nil { + dst.ScaleIO = src.ScaleIO + } + if dst.StorageOS != nil && src.StorageOS != nil { + overlayStorageOSVolumeSource(dst.StorageOS, src.StorageOS) + } else if dst.StorageOS == nil { + dst.StorageOS = src.StorageOS + } + if dst.CSI != nil && src.CSI != nil { + overlayCSIVolumeSource(dst.CSI, src.CSI) + } else if dst.CSI == nil { + dst.CSI = src.CSI + } + if dst.Ephemeral != nil && src.Ephemeral != nil { + overlayEphemeralVolumeSource(dst.Ephemeral, src.Ephemeral) + } else if dst.Ephemeral == nil { + dst.Ephemeral = src.Ephemeral + } + if dst.Image != nil && src.Image != nil { + overlayImageVolumeSource(dst.Image, src.Image) + } else if dst.Image == nil { + dst.Image = src.Image + } +} +func overlayHostPathVolumeSource(dst, src *v1.HostPathVolumeSource) { + if src.Path != "" { + dst.Path = src.Path + } + if dst.Type != nil && src.Type != nil { + overlayHostPathType(dst.Type, src.Type) + } else if dst.Type == nil { + dst.Type = src.Type + } +} +func 
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated overlay (deep-merge) helpers — presumably
// emitted by cmd/overlay-gen (see repo head); do not hand-edit, regenerate.
// The physical formatting below was flattened by patch extraction, so each
// source line holds many logical lines. Merge convention visible throughout:
// a non-zero/non-empty scalar in src overwrites dst; for pointer fields, merge
// recursively when both sides are non-nil, otherwise adopt src only when dst
// is nil (a zero-valued *src can therefore never clear an existing *dst).
// "Atomic" helpers replace the dst value wholesale.
// ---------------------------------------------------------------------------
// Completes overlayHostPathType (its `func` keyword sits on the previous
// physical line), then defines overlayEmptyDirVolumeSource,
// overlayStorageMedium, overlayGCEPersistentDiskVolumeSource,
// overlayAWSElasticBlockStoreVolumeSource, overlayGitRepoVolumeSource, and
// opens overlaySecretVolumeSource. String-typedef overlays convert through
// (*string) and copy only when src is non-empty.
overlayHostPathType(dst, src *v1.HostPathType) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayEmptyDirVolumeSource(dst, src *v1.EmptyDirVolumeSource) { + overlayStorageMedium(&(dst.Medium), &(src.Medium)) + if dst.SizeLimit != nil && src.SizeLimit != nil { + overlayQuantity(dst.SizeLimit, src.SizeLimit) + } else if dst.SizeLimit == nil { + dst.SizeLimit = src.SizeLimit + } +} +func overlayStorageMedium(dst, src *v1.StorageMedium) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayGCEPersistentDiskVolumeSource(dst, src *v1.GCEPersistentDiskVolumeSource) { + if src.PDName != "" { + dst.PDName = src.PDName + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.Partition != 0 { + dst.Partition = src.Partition + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayAWSElasticBlockStoreVolumeSource(dst, src *v1.AWSElasticBlockStoreVolumeSource) { + if src.VolumeID != "" { + dst.VolumeID = src.VolumeID + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.Partition != 0 { + dst.Partition = src.Partition + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayGitRepoVolumeSource(dst, src *v1.GitRepoVolumeSource) { + if src.Repository != "" { + dst.Repository = src.Repository + } + if src.Revision != "" { + dst.Revision = src.Revision + } + if src.Directory != "" { + dst.Directory = src.Directory + } +} +func overlaySecretVolumeSource(dst, src *v1.SecretVolumeSource) { + if src.SecretName != "" { + dst.SecretName = src.SecretName + } + if dst.Items != nil && src.Items != nil { + overlayAtomicListSliceKeyToPath(&(dst.Items), &(src.Items)) + } else if dst.Items == nil { + dst.Items = src.Items + } + if dst.DefaultMode != nil && src.DefaultMode != nil { + if *src.DefaultMode != 0 { + *dst.DefaultMode = *src.DefaultMode + } + } else if dst.DefaultMode == nil { + dst.DefaultMode = src.DefaultMode + } + if 
// Finishes overlaySecretVolumeSource, then overlayAtomicListSliceKeyToPath
// (atomic list: a non-empty src slice replaces dst outright, an empty one is
// a no-op), overlayNFSVolumeSource, overlayISCSIVolumeSource,
// overlayAtomicListSliceString, overlayAtomicLocalObjectReference
// (unconditional *dst = *src — src wins even when zero-valued), and opens
// overlayGlusterfsVolumeSource.
dst.Optional != nil && src.Optional != nil { + if *src.Optional { + *dst.Optional = *src.Optional + } + } else if dst.Optional == nil { + dst.Optional = src.Optional + } +} +func overlayAtomicListSliceKeyToPath(dst, src *[]v1.KeyToPath) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayNFSVolumeSource(dst, src *v1.NFSVolumeSource) { + if src.Server != "" { + dst.Server = src.Server + } + if src.Path != "" { + dst.Path = src.Path + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayISCSIVolumeSource(dst, src *v1.ISCSIVolumeSource) { + if src.TargetPortal != "" { + dst.TargetPortal = src.TargetPortal + } + if src.IQN != "" { + dst.IQN = src.IQN + } + if src.Lun != 0 { + dst.Lun = src.Lun + } + if src.ISCSIInterface != "" { + dst.ISCSIInterface = src.ISCSIInterface + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } + if dst.Portals != nil && src.Portals != nil { + overlayAtomicListSliceString(&(dst.Portals), &(src.Portals)) + } else if dst.Portals == nil { + dst.Portals = src.Portals + } + if src.DiscoveryCHAPAuth { + dst.DiscoveryCHAPAuth = src.DiscoveryCHAPAuth + } + if src.SessionCHAPAuth { + dst.SessionCHAPAuth = src.SessionCHAPAuth + } + if dst.SecretRef != nil && src.SecretRef != nil { + overlayAtomicLocalObjectReference(dst.SecretRef, src.SecretRef) + } else if dst.SecretRef == nil { + dst.SecretRef = src.SecretRef + } + if dst.InitiatorName != nil && src.InitiatorName != nil { + if *src.InitiatorName != "" { + *dst.InitiatorName = *src.InitiatorName + } + } else if dst.InitiatorName == nil { + dst.InitiatorName = src.InitiatorName + } +} +func overlayAtomicListSliceString(dst, src *[]string) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayAtomicLocalObjectReference(dst, src *v1.LocalObjectReference) { + *dst = *src +} +func overlayGlusterfsVolumeSource(dst, src *v1.GlusterfsVolumeSource) { + if src.EndpointsName != "" { + dst.EndpointsName = 
// Finishes overlayGlusterfsVolumeSource, then
// overlayPersistentVolumeClaimVolumeSource, overlayRBDVolumeSource,
// overlayFlexVolumeSource and overlayMapStringToString (per-key merge: keys
// missing from dst are copied, a non-empty src value overwrites an existing
// one — NOTE(review): a key already in dst can never be overlaid back to "").
// Ends by opening overlayCinderVolumeSource (the `func` keyword is the last
// token of this physical line). Callers guard both maps non-nil before
// calling overlayMapStringToString, so the nil-map write panic is avoided.
src.EndpointsName + } + if src.Path != "" { + dst.Path = src.Path + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayPersistentVolumeClaimVolumeSource(dst, src *v1.PersistentVolumeClaimVolumeSource) { + if src.ClaimName != "" { + dst.ClaimName = src.ClaimName + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayRBDVolumeSource(dst, src *v1.RBDVolumeSource) { + if dst.CephMonitors != nil && src.CephMonitors != nil { + overlayAtomicListSliceString(&(dst.CephMonitors), &(src.CephMonitors)) + } else if dst.CephMonitors == nil { + dst.CephMonitors = src.CephMonitors + } + if src.RBDImage != "" { + dst.RBDImage = src.RBDImage + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.RBDPool != "" { + dst.RBDPool = src.RBDPool + } + if src.RadosUser != "" { + dst.RadosUser = src.RadosUser + } + if src.Keyring != "" { + dst.Keyring = src.Keyring + } + if dst.SecretRef != nil && src.SecretRef != nil { + overlayAtomicLocalObjectReference(dst.SecretRef, src.SecretRef) + } else if dst.SecretRef == nil { + dst.SecretRef = src.SecretRef + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayFlexVolumeSource(dst, src *v1.FlexVolumeSource) { + if src.Driver != "" { + dst.Driver = src.Driver + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if dst.SecretRef != nil && src.SecretRef != nil { + overlayAtomicLocalObjectReference(dst.SecretRef, src.SecretRef) + } else if dst.SecretRef == nil { + dst.SecretRef = src.SecretRef + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } + if dst.Options != nil && src.Options != nil { + overlayMapStringToString(&(dst.Options), &(src.Options)) + } else if dst.Options == nil { + dst.Options = src.Options + } +} +func overlayMapStringToString(dst, src *map[string]string) { + for k := range *src { + vdst, ok := (*dst)[k] + if !ok { + (*dst)[k] = (*src)[k] + continue + } + vsrc := (*src)[k] + if vsrc != "" { + vdst = vsrc + } + (*dst)[k] = vdst + } +} +func 
// Generated overlay helpers (continued; formatting flattened by patch
// extraction — regenerate rather than hand-edit). This span carries the body
// of overlayCinderVolumeSource (its `func` keyword ends the previous physical
// line), then overlayCephFSVolumeSource, overlayFlockerVolumeSource,
// overlayDownwardAPIVolumeSource, overlayAtomicListSliceDownwardAPIVolumeFile
// (atomic: non-empty src replaces dst), and opens overlayFCVolumeSource.
// Same convention as above: non-zero src scalars win; pointers merge when
// both non-nil, else src fills a nil dst.
overlayCinderVolumeSource(dst, src *v1.CinderVolumeSource) { + if src.VolumeID != "" { + dst.VolumeID = src.VolumeID + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } + if dst.SecretRef != nil && src.SecretRef != nil { + overlayAtomicLocalObjectReference(dst.SecretRef, src.SecretRef) + } else if dst.SecretRef == nil { + dst.SecretRef = src.SecretRef + } +} +func overlayCephFSVolumeSource(dst, src *v1.CephFSVolumeSource) { + if dst.Monitors != nil && src.Monitors != nil { + overlayAtomicListSliceString(&(dst.Monitors), &(src.Monitors)) + } else if dst.Monitors == nil { + dst.Monitors = src.Monitors + } + if src.Path != "" { + dst.Path = src.Path + } + if src.User != "" { + dst.User = src.User + } + if src.SecretFile != "" { + dst.SecretFile = src.SecretFile + } + if dst.SecretRef != nil && src.SecretRef != nil { + overlayAtomicLocalObjectReference(dst.SecretRef, src.SecretRef) + } else if dst.SecretRef == nil { + dst.SecretRef = src.SecretRef + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayFlockerVolumeSource(dst, src *v1.FlockerVolumeSource) { + if src.DatasetName != "" { + dst.DatasetName = src.DatasetName + } + if src.DatasetUUID != "" { + dst.DatasetUUID = src.DatasetUUID + } +} +func overlayDownwardAPIVolumeSource(dst, src *v1.DownwardAPIVolumeSource) { + if dst.Items != nil && src.Items != nil { + overlayAtomicListSliceDownwardAPIVolumeFile(&(dst.Items), &(src.Items)) + } else if dst.Items == nil { + dst.Items = src.Items + } + if dst.DefaultMode != nil && src.DefaultMode != nil { + if *src.DefaultMode != 0 { + *dst.DefaultMode = *src.DefaultMode + } + } else if dst.DefaultMode == nil { + dst.DefaultMode = src.DefaultMode + } +} +func overlayAtomicListSliceDownwardAPIVolumeFile(dst, src *[]v1.DownwardAPIVolumeFile) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayFCVolumeSource(dst, src *v1.FCVolumeSource) { + if dst.TargetWWNs != nil && src.TargetWWNs != 
// Finishes overlayFCVolumeSource, then overlayAzureFileVolumeSource,
// overlayConfigMapVolumeSource (merges the embedded LocalObjectReference
// atomically — src replaces it wholesale — then Items/DefaultMode/Optional
// by the usual pointer rules), overlayVsphereVirtualDiskVolumeSource, and
// opens overlayQuobyteVolumeSource.
nil { + overlayAtomicListSliceString(&(dst.TargetWWNs), &(src.TargetWWNs)) + } else if dst.TargetWWNs == nil { + dst.TargetWWNs = src.TargetWWNs + } + if dst.Lun != nil && src.Lun != nil { + if *src.Lun != 0 { + *dst.Lun = *src.Lun + } + } else if dst.Lun == nil { + dst.Lun = src.Lun + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } + if dst.WWIDs != nil && src.WWIDs != nil { + overlayAtomicListSliceString(&(dst.WWIDs), &(src.WWIDs)) + } else if dst.WWIDs == nil { + dst.WWIDs = src.WWIDs + } +} +func overlayAzureFileVolumeSource(dst, src *v1.AzureFileVolumeSource) { + if src.SecretName != "" { + dst.SecretName = src.SecretName + } + if src.ShareName != "" { + dst.ShareName = src.ShareName + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayConfigMapVolumeSource(dst, src *v1.ConfigMapVolumeSource) { + overlayAtomicLocalObjectReference(&(dst.LocalObjectReference), &(src.LocalObjectReference)) + if dst.Items != nil && src.Items != nil { + overlayAtomicListSliceKeyToPath(&(dst.Items), &(src.Items)) + } else if dst.Items == nil { + dst.Items = src.Items + } + if dst.DefaultMode != nil && src.DefaultMode != nil { + if *src.DefaultMode != 0 { + *dst.DefaultMode = *src.DefaultMode + } + } else if dst.DefaultMode == nil { + dst.DefaultMode = src.DefaultMode + } + if dst.Optional != nil && src.Optional != nil { + if *src.Optional { + *dst.Optional = *src.Optional + } + } else if dst.Optional == nil { + dst.Optional = src.Optional + } +} +func overlayVsphereVirtualDiskVolumeSource(dst, src *v1.VsphereVirtualDiskVolumeSource) { + if src.VolumePath != "" { + dst.VolumePath = src.VolumePath + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.StoragePolicyName != "" { + dst.StoragePolicyName = src.StoragePolicyName + } + if src.StoragePolicyID != "" { + dst.StoragePolicyID = src.StoragePolicyID + } +} +func overlayQuobyteVolumeSource(dst, src *v1.QuobyteVolumeSource) { + if 
// Finishes overlayQuobyteVolumeSource, then overlayAzureDiskVolumeSource
// (note *string/*bool fields use the "non-zero deref wins" rule),
// overlayAzureDataDiskCachingMode, overlayAzureDataDiskKind,
// overlayPhotonPersistentDiskVolumeSource, and opens
// overlayProjectedVolumeSource.
src.Registry != "" { + dst.Registry = src.Registry + } + if src.Volume != "" { + dst.Volume = src.Volume + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } + if src.User != "" { + dst.User = src.User + } + if src.Group != "" { + dst.Group = src.Group + } + if src.Tenant != "" { + dst.Tenant = src.Tenant + } +} +func overlayAzureDiskVolumeSource(dst, src *v1.AzureDiskVolumeSource) { + if src.DiskName != "" { + dst.DiskName = src.DiskName + } + if src.DataDiskURI != "" { + dst.DataDiskURI = src.DataDiskURI + } + if dst.CachingMode != nil && src.CachingMode != nil { + overlayAzureDataDiskCachingMode(dst.CachingMode, src.CachingMode) + } else if dst.CachingMode == nil { + dst.CachingMode = src.CachingMode + } + if dst.FSType != nil && src.FSType != nil { + if *src.FSType != "" { + *dst.FSType = *src.FSType + } + } else if dst.FSType == nil { + dst.FSType = src.FSType + } + if dst.ReadOnly != nil && src.ReadOnly != nil { + if *src.ReadOnly { + *dst.ReadOnly = *src.ReadOnly + } + } else if dst.ReadOnly == nil { + dst.ReadOnly = src.ReadOnly + } + if dst.Kind != nil && src.Kind != nil { + overlayAzureDataDiskKind(dst.Kind, src.Kind) + } else if dst.Kind == nil { + dst.Kind = src.Kind + } +} +func overlayAzureDataDiskCachingMode(dst, src *v1.AzureDataDiskCachingMode) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayAzureDataDiskKind(dst, src *v1.AzureDataDiskKind) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayPhotonPersistentDiskVolumeSource(dst, src *v1.PhotonPersistentDiskVolumeSource) { + if src.PdID != "" { + dst.PdID = src.PdID + } + if src.FSType != "" { + dst.FSType = src.FSType + } +} +func overlayProjectedVolumeSource(dst, src *v1.ProjectedVolumeSource) { + if dst.Sources != nil && src.Sources != nil { + overlayAtomicListSliceVolumeProjection(&(dst.Sources), &(src.Sources)) + } else if dst.Sources == nil { + dst.Sources = src.Sources + } + 
// Generated overlay helpers (continued; formatting flattened by patch
// extraction — regenerate rather than hand-edit). Finishes
// overlayProjectedVolumeSource, then overlayAtomicListSliceVolumeProjection
// (atomic: non-empty src replaces dst), overlayPortworxVolumeSource,
// overlayScaleIOVolumeSource, overlayStorageOSVolumeSource, and opens
// overlayCSIVolumeSource. Non-zero src scalars win throughout.
if dst.DefaultMode != nil && src.DefaultMode != nil { + if *src.DefaultMode != 0 { + *dst.DefaultMode = *src.DefaultMode + } + } else if dst.DefaultMode == nil { + dst.DefaultMode = src.DefaultMode + } +} +func overlayAtomicListSliceVolumeProjection(dst, src *[]v1.VolumeProjection) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayPortworxVolumeSource(dst, src *v1.PortworxVolumeSource) { + if src.VolumeID != "" { + dst.VolumeID = src.VolumeID + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayScaleIOVolumeSource(dst, src *v1.ScaleIOVolumeSource) { + if src.Gateway != "" { + dst.Gateway = src.Gateway + } + if src.System != "" { + dst.System = src.System + } + if dst.SecretRef != nil && src.SecretRef != nil { + overlayAtomicLocalObjectReference(dst.SecretRef, src.SecretRef) + } else if dst.SecretRef == nil { + dst.SecretRef = src.SecretRef + } + if src.SSLEnabled { + dst.SSLEnabled = src.SSLEnabled + } + if src.ProtectionDomain != "" { + dst.ProtectionDomain = src.ProtectionDomain + } + if src.StoragePool != "" { + dst.StoragePool = src.StoragePool + } + if src.StorageMode != "" { + dst.StorageMode = src.StorageMode + } + if src.VolumeName != "" { + dst.VolumeName = src.VolumeName + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } +} +func overlayStorageOSVolumeSource(dst, src *v1.StorageOSVolumeSource) { + if src.VolumeName != "" { + dst.VolumeName = src.VolumeName + } + if src.VolumeNamespace != "" { + dst.VolumeNamespace = src.VolumeNamespace + } + if src.FSType != "" { + dst.FSType = src.FSType + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } + if dst.SecretRef != nil && src.SecretRef != nil { + overlayAtomicLocalObjectReference(dst.SecretRef, src.SecretRef) + } else if dst.SecretRef == nil { + dst.SecretRef = src.SecretRef + } +} +func overlayCSIVolumeSource(dst, src *v1.CSIVolumeSource) { + if 
// Finishes overlayCSIVolumeSource (VolumeAttributes merged per-key via
// overlayMapStringToString), then overlayEphemeralVolumeSource,
// overlayPersistentVolumeClaimTemplate (delegates to overlayObjectMeta and
// overlayPersistentVolumeClaimSpec — both defined elsewhere in this file),
// and opens overlayPersistentVolumeClaimSpec.
src.Driver != "" { + dst.Driver = src.Driver + } + if dst.ReadOnly != nil && src.ReadOnly != nil { + if *src.ReadOnly { + *dst.ReadOnly = *src.ReadOnly + } + } else if dst.ReadOnly == nil { + dst.ReadOnly = src.ReadOnly + } + if dst.FSType != nil && src.FSType != nil { + if *src.FSType != "" { + *dst.FSType = *src.FSType + } + } else if dst.FSType == nil { + dst.FSType = src.FSType + } + if dst.VolumeAttributes != nil && src.VolumeAttributes != nil { + overlayMapStringToString(&(dst.VolumeAttributes), &(src.VolumeAttributes)) + } else if dst.VolumeAttributes == nil { + dst.VolumeAttributes = src.VolumeAttributes + } + if dst.NodePublishSecretRef != nil && src.NodePublishSecretRef != nil { + overlayAtomicLocalObjectReference(dst.NodePublishSecretRef, src.NodePublishSecretRef) + } else if dst.NodePublishSecretRef == nil { + dst.NodePublishSecretRef = src.NodePublishSecretRef + } +} +func overlayEphemeralVolumeSource(dst, src *v1.EphemeralVolumeSource) { + if dst.VolumeClaimTemplate != nil && src.VolumeClaimTemplate != nil { + overlayPersistentVolumeClaimTemplate(dst.VolumeClaimTemplate, src.VolumeClaimTemplate) + } else if dst.VolumeClaimTemplate == nil { + dst.VolumeClaimTemplate = src.VolumeClaimTemplate + } +} +func overlayPersistentVolumeClaimTemplate(dst, src *v1.PersistentVolumeClaimTemplate) { + overlayObjectMeta(&(dst.ObjectMeta), &(src.ObjectMeta)) + overlayPersistentVolumeClaimSpec(&(dst.Spec), &(src.Spec)) +} +func overlayPersistentVolumeClaimSpec(dst, src *v1.PersistentVolumeClaimSpec) { + if dst.AccessModes != nil && src.AccessModes != nil { + overlayAtomicListSlicePersistentVolumeAccessMode(&(dst.AccessModes), &(src.AccessModes)) + } else if dst.AccessModes == nil { + dst.AccessModes = src.AccessModes + } + if dst.Selector != nil && src.Selector != nil { + overlayLabelSelector(dst.Selector, src.Selector) + } else if dst.Selector == nil { + dst.Selector = src.Selector + } + overlayVolumeResourceRequirements(&(dst.Resources), &(src.Resources)) + if 
// Finishes overlayPersistentVolumeClaimSpec (DataSource is replaced
// atomically, DataSourceRef merged field-by-field), then
// overlayAtomicListSlicePersistentVolumeAccessMode, overlayLabelSelector
// (MatchLabels per-key, MatchExpressions atomic), and opens the signature of
// overlayAtomicListSliceLabelSelectorRequirement.
src.VolumeName != "" { + dst.VolumeName = src.VolumeName + } + if dst.StorageClassName != nil && src.StorageClassName != nil { + if *src.StorageClassName != "" { + *dst.StorageClassName = *src.StorageClassName + } + } else if dst.StorageClassName == nil { + dst.StorageClassName = src.StorageClassName + } + if dst.VolumeMode != nil && src.VolumeMode != nil { + overlayPersistentVolumeMode(dst.VolumeMode, src.VolumeMode) + } else if dst.VolumeMode == nil { + dst.VolumeMode = src.VolumeMode + } + if dst.DataSource != nil && src.DataSource != nil { + overlayAtomicTypedLocalObjectReference(dst.DataSource, src.DataSource) + } else if dst.DataSource == nil { + dst.DataSource = src.DataSource + } + if dst.DataSourceRef != nil && src.DataSourceRef != nil { + overlayTypedObjectReference(dst.DataSourceRef, src.DataSourceRef) + } else if dst.DataSourceRef == nil { + dst.DataSourceRef = src.DataSourceRef + } + if dst.VolumeAttributesClassName != nil && src.VolumeAttributesClassName != nil { + if *src.VolumeAttributesClassName != "" { + *dst.VolumeAttributesClassName = *src.VolumeAttributesClassName + } + } else if dst.VolumeAttributesClassName == nil { + dst.VolumeAttributesClassName = src.VolumeAttributesClassName + } +} +func overlayAtomicListSlicePersistentVolumeAccessMode(dst, src *[]v1.PersistentVolumeAccessMode) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayLabelSelector(dst, src *metav1.LabelSelector) { + if dst.MatchLabels != nil && src.MatchLabels != nil { + overlayMapStringToString(&(dst.MatchLabels), &(src.MatchLabels)) + } else if dst.MatchLabels == nil { + dst.MatchLabels = src.MatchLabels + } + if dst.MatchExpressions != nil && src.MatchExpressions != nil { + overlayAtomicListSliceLabelSelectorRequirement(&(dst.MatchExpressions), &(src.MatchExpressions)) + } else if dst.MatchExpressions == nil { + dst.MatchExpressions = src.MatchExpressions + } +} +func overlayAtomicListSliceLabelSelectorRequirement(dst, src *[]metav1.LabelSelectorRequirement) 
// Generated overlay helpers (continued; formatting flattened by patch
// extraction — regenerate rather than hand-edit). Carries the body of
// overlayAtomicListSliceLabelSelectorRequirement (signature on the previous
// physical line), then overlayVolumeResourceRequirements, overlayResourceList
// (converts v1.ResourceList to a plain map and merges per-key via
// overlayQuantity — defined elsewhere in this file),
// overlayMapResourceNameToQuantity, overlayPersistentVolumeMode,
// overlayAtomicTypedLocalObjectReference (*dst = *src wholesale),
// overlayTypedObjectReference, overlayImageVolumeSource, and opens
// overlayPullPolicy.
{ + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayVolumeResourceRequirements(dst, src *v1.VolumeResourceRequirements) { + if dst.Limits != nil && src.Limits != nil { + overlayResourceList(&(dst.Limits), &(src.Limits)) + } else if dst.Limits == nil { + dst.Limits = src.Limits + } + if dst.Requests != nil && src.Requests != nil { + overlayResourceList(&(dst.Requests), &(src.Requests)) + } else if dst.Requests == nil { + dst.Requests = src.Requests + } +} +func overlayResourceList(dst, src *v1.ResourceList) { + ndst := (*map[v1.ResourceName]resource.Quantity)(dst) + nsrc := (*map[v1.ResourceName]resource.Quantity)(src) + overlayMapResourceNameToQuantity(ndst, nsrc) +} +func overlayMapResourceNameToQuantity(dst, src *map[v1.ResourceName]resource.Quantity) { + for k := range *src { + vdst, ok := (*dst)[k] + if !ok { + (*dst)[k] = (*src)[k] + continue + } + vsrc := (*src)[k] + overlayQuantity(&(vdst), &(vsrc)) + (*dst)[k] = vdst + } +} +func overlayPersistentVolumeMode(dst, src *v1.PersistentVolumeMode) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayAtomicTypedLocalObjectReference(dst, src *v1.TypedLocalObjectReference) { + *dst = *src +} +func overlayTypedObjectReference(dst, src *v1.TypedObjectReference) { + if dst.APIGroup != nil && src.APIGroup != nil { + if *src.APIGroup != "" { + *dst.APIGroup = *src.APIGroup + } + } else if dst.APIGroup == nil { + dst.APIGroup = src.APIGroup + } + if src.Kind != "" { + dst.Kind = src.Kind + } + if src.Name != "" { + dst.Name = src.Name + } + if dst.Namespace != nil && src.Namespace != nil { + if *src.Namespace != "" { + *dst.Namespace = *src.Namespace + } + } else if dst.Namespace == nil { + dst.Namespace = src.Namespace + } +} +func overlayImageVolumeSource(dst, src *v1.ImageVolumeSource) { + if src.Reference != "" { + dst.Reference = src.Reference + } + overlayPullPolicy(&(dst.PullPolicy), &(src.PullPolicy)) +} +func overlayPullPolicy(dst, src 
// Finishes overlayPullPolicy, then overlayMapListSliceContainer — a
// "list-map" merge: dst containers are indexed by Name (via StringToString,
// defined elsewhere in this file), matching src entries are merged in place
// with overlayContainer, unmatched src entries are appended — and opens
// overlayContainer, which merges every Container field by the usual rules
// (atomic Command/Args/EnvFrom, list-map Ports/Env/VolumeMounts/VolumeDevices,
// recursive probes/lifecycle/securityContext).
*v1.PullPolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayMapListSliceContainer(dst, src *[]v1.Container) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayContainer(&((*dst)[di]), &((*src)[si])) + } +} +func overlayContainer(dst, src *v1.Container) { + if src.Name != "" { + dst.Name = src.Name + } + if src.Image != "" { + dst.Image = src.Image + } + if dst.Command != nil && src.Command != nil { + overlayAtomicListSliceString(&(dst.Command), &(src.Command)) + } else if dst.Command == nil { + dst.Command = src.Command + } + if dst.Args != nil && src.Args != nil { + overlayAtomicListSliceString(&(dst.Args), &(src.Args)) + } else if dst.Args == nil { + dst.Args = src.Args + } + if src.WorkingDir != "" { + dst.WorkingDir = src.WorkingDir + } + if dst.Ports != nil && src.Ports != nil { + overlayMapListSliceContainerPort(&(dst.Ports), &(src.Ports)) + } else if dst.Ports == nil { + dst.Ports = src.Ports + } + if dst.EnvFrom != nil && src.EnvFrom != nil { + overlayAtomicListSliceEnvFromSource(&(dst.EnvFrom), &(src.EnvFrom)) + } else if dst.EnvFrom == nil { + dst.EnvFrom = src.EnvFrom + } + if dst.Env != nil && src.Env != nil { + overlayMapListSliceEnvVar(&(dst.Env), &(src.Env)) + } else if dst.Env == nil { + dst.Env = src.Env + } + overlayResourceRequirements(&(dst.Resources), &(src.Resources)) + if dst.ResizePolicy != nil && src.ResizePolicy != nil { + overlayAtomicListSliceContainerResizePolicy(&(dst.ResizePolicy), &(src.ResizePolicy)) + } else if dst.ResizePolicy == nil { + dst.ResizePolicy = src.ResizePolicy + } + if dst.RestartPolicy != nil 
// Continues overlayContainer: RestartPolicy, VolumeMounts/VolumeDevices
// (list-map merges), the three probes and Lifecycle (recursive merges),
// termination-message fields, image pull policy, SecurityContext
// (overlaySecurityContext defined elsewhere in this file), and the
// Stdin/StdinOnce/TTY booleans (true in src wins; false can never unset).
&& src.RestartPolicy != nil { + overlayContainerRestartPolicy(dst.RestartPolicy, src.RestartPolicy) + } else if dst.RestartPolicy == nil { + dst.RestartPolicy = src.RestartPolicy + } + if dst.VolumeMounts != nil && src.VolumeMounts != nil { + overlayMapListSliceVolumeMount(&(dst.VolumeMounts), &(src.VolumeMounts)) + } else if dst.VolumeMounts == nil { + dst.VolumeMounts = src.VolumeMounts + } + if dst.VolumeDevices != nil && src.VolumeDevices != nil { + overlayMapListSliceVolumeDevice(&(dst.VolumeDevices), &(src.VolumeDevices)) + } else if dst.VolumeDevices == nil { + dst.VolumeDevices = src.VolumeDevices + } + if dst.LivenessProbe != nil && src.LivenessProbe != nil { + overlayProbe(dst.LivenessProbe, src.LivenessProbe) + } else if dst.LivenessProbe == nil { + dst.LivenessProbe = src.LivenessProbe + } + if dst.ReadinessProbe != nil && src.ReadinessProbe != nil { + overlayProbe(dst.ReadinessProbe, src.ReadinessProbe) + } else if dst.ReadinessProbe == nil { + dst.ReadinessProbe = src.ReadinessProbe + } + if dst.StartupProbe != nil && src.StartupProbe != nil { + overlayProbe(dst.StartupProbe, src.StartupProbe) + } else if dst.StartupProbe == nil { + dst.StartupProbe = src.StartupProbe + } + if dst.Lifecycle != nil && src.Lifecycle != nil { + overlayLifecycle(dst.Lifecycle, src.Lifecycle) + } else if dst.Lifecycle == nil { + dst.Lifecycle = src.Lifecycle + } + if src.TerminationMessagePath != "" { + dst.TerminationMessagePath = src.TerminationMessagePath + } + overlayTerminationMessagePolicy(&(dst.TerminationMessagePolicy), &(src.TerminationMessagePolicy)) + overlayPullPolicy(&(dst.ImagePullPolicy), &(src.ImagePullPolicy)) + if dst.SecurityContext != nil && src.SecurityContext != nil { + overlaySecurityContext(dst.SecurityContext, src.SecurityContext) + } else if dst.SecurityContext == nil { + dst.SecurityContext = src.SecurityContext + } + if src.Stdin { + dst.Stdin = src.Stdin + } + if src.StdinOnce { + dst.StdinOnce = src.StdinOnce + } + if src.TTY { + dst.TTY = 
// Generated overlay helpers (continued; formatting flattened by patch
// extraction — regenerate rather than hand-edit). Closes overlayContainer,
// then overlayMapListSliceContainerPort — list-map merge keyed by the
// composite ContainerPort+Protocol (joined with ",") — plus the key helpers
// Int32ToString (strconv.FormatInt) and ProtocolToString, then
// overlayContainerPort, overlayProtocol, overlayAtomicListSliceEnvFromSource,
// overlayMapListSliceEnvVar (keyed by Name), and the `func` keyword of
// overlayEnvVar.
src.TTY + } +} +func overlayMapListSliceContainerPort(dst, src *[]v1.ContainerPort) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, Int32ToString(item.ContainerPort)) + keys = append(keys, ProtocolToString(item.Protocol)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, Int32ToString(item.ContainerPort)) + keys = append(keys, ProtocolToString(item.Protocol)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayContainerPort(&((*dst)[di]), &((*src)[si])) + } +} +func Int32ToString(val int32) string { + return strconv.FormatInt(int64(val), 10) +} +func ProtocolToString(val v1.Protocol) string { + return StringToString(string(val)) +} +func overlayContainerPort(dst, src *v1.ContainerPort) { + if src.Name != "" { + dst.Name = src.Name + } + if src.HostPort != 0 { + dst.HostPort = src.HostPort + } + if src.ContainerPort != 0 { + dst.ContainerPort = src.ContainerPort + } + overlayProtocol(&(dst.Protocol), &(src.Protocol)) + if src.HostIP != "" { + dst.HostIP = src.HostIP + } +} +func overlayProtocol(dst, src *v1.Protocol) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayAtomicListSliceEnvFromSource(dst, src *[]v1.EnvFromSource) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayMapListSliceEnvVar(dst, src *[]v1.EnvVar) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayEnvVar(&((*dst)[di]), &((*src)[si])) + } +} +func 
// overlayEnvVar body, then overlayEnvVarSource (the four mutually exclusive
// refs each merged atomically when both non-nil), the four one-line
// overlayAtomic* selector helpers (*dst = *src wholesale), and the opening
// of overlayResourceRequirements (Limits/Requests per-key).
overlayEnvVar(dst, src *v1.EnvVar) { + if src.Name != "" { + dst.Name = src.Name + } + if src.Value != "" { + dst.Value = src.Value + } + if dst.ValueFrom != nil && src.ValueFrom != nil { + overlayEnvVarSource(dst.ValueFrom, src.ValueFrom) + } else if dst.ValueFrom == nil { + dst.ValueFrom = src.ValueFrom + } +} +func overlayEnvVarSource(dst, src *v1.EnvVarSource) { + if dst.FieldRef != nil && src.FieldRef != nil { + overlayAtomicObjectFieldSelector(dst.FieldRef, src.FieldRef) + } else if dst.FieldRef == nil { + dst.FieldRef = src.FieldRef + } + if dst.ResourceFieldRef != nil && src.ResourceFieldRef != nil { + overlayAtomicResourceFieldSelector(dst.ResourceFieldRef, src.ResourceFieldRef) + } else if dst.ResourceFieldRef == nil { + dst.ResourceFieldRef = src.ResourceFieldRef + } + if dst.ConfigMapKeyRef != nil && src.ConfigMapKeyRef != nil { + overlayAtomicConfigMapKeySelector(dst.ConfigMapKeyRef, src.ConfigMapKeyRef) + } else if dst.ConfigMapKeyRef == nil { + dst.ConfigMapKeyRef = src.ConfigMapKeyRef + } + if dst.SecretKeyRef != nil && src.SecretKeyRef != nil { + overlayAtomicSecretKeySelector(dst.SecretKeyRef, src.SecretKeyRef) + } else if dst.SecretKeyRef == nil { + dst.SecretKeyRef = src.SecretKeyRef + } +} +func overlayAtomicObjectFieldSelector(dst, src *v1.ObjectFieldSelector) { + *dst = *src +} +func overlayAtomicResourceFieldSelector(dst, src *v1.ResourceFieldSelector) { + *dst = *src +} +func overlayAtomicConfigMapKeySelector(dst, src *v1.ConfigMapKeySelector) { + *dst = *src +} +func overlayAtomicSecretKeySelector(dst, src *v1.SecretKeySelector) { + *dst = *src +} +func overlayResourceRequirements(dst, src *v1.ResourceRequirements) { + if dst.Limits != nil && src.Limits != nil { + overlayResourceList(&(dst.Limits), &(src.Limits)) + } else if dst.Limits == nil { + dst.Limits = src.Limits + } + if dst.Requests != nil && src.Requests != nil { + overlayResourceList(&(dst.Requests), &(src.Requests)) + } else if dst.Requests == nil { + dst.Requests = 
// Finishes overlayResourceRequirements (Claims list-map keyed by Name), then
// overlayMapListSliceResourceClaim, overlayResourceClaim,
// overlayAtomicListSliceContainerResizePolicy, overlayContainerRestartPolicy,
// overlayMapListSliceVolumeMount — NOTE(review): VolumeMounts are keyed by
// MountPath (not Name), matching the API's listMapKey for that field — and
// opens overlayVolumeMount.
src.Requests + } + if dst.Claims != nil && src.Claims != nil { + overlayMapListSliceResourceClaim(&(dst.Claims), &(src.Claims)) + } else if dst.Claims == nil { + dst.Claims = src.Claims + } +} +func overlayMapListSliceResourceClaim(dst, src *[]v1.ResourceClaim) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayResourceClaim(&((*dst)[di]), &((*src)[si])) + } +} +func overlayResourceClaim(dst, src *v1.ResourceClaim) { + if src.Name != "" { + dst.Name = src.Name + } + if src.Request != "" { + dst.Request = src.Request + } +} +func overlayAtomicListSliceContainerResizePolicy(dst, src *[]v1.ContainerResizePolicy) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayContainerRestartPolicy(dst, src *v1.ContainerRestartPolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayMapListSliceVolumeMount(dst, src *[]v1.VolumeMount) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.MountPath)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.MountPath)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayVolumeMount(&((*dst)[di]), &((*src)[si])) + } +} +func overlayVolumeMount(dst, src *v1.VolumeMount) { + if src.Name != "" { + dst.Name = src.Name + } + if src.ReadOnly { + dst.ReadOnly = src.ReadOnly + } + if dst.RecursiveReadOnly != nil && src.RecursiveReadOnly != nil { + 
// Generated overlay helpers (continued; formatting flattened by patch
// extraction — regenerate rather than hand-edit). Finishes overlayVolumeMount,
// then overlayRecursiveReadOnlyMode, overlayMountPropagationMode,
// overlayMapListSliceVolumeDevice (list-map keyed by DevicePath),
// overlayVolumeDevice, and opens overlayProbe (handler merged first, then
// the non-zero timing/threshold scalars).
overlayRecursiveReadOnlyMode(dst.RecursiveReadOnly, src.RecursiveReadOnly) + } else if dst.RecursiveReadOnly == nil { + dst.RecursiveReadOnly = src.RecursiveReadOnly + } + if src.MountPath != "" { + dst.MountPath = src.MountPath + } + if src.SubPath != "" { + dst.SubPath = src.SubPath + } + if dst.MountPropagation != nil && src.MountPropagation != nil { + overlayMountPropagationMode(dst.MountPropagation, src.MountPropagation) + } else if dst.MountPropagation == nil { + dst.MountPropagation = src.MountPropagation + } + if src.SubPathExpr != "" { + dst.SubPathExpr = src.SubPathExpr + } +} +func overlayRecursiveReadOnlyMode(dst, src *v1.RecursiveReadOnlyMode) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayMountPropagationMode(dst, src *v1.MountPropagationMode) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayMapListSliceVolumeDevice(dst, src *[]v1.VolumeDevice) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.DevicePath)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.DevicePath)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayVolumeDevice(&((*dst)[di]), &((*src)[si])) + } +} +func overlayVolumeDevice(dst, src *v1.VolumeDevice) { + if src.Name != "" { + dst.Name = src.Name + } + if src.DevicePath != "" { + dst.DevicePath = src.DevicePath + } +} +func overlayProbe(dst, src *v1.Probe) { + overlayProbeHandler(&(dst.ProbeHandler), &(src.ProbeHandler)) + if src.InitialDelaySeconds != 0 { + dst.InitialDelaySeconds = src.InitialDelaySeconds + } + if src.TimeoutSeconds != 0 { + dst.TimeoutSeconds = src.TimeoutSeconds + } + if src.PeriodSeconds != 0 { + dst.PeriodSeconds = src.PeriodSeconds + } + if 
// Finishes overlayProbe, then overlayProbeHandler (each of Exec/HTTPGet/
// TCPSocket/GRPC merged recursively when both non-nil), overlayExecAction,
// overlayHTTPGetAction (HTTPHeaders replaced atomically), and opens
// overlayIntOrString, which merges Type, IntVal and StrVal independently.
src.SuccessThreshold != 0 { + dst.SuccessThreshold = src.SuccessThreshold + } + if src.FailureThreshold != 0 { + dst.FailureThreshold = src.FailureThreshold + } + if dst.TerminationGracePeriodSeconds != nil && src.TerminationGracePeriodSeconds != nil { + if *src.TerminationGracePeriodSeconds != 0 { + *dst.TerminationGracePeriodSeconds = *src.TerminationGracePeriodSeconds + } + } else if dst.TerminationGracePeriodSeconds == nil { + dst.TerminationGracePeriodSeconds = src.TerminationGracePeriodSeconds + } +} +func overlayProbeHandler(dst, src *v1.ProbeHandler) { + if dst.Exec != nil && src.Exec != nil { + overlayExecAction(dst.Exec, src.Exec) + } else if dst.Exec == nil { + dst.Exec = src.Exec + } + if dst.HTTPGet != nil && src.HTTPGet != nil { + overlayHTTPGetAction(dst.HTTPGet, src.HTTPGet) + } else if dst.HTTPGet == nil { + dst.HTTPGet = src.HTTPGet + } + if dst.TCPSocket != nil && src.TCPSocket != nil { + overlayTCPSocketAction(dst.TCPSocket, src.TCPSocket) + } else if dst.TCPSocket == nil { + dst.TCPSocket = src.TCPSocket + } + if dst.GRPC != nil && src.GRPC != nil { + overlayGRPCAction(dst.GRPC, src.GRPC) + } else if dst.GRPC == nil { + dst.GRPC = src.GRPC + } +} +func overlayExecAction(dst, src *v1.ExecAction) { + if dst.Command != nil && src.Command != nil { + overlayAtomicListSliceString(&(dst.Command), &(src.Command)) + } else if dst.Command == nil { + dst.Command = src.Command + } +} +func overlayHTTPGetAction(dst, src *v1.HTTPGetAction) { + if src.Path != "" { + dst.Path = src.Path + } + overlayIntOrString(&(dst.Port), &(src.Port)) + if src.Host != "" { + dst.Host = src.Host + } + overlayURIScheme(&(dst.Scheme), &(src.Scheme)) + if dst.HTTPHeaders != nil && src.HTTPHeaders != nil { + overlayAtomicListSliceHTTPHeader(&(dst.HTTPHeaders), &(src.HTTPHeaders)) + } else if dst.HTTPHeaders == nil { + dst.HTTPHeaders = src.HTTPHeaders + } +} +func overlayIntOrString(dst, src *intstr.IntOrString) { + overlayType(&(dst.Type), &(src.Type)) + if src.IntVal != 0 { + 
// Finishes overlayIntOrString; overlayType converts intstr.Type through
// (*int64) and copies only non-zero values — NOTE(review): intstr.Type's
// zero value is intstr.Int, so a src explicitly set to Int can never
// overwrite a dst of String here; Type/IntVal/StrVal are also merged
// independently, so a mixed result is possible — presumably acceptable for
// overlay semantics, verify against the generator's intent. Then
// overlayURIScheme, overlayAtomicListSliceHTTPHeader, overlayTCPSocketAction,
// overlayGRPCAction, overlayLifecycle, and overlayLifecycleHandler
// (overlaySleepAction is defined past the end of this chunk).
dst.IntVal = src.IntVal + } + if src.StrVal != "" { + dst.StrVal = src.StrVal + } +} +func overlayType(dst, src *intstr.Type) { + ndst := (*int64)(dst) + nsrc := (*int64)(src) + if *nsrc != 0 { + *ndst = *nsrc + } +} +func overlayURIScheme(dst, src *v1.URIScheme) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayAtomicListSliceHTTPHeader(dst, src *[]v1.HTTPHeader) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayTCPSocketAction(dst, src *v1.TCPSocketAction) { + overlayIntOrString(&(dst.Port), &(src.Port)) + if src.Host != "" { + dst.Host = src.Host + } +} +func overlayGRPCAction(dst, src *v1.GRPCAction) { + if src.Port != 0 { + dst.Port = src.Port + } + if dst.Service != nil && src.Service != nil { + if *src.Service != "" { + *dst.Service = *src.Service + } + } else if dst.Service == nil { + dst.Service = src.Service + } +} +func overlayLifecycle(dst, src *v1.Lifecycle) { + if dst.PostStart != nil && src.PostStart != nil { + overlayLifecycleHandler(dst.PostStart, src.PostStart) + } else if dst.PostStart == nil { + dst.PostStart = src.PostStart + } + if dst.PreStop != nil && src.PreStop != nil { + overlayLifecycleHandler(dst.PreStop, src.PreStop) + } else if dst.PreStop == nil { + dst.PreStop = src.PreStop + } +} +func overlayLifecycleHandler(dst, src *v1.LifecycleHandler) { + if dst.Exec != nil && src.Exec != nil { + overlayExecAction(dst.Exec, src.Exec) + } else if dst.Exec == nil { + dst.Exec = src.Exec + } + if dst.HTTPGet != nil && src.HTTPGet != nil { + overlayHTTPGetAction(dst.HTTPGet, src.HTTPGet) + } else if dst.HTTPGet == nil { + dst.HTTPGet = src.HTTPGet + } + if dst.TCPSocket != nil && src.TCPSocket != nil { + overlayTCPSocketAction(dst.TCPSocket, src.TCPSocket) + } else if dst.TCPSocket == nil { + dst.TCPSocket = src.TCPSocket + } + if dst.Sleep != nil && src.Sleep != nil { + overlaySleepAction(dst.Sleep, src.Sleep) + } else if dst.Sleep == nil { + dst.Sleep = src.Sleep + } +} 
+func overlaySleepAction(dst, src *v1.SleepAction) { + if src.Seconds != 0 { + dst.Seconds = src.Seconds + } +} +func overlayTerminationMessagePolicy(dst, src *v1.TerminationMessagePolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlaySecurityContext(dst, src *v1.SecurityContext) { + if dst.Capabilities != nil && src.Capabilities != nil { + overlayCapabilities(dst.Capabilities, src.Capabilities) + } else if dst.Capabilities == nil { + dst.Capabilities = src.Capabilities + } + if dst.Privileged != nil && src.Privileged != nil { + if *src.Privileged { + *dst.Privileged = *src.Privileged + } + } else if dst.Privileged == nil { + dst.Privileged = src.Privileged + } + if dst.SELinuxOptions != nil && src.SELinuxOptions != nil { + overlaySELinuxOptions(dst.SELinuxOptions, src.SELinuxOptions) + } else if dst.SELinuxOptions == nil { + dst.SELinuxOptions = src.SELinuxOptions + } + if dst.WindowsOptions != nil && src.WindowsOptions != nil { + overlayWindowsSecurityContextOptions(dst.WindowsOptions, src.WindowsOptions) + } else if dst.WindowsOptions == nil { + dst.WindowsOptions = src.WindowsOptions + } + if dst.RunAsUser != nil && src.RunAsUser != nil { + if *src.RunAsUser != 0 { + *dst.RunAsUser = *src.RunAsUser + } + } else if dst.RunAsUser == nil { + dst.RunAsUser = src.RunAsUser + } + if dst.RunAsGroup != nil && src.RunAsGroup != nil { + if *src.RunAsGroup != 0 { + *dst.RunAsGroup = *src.RunAsGroup + } + } else if dst.RunAsGroup == nil { + dst.RunAsGroup = src.RunAsGroup + } + if dst.RunAsNonRoot != nil && src.RunAsNonRoot != nil { + if *src.RunAsNonRoot { + *dst.RunAsNonRoot = *src.RunAsNonRoot + } + } else if dst.RunAsNonRoot == nil { + dst.RunAsNonRoot = src.RunAsNonRoot + } + if dst.ReadOnlyRootFilesystem != nil && src.ReadOnlyRootFilesystem != nil { + if *src.ReadOnlyRootFilesystem { + *dst.ReadOnlyRootFilesystem = *src.ReadOnlyRootFilesystem + } + } else if dst.ReadOnlyRootFilesystem == nil { + 
dst.ReadOnlyRootFilesystem = src.ReadOnlyRootFilesystem + } + if dst.AllowPrivilegeEscalation != nil && src.AllowPrivilegeEscalation != nil { + if *src.AllowPrivilegeEscalation { + *dst.AllowPrivilegeEscalation = *src.AllowPrivilegeEscalation + } + } else if dst.AllowPrivilegeEscalation == nil { + dst.AllowPrivilegeEscalation = src.AllowPrivilegeEscalation + } + if dst.ProcMount != nil && src.ProcMount != nil { + overlayProcMountType(dst.ProcMount, src.ProcMount) + } else if dst.ProcMount == nil { + dst.ProcMount = src.ProcMount + } + if dst.SeccompProfile != nil && src.SeccompProfile != nil { + overlaySeccompProfile(dst.SeccompProfile, src.SeccompProfile) + } else if dst.SeccompProfile == nil { + dst.SeccompProfile = src.SeccompProfile + } + if dst.AppArmorProfile != nil && src.AppArmorProfile != nil { + overlayAppArmorProfile(dst.AppArmorProfile, src.AppArmorProfile) + } else if dst.AppArmorProfile == nil { + dst.AppArmorProfile = src.AppArmorProfile + } +} +func overlayCapabilities(dst, src *v1.Capabilities) { + if dst.Add != nil && src.Add != nil { + overlayAtomicListSliceCapability(&(dst.Add), &(src.Add)) + } else if dst.Add == nil { + dst.Add = src.Add + } + if dst.Drop != nil && src.Drop != nil { + overlayAtomicListSliceCapability(&(dst.Drop), &(src.Drop)) + } else if dst.Drop == nil { + dst.Drop = src.Drop + } +} +func overlayAtomicListSliceCapability(dst, src *[]v1.Capability) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlaySELinuxOptions(dst, src *v1.SELinuxOptions) { + if src.User != "" { + dst.User = src.User + } + if src.Role != "" { + dst.Role = src.Role + } + if src.Type != "" { + dst.Type = src.Type + } + if src.Level != "" { + dst.Level = src.Level + } +} +func overlayWindowsSecurityContextOptions(dst, src *v1.WindowsSecurityContextOptions) { + if dst.GMSACredentialSpecName != nil && src.GMSACredentialSpecName != nil { + if *src.GMSACredentialSpecName != "" { + *dst.GMSACredentialSpecName = *src.GMSACredentialSpecName + } + } 
else if dst.GMSACredentialSpecName == nil { + dst.GMSACredentialSpecName = src.GMSACredentialSpecName + } + if dst.GMSACredentialSpec != nil && src.GMSACredentialSpec != nil { + if *src.GMSACredentialSpec != "" { + *dst.GMSACredentialSpec = *src.GMSACredentialSpec + } + } else if dst.GMSACredentialSpec == nil { + dst.GMSACredentialSpec = src.GMSACredentialSpec + } + if dst.RunAsUserName != nil && src.RunAsUserName != nil { + if *src.RunAsUserName != "" { + *dst.RunAsUserName = *src.RunAsUserName + } + } else if dst.RunAsUserName == nil { + dst.RunAsUserName = src.RunAsUserName + } + if dst.HostProcess != nil && src.HostProcess != nil { + if *src.HostProcess { + *dst.HostProcess = *src.HostProcess + } + } else if dst.HostProcess == nil { + dst.HostProcess = src.HostProcess + } +} +func overlayProcMountType(dst, src *v1.ProcMountType) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlaySeccompProfile(dst, src *v1.SeccompProfile) { + overlaySeccompProfileType(&(dst.Type), &(src.Type)) + if dst.LocalhostProfile != nil && src.LocalhostProfile != nil { + if *src.LocalhostProfile != "" { + *dst.LocalhostProfile = *src.LocalhostProfile + } + } else if dst.LocalhostProfile == nil { + dst.LocalhostProfile = src.LocalhostProfile + } +} +func overlaySeccompProfileType(dst, src *v1.SeccompProfileType) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayAppArmorProfile(dst, src *v1.AppArmorProfile) { + overlayAppArmorProfileType(&(dst.Type), &(src.Type)) + if dst.LocalhostProfile != nil && src.LocalhostProfile != nil { + if *src.LocalhostProfile != "" { + *dst.LocalhostProfile = *src.LocalhostProfile + } + } else if dst.LocalhostProfile == nil { + dst.LocalhostProfile = src.LocalhostProfile + } +} +func overlayAppArmorProfileType(dst, src *v1.AppArmorProfileType) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func 
overlayMapListSliceEphemeralContainer(dst, src *[]v1.EphemeralContainer) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayEphemeralContainer(&((*dst)[di]), &((*src)[si])) + } +} +func overlayEphemeralContainer(dst, src *v1.EphemeralContainer) { + overlayEphemeralContainerCommon(&(dst.EphemeralContainerCommon), &(src.EphemeralContainerCommon)) + if src.TargetContainerName != "" { + dst.TargetContainerName = src.TargetContainerName + } +} +func overlayEphemeralContainerCommon(dst, src *v1.EphemeralContainerCommon) { + if src.Name != "" { + dst.Name = src.Name + } + if src.Image != "" { + dst.Image = src.Image + } + if dst.Command != nil && src.Command != nil { + overlayAtomicListSliceString(&(dst.Command), &(src.Command)) + } else if dst.Command == nil { + dst.Command = src.Command + } + if dst.Args != nil && src.Args != nil { + overlayAtomicListSliceString(&(dst.Args), &(src.Args)) + } else if dst.Args == nil { + dst.Args = src.Args + } + if src.WorkingDir != "" { + dst.WorkingDir = src.WorkingDir + } + if dst.Ports != nil && src.Ports != nil { + overlayMapListSliceContainerPort(&(dst.Ports), &(src.Ports)) + } else if dst.Ports == nil { + dst.Ports = src.Ports + } + if dst.EnvFrom != nil && src.EnvFrom != nil { + overlayAtomicListSliceEnvFromSource(&(dst.EnvFrom), &(src.EnvFrom)) + } else if dst.EnvFrom == nil { + dst.EnvFrom = src.EnvFrom + } + if dst.Env != nil && src.Env != nil { + overlayMapListSliceEnvVar(&(dst.Env), &(src.Env)) + } else if dst.Env == nil { + dst.Env = src.Env + } + overlayResourceRequirements(&(dst.Resources), &(src.Resources)) + if dst.ResizePolicy != nil && src.ResizePolicy != 
nil { + overlayAtomicListSliceContainerResizePolicy(&(dst.ResizePolicy), &(src.ResizePolicy)) + } else if dst.ResizePolicy == nil { + dst.ResizePolicy = src.ResizePolicy + } + if dst.RestartPolicy != nil && src.RestartPolicy != nil { + overlayContainerRestartPolicy(dst.RestartPolicy, src.RestartPolicy) + } else if dst.RestartPolicy == nil { + dst.RestartPolicy = src.RestartPolicy + } + if dst.VolumeMounts != nil && src.VolumeMounts != nil { + overlayMapListSliceVolumeMount(&(dst.VolumeMounts), &(src.VolumeMounts)) + } else if dst.VolumeMounts == nil { + dst.VolumeMounts = src.VolumeMounts + } + if dst.VolumeDevices != nil && src.VolumeDevices != nil { + overlayMapListSliceVolumeDevice(&(dst.VolumeDevices), &(src.VolumeDevices)) + } else if dst.VolumeDevices == nil { + dst.VolumeDevices = src.VolumeDevices + } + if dst.LivenessProbe != nil && src.LivenessProbe != nil { + overlayProbe(dst.LivenessProbe, src.LivenessProbe) + } else if dst.LivenessProbe == nil { + dst.LivenessProbe = src.LivenessProbe + } + if dst.ReadinessProbe != nil && src.ReadinessProbe != nil { + overlayProbe(dst.ReadinessProbe, src.ReadinessProbe) + } else if dst.ReadinessProbe == nil { + dst.ReadinessProbe = src.ReadinessProbe + } + if dst.StartupProbe != nil && src.StartupProbe != nil { + overlayProbe(dst.StartupProbe, src.StartupProbe) + } else if dst.StartupProbe == nil { + dst.StartupProbe = src.StartupProbe + } + if dst.Lifecycle != nil && src.Lifecycle != nil { + overlayLifecycle(dst.Lifecycle, src.Lifecycle) + } else if dst.Lifecycle == nil { + dst.Lifecycle = src.Lifecycle + } + if src.TerminationMessagePath != "" { + dst.TerminationMessagePath = src.TerminationMessagePath + } + overlayTerminationMessagePolicy(&(dst.TerminationMessagePolicy), &(src.TerminationMessagePolicy)) + overlayPullPolicy(&(dst.ImagePullPolicy), &(src.ImagePullPolicy)) + if dst.SecurityContext != nil && src.SecurityContext != nil { + overlaySecurityContext(dst.SecurityContext, src.SecurityContext) + } else if 
dst.SecurityContext == nil { + dst.SecurityContext = src.SecurityContext + } + if src.Stdin { + dst.Stdin = src.Stdin + } + if src.StdinOnce { + dst.StdinOnce = src.StdinOnce + } + if src.TTY { + dst.TTY = src.TTY + } +} +func overlayRestartPolicy(dst, src *v1.RestartPolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayDNSPolicy(dst, src *v1.DNSPolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayAtomicMapStringToString(dst, src *map[string]string) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayPodSecurityContext(dst, src *v1.PodSecurityContext) { + if dst.SELinuxOptions != nil && src.SELinuxOptions != nil { + overlaySELinuxOptions(dst.SELinuxOptions, src.SELinuxOptions) + } else if dst.SELinuxOptions == nil { + dst.SELinuxOptions = src.SELinuxOptions + } + if dst.WindowsOptions != nil && src.WindowsOptions != nil { + overlayWindowsSecurityContextOptions(dst.WindowsOptions, src.WindowsOptions) + } else if dst.WindowsOptions == nil { + dst.WindowsOptions = src.WindowsOptions + } + if dst.RunAsUser != nil && src.RunAsUser != nil { + if *src.RunAsUser != 0 { + *dst.RunAsUser = *src.RunAsUser + } + } else if dst.RunAsUser == nil { + dst.RunAsUser = src.RunAsUser + } + if dst.RunAsGroup != nil && src.RunAsGroup != nil { + if *src.RunAsGroup != 0 { + *dst.RunAsGroup = *src.RunAsGroup + } + } else if dst.RunAsGroup == nil { + dst.RunAsGroup = src.RunAsGroup + } + if dst.RunAsNonRoot != nil && src.RunAsNonRoot != nil { + if *src.RunAsNonRoot { + *dst.RunAsNonRoot = *src.RunAsNonRoot + } + } else if dst.RunAsNonRoot == nil { + dst.RunAsNonRoot = src.RunAsNonRoot + } + if dst.SupplementalGroups != nil && src.SupplementalGroups != nil { + overlayAtomicListSliceInt64(&(dst.SupplementalGroups), &(src.SupplementalGroups)) + } else if dst.SupplementalGroups == nil { + dst.SupplementalGroups = src.SupplementalGroups + } + if 
dst.SupplementalGroupsPolicy != nil && src.SupplementalGroupsPolicy != nil { + overlaySupplementalGroupsPolicy(dst.SupplementalGroupsPolicy, src.SupplementalGroupsPolicy) + } else if dst.SupplementalGroupsPolicy == nil { + dst.SupplementalGroupsPolicy = src.SupplementalGroupsPolicy + } + if dst.FSGroup != nil && src.FSGroup != nil { + if *src.FSGroup != 0 { + *dst.FSGroup = *src.FSGroup + } + } else if dst.FSGroup == nil { + dst.FSGroup = src.FSGroup + } + if dst.Sysctls != nil && src.Sysctls != nil { + overlayAtomicListSliceSysctl(&(dst.Sysctls), &(src.Sysctls)) + } else if dst.Sysctls == nil { + dst.Sysctls = src.Sysctls + } + if dst.FSGroupChangePolicy != nil && src.FSGroupChangePolicy != nil { + overlayPodFSGroupChangePolicy(dst.FSGroupChangePolicy, src.FSGroupChangePolicy) + } else if dst.FSGroupChangePolicy == nil { + dst.FSGroupChangePolicy = src.FSGroupChangePolicy + } + if dst.SeccompProfile != nil && src.SeccompProfile != nil { + overlaySeccompProfile(dst.SeccompProfile, src.SeccompProfile) + } else if dst.SeccompProfile == nil { + dst.SeccompProfile = src.SeccompProfile + } + if dst.AppArmorProfile != nil && src.AppArmorProfile != nil { + overlayAppArmorProfile(dst.AppArmorProfile, src.AppArmorProfile) + } else if dst.AppArmorProfile == nil { + dst.AppArmorProfile = src.AppArmorProfile + } +} +func overlayAtomicListSliceInt64(dst, src *[]int64) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlaySupplementalGroupsPolicy(dst, src *v1.SupplementalGroupsPolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayAtomicListSliceSysctl(dst, src *[]v1.Sysctl) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayPodFSGroupChangePolicy(dst, src *v1.PodFSGroupChangePolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayMapListSliceLocalObjectReference(dst, src *[]v1.LocalObjectReference) { + m := map[string]int{} + for i 
:= range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayAtomicLocalObjectReference(&((*dst)[di]), &((*src)[si])) + } +} +func overlayAffinity(dst, src *v1.Affinity) { + if dst.NodeAffinity != nil && src.NodeAffinity != nil { + overlayNodeAffinity(dst.NodeAffinity, src.NodeAffinity) + } else if dst.NodeAffinity == nil { + dst.NodeAffinity = src.NodeAffinity + } + if dst.PodAffinity != nil && src.PodAffinity != nil { + overlayPodAffinity(dst.PodAffinity, src.PodAffinity) + } else if dst.PodAffinity == nil { + dst.PodAffinity = src.PodAffinity + } + if dst.PodAntiAffinity != nil && src.PodAntiAffinity != nil { + overlayPodAntiAffinity(dst.PodAntiAffinity, src.PodAntiAffinity) + } else if dst.PodAntiAffinity == nil { + dst.PodAntiAffinity = src.PodAntiAffinity + } +} +func overlayNodeAffinity(dst, src *v1.NodeAffinity) { + if dst.RequiredDuringSchedulingIgnoredDuringExecution != nil && src.RequiredDuringSchedulingIgnoredDuringExecution != nil { + overlayAtomicNodeSelector(dst.RequiredDuringSchedulingIgnoredDuringExecution, src.RequiredDuringSchedulingIgnoredDuringExecution) + } else if dst.RequiredDuringSchedulingIgnoredDuringExecution == nil { + dst.RequiredDuringSchedulingIgnoredDuringExecution = src.RequiredDuringSchedulingIgnoredDuringExecution + } + if dst.PreferredDuringSchedulingIgnoredDuringExecution != nil && src.PreferredDuringSchedulingIgnoredDuringExecution != nil { + overlayAtomicListSlicePreferredSchedulingTerm(&(dst.PreferredDuringSchedulingIgnoredDuringExecution), &(src.PreferredDuringSchedulingIgnoredDuringExecution)) + } else if dst.PreferredDuringSchedulingIgnoredDuringExecution == nil { + 
dst.PreferredDuringSchedulingIgnoredDuringExecution = src.PreferredDuringSchedulingIgnoredDuringExecution + } +} +func overlayAtomicNodeSelector(dst, src *v1.NodeSelector) { + *dst = *src +} +func overlayAtomicListSlicePreferredSchedulingTerm(dst, src *[]v1.PreferredSchedulingTerm) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayPodAffinity(dst, src *v1.PodAffinity) { + if dst.RequiredDuringSchedulingIgnoredDuringExecution != nil && src.RequiredDuringSchedulingIgnoredDuringExecution != nil { + overlayAtomicListSlicePodAffinityTerm(&(dst.RequiredDuringSchedulingIgnoredDuringExecution), &(src.RequiredDuringSchedulingIgnoredDuringExecution)) + } else if dst.RequiredDuringSchedulingIgnoredDuringExecution == nil { + dst.RequiredDuringSchedulingIgnoredDuringExecution = src.RequiredDuringSchedulingIgnoredDuringExecution + } + if dst.PreferredDuringSchedulingIgnoredDuringExecution != nil && src.PreferredDuringSchedulingIgnoredDuringExecution != nil { + overlayAtomicListSliceWeightedPodAffinityTerm(&(dst.PreferredDuringSchedulingIgnoredDuringExecution), &(src.PreferredDuringSchedulingIgnoredDuringExecution)) + } else if dst.PreferredDuringSchedulingIgnoredDuringExecution == nil { + dst.PreferredDuringSchedulingIgnoredDuringExecution = src.PreferredDuringSchedulingIgnoredDuringExecution + } +} +func overlayAtomicListSlicePodAffinityTerm(dst, src *[]v1.PodAffinityTerm) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayAtomicListSliceWeightedPodAffinityTerm(dst, src *[]v1.WeightedPodAffinityTerm) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayPodAntiAffinity(dst, src *v1.PodAntiAffinity) { + if dst.RequiredDuringSchedulingIgnoredDuringExecution != nil && src.RequiredDuringSchedulingIgnoredDuringExecution != nil { + overlayAtomicListSlicePodAffinityTerm(&(dst.RequiredDuringSchedulingIgnoredDuringExecution), &(src.RequiredDuringSchedulingIgnoredDuringExecution)) + } else if dst.RequiredDuringSchedulingIgnoredDuringExecution 
== nil { + dst.RequiredDuringSchedulingIgnoredDuringExecution = src.RequiredDuringSchedulingIgnoredDuringExecution + } + if dst.PreferredDuringSchedulingIgnoredDuringExecution != nil && src.PreferredDuringSchedulingIgnoredDuringExecution != nil { + overlayAtomicListSliceWeightedPodAffinityTerm(&(dst.PreferredDuringSchedulingIgnoredDuringExecution), &(src.PreferredDuringSchedulingIgnoredDuringExecution)) + } else if dst.PreferredDuringSchedulingIgnoredDuringExecution == nil { + dst.PreferredDuringSchedulingIgnoredDuringExecution = src.PreferredDuringSchedulingIgnoredDuringExecution + } +} +func overlayAtomicListSliceToleration(dst, src *[]v1.Toleration) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayMapListSliceHostAlias(dst, src *[]v1.HostAlias) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.IP)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.IP)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayHostAlias(&((*dst)[di]), &((*src)[si])) + } +} +func overlayHostAlias(dst, src *v1.HostAlias) { + if src.IP != "" { + dst.IP = src.IP + } + if dst.Hostnames != nil && src.Hostnames != nil { + overlayAtomicListSliceString(&(dst.Hostnames), &(src.Hostnames)) + } else if dst.Hostnames == nil { + dst.Hostnames = src.Hostnames + } +} +func overlayPodDNSConfig(dst, src *v1.PodDNSConfig) { + if dst.Nameservers != nil && src.Nameservers != nil { + overlayAtomicListSliceString(&(dst.Nameservers), &(src.Nameservers)) + } else if dst.Nameservers == nil { + dst.Nameservers = src.Nameservers + } + if dst.Searches != nil && src.Searches != nil { + overlayAtomicListSliceString(&(dst.Searches), &(src.Searches)) + } else if dst.Searches == nil { + dst.Searches = src.Searches + } + if dst.Options != nil && 
src.Options != nil { + overlayAtomicListSlicePodDNSConfigOption(&(dst.Options), &(src.Options)) + } else if dst.Options == nil { + dst.Options = src.Options + } +} +func overlayAtomicListSlicePodDNSConfigOption(dst, src *[]v1.PodDNSConfigOption) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayAtomicListSlicePodReadinessGate(dst, src *[]v1.PodReadinessGate) { + if len(*src) == 0 { + return + } + *dst = *src +} +func overlayPreemptionPolicy(dst, src *v1.PreemptionPolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayMapListSliceTopologySpreadConstraint(dst, src *[]v1.TopologySpreadConstraint) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.TopologyKey)) + keys = append(keys, UnsatisfiableConstraintActionToString(item.WhenUnsatisfiable)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.TopologyKey)) + keys = append(keys, UnsatisfiableConstraintActionToString(item.WhenUnsatisfiable)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayTopologySpreadConstraint(&((*dst)[di]), &((*src)[si])) + } +} +func UnsatisfiableConstraintActionToString(val v1.UnsatisfiableConstraintAction) string { + return StringToString(string(val)) +} +func overlayTopologySpreadConstraint(dst, src *v1.TopologySpreadConstraint) { + if src.MaxSkew != 0 { + dst.MaxSkew = src.MaxSkew + } + if src.TopologyKey != "" { + dst.TopologyKey = src.TopologyKey + } + overlayUnsatisfiableConstraintAction(&(dst.WhenUnsatisfiable), &(src.WhenUnsatisfiable)) + if dst.LabelSelector != nil && src.LabelSelector != nil { + overlayLabelSelector(dst.LabelSelector, src.LabelSelector) + } else if dst.LabelSelector == nil { + dst.LabelSelector = src.LabelSelector + } + if dst.MinDomains != 
nil && src.MinDomains != nil { + if *src.MinDomains != 0 { + *dst.MinDomains = *src.MinDomains + } + } else if dst.MinDomains == nil { + dst.MinDomains = src.MinDomains + } + if dst.NodeAffinityPolicy != nil && src.NodeAffinityPolicy != nil { + overlayNodeInclusionPolicy(dst.NodeAffinityPolicy, src.NodeAffinityPolicy) + } else if dst.NodeAffinityPolicy == nil { + dst.NodeAffinityPolicy = src.NodeAffinityPolicy + } + if dst.NodeTaintsPolicy != nil && src.NodeTaintsPolicy != nil { + overlayNodeInclusionPolicy(dst.NodeTaintsPolicy, src.NodeTaintsPolicy) + } else if dst.NodeTaintsPolicy == nil { + dst.NodeTaintsPolicy = src.NodeTaintsPolicy + } + if dst.MatchLabelKeys != nil && src.MatchLabelKeys != nil { + overlayAtomicListSliceString(&(dst.MatchLabelKeys), &(src.MatchLabelKeys)) + } else if dst.MatchLabelKeys == nil { + dst.MatchLabelKeys = src.MatchLabelKeys + } +} +func overlayUnsatisfiableConstraintAction(dst, src *v1.UnsatisfiableConstraintAction) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayNodeInclusionPolicy(dst, src *v1.NodeInclusionPolicy) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayPodOS(dst, src *v1.PodOS) { + overlayOSName(&(dst.Name), &(src.Name)) +} +func overlayOSName(dst, src *v1.OSName) { + ndst := (*string)(dst) + nsrc := (*string)(src) + if *nsrc != "" { + *ndst = *nsrc + } +} +func overlayMapListSlicePodSchedulingGate(dst, src *[]v1.PodSchedulingGate) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayPodSchedulingGate(&((*dst)[di]), &((*src)[si])) + } +} +func 
overlayPodSchedulingGate(dst, src *v1.PodSchedulingGate) { + if src.Name != "" { + dst.Name = src.Name + } +} +func overlayMapListSlicePodResourceClaim(dst, src *[]v1.PodResourceClaim) { + m := map[string]int{} + for i := range *dst { + item := (*dst)[i] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + m[key] = i + } + for si := range *src { + item := (*src)[si] + keys := []string{} + keys = append(keys, StringToString(item.Name)) + key := strings.Join(keys, ",") + di, ok := m[key] + if !ok { + *dst = append(*dst, item) + continue + } + overlayPodResourceClaim(&((*dst)[di]), &((*src)[si])) + } +} +func overlayPodResourceClaim(dst, src *v1.PodResourceClaim) { + if src.Name != "" { + dst.Name = src.Name + } + if dst.ResourceClaimName != nil && src.ResourceClaimName != nil { + if *src.ResourceClaimName != "" { + *dst.ResourceClaimName = *src.ResourceClaimName + } + } else if dst.ResourceClaimName == nil { + dst.ResourceClaimName = src.ResourceClaimName + } + if dst.ResourceClaimTemplateName != nil && src.ResourceClaimTemplateName != nil { + if *src.ResourceClaimTemplateName != "" { + *dst.ResourceClaimTemplateName = *src.ResourceClaimTemplateName + } + } else if dst.ResourceClaimTemplateName == nil { + dst.ResourceClaimTemplateName = src.ResourceClaimTemplateName + } +} diff --git a/pkg/overlay/zz_generated.overlay_test.go b/pkg/overlay/zz_generated.overlay_test.go new file mode 100644 index 00000000000..0d4586639e0 --- /dev/null +++ b/pkg/overlay/zz_generated.overlay_test.go @@ -0,0 +1,9764 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by overlay-gen. DO NOT EDIT. + +package overlay + +import ( + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +func constructPodSpec(p Policy) []Case[v1.PodSpec] { + cases := []Case[v1.PodSpec]{} + cs0 := constructMapSliceVolume(NoLimit) + cs1 := constructMapSliceContainer(NoLimit) + cs2 := constructMapSliceContainer(NoLimit) + cs3 := constructMapSliceEphemeralContainer(NoLimit) + cs4 := constructRestartPolicy(NoLimit) + cs5 := constructPointerInt64(NoLimit) + cs6 := constructPointerInt64(NoLimit) + cs7 := constructDNSPolicy(NoLimit) + cs8 := constructAtomicMapStringToString(NoLimit) + cs9 := constructString(NoLimit) + cs10 := constructString(NoLimit) + cs11 := constructPointerBool(NoLimit) + cs12 := constructString(NoLimit) + cs13 := constructBool(NoLimit) + cs14 := constructBool(NoLimit) + cs15 := constructBool(NoLimit) + cs16 := constructPointerBool(NoLimit) + cs17 := constructPointerPodSecurityContext(NoLimit) + cs18 := constructMapSliceLocalObjectReference(NoLimit) + cs19 := constructString(NoLimit) + cs20 := constructString(NoLimit) + cs21 := constructPointerAffinity(NoLimit) + cs22 := constructString(NoLimit) + cs23 := constructAtomicSliceToleration(NoLimit) + cs24 := constructMapSliceHostAlias(NoLimit) + cs25 := constructString(NoLimit) + cs26 := constructPointerInt32(NoLimit) + cs27 := constructPointerPodDNSConfig(NoLimit) + cs28 := constructAtomicSlicePodReadinessGate(NoLimit) + cs29 := 
constructPointerString(NoLimit) + cs30 := constructPointerBool(NoLimit) + cs31 := constructPointerPreemptionPolicy(NoLimit) + cs32 := constructResourceList(NoLimit) + cs33 := constructMapSliceTopologySpreadConstraint(NoLimit) + cs34 := constructPointerBool(NoLimit) + cs35 := constructPointerPodOS(NoLimit) + cs36 := constructPointerBool(NoLimit) + cs37 := constructMapSlicePodSchedulingGate(NoLimit) + cs38 := constructMapSlicePodResourceClaim(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + len(cs10), + len(cs11), + len(cs12), + len(cs13), + len(cs14), + len(cs15), + len(cs16), + len(cs17), + len(cs18), + len(cs19), + len(cs20), + len(cs21), + len(cs22), + len(cs23), + len(cs24), + len(cs25), + len(cs26), + len(cs27), + len(cs28), + len(cs29), + len(cs30), + len(cs31), + len(cs32), + len(cs33), + len(cs34), + len(cs35), + len(cs36), + len(cs37), + len(cs38), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + k10 := 0 + k11 := 0 + k12 := 0 + k13 := 0 + k14 := 0 + k15 := 0 + k16 := 0 + k17 := 0 + k18 := 0 + k19 := 0 + k20 := 0 + k21 := 0 + k22 := 0 + k23 := 0 + k24 := 0 + k25 := 0 + k26 := 0 + k27 := 0 + k28 := 0 + k29 := 0 + k30 := 0 + k31 := 0 + k32 := 0 + k33 := 0 + k34 := 0 + k35 := 0 + k36 := 0 + k37 := 0 + k38 := 0 + for i := range maxCount { + nc := Case[v1.PodSpec]{} + if i/len(cs0) > k0 { + cs0 = constructMapSliceVolume(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Volumes = c0.expected + nc.dst.Volumes = c0.dst + nc.src.Volumes = c0.src + if i/len(cs1) > k1 { + cs1 = constructMapSliceContainer(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.InitContainers = c1.expected + nc.dst.InitContainers = c1.dst + nc.src.InitContainers = c1.src + if i/len(cs2) > k2 { + cs2 = constructMapSliceContainer(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Containers = c2.expected 
+ nc.dst.Containers = c2.dst + nc.src.Containers = c2.src + if i/len(cs3) > k3 { + cs3 = constructMapSliceEphemeralContainer(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.EphemeralContainers = c3.expected + nc.dst.EphemeralContainers = c3.dst + nc.src.EphemeralContainers = c3.src + if i/len(cs4) > k4 { + cs4 = constructRestartPolicy(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.RestartPolicy = c4.expected + nc.dst.RestartPolicy = c4.dst + nc.src.RestartPolicy = c4.src + if i/len(cs5) > k5 { + cs5 = constructPointerInt64(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.TerminationGracePeriodSeconds = c5.expected + nc.dst.TerminationGracePeriodSeconds = c5.dst + nc.src.TerminationGracePeriodSeconds = c5.src + if i/len(cs6) > k6 { + cs6 = constructPointerInt64(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.ActiveDeadlineSeconds = c6.expected + nc.dst.ActiveDeadlineSeconds = c6.dst + nc.src.ActiveDeadlineSeconds = c6.src + if i/len(cs7) > k7 { + cs7 = constructDNSPolicy(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.DNSPolicy = c7.expected + nc.dst.DNSPolicy = c7.dst + nc.src.DNSPolicy = c7.src + if i/len(cs8) > k8 { + cs8 = constructAtomicMapStringToString(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.NodeSelector = c8.expected + nc.dst.NodeSelector = c8.dst + nc.src.NodeSelector = c8.src + if i/len(cs9) > k9 { + cs9 = constructString(NoLimit) + k9 += 1 + } + c9 := &cs9[i%len(cs9)] + nc.expected.ServiceAccountName = c9.expected + nc.dst.ServiceAccountName = c9.dst + nc.src.ServiceAccountName = c9.src + if i/len(cs10) > k10 { + cs10 = constructString(NoLimit) + k10 += 1 + } + c10 := &cs10[i%len(cs10)] + nc.expected.DeprecatedServiceAccount = c10.expected + nc.dst.DeprecatedServiceAccount = c10.dst + nc.src.DeprecatedServiceAccount = c10.src + if i/len(cs11) > k11 { + cs11 = constructPointerBool(NoLimit) + k11 += 1 + } + c11 := &cs11[i%len(cs11)] + 
nc.expected.AutomountServiceAccountToken = c11.expected + nc.dst.AutomountServiceAccountToken = c11.dst + nc.src.AutomountServiceAccountToken = c11.src + if i/len(cs12) > k12 { + cs12 = constructString(NoLimit) + k12 += 1 + } + c12 := &cs12[i%len(cs12)] + nc.expected.NodeName = c12.expected + nc.dst.NodeName = c12.dst + nc.src.NodeName = c12.src + if i/len(cs13) > k13 { + cs13 = constructBool(NoLimit) + k13 += 1 + } + c13 := &cs13[i%len(cs13)] + nc.expected.HostNetwork = c13.expected + nc.dst.HostNetwork = c13.dst + nc.src.HostNetwork = c13.src + if i/len(cs14) > k14 { + cs14 = constructBool(NoLimit) + k14 += 1 + } + c14 := &cs14[i%len(cs14)] + nc.expected.HostPID = c14.expected + nc.dst.HostPID = c14.dst + nc.src.HostPID = c14.src + if i/len(cs15) > k15 { + cs15 = constructBool(NoLimit) + k15 += 1 + } + c15 := &cs15[i%len(cs15)] + nc.expected.HostIPC = c15.expected + nc.dst.HostIPC = c15.dst + nc.src.HostIPC = c15.src + if i/len(cs16) > k16 { + cs16 = constructPointerBool(NoLimit) + k16 += 1 + } + c16 := &cs16[i%len(cs16)] + nc.expected.ShareProcessNamespace = c16.expected + nc.dst.ShareProcessNamespace = c16.dst + nc.src.ShareProcessNamespace = c16.src + if i/len(cs17) > k17 { + cs17 = constructPointerPodSecurityContext(NoLimit) + k17 += 1 + } + c17 := &cs17[i%len(cs17)] + nc.expected.SecurityContext = c17.expected + nc.dst.SecurityContext = c17.dst + nc.src.SecurityContext = c17.src + if i/len(cs18) > k18 { + cs18 = constructMapSliceLocalObjectReference(NoLimit) + k18 += 1 + } + c18 := &cs18[i%len(cs18)] + nc.expected.ImagePullSecrets = c18.expected + nc.dst.ImagePullSecrets = c18.dst + nc.src.ImagePullSecrets = c18.src + if i/len(cs19) > k19 { + cs19 = constructString(NoLimit) + k19 += 1 + } + c19 := &cs19[i%len(cs19)] + nc.expected.Hostname = c19.expected + nc.dst.Hostname = c19.dst + nc.src.Hostname = c19.src + if i/len(cs20) > k20 { + cs20 = constructString(NoLimit) + k20 += 1 + } + c20 := &cs20[i%len(cs20)] + nc.expected.Subdomain = c20.expected + 
nc.dst.Subdomain = c20.dst + nc.src.Subdomain = c20.src + if i/len(cs21) > k21 { + cs21 = constructPointerAffinity(NoLimit) + k21 += 1 + } + c21 := &cs21[i%len(cs21)] + nc.expected.Affinity = c21.expected + nc.dst.Affinity = c21.dst + nc.src.Affinity = c21.src + if i/len(cs22) > k22 { + cs22 = constructString(NoLimit) + k22 += 1 + } + c22 := &cs22[i%len(cs22)] + nc.expected.SchedulerName = c22.expected + nc.dst.SchedulerName = c22.dst + nc.src.SchedulerName = c22.src + if i/len(cs23) > k23 { + cs23 = constructAtomicSliceToleration(NoLimit) + k23 += 1 + } + c23 := &cs23[i%len(cs23)] + nc.expected.Tolerations = c23.expected + nc.dst.Tolerations = c23.dst + nc.src.Tolerations = c23.src + if i/len(cs24) > k24 { + cs24 = constructMapSliceHostAlias(NoLimit) + k24 += 1 + } + c24 := &cs24[i%len(cs24)] + nc.expected.HostAliases = c24.expected + nc.dst.HostAliases = c24.dst + nc.src.HostAliases = c24.src + if i/len(cs25) > k25 { + cs25 = constructString(NoLimit) + k25 += 1 + } + c25 := &cs25[i%len(cs25)] + nc.expected.PriorityClassName = c25.expected + nc.dst.PriorityClassName = c25.dst + nc.src.PriorityClassName = c25.src + if i/len(cs26) > k26 { + cs26 = constructPointerInt32(NoLimit) + k26 += 1 + } + c26 := &cs26[i%len(cs26)] + nc.expected.Priority = c26.expected + nc.dst.Priority = c26.dst + nc.src.Priority = c26.src + if i/len(cs27) > k27 { + cs27 = constructPointerPodDNSConfig(NoLimit) + k27 += 1 + } + c27 := &cs27[i%len(cs27)] + nc.expected.DNSConfig = c27.expected + nc.dst.DNSConfig = c27.dst + nc.src.DNSConfig = c27.src + if i/len(cs28) > k28 { + cs28 = constructAtomicSlicePodReadinessGate(NoLimit) + k28 += 1 + } + c28 := &cs28[i%len(cs28)] + nc.expected.ReadinessGates = c28.expected + nc.dst.ReadinessGates = c28.dst + nc.src.ReadinessGates = c28.src + if i/len(cs29) > k29 { + cs29 = constructPointerString(NoLimit) + k29 += 1 + } + c29 := &cs29[i%len(cs29)] + nc.expected.RuntimeClassName = c29.expected + nc.dst.RuntimeClassName = c29.dst + nc.src.RuntimeClassName = 
c29.src + if i/len(cs30) > k30 { + cs30 = constructPointerBool(NoLimit) + k30 += 1 + } + c30 := &cs30[i%len(cs30)] + nc.expected.EnableServiceLinks = c30.expected + nc.dst.EnableServiceLinks = c30.dst + nc.src.EnableServiceLinks = c30.src + if i/len(cs31) > k31 { + cs31 = constructPointerPreemptionPolicy(NoLimit) + k31 += 1 + } + c31 := &cs31[i%len(cs31)] + nc.expected.PreemptionPolicy = c31.expected + nc.dst.PreemptionPolicy = c31.dst + nc.src.PreemptionPolicy = c31.src + if i/len(cs32) > k32 { + cs32 = constructResourceList(NoLimit) + k32 += 1 + } + c32 := &cs32[i%len(cs32)] + nc.expected.Overhead = c32.expected + nc.dst.Overhead = c32.dst + nc.src.Overhead = c32.src + if i/len(cs33) > k33 { + cs33 = constructMapSliceTopologySpreadConstraint(NoLimit) + k33 += 1 + } + c33 := &cs33[i%len(cs33)] + nc.expected.TopologySpreadConstraints = c33.expected + nc.dst.TopologySpreadConstraints = c33.dst + nc.src.TopologySpreadConstraints = c33.src + if i/len(cs34) > k34 { + cs34 = constructPointerBool(NoLimit) + k34 += 1 + } + c34 := &cs34[i%len(cs34)] + nc.expected.SetHostnameAsFQDN = c34.expected + nc.dst.SetHostnameAsFQDN = c34.dst + nc.src.SetHostnameAsFQDN = c34.src + if i/len(cs35) > k35 { + cs35 = constructPointerPodOS(NoLimit) + k35 += 1 + } + c35 := &cs35[i%len(cs35)] + nc.expected.OS = c35.expected + nc.dst.OS = c35.dst + nc.src.OS = c35.src + if i/len(cs36) > k36 { + cs36 = constructPointerBool(NoLimit) + k36 += 1 + } + c36 := &cs36[i%len(cs36)] + nc.expected.HostUsers = c36.expected + nc.dst.HostUsers = c36.dst + nc.src.HostUsers = c36.src + if i/len(cs37) > k37 { + cs37 = constructMapSlicePodSchedulingGate(NoLimit) + k37 += 1 + } + c37 := &cs37[i%len(cs37)] + nc.expected.SchedulingGates = c37.expected + nc.dst.SchedulingGates = c37.dst + nc.src.SchedulingGates = c37.src + if i/len(cs38) > k38 { + cs38 = constructMapSlicePodResourceClaim(NoLimit) + k38 += 1 + } + c38 := &cs38[i%len(cs38)] + nc.expected.ResourceClaims = c38.expected + nc.dst.ResourceClaims = 
c38.dst + nc.src.ResourceClaims = c38.src + cases = append(cases, nc) + } + return cases +} +func constructMapSliceVolume(p Policy) []Case[[]v1.Volume] { + cases := []Case[[]v1.Volume]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructVolumeIgnore_name(NoLimit) + var nc Case[[]v1.Volume] + nc = Case[[]v1.Volume]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.Volume]{} + srcs := []v1.Volume{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.Volume]{} + srcs = []v1.Volume{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.Volume]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.Volume]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructVolumeIgnore_name(p Policy) []Case[v1.Volume] { + cases := []Case[v1.Volume]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + cs1 := constructVolumeSource(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.Volume]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructVolumeSource(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.VolumeSource = c1.expected + nc.dst.VolumeSource = c1.dst + nc.src.VolumeSource = c1.src + cases = append(cases, nc) + } + return cases +} +func constructString(p Policy) []Case[string] { + cases := []Case[string]{} + if p&(NoZero) == 0 { + cases = append(cases, Case[string]{expected: "", dst: "", src: ""}) + } + if p&(NoNotEqual) == 0 { + dst, src := randString(), randString() + cases = append(cases, Case[string]{expected: src, dst: dst, src: src}) + } + if p&(NoZero|NoNotEqual) == 0 { + dst, src := randString(), randString() + cases = append(cases, Case[string]{expected: dst, dst: dst, src: ""}) + cases = append(cases, Case[string]{expected: src, dst: "", src: src}) + } + var val string + val = randString() + cases = append(cases, Case[string]{expected: val, dst: val, src: val}) + val = randString() + cases = append(cases, Case[string]{expected: val, dst: val, src: val}) + val = randString() + cases = append(cases, Case[string]{expected: val, dst: val, src: val}) + return cases +} +func 
constructVolumeSource(p Policy) []Case[v1.VolumeSource] { + cases := []Case[v1.VolumeSource]{} + cs0 := constructPointerHostPathVolumeSource(NoLimit) + cs1 := constructPointerEmptyDirVolumeSource(NoLimit) + cs2 := constructPointerGCEPersistentDiskVolumeSource(NoLimit) + cs3 := constructPointerAWSElasticBlockStoreVolumeSource(NoLimit) + cs4 := constructPointerGitRepoVolumeSource(NoLimit) + cs5 := constructPointerSecretVolumeSource(NoLimit) + cs6 := constructPointerNFSVolumeSource(NoLimit) + cs7 := constructPointerISCSIVolumeSource(NoLimit) + cs8 := constructPointerGlusterfsVolumeSource(NoLimit) + cs9 := constructPointerPersistentVolumeClaimVolumeSource(NoLimit) + cs10 := constructPointerRBDVolumeSource(NoLimit) + cs11 := constructPointerFlexVolumeSource(NoLimit) + cs12 := constructPointerCinderVolumeSource(NoLimit) + cs13 := constructPointerCephFSVolumeSource(NoLimit) + cs14 := constructPointerFlockerVolumeSource(NoLimit) + cs15 := constructPointerDownwardAPIVolumeSource(NoLimit) + cs16 := constructPointerFCVolumeSource(NoLimit) + cs17 := constructPointerAzureFileVolumeSource(NoLimit) + cs18 := constructPointerConfigMapVolumeSource(NoLimit) + cs19 := constructPointerVsphereVirtualDiskVolumeSource(NoLimit) + cs20 := constructPointerQuobyteVolumeSource(NoLimit) + cs21 := constructPointerAzureDiskVolumeSource(NoLimit) + cs22 := constructPointerPhotonPersistentDiskVolumeSource(NoLimit) + cs23 := constructPointerProjectedVolumeSource(NoLimit) + cs24 := constructPointerPortworxVolumeSource(NoLimit) + cs25 := constructPointerScaleIOVolumeSource(NoLimit) + cs26 := constructPointerStorageOSVolumeSource(NoLimit) + cs27 := constructPointerCSIVolumeSource(NoLimit) + cs28 := constructPointerEphemeralVolumeSource(NoLimit) + cs29 := constructPointerImageVolumeSource(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + len(cs10), + len(cs11), + len(cs12), + len(cs13), + len(cs14), + 
len(cs15), + len(cs16), + len(cs17), + len(cs18), + len(cs19), + len(cs20), + len(cs21), + len(cs22), + len(cs23), + len(cs24), + len(cs25), + len(cs26), + len(cs27), + len(cs28), + len(cs29), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + k10 := 0 + k11 := 0 + k12 := 0 + k13 := 0 + k14 := 0 + k15 := 0 + k16 := 0 + k17 := 0 + k18 := 0 + k19 := 0 + k20 := 0 + k21 := 0 + k22 := 0 + k23 := 0 + k24 := 0 + k25 := 0 + k26 := 0 + k27 := 0 + k28 := 0 + k29 := 0 + for i := range maxCount { + nc := Case[v1.VolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructPointerHostPathVolumeSource(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.HostPath = c0.expected + nc.dst.HostPath = c0.dst + nc.src.HostPath = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerEmptyDirVolumeSource(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.EmptyDir = c1.expected + nc.dst.EmptyDir = c1.dst + nc.src.EmptyDir = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerGCEPersistentDiskVolumeSource(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.GCEPersistentDisk = c2.expected + nc.dst.GCEPersistentDisk = c2.dst + nc.src.GCEPersistentDisk = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerAWSElasticBlockStoreVolumeSource(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.AWSElasticBlockStore = c3.expected + nc.dst.AWSElasticBlockStore = c3.dst + nc.src.AWSElasticBlockStore = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerGitRepoVolumeSource(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.GitRepo = c4.expected + nc.dst.GitRepo = c4.dst + nc.src.GitRepo = c4.src + if i/len(cs5) > k5 { + cs5 = constructPointerSecretVolumeSource(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.Secret = c5.expected + nc.dst.Secret = c5.dst + nc.src.Secret = c5.src + if i/len(cs6) > k6 { + cs6 = constructPointerNFSVolumeSource(NoLimit) + k6 += 1 + } + c6 := 
&cs6[i%len(cs6)] + nc.expected.NFS = c6.expected + nc.dst.NFS = c6.dst + nc.src.NFS = c6.src + if i/len(cs7) > k7 { + cs7 = constructPointerISCSIVolumeSource(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.ISCSI = c7.expected + nc.dst.ISCSI = c7.dst + nc.src.ISCSI = c7.src + if i/len(cs8) > k8 { + cs8 = constructPointerGlusterfsVolumeSource(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.Glusterfs = c8.expected + nc.dst.Glusterfs = c8.dst + nc.src.Glusterfs = c8.src + if i/len(cs9) > k9 { + cs9 = constructPointerPersistentVolumeClaimVolumeSource(NoLimit) + k9 += 1 + } + c9 := &cs9[i%len(cs9)] + nc.expected.PersistentVolumeClaim = c9.expected + nc.dst.PersistentVolumeClaim = c9.dst + nc.src.PersistentVolumeClaim = c9.src + if i/len(cs10) > k10 { + cs10 = constructPointerRBDVolumeSource(NoLimit) + k10 += 1 + } + c10 := &cs10[i%len(cs10)] + nc.expected.RBD = c10.expected + nc.dst.RBD = c10.dst + nc.src.RBD = c10.src + if i/len(cs11) > k11 { + cs11 = constructPointerFlexVolumeSource(NoLimit) + k11 += 1 + } + c11 := &cs11[i%len(cs11)] + nc.expected.FlexVolume = c11.expected + nc.dst.FlexVolume = c11.dst + nc.src.FlexVolume = c11.src + if i/len(cs12) > k12 { + cs12 = constructPointerCinderVolumeSource(NoLimit) + k12 += 1 + } + c12 := &cs12[i%len(cs12)] + nc.expected.Cinder = c12.expected + nc.dst.Cinder = c12.dst + nc.src.Cinder = c12.src + if i/len(cs13) > k13 { + cs13 = constructPointerCephFSVolumeSource(NoLimit) + k13 += 1 + } + c13 := &cs13[i%len(cs13)] + nc.expected.CephFS = c13.expected + nc.dst.CephFS = c13.dst + nc.src.CephFS = c13.src + if i/len(cs14) > k14 { + cs14 = constructPointerFlockerVolumeSource(NoLimit) + k14 += 1 + } + c14 := &cs14[i%len(cs14)] + nc.expected.Flocker = c14.expected + nc.dst.Flocker = c14.dst + nc.src.Flocker = c14.src + if i/len(cs15) > k15 { + cs15 = constructPointerDownwardAPIVolumeSource(NoLimit) + k15 += 1 + } + c15 := &cs15[i%len(cs15)] + nc.expected.DownwardAPI = c15.expected + nc.dst.DownwardAPI = 
c15.dst + nc.src.DownwardAPI = c15.src + if i/len(cs16) > k16 { + cs16 = constructPointerFCVolumeSource(NoLimit) + k16 += 1 + } + c16 := &cs16[i%len(cs16)] + nc.expected.FC = c16.expected + nc.dst.FC = c16.dst + nc.src.FC = c16.src + if i/len(cs17) > k17 { + cs17 = constructPointerAzureFileVolumeSource(NoLimit) + k17 += 1 + } + c17 := &cs17[i%len(cs17)] + nc.expected.AzureFile = c17.expected + nc.dst.AzureFile = c17.dst + nc.src.AzureFile = c17.src + if i/len(cs18) > k18 { + cs18 = constructPointerConfigMapVolumeSource(NoLimit) + k18 += 1 + } + c18 := &cs18[i%len(cs18)] + nc.expected.ConfigMap = c18.expected + nc.dst.ConfigMap = c18.dst + nc.src.ConfigMap = c18.src + if i/len(cs19) > k19 { + cs19 = constructPointerVsphereVirtualDiskVolumeSource(NoLimit) + k19 += 1 + } + c19 := &cs19[i%len(cs19)] + nc.expected.VsphereVolume = c19.expected + nc.dst.VsphereVolume = c19.dst + nc.src.VsphereVolume = c19.src + if i/len(cs20) > k20 { + cs20 = constructPointerQuobyteVolumeSource(NoLimit) + k20 += 1 + } + c20 := &cs20[i%len(cs20)] + nc.expected.Quobyte = c20.expected + nc.dst.Quobyte = c20.dst + nc.src.Quobyte = c20.src + if i/len(cs21) > k21 { + cs21 = constructPointerAzureDiskVolumeSource(NoLimit) + k21 += 1 + } + c21 := &cs21[i%len(cs21)] + nc.expected.AzureDisk = c21.expected + nc.dst.AzureDisk = c21.dst + nc.src.AzureDisk = c21.src + if i/len(cs22) > k22 { + cs22 = constructPointerPhotonPersistentDiskVolumeSource(NoLimit) + k22 += 1 + } + c22 := &cs22[i%len(cs22)] + nc.expected.PhotonPersistentDisk = c22.expected + nc.dst.PhotonPersistentDisk = c22.dst + nc.src.PhotonPersistentDisk = c22.src + if i/len(cs23) > k23 { + cs23 = constructPointerProjectedVolumeSource(NoLimit) + k23 += 1 + } + c23 := &cs23[i%len(cs23)] + nc.expected.Projected = c23.expected + nc.dst.Projected = c23.dst + nc.src.Projected = c23.src + if i/len(cs24) > k24 { + cs24 = constructPointerPortworxVolumeSource(NoLimit) + k24 += 1 + } + c24 := &cs24[i%len(cs24)] + nc.expected.PortworxVolume = 
c24.expected + nc.dst.PortworxVolume = c24.dst + nc.src.PortworxVolume = c24.src + if i/len(cs25) > k25 { + cs25 = constructPointerScaleIOVolumeSource(NoLimit) + k25 += 1 + } + c25 := &cs25[i%len(cs25)] + nc.expected.ScaleIO = c25.expected + nc.dst.ScaleIO = c25.dst + nc.src.ScaleIO = c25.src + if i/len(cs26) > k26 { + cs26 = constructPointerStorageOSVolumeSource(NoLimit) + k26 += 1 + } + c26 := &cs26[i%len(cs26)] + nc.expected.StorageOS = c26.expected + nc.dst.StorageOS = c26.dst + nc.src.StorageOS = c26.src + if i/len(cs27) > k27 { + cs27 = constructPointerCSIVolumeSource(NoLimit) + k27 += 1 + } + c27 := &cs27[i%len(cs27)] + nc.expected.CSI = c27.expected + nc.dst.CSI = c27.dst + nc.src.CSI = c27.src + if i/len(cs28) > k28 { + cs28 = constructPointerEphemeralVolumeSource(NoLimit) + k28 += 1 + } + c28 := &cs28[i%len(cs28)] + nc.expected.Ephemeral = c28.expected + nc.dst.Ephemeral = c28.dst + nc.src.Ephemeral = c28.src + if i/len(cs29) > k29 { + cs29 = constructPointerImageVolumeSource(NoLimit) + k29 += 1 + } + c29 := &cs29[i%len(cs29)] + nc.expected.Image = c29.expected + nc.dst.Image = c29.dst + nc.src.Image = c29.src + cases = append(cases, nc) + } + return cases +} +func constructPointerHostPathVolumeSource(p Policy) []Case[*v1.HostPathVolumeSource] { + cases := []Case[*v1.HostPathVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructHostPathVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.HostPathVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructHostPathVolumeSource(p Policy) []Case[v1.HostPathVolumeSource] { + cases := []Case[v1.HostPathVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructPointerHostPathType(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.HostPathVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := 
&cs0[i%len(cs0)] + nc.expected.Path = c0.expected + nc.dst.Path = c0.dst + nc.src.Path = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerHostPathType(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Type = c1.expected + nc.dst.Type = c1.dst + nc.src.Type = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerHostPathType(p Policy) []Case[*v1.HostPathType] { + cases := []Case[*v1.HostPathType]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructHostPathType(p) + for _, c := range cs { + cases = append(cases, Case[*v1.HostPathType]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructHostPathType(p Policy) []Case[v1.HostPathType] { + cases := []Case[v1.HostPathType]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.HostPathType]{ + expected: v1.HostPathType(c.expected), + dst: v1.HostPathType(c.dst), + src: v1.HostPathType(c.src), + }) + } + return cases +} +func constructPointerEmptyDirVolumeSource(p Policy) []Case[*v1.EmptyDirVolumeSource] { + cases := []Case[*v1.EmptyDirVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructEmptyDirVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.EmptyDirVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructEmptyDirVolumeSource(p Policy) []Case[v1.EmptyDirVolumeSource] { + cases := []Case[v1.EmptyDirVolumeSource]{} + cs0 := constructStorageMedium(NoLimit) + cs1 := constructPointerQuantity(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.EmptyDirVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructStorageMedium(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Medium = c0.expected + nc.dst.Medium = c0.dst + nc.src.Medium = c0.src + if i/len(cs1) > k1 { + cs1 = 
constructPointerQuantity(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.SizeLimit = c1.expected + nc.dst.SizeLimit = c1.dst + nc.src.SizeLimit = c1.src + cases = append(cases, nc) + } + return cases +} +func constructStorageMedium(p Policy) []Case[v1.StorageMedium] { + cases := []Case[v1.StorageMedium]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.StorageMedium]{ + expected: v1.StorageMedium(c.expected), + dst: v1.StorageMedium(c.dst), + src: v1.StorageMedium(c.src), + }) + } + return cases +} +func constructPointerQuantity(p Policy) []Case[*resource.Quantity] { + cases := []Case[*resource.Quantity]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructQuantity(p) + for _, c := range cs { + cases = append(cases, Case[*resource.Quantity]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPointerGCEPersistentDiskVolumeSource(p Policy) []Case[*v1.GCEPersistentDiskVolumeSource] { + cases := []Case[*v1.GCEPersistentDiskVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructGCEPersistentDiskVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.GCEPersistentDiskVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructGCEPersistentDiskVolumeSource(p Policy) []Case[v1.GCEPersistentDiskVolumeSource] { + cases := []Case[v1.GCEPersistentDiskVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructInt32(NoLimit) + cs3 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.GCEPersistentDiskVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.PDName = c0.expected + nc.dst.PDName = c0.dst + nc.src.PDName = c0.src + if 
i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FSType = c1.expected + nc.dst.FSType = c1.dst + nc.src.FSType = c1.src + if i/len(cs2) > k2 { + cs2 = constructInt32(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Partition = c2.expected + nc.dst.Partition = c2.dst + nc.src.Partition = c2.src + if i/len(cs3) > k3 { + cs3 = constructBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.ReadOnly = c3.expected + nc.dst.ReadOnly = c3.dst + nc.src.ReadOnly = c3.src + cases = append(cases, nc) + } + return cases +} +func constructInt32(p Policy) []Case[int32] { + cases := []Case[int32]{} + if p&(NoZero) == 0 { + cases = append(cases, Case[int32]{expected: 0, dst: 0, src: 0}) + } + if p&(NoNotEqual) == 0 { + if p&(NoNotEqual) == 0 { + cases = append(cases, Case[int32]{expected: 1, dst: 2, src: 1}) + cases = append(cases, Case[int32]{expected: -1, dst: 2, src: -1}) + cases = append(cases, Case[int32]{expected: 1, dst: -2, src: 1}) + cases = append(cases, Case[int32]{expected: -1, dst: -2, src: -1}) + } + } + if p&(NoZero|NoNotEqual) == 0 { + cases = append(cases, Case[int32]{expected: 1, dst: 1, src: 0}) + cases = append(cases, Case[int32]{expected: -1, dst: -1, src: 0}) + cases = append(cases, Case[int32]{expected: 1, dst: 0, src: 1}) + cases = append(cases, Case[int32]{expected: -1, dst: 0, src: -1}) + } + cases = append(cases, Case[int32]{expected: 1, dst: 1, src: 1}) + cases = append(cases, Case[int32]{expected: -1, dst: -1, src: -1}) + return cases +} +func constructBool(p Policy) []Case[bool] { + cases := []Case[bool]{} + if p&(NoZero) == 0 { + cases = append(cases, Case[bool]{expected: false, dst: false, src: false}) + } + if p&(NoNotEqual) == 0 { + } + if p&(NoZero|NoNotEqual) == 0 { + } + cases = append(cases, Case[bool]{expected: false, dst: false, src: false}) + cases = append(cases, Case[bool]{expected: true, dst: true, src: true}) + return cases +} +func 
constructPointerAWSElasticBlockStoreVolumeSource(p Policy) []Case[*v1.AWSElasticBlockStoreVolumeSource] { + cases := []Case[*v1.AWSElasticBlockStoreVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAWSElasticBlockStoreVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.AWSElasticBlockStoreVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAWSElasticBlockStoreVolumeSource(p Policy) []Case[v1.AWSElasticBlockStoreVolumeSource] { + cases := []Case[v1.AWSElasticBlockStoreVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructInt32(NoLimit) + cs3 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.AWSElasticBlockStoreVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.VolumeID = c0.expected + nc.dst.VolumeID = c0.dst + nc.src.VolumeID = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FSType = c1.expected + nc.dst.FSType = c1.dst + nc.src.FSType = c1.src + if i/len(cs2) > k2 { + cs2 = constructInt32(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Partition = c2.expected + nc.dst.Partition = c2.dst + nc.src.Partition = c2.src + if i/len(cs3) > k3 { + cs3 = constructBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.ReadOnly = c3.expected + nc.dst.ReadOnly = c3.dst + nc.src.ReadOnly = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerGitRepoVolumeSource(p Policy) []Case[*v1.GitRepoVolumeSource] { + cases := []Case[*v1.GitRepoVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructGitRepoVolumeSource(p) + for _, c := range cs { + cases = append(cases, 
Case[*v1.GitRepoVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructGitRepoVolumeSource(p Policy) []Case[v1.GitRepoVolumeSource] { + cases := []Case[v1.GitRepoVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.GitRepoVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Repository = c0.expected + nc.dst.Repository = c0.dst + nc.src.Repository = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Revision = c1.expected + nc.dst.Revision = c1.dst + nc.src.Revision = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Directory = c2.expected + nc.dst.Directory = c2.dst + nc.src.Directory = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerSecretVolumeSource(p Policy) []Case[*v1.SecretVolumeSource] { + cases := []Case[*v1.SecretVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSecretVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SecretVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructSecretVolumeSource(p Policy) []Case[v1.SecretVolumeSource] { + cases := []Case[v1.SecretVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructAtomicSliceKeyToPath(NoLimit) + cs2 := constructPointerInt32(NoLimit) + cs3 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.SecretVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = 
constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.SecretName = c0.expected + nc.dst.SecretName = c0.dst + nc.src.SecretName = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceKeyToPath(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Items = c1.expected + nc.dst.Items = c1.dst + nc.src.Items = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerInt32(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.DefaultMode = c2.expected + nc.dst.DefaultMode = c2.dst + nc.src.DefaultMode = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Optional = c3.expected + nc.dst.Optional = c3.dst + nc.src.Optional = c3.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceKeyToPath(p Policy) []Case[[]v1.KeyToPath] { + cases := []Case[[]v1.KeyToPath]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructKeyToPath(NoLimit) + var nc Case[[]v1.KeyToPath] + nc = Case[[]v1.KeyToPath]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.KeyToPath]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.KeyToPath{} + cases = append(cases, nc) + return cases +} +func constructKeyToPath(p Policy) []Case[v1.KeyToPath] { + cases := []Case[v1.KeyToPath]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructPointerInt32(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.KeyToPath]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Key = c0.expected + nc.dst.Key = c0.dst + nc.src.Key = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + 
k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Path = c1.expected + nc.dst.Path = c1.dst + nc.src.Path = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerInt32(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Mode = c2.expected + nc.dst.Mode = c2.dst + nc.src.Mode = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerInt32(p Policy) []Case[*int32] { + cases := []Case[*int32]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructInt32(p) + for _, c := range cs { + cases = append(cases, Case[*int32]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPointerBool(p Policy) []Case[*bool] { + cases := []Case[*bool]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructBool(p) + for _, c := range cs { + cases = append(cases, Case[*bool]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPointerNFSVolumeSource(p Policy) []Case[*v1.NFSVolumeSource] { + cases := []Case[*v1.NFSVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructNFSVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.NFSVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructNFSVolumeSource(p Policy) []Case[v1.NFSVolumeSource] { + cases := []Case[v1.NFSVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.NFSVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Server = c0.expected + nc.dst.Server = c0.dst + nc.src.Server = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Path = c1.expected + 
nc.dst.Path = c1.dst + nc.src.Path = c1.src + if i/len(cs2) > k2 { + cs2 = constructBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ReadOnly = c2.expected + nc.dst.ReadOnly = c2.dst + nc.src.ReadOnly = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerISCSIVolumeSource(p Policy) []Case[*v1.ISCSIVolumeSource] { + cases := []Case[*v1.ISCSIVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructISCSIVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ISCSIVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructISCSIVolumeSource(p Policy) []Case[v1.ISCSIVolumeSource] { + cases := []Case[v1.ISCSIVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructInt32(NoLimit) + cs3 := constructString(NoLimit) + cs4 := constructString(NoLimit) + cs5 := constructBool(NoLimit) + cs6 := constructAtomicSliceString(NoLimit) + cs7 := constructBool(NoLimit) + cs8 := constructBool(NoLimit) + cs9 := constructAtomicPointerLocalObjectReference(NoLimit) + cs10 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + len(cs10), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + k10 := 0 + for i := range maxCount { + nc := Case[v1.ISCSIVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.TargetPortal = c0.expected + nc.dst.TargetPortal = c0.dst + nc.src.TargetPortal = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.IQN = c1.expected + nc.dst.IQN = c1.dst + nc.src.IQN = c1.src + if i/len(cs2) > k2 { + cs2 = constructInt32(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Lun = 
c2.expected + nc.dst.Lun = c2.dst + nc.src.Lun = c2.src + if i/len(cs3) > k3 { + cs3 = constructString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.ISCSIInterface = c3.expected + nc.dst.ISCSIInterface = c3.dst + nc.src.ISCSIInterface = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.FSType = c4.expected + nc.dst.FSType = c4.dst + nc.src.FSType = c4.src + if i/len(cs5) > k5 { + cs5 = constructBool(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.ReadOnly = c5.expected + nc.dst.ReadOnly = c5.dst + nc.src.ReadOnly = c5.src + if i/len(cs6) > k6 { + cs6 = constructAtomicSliceString(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.Portals = c6.expected + nc.dst.Portals = c6.dst + nc.src.Portals = c6.src + if i/len(cs7) > k7 { + cs7 = constructBool(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.DiscoveryCHAPAuth = c7.expected + nc.dst.DiscoveryCHAPAuth = c7.dst + nc.src.DiscoveryCHAPAuth = c7.src + if i/len(cs8) > k8 { + cs8 = constructBool(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.SessionCHAPAuth = c8.expected + nc.dst.SessionCHAPAuth = c8.dst + nc.src.SessionCHAPAuth = c8.src + if i/len(cs9) > k9 { + cs9 = constructAtomicPointerLocalObjectReference(NoLimit) + k9 += 1 + } + c9 := &cs9[i%len(cs9)] + nc.expected.SecretRef = c9.expected + nc.dst.SecretRef = c9.dst + nc.src.SecretRef = c9.src + if i/len(cs10) > k10 { + cs10 = constructPointerString(NoLimit) + k10 += 1 + } + c10 := &cs10[i%len(cs10)] + nc.expected.InitiatorName = c10.expected + nc.dst.InitiatorName = c10.dst + nc.src.InitiatorName = c10.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceString(p Policy) []Case[[]string] { + cases := []Case[[]string]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructString(NoLimit) + var nc Case[[]string] + nc = Case[[]string]{} + for _, c := range cs { + nc.expected = 
append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]string]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []string{} + cases = append(cases, nc) + return cases +} +func constructAtomicPointerLocalObjectReference(p Policy) []Case[*v1.LocalObjectReference] { + cases := []Case[*v1.LocalObjectReference]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicLocalObjectReference(p) + for _, c := range cs { + cases = append(cases, Case[*v1.LocalObjectReference]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAtomicLocalObjectReference(p Policy) []Case[v1.LocalObjectReference] { + cases := []Case[v1.LocalObjectReference]{} + cs0 := constructString(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.LocalObjectReference]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.src + nc.dst.Name = c0.dst + nc.src.Name = c0.src + cases = append(cases, nc) + } + return cases +} +func constructPointerString(p Policy) []Case[*string] { + cases := []Case[*string]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[*string]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPointerGlusterfsVolumeSource(p Policy) []Case[*v1.GlusterfsVolumeSource] { + cases := []Case[*v1.GlusterfsVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructGlusterfsVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.GlusterfsVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructGlusterfsVolumeSource(p Policy) 
[]Case[v1.GlusterfsVolumeSource] { + cases := []Case[v1.GlusterfsVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.GlusterfsVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.EndpointsName = c0.expected + nc.dst.EndpointsName = c0.dst + nc.src.EndpointsName = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Path = c1.expected + nc.dst.Path = c1.dst + nc.src.Path = c1.src + if i/len(cs2) > k2 { + cs2 = constructBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ReadOnly = c2.expected + nc.dst.ReadOnly = c2.dst + nc.src.ReadOnly = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerPersistentVolumeClaimVolumeSource(p Policy) []Case[*v1.PersistentVolumeClaimVolumeSource] { + cases := []Case[*v1.PersistentVolumeClaimVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPersistentVolumeClaimVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PersistentVolumeClaimVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPersistentVolumeClaimVolumeSource(p Policy) []Case[v1.PersistentVolumeClaimVolumeSource] { + cases := []Case[v1.PersistentVolumeClaimVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.PersistentVolumeClaimVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.ClaimName = c0.expected + nc.dst.ClaimName = c0.dst + nc.src.ClaimName = c0.src + if i/len(cs1) > k1 { + cs1 
= constructBool(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ReadOnly = c1.expected + nc.dst.ReadOnly = c1.dst + nc.src.ReadOnly = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerRBDVolumeSource(p Policy) []Case[*v1.RBDVolumeSource] { + cases := []Case[*v1.RBDVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructRBDVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.RBDVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructRBDVolumeSource(p Policy) []Case[v1.RBDVolumeSource] { + cases := []Case[v1.RBDVolumeSource]{} + cs0 := constructAtomicSliceString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructString(NoLimit) + cs4 := constructString(NoLimit) + cs5 := constructString(NoLimit) + cs6 := constructAtomicPointerLocalObjectReference(NoLimit) + cs7 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + for i := range maxCount { + nc := Case[v1.RBDVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.CephMonitors = c0.expected + nc.dst.CephMonitors = c0.dst + nc.src.CephMonitors = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.RBDImage = c1.expected + nc.dst.RBDImage = c1.dst + nc.src.RBDImage = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.FSType = c2.expected + nc.dst.FSType = c2.dst + nc.src.FSType = c2.src + if i/len(cs3) > k3 { + cs3 = constructString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.RBDPool = c3.expected + nc.dst.RBDPool = c3.dst + nc.src.RBDPool 
= c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.RadosUser = c4.expected + nc.dst.RadosUser = c4.dst + nc.src.RadosUser = c4.src + if i/len(cs5) > k5 { + cs5 = constructString(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.Keyring = c5.expected + nc.dst.Keyring = c5.dst + nc.src.Keyring = c5.src + if i/len(cs6) > k6 { + cs6 = constructAtomicPointerLocalObjectReference(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.SecretRef = c6.expected + nc.dst.SecretRef = c6.dst + nc.src.SecretRef = c6.src + if i/len(cs7) > k7 { + cs7 = constructBool(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.ReadOnly = c7.expected + nc.dst.ReadOnly = c7.dst + nc.src.ReadOnly = c7.src + cases = append(cases, nc) + } + return cases +} +func constructPointerFlexVolumeSource(p Policy) []Case[*v1.FlexVolumeSource] { + cases := []Case[*v1.FlexVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructFlexVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.FlexVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructFlexVolumeSource(p Policy) []Case[v1.FlexVolumeSource] { + cases := []Case[v1.FlexVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructAtomicPointerLocalObjectReference(NoLimit) + cs3 := constructBool(NoLimit) + cs4 := constructMapStringToString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := Case[v1.FlexVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Driver = c0.expected + nc.dst.Driver = c0.dst + nc.src.Driver = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + 
nc.expected.FSType = c1.expected + nc.dst.FSType = c1.dst + nc.src.FSType = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicPointerLocalObjectReference(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.SecretRef = c2.expected + nc.dst.SecretRef = c2.dst + nc.src.SecretRef = c2.src + if i/len(cs3) > k3 { + cs3 = constructBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.ReadOnly = c3.expected + nc.dst.ReadOnly = c3.dst + nc.src.ReadOnly = c3.src + if i/len(cs4) > k4 { + cs4 = constructMapStringToString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.Options = c4.expected + nc.dst.Options = c4.dst + nc.src.Options = c4.src + cases = append(cases, nc) + } + return cases +} +func constructPointerCinderVolumeSource(p Policy) []Case[*v1.CinderVolumeSource] { + cases := []Case[*v1.CinderVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructCinderVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.CinderVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructCinderVolumeSource(p Policy) []Case[v1.CinderVolumeSource] { + cases := []Case[v1.CinderVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructBool(NoLimit) + cs3 := constructAtomicPointerLocalObjectReference(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.CinderVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.VolumeID = c0.expected + nc.dst.VolumeID = c0.dst + nc.src.VolumeID = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FSType = c1.expected + nc.dst.FSType = c1.dst + nc.src.FSType = c1.src + if i/len(cs2) > k2 { + cs2 = constructBool(NoLimit) + k2 += 1 + } 
+ c2 := &cs2[i%len(cs2)] + nc.expected.ReadOnly = c2.expected + nc.dst.ReadOnly = c2.dst + nc.src.ReadOnly = c2.src + if i/len(cs3) > k3 { + cs3 = constructAtomicPointerLocalObjectReference(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.SecretRef = c3.expected + nc.dst.SecretRef = c3.dst + nc.src.SecretRef = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerCephFSVolumeSource(p Policy) []Case[*v1.CephFSVolumeSource] { + cases := []Case[*v1.CephFSVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructCephFSVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.CephFSVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructCephFSVolumeSource(p Policy) []Case[v1.CephFSVolumeSource] { + cases := []Case[v1.CephFSVolumeSource]{} + cs0 := constructAtomicSliceString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructString(NoLimit) + cs4 := constructAtomicPointerLocalObjectReference(NoLimit) + cs5 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + for i := range maxCount { + nc := Case[v1.CephFSVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Monitors = c0.expected + nc.dst.Monitors = c0.dst + nc.src.Monitors = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Path = c1.expected + nc.dst.Path = c1.dst + nc.src.Path = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.User = c2.expected + nc.dst.User = c2.dst + nc.src.User = c2.src + if i/len(cs3) > k3 { + cs3 = constructString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + 
nc.expected.SecretFile = c3.expected + nc.dst.SecretFile = c3.dst + nc.src.SecretFile = c3.src + if i/len(cs4) > k4 { + cs4 = constructAtomicPointerLocalObjectReference(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.SecretRef = c4.expected + nc.dst.SecretRef = c4.dst + nc.src.SecretRef = c4.src + if i/len(cs5) > k5 { + cs5 = constructBool(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.ReadOnly = c5.expected + nc.dst.ReadOnly = c5.dst + nc.src.ReadOnly = c5.src + cases = append(cases, nc) + } + return cases +} +func constructPointerFlockerVolumeSource(p Policy) []Case[*v1.FlockerVolumeSource] { + cases := []Case[*v1.FlockerVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructFlockerVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.FlockerVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructFlockerVolumeSource(p Policy) []Case[v1.FlockerVolumeSource] { + cases := []Case[v1.FlockerVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.FlockerVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.DatasetName = c0.expected + nc.dst.DatasetName = c0.dst + nc.src.DatasetName = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.DatasetUUID = c1.expected + nc.dst.DatasetUUID = c1.dst + nc.src.DatasetUUID = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerDownwardAPIVolumeSource(p Policy) []Case[*v1.DownwardAPIVolumeSource] { + cases := []Case[*v1.DownwardAPIVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructDownwardAPIVolumeSource(p) + for _, c := range cs { + cases = append(cases, 
Case[*v1.DownwardAPIVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructDownwardAPIVolumeSource(p Policy) []Case[v1.DownwardAPIVolumeSource] { + cases := []Case[v1.DownwardAPIVolumeSource]{} + cs0 := constructAtomicSliceDownwardAPIVolumeFile(NoLimit) + cs1 := constructPointerInt32(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.DownwardAPIVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceDownwardAPIVolumeFile(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Items = c0.expected + nc.dst.Items = c0.dst + nc.src.Items = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerInt32(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.DefaultMode = c1.expected + nc.dst.DefaultMode = c1.dst + nc.src.DefaultMode = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceDownwardAPIVolumeFile(p Policy) []Case[[]v1.DownwardAPIVolumeFile] { + cases := []Case[[]v1.DownwardAPIVolumeFile]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructDownwardAPIVolumeFile(NoLimit) + var nc Case[[]v1.DownwardAPIVolumeFile] + nc = Case[[]v1.DownwardAPIVolumeFile]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.DownwardAPIVolumeFile]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.DownwardAPIVolumeFile{} + cases = append(cases, nc) + return cases +} +func constructDownwardAPIVolumeFile(p Policy) []Case[v1.DownwardAPIVolumeFile] { + cases := []Case[v1.DownwardAPIVolumeFile]{} + cs0 := constructString(NoLimit) + cs1 := constructAtomicPointerObjectFieldSelector(NoLimit) + cs2 := constructAtomicPointerResourceFieldSelector(NoLimit) + cs3 := constructPointerInt32(NoLimit) + 
maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.DownwardAPIVolumeFile]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Path = c0.expected + nc.dst.Path = c0.dst + nc.src.Path = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicPointerObjectFieldSelector(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FieldRef = c1.expected + nc.dst.FieldRef = c1.dst + nc.src.FieldRef = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicPointerResourceFieldSelector(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ResourceFieldRef = c2.expected + nc.dst.ResourceFieldRef = c2.dst + nc.src.ResourceFieldRef = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerInt32(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Mode = c3.expected + nc.dst.Mode = c3.dst + nc.src.Mode = c3.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicPointerObjectFieldSelector(p Policy) []Case[*v1.ObjectFieldSelector] { + cases := []Case[*v1.ObjectFieldSelector]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicObjectFieldSelector(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ObjectFieldSelector]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAtomicObjectFieldSelector(p Policy) []Case[v1.ObjectFieldSelector] { + cases := []Case[v1.ObjectFieldSelector]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.ObjectFieldSelector]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.APIVersion = c0.src + nc.dst.APIVersion = c0.dst + nc.src.APIVersion = c0.src + if i/len(cs1) > k1 { + cs1 = 
constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FieldPath = c1.src + nc.dst.FieldPath = c1.dst + nc.src.FieldPath = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicPointerResourceFieldSelector(p Policy) []Case[*v1.ResourceFieldSelector] { + cases := []Case[*v1.ResourceFieldSelector]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicResourceFieldSelector(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ResourceFieldSelector]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAtomicResourceFieldSelector(p Policy) []Case[v1.ResourceFieldSelector] { + cases := []Case[v1.ResourceFieldSelector]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructQuantity(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.ResourceFieldSelector]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.ContainerName = c0.src + nc.dst.ContainerName = c0.dst + nc.src.ContainerName = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Resource = c1.src + nc.dst.Resource = c1.dst + nc.src.Resource = c1.src + if i/len(cs2) > k2 { + cs2 = constructQuantity(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Divisor = c2.src + nc.dst.Divisor = c2.dst + nc.src.Divisor = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerFCVolumeSource(p Policy) []Case[*v1.FCVolumeSource] { + cases := []Case[*v1.FCVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructFCVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.FCVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func 
constructFCVolumeSource(p Policy) []Case[v1.FCVolumeSource] { + cases := []Case[v1.FCVolumeSource]{} + cs0 := constructAtomicSliceString(NoLimit) + cs1 := constructPointerInt32(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructBool(NoLimit) + cs4 := constructAtomicSliceString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := Case[v1.FCVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.TargetWWNs = c0.expected + nc.dst.TargetWWNs = c0.dst + nc.src.TargetWWNs = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerInt32(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Lun = c1.expected + nc.dst.Lun = c1.dst + nc.src.Lun = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.FSType = c2.expected + nc.dst.FSType = c2.dst + nc.src.FSType = c2.src + if i/len(cs3) > k3 { + cs3 = constructBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.ReadOnly = c3.expected + nc.dst.ReadOnly = c3.dst + nc.src.ReadOnly = c3.src + if i/len(cs4) > k4 { + cs4 = constructAtomicSliceString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.WWIDs = c4.expected + nc.dst.WWIDs = c4.dst + nc.src.WWIDs = c4.src + cases = append(cases, nc) + } + return cases +} +func constructPointerAzureFileVolumeSource(p Policy) []Case[*v1.AzureFileVolumeSource] { + cases := []Case[*v1.AzureFileVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAzureFileVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.AzureFileVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAzureFileVolumeSource(p Policy) []Case[v1.AzureFileVolumeSource] { + cases := []Case[v1.AzureFileVolumeSource]{} + 
cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.AzureFileVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.SecretName = c0.expected + nc.dst.SecretName = c0.dst + nc.src.SecretName = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ShareName = c1.expected + nc.dst.ShareName = c1.dst + nc.src.ShareName = c1.src + if i/len(cs2) > k2 { + cs2 = constructBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ReadOnly = c2.expected + nc.dst.ReadOnly = c2.dst + nc.src.ReadOnly = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerConfigMapVolumeSource(p Policy) []Case[*v1.ConfigMapVolumeSource] { + cases := []Case[*v1.ConfigMapVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructConfigMapVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ConfigMapVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructConfigMapVolumeSource(p Policy) []Case[v1.ConfigMapVolumeSource] { + cases := []Case[v1.ConfigMapVolumeSource]{} + cs0 := constructAtomicLocalObjectReference(NoLimit) + cs1 := constructAtomicSliceKeyToPath(NoLimit) + cs2 := constructPointerInt32(NoLimit) + cs3 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.ConfigMapVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicLocalObjectReference(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LocalObjectReference = c0.expected + nc.dst.LocalObjectReference = c0.dst + nc.src.LocalObjectReference = c0.src + 
if i/len(cs1) > k1 { + cs1 = constructAtomicSliceKeyToPath(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Items = c1.expected + nc.dst.Items = c1.dst + nc.src.Items = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerInt32(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.DefaultMode = c2.expected + nc.dst.DefaultMode = c2.dst + nc.src.DefaultMode = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Optional = c3.expected + nc.dst.Optional = c3.dst + nc.src.Optional = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerVsphereVirtualDiskVolumeSource(p Policy) []Case[*v1.VsphereVirtualDiskVolumeSource] { + cases := []Case[*v1.VsphereVirtualDiskVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructVsphereVirtualDiskVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.VsphereVirtualDiskVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructVsphereVirtualDiskVolumeSource(p Policy) []Case[v1.VsphereVirtualDiskVolumeSource] { + cases := []Case[v1.VsphereVirtualDiskVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.VsphereVirtualDiskVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.VolumePath = c0.expected + nc.dst.VolumePath = c0.dst + nc.src.VolumePath = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FSType = c1.expected + nc.dst.FSType = c1.dst + nc.src.FSType = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 
:= &cs2[i%len(cs2)] + nc.expected.StoragePolicyName = c2.expected + nc.dst.StoragePolicyName = c2.dst + nc.src.StoragePolicyName = c2.src + if i/len(cs3) > k3 { + cs3 = constructString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.StoragePolicyID = c3.expected + nc.dst.StoragePolicyID = c3.dst + nc.src.StoragePolicyID = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerQuobyteVolumeSource(p Policy) []Case[*v1.QuobyteVolumeSource] { + cases := []Case[*v1.QuobyteVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructQuobyteVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.QuobyteVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructQuobyteVolumeSource(p Policy) []Case[v1.QuobyteVolumeSource] { + cases := []Case[v1.QuobyteVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructBool(NoLimit) + cs3 := constructString(NoLimit) + cs4 := constructString(NoLimit) + cs5 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + for i := range maxCount { + nc := Case[v1.QuobyteVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Registry = c0.expected + nc.dst.Registry = c0.dst + nc.src.Registry = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Volume = c1.expected + nc.dst.Volume = c1.dst + nc.src.Volume = c1.src + if i/len(cs2) > k2 { + cs2 = constructBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ReadOnly = c2.expected + nc.dst.ReadOnly = c2.dst + nc.src.ReadOnly = c2.src + if i/len(cs3) > k3 { + cs3 = constructString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.User = 
c3.expected + nc.dst.User = c3.dst + nc.src.User = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.Group = c4.expected + nc.dst.Group = c4.dst + nc.src.Group = c4.src + if i/len(cs5) > k5 { + cs5 = constructString(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.Tenant = c5.expected + nc.dst.Tenant = c5.dst + nc.src.Tenant = c5.src + cases = append(cases, nc) + } + return cases +} +func constructPointerAzureDiskVolumeSource(p Policy) []Case[*v1.AzureDiskVolumeSource] { + cases := []Case[*v1.AzureDiskVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAzureDiskVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.AzureDiskVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAzureDiskVolumeSource(p Policy) []Case[v1.AzureDiskVolumeSource] { + cases := []Case[v1.AzureDiskVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructPointerAzureDataDiskCachingMode(NoLimit) + cs3 := constructPointerString(NoLimit) + cs4 := constructPointerBool(NoLimit) + cs5 := constructPointerAzureDataDiskKind(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + for i := range maxCount { + nc := Case[v1.AzureDiskVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.DiskName = c0.expected + nc.dst.DiskName = c0.dst + nc.src.DiskName = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.DataDiskURI = c1.expected + nc.dst.DataDiskURI = c1.dst + nc.src.DataDiskURI = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerAzureDataDiskCachingMode(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.CachingMode 
= c2.expected + nc.dst.CachingMode = c2.dst + nc.src.CachingMode = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.FSType = c3.expected + nc.dst.FSType = c3.dst + nc.src.FSType = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerBool(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.ReadOnly = c4.expected + nc.dst.ReadOnly = c4.dst + nc.src.ReadOnly = c4.src + if i/len(cs5) > k5 { + cs5 = constructPointerAzureDataDiskKind(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.Kind = c5.expected + nc.dst.Kind = c5.dst + nc.src.Kind = c5.src + cases = append(cases, nc) + } + return cases +} +func constructPointerAzureDataDiskCachingMode(p Policy) []Case[*v1.AzureDataDiskCachingMode] { + cases := []Case[*v1.AzureDataDiskCachingMode]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAzureDataDiskCachingMode(p) + for _, c := range cs { + cases = append(cases, Case[*v1.AzureDataDiskCachingMode]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAzureDataDiskCachingMode(p Policy) []Case[v1.AzureDataDiskCachingMode] { + cases := []Case[v1.AzureDataDiskCachingMode]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.AzureDataDiskCachingMode]{ + expected: v1.AzureDataDiskCachingMode(c.expected), + dst: v1.AzureDataDiskCachingMode(c.dst), + src: v1.AzureDataDiskCachingMode(c.src), + }) + } + return cases +} +func constructPointerAzureDataDiskKind(p Policy) []Case[*v1.AzureDataDiskKind] { + cases := []Case[*v1.AzureDataDiskKind]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAzureDataDiskKind(p) + for _, c := range cs { + cases = append(cases, Case[*v1.AzureDataDiskKind]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAzureDataDiskKind(p Policy) []Case[v1.AzureDataDiskKind] { + cases := 
[]Case[v1.AzureDataDiskKind]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.AzureDataDiskKind]{ + expected: v1.AzureDataDiskKind(c.expected), + dst: v1.AzureDataDiskKind(c.dst), + src: v1.AzureDataDiskKind(c.src), + }) + } + return cases +} +func constructPointerPhotonPersistentDiskVolumeSource(p Policy) []Case[*v1.PhotonPersistentDiskVolumeSource] { + cases := []Case[*v1.PhotonPersistentDiskVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPhotonPersistentDiskVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PhotonPersistentDiskVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPhotonPersistentDiskVolumeSource(p Policy) []Case[v1.PhotonPersistentDiskVolumeSource] { + cases := []Case[v1.PhotonPersistentDiskVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.PhotonPersistentDiskVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.PdID = c0.expected + nc.dst.PdID = c0.dst + nc.src.PdID = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FSType = c1.expected + nc.dst.FSType = c1.dst + nc.src.FSType = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerProjectedVolumeSource(p Policy) []Case[*v1.ProjectedVolumeSource] { + cases := []Case[*v1.ProjectedVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructProjectedVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ProjectedVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructProjectedVolumeSource(p Policy) []Case[v1.ProjectedVolumeSource] { + cases := 
[]Case[v1.ProjectedVolumeSource]{} + cs0 := constructAtomicSliceVolumeProjection(NoLimit) + cs1 := constructPointerInt32(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.ProjectedVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceVolumeProjection(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Sources = c0.expected + nc.dst.Sources = c0.dst + nc.src.Sources = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerInt32(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.DefaultMode = c1.expected + nc.dst.DefaultMode = c1.dst + nc.src.DefaultMode = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceVolumeProjection(p Policy) []Case[[]v1.VolumeProjection] { + cases := []Case[[]v1.VolumeProjection]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructVolumeProjection(NoLimit) + var nc Case[[]v1.VolumeProjection] + nc = Case[[]v1.VolumeProjection]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.VolumeProjection]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.VolumeProjection{} + cases = append(cases, nc) + return cases +} +func constructVolumeProjection(p Policy) []Case[v1.VolumeProjection] { + cases := []Case[v1.VolumeProjection]{} + cs0 := constructPointerSecretProjection(NoLimit) + cs1 := constructPointerDownwardAPIProjection(NoLimit) + cs2 := constructPointerConfigMapProjection(NoLimit) + cs3 := constructPointerServiceAccountTokenProjection(NoLimit) + cs4 := constructPointerClusterTrustBundleProjection(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := 
Case[v1.VolumeProjection]{} + if i/len(cs0) > k0 { + cs0 = constructPointerSecretProjection(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Secret = c0.expected + nc.dst.Secret = c0.dst + nc.src.Secret = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerDownwardAPIProjection(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.DownwardAPI = c1.expected + nc.dst.DownwardAPI = c1.dst + nc.src.DownwardAPI = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerConfigMapProjection(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ConfigMap = c2.expected + nc.dst.ConfigMap = c2.dst + nc.src.ConfigMap = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerServiceAccountTokenProjection(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.ServiceAccountToken = c3.expected + nc.dst.ServiceAccountToken = c3.dst + nc.src.ServiceAccountToken = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerClusterTrustBundleProjection(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.ClusterTrustBundle = c4.expected + nc.dst.ClusterTrustBundle = c4.dst + nc.src.ClusterTrustBundle = c4.src + cases = append(cases, nc) + } + return cases +} +func constructPointerSecretProjection(p Policy) []Case[*v1.SecretProjection] { + cases := []Case[*v1.SecretProjection]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSecretProjection(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SecretProjection]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructSecretProjection(p Policy) []Case[v1.SecretProjection] { + cases := []Case[v1.SecretProjection]{} + cs0 := constructAtomicLocalObjectReference(NoLimit) + cs1 := constructAtomicSliceKeyToPath(NoLimit) + cs2 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.SecretProjection]{} + if i/len(cs0) > k0 { + 
cs0 = constructAtomicLocalObjectReference(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LocalObjectReference = c0.expected + nc.dst.LocalObjectReference = c0.dst + nc.src.LocalObjectReference = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceKeyToPath(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Items = c1.expected + nc.dst.Items = c1.dst + nc.src.Items = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Optional = c2.expected + nc.dst.Optional = c2.dst + nc.src.Optional = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerDownwardAPIProjection(p Policy) []Case[*v1.DownwardAPIProjection] { + cases := []Case[*v1.DownwardAPIProjection]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructDownwardAPIProjection(p) + for _, c := range cs { + cases = append(cases, Case[*v1.DownwardAPIProjection]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructDownwardAPIProjection(p Policy) []Case[v1.DownwardAPIProjection] { + cases := []Case[v1.DownwardAPIProjection]{} + cs0 := constructAtomicSliceDownwardAPIVolumeFile(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.DownwardAPIProjection]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceDownwardAPIVolumeFile(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Items = c0.expected + nc.dst.Items = c0.dst + nc.src.Items = c0.src + cases = append(cases, nc) + } + return cases +} +func constructPointerConfigMapProjection(p Policy) []Case[*v1.ConfigMapProjection] { + cases := []Case[*v1.ConfigMapProjection]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructConfigMapProjection(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ConfigMapProjection]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func 
constructConfigMapProjection(p Policy) []Case[v1.ConfigMapProjection] { + cases := []Case[v1.ConfigMapProjection]{} + cs0 := constructAtomicLocalObjectReference(NoLimit) + cs1 := constructAtomicSliceKeyToPath(NoLimit) + cs2 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.ConfigMapProjection]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicLocalObjectReference(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LocalObjectReference = c0.expected + nc.dst.LocalObjectReference = c0.dst + nc.src.LocalObjectReference = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceKeyToPath(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Items = c1.expected + nc.dst.Items = c1.dst + nc.src.Items = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Optional = c2.expected + nc.dst.Optional = c2.dst + nc.src.Optional = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerServiceAccountTokenProjection(p Policy) []Case[*v1.ServiceAccountTokenProjection] { + cases := []Case[*v1.ServiceAccountTokenProjection]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructServiceAccountTokenProjection(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ServiceAccountTokenProjection]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructServiceAccountTokenProjection(p Policy) []Case[v1.ServiceAccountTokenProjection] { + cases := []Case[v1.ServiceAccountTokenProjection]{} + cs0 := constructString(NoLimit) + cs1 := constructPointerInt64(NoLimit) + cs2 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.ServiceAccountTokenProjection]{} + if i/len(cs0) > k0 { + cs0 = 
constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Audience = c0.expected + nc.dst.Audience = c0.dst + nc.src.Audience = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerInt64(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ExpirationSeconds = c1.expected + nc.dst.ExpirationSeconds = c1.dst + nc.src.ExpirationSeconds = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Path = c2.expected + nc.dst.Path = c2.dst + nc.src.Path = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerInt64(p Policy) []Case[*int64] { + cases := []Case[*int64]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructInt64(p) + for _, c := range cs { + cases = append(cases, Case[*int64]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructInt64(p Policy) []Case[int64] { + cases := []Case[int64]{} + if p&(NoZero) == 0 { + cases = append(cases, Case[int64]{expected: 0, dst: 0, src: 0}) + } + if p&(NoNotEqual) == 0 { + if p&(NoNotEqual) == 0 { + cases = append(cases, Case[int64]{expected: 1, dst: 2, src: 1}) + cases = append(cases, Case[int64]{expected: -1, dst: 2, src: -1}) + cases = append(cases, Case[int64]{expected: 1, dst: -2, src: 1}) + cases = append(cases, Case[int64]{expected: -1, dst: -2, src: -1}) + } + } + if p&(NoZero|NoNotEqual) == 0 { + cases = append(cases, Case[int64]{expected: 1, dst: 1, src: 0}) + cases = append(cases, Case[int64]{expected: -1, dst: -1, src: 0}) + cases = append(cases, Case[int64]{expected: 1, dst: 0, src: 1}) + cases = append(cases, Case[int64]{expected: -1, dst: 0, src: -1}) + } + cases = append(cases, Case[int64]{expected: 1, dst: 1, src: 1}) + cases = append(cases, Case[int64]{expected: -1, dst: -1, src: -1}) + return cases +} +func constructPointerClusterTrustBundleProjection(p Policy) []Case[*v1.ClusterTrustBundleProjection] { + cases := 
[]Case[*v1.ClusterTrustBundleProjection]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructClusterTrustBundleProjection(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ClusterTrustBundleProjection]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructClusterTrustBundleProjection(p Policy) []Case[v1.ClusterTrustBundleProjection] { + cases := []Case[v1.ClusterTrustBundleProjection]{} + cs0 := constructPointerString(NoLimit) + cs1 := constructPointerString(NoLimit) + cs2 := constructPointerLabelSelector(NoLimit) + cs3 := constructPointerBool(NoLimit) + cs4 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := Case[v1.ClusterTrustBundleProjection]{} + if i/len(cs0) > k0 { + cs0 = constructPointerString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.SignerName = c1.expected + nc.dst.SignerName = c1.dst + nc.src.SignerName = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerLabelSelector(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.LabelSelector = c2.expected + nc.dst.LabelSelector = c2.dst + nc.src.LabelSelector = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Optional = c3.expected + nc.dst.Optional = c3.dst + nc.src.Optional = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.Path = c4.expected + nc.dst.Path = c4.dst + nc.src.Path = c4.src + cases = append(cases, nc) + } + return cases +} +func constructPointerLabelSelector(p Policy) []Case[*metav1.LabelSelector] { + cases := 
[]Case[*metav1.LabelSelector]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructLabelSelector(p) + for _, c := range cs { + cases = append(cases, Case[*metav1.LabelSelector]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructLabelSelector(p Policy) []Case[metav1.LabelSelector] { + cases := []Case[metav1.LabelSelector]{} + cs0 := constructMapStringToString(NoLimit) + cs1 := constructAtomicSliceLabelSelectorRequirement(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[metav1.LabelSelector]{} + if i/len(cs0) > k0 { + cs0 = constructMapStringToString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.MatchLabels = c0.expected + nc.dst.MatchLabels = c0.dst + nc.src.MatchLabels = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceLabelSelectorRequirement(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.MatchExpressions = c1.expected + nc.dst.MatchExpressions = c1.dst + nc.src.MatchExpressions = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceLabelSelectorRequirement(p Policy) []Case[[]metav1.LabelSelectorRequirement] { + cases := []Case[[]metav1.LabelSelectorRequirement]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructLabelSelectorRequirement(NoLimit) + var nc Case[[]metav1.LabelSelectorRequirement] + nc = Case[[]metav1.LabelSelectorRequirement]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]metav1.LabelSelectorRequirement]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []metav1.LabelSelectorRequirement{} + cases = append(cases, nc) + return cases +} +func constructLabelSelectorRequirement(p Policy) []Case[metav1.LabelSelectorRequirement] { + 
cases := []Case[metav1.LabelSelectorRequirement]{} + cs0 := constructString(NoLimit) + cs1 := constructLabelSelectorOperator(NoLimit) + cs2 := constructAtomicSliceString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[metav1.LabelSelectorRequirement]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Key = c0.expected + nc.dst.Key = c0.dst + nc.src.Key = c0.src + if i/len(cs1) > k1 { + cs1 = constructLabelSelectorOperator(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Operator = c1.expected + nc.dst.Operator = c1.dst + nc.src.Operator = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicSliceString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Values = c2.expected + nc.dst.Values = c2.dst + nc.src.Values = c2.src + cases = append(cases, nc) + } + return cases +} +func constructLabelSelectorOperator(p Policy) []Case[metav1.LabelSelectorOperator] { + cases := []Case[metav1.LabelSelectorOperator]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[metav1.LabelSelectorOperator]{ + expected: metav1.LabelSelectorOperator(c.expected), + dst: metav1.LabelSelectorOperator(c.dst), + src: metav1.LabelSelectorOperator(c.src), + }) + } + return cases +} +func constructPointerPortworxVolumeSource(p Policy) []Case[*v1.PortworxVolumeSource] { + cases := []Case[*v1.PortworxVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPortworxVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PortworxVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPortworxVolumeSource(p Policy) []Case[v1.PortworxVolumeSource] { + cases := []Case[v1.PortworxVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructBool(NoLimit) + 
maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.PortworxVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.VolumeID = c0.expected + nc.dst.VolumeID = c0.dst + nc.src.VolumeID = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.FSType = c1.expected + nc.dst.FSType = c1.dst + nc.src.FSType = c1.src + if i/len(cs2) > k2 { + cs2 = constructBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ReadOnly = c2.expected + nc.dst.ReadOnly = c2.dst + nc.src.ReadOnly = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerScaleIOVolumeSource(p Policy) []Case[*v1.ScaleIOVolumeSource] { + cases := []Case[*v1.ScaleIOVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructScaleIOVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ScaleIOVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructScaleIOVolumeSource(p Policy) []Case[v1.ScaleIOVolumeSource] { + cases := []Case[v1.ScaleIOVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructAtomicPointerLocalObjectReference(NoLimit) + cs3 := constructBool(NoLimit) + cs4 := constructString(NoLimit) + cs5 := constructString(NoLimit) + cs6 := constructString(NoLimit) + cs7 := constructString(NoLimit) + cs8 := constructString(NoLimit) + cs9 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + for i := range maxCount { + nc := Case[v1.ScaleIOVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 
1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Gateway = c0.expected + nc.dst.Gateway = c0.dst + nc.src.Gateway = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.System = c1.expected + nc.dst.System = c1.dst + nc.src.System = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicPointerLocalObjectReference(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.SecretRef = c2.expected + nc.dst.SecretRef = c2.dst + nc.src.SecretRef = c2.src + if i/len(cs3) > k3 { + cs3 = constructBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.SSLEnabled = c3.expected + nc.dst.SSLEnabled = c3.dst + nc.src.SSLEnabled = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.ProtectionDomain = c4.expected + nc.dst.ProtectionDomain = c4.dst + nc.src.ProtectionDomain = c4.src + if i/len(cs5) > k5 { + cs5 = constructString(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.StoragePool = c5.expected + nc.dst.StoragePool = c5.dst + nc.src.StoragePool = c5.src + if i/len(cs6) > k6 { + cs6 = constructString(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.StorageMode = c6.expected + nc.dst.StorageMode = c6.dst + nc.src.StorageMode = c6.src + if i/len(cs7) > k7 { + cs7 = constructString(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.VolumeName = c7.expected + nc.dst.VolumeName = c7.dst + nc.src.VolumeName = c7.src + if i/len(cs8) > k8 { + cs8 = constructString(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.FSType = c8.expected + nc.dst.FSType = c8.dst + nc.src.FSType = c8.src + if i/len(cs9) > k9 { + cs9 = constructBool(NoLimit) + k9 += 1 + } + c9 := &cs9[i%len(cs9)] + nc.expected.ReadOnly = c9.expected + nc.dst.ReadOnly = c9.dst + nc.src.ReadOnly = c9.src + cases = append(cases, nc) + } + return cases +} +func constructPointerStorageOSVolumeSource(p Policy) []Case[*v1.StorageOSVolumeSource] 
{ + cases := []Case[*v1.StorageOSVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructStorageOSVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.StorageOSVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructStorageOSVolumeSource(p Policy) []Case[v1.StorageOSVolumeSource] { + cases := []Case[v1.StorageOSVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructBool(NoLimit) + cs4 := constructAtomicPointerLocalObjectReference(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := Case[v1.StorageOSVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.VolumeName = c0.expected + nc.dst.VolumeName = c0.dst + nc.src.VolumeName = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.VolumeNamespace = c1.expected + nc.dst.VolumeNamespace = c1.dst + nc.src.VolumeNamespace = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.FSType = c2.expected + nc.dst.FSType = c2.dst + nc.src.FSType = c2.src + if i/len(cs3) > k3 { + cs3 = constructBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.ReadOnly = c3.expected + nc.dst.ReadOnly = c3.dst + nc.src.ReadOnly = c3.src + if i/len(cs4) > k4 { + cs4 = constructAtomicPointerLocalObjectReference(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.SecretRef = c4.expected + nc.dst.SecretRef = c4.dst + nc.src.SecretRef = c4.src + cases = append(cases, nc) + } + return cases +} +func constructPointerCSIVolumeSource(p Policy) []Case[*v1.CSIVolumeSource] { + cases := []Case[*v1.CSIVolumeSource]{ + { + expected: 
nil, + dst: nil, + src: nil, + }, + } + cs := constructCSIVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.CSIVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructCSIVolumeSource(p Policy) []Case[v1.CSIVolumeSource] { + cases := []Case[v1.CSIVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructPointerBool(NoLimit) + cs2 := constructPointerString(NoLimit) + cs3 := constructMapStringToString(NoLimit) + cs4 := constructAtomicPointerLocalObjectReference(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := Case[v1.CSIVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Driver = c0.expected + nc.dst.Driver = c0.dst + nc.src.Driver = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerBool(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ReadOnly = c1.expected + nc.dst.ReadOnly = c1.dst + nc.src.ReadOnly = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.FSType = c2.expected + nc.dst.FSType = c2.dst + nc.src.FSType = c2.src + if i/len(cs3) > k3 { + cs3 = constructMapStringToString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.VolumeAttributes = c3.expected + nc.dst.VolumeAttributes = c3.dst + nc.src.VolumeAttributes = c3.src + if i/len(cs4) > k4 { + cs4 = constructAtomicPointerLocalObjectReference(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.NodePublishSecretRef = c4.expected + nc.dst.NodePublishSecretRef = c4.dst + nc.src.NodePublishSecretRef = c4.src + cases = append(cases, nc) + } + return cases +} +func constructPointerEphemeralVolumeSource(p Policy) []Case[*v1.EphemeralVolumeSource] { + cases := []Case[*v1.EphemeralVolumeSource]{ + { + expected: nil, + dst: 
nil, + src: nil, + }, + } + cs := constructEphemeralVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.EphemeralVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructEphemeralVolumeSource(p Policy) []Case[v1.EphemeralVolumeSource] { + cases := []Case[v1.EphemeralVolumeSource]{} + cs0 := constructPointerPersistentVolumeClaimTemplate(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.EphemeralVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructPointerPersistentVolumeClaimTemplate(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.VolumeClaimTemplate = c0.expected + nc.dst.VolumeClaimTemplate = c0.dst + nc.src.VolumeClaimTemplate = c0.src + cases = append(cases, nc) + } + return cases +} +func constructPointerPersistentVolumeClaimTemplate(p Policy) []Case[*v1.PersistentVolumeClaimTemplate] { + cases := []Case[*v1.PersistentVolumeClaimTemplate]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPersistentVolumeClaimTemplate(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PersistentVolumeClaimTemplate]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPersistentVolumeClaimTemplate(p Policy) []Case[v1.PersistentVolumeClaimTemplate] { + cases := []Case[v1.PersistentVolumeClaimTemplate]{} + cs0 := constructObjectMeta(NoLimit) + cs1 := constructPersistentVolumeClaimSpec(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.PersistentVolumeClaimTemplate]{} + if i/len(cs0) > k0 { + cs0 = constructObjectMeta(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.ObjectMeta = c0.expected + nc.dst.ObjectMeta = c0.dst + nc.src.ObjectMeta = c0.src + if i/len(cs1) > k1 { + cs1 = constructPersistentVolumeClaimSpec(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Spec = 
c1.expected + nc.dst.Spec = c1.dst + nc.src.Spec = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPersistentVolumeClaimSpec(p Policy) []Case[v1.PersistentVolumeClaimSpec] { + cases := []Case[v1.PersistentVolumeClaimSpec]{} + cs0 := constructAtomicSlicePersistentVolumeAccessMode(NoLimit) + cs1 := constructPointerLabelSelector(NoLimit) + cs2 := constructVolumeResourceRequirements(NoLimit) + cs3 := constructString(NoLimit) + cs4 := constructPointerString(NoLimit) + cs5 := constructPointerPersistentVolumeMode(NoLimit) + cs6 := constructAtomicPointerTypedLocalObjectReference(NoLimit) + cs7 := constructPointerTypedObjectReference(NoLimit) + cs8 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + for i := range maxCount { + nc := Case[v1.PersistentVolumeClaimSpec]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSlicePersistentVolumeAccessMode(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.AccessModes = c0.expected + nc.dst.AccessModes = c0.dst + nc.src.AccessModes = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerLabelSelector(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Selector = c1.expected + nc.dst.Selector = c1.dst + nc.src.Selector = c1.src + if i/len(cs2) > k2 { + cs2 = constructVolumeResourceRequirements(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Resources = c2.expected + nc.dst.Resources = c2.dst + nc.src.Resources = c2.src + if i/len(cs3) > k3 { + cs3 = constructString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.VolumeName = c3.expected + nc.dst.VolumeName = c3.dst + nc.src.VolumeName = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.StorageClassName = c4.expected + 
nc.dst.StorageClassName = c4.dst + nc.src.StorageClassName = c4.src + if i/len(cs5) > k5 { + cs5 = constructPointerPersistentVolumeMode(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.VolumeMode = c5.expected + nc.dst.VolumeMode = c5.dst + nc.src.VolumeMode = c5.src + if i/len(cs6) > k6 { + cs6 = constructAtomicPointerTypedLocalObjectReference(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.DataSource = c6.expected + nc.dst.DataSource = c6.dst + nc.src.DataSource = c6.src + if i/len(cs7) > k7 { + cs7 = constructPointerTypedObjectReference(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.DataSourceRef = c7.expected + nc.dst.DataSourceRef = c7.dst + nc.src.DataSourceRef = c7.src + if i/len(cs8) > k8 { + cs8 = constructPointerString(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.VolumeAttributesClassName = c8.expected + nc.dst.VolumeAttributesClassName = c8.dst + nc.src.VolumeAttributesClassName = c8.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSlicePersistentVolumeAccessMode(p Policy) []Case[[]v1.PersistentVolumeAccessMode] { + cases := []Case[[]v1.PersistentVolumeAccessMode]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPersistentVolumeAccessMode(NoLimit) + var nc Case[[]v1.PersistentVolumeAccessMode] + nc = Case[[]v1.PersistentVolumeAccessMode]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PersistentVolumeAccessMode]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.PersistentVolumeAccessMode{} + cases = append(cases, nc) + return cases +} +func constructPersistentVolumeAccessMode(p Policy) []Case[v1.PersistentVolumeAccessMode] { + cases := []Case[v1.PersistentVolumeAccessMode]{} + cs := constructString(p) + for _, c := range cs { + cases = 
append(cases, Case[v1.PersistentVolumeAccessMode]{ + expected: v1.PersistentVolumeAccessMode(c.expected), + dst: v1.PersistentVolumeAccessMode(c.dst), + src: v1.PersistentVolumeAccessMode(c.src), + }) + } + return cases +} +func constructVolumeResourceRequirements(p Policy) []Case[v1.VolumeResourceRequirements] { + cases := []Case[v1.VolumeResourceRequirements]{} + cs0 := constructResourceList(NoLimit) + cs1 := constructResourceList(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.VolumeResourceRequirements]{} + if i/len(cs0) > k0 { + cs0 = constructResourceList(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Limits = c0.expected + nc.dst.Limits = c0.dst + nc.src.Limits = c0.src + if i/len(cs1) > k1 { + cs1 = constructResourceList(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Requests = c1.expected + nc.dst.Requests = c1.dst + nc.src.Requests = c1.src + cases = append(cases, nc) + } + return cases +} +func constructResourceList(p Policy) []Case[v1.ResourceList] { + cases := []Case[v1.ResourceList]{} + cs := constructMapResourceNameToQuantity(p) + for _, c := range cs { + cases = append(cases, Case[v1.ResourceList]{ + expected: v1.ResourceList(c.expected), + dst: v1.ResourceList(c.dst), + src: v1.ResourceList(c.src), + }) + } + return cases +} +func constructMapResourceNameToQuantity(p Policy) []Case[map[v1.ResourceName]resource.Quantity] { + cases := []Case[map[v1.ResourceName]resource.Quantity]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + keys := constructResourceName(NoNil | NoZero | NoNotEqual) + vals := constructQuantity(NoLimit) + keyIndex := 0 + var nc Case[map[v1.ResourceName]resource.Quantity] + for _, val := range vals { + keyIndex += 1 + if keyIndex >= len(keys) { + keys = constructResourceName(NoNil | NoZero | NoNotEqual) + keyIndex = 0 + } + key := keys[keyIndex] + nc = Case[map[v1.ResourceName]resource.Quantity]{} + nc.expected = 
make(map[v1.ResourceName]resource.Quantity) + nc.dst = make(map[v1.ResourceName]resource.Quantity) + nc.src = make(map[v1.ResourceName]resource.Quantity) + nc.expected[key.expected] = val.expected + nc.dst[key.expected] = val.dst + nc.src[key.expected] = val.src + } + cases = append(cases, nc) + for i, val := range vals { + keyIndex += 1 + if keyIndex >= len(keys) { + keys = constructResourceName(NoNil | NoZero | NoNotEqual) + keyIndex = 0 + } + key := keys[keyIndex] + nc = Case[map[v1.ResourceName]resource.Quantity]{} + nc.expected = make(map[v1.ResourceName]resource.Quantity) + nc.dst = make(map[v1.ResourceName]resource.Quantity) + nc.src = make(map[v1.ResourceName]resource.Quantity) + switch i % 3 { + case 0: + nc.expected[key.expected] = val.expected + nc.dst[key.expected] = val.dst + nc.src[key.expected] = val.src + case 1: + nc.expected[key.expected] = val.dst + nc.dst[key.expected] = val.dst + case 2: + nc.expected[key.expected] = val.src + nc.src[key.expected] = val.src + } + } + cases = append(cases, nc) + for i, val := range vals { + keyIndex += 1 + if keyIndex >= len(keys) { + keys = constructResourceName(NoNil | NoZero | NoNotEqual) + keyIndex = 0 + } + key := keys[keyIndex] + nc = Case[map[v1.ResourceName]resource.Quantity]{} + nc.expected = make(map[v1.ResourceName]resource.Quantity) + nc.dst = make(map[v1.ResourceName]resource.Quantity) + nc.src = make(map[v1.ResourceName]resource.Quantity) + switch i % 2 { + case 0: + nc.expected[key.expected] = val.dst + nc.dst[key.expected] = val.dst + case 1: + nc.expected[key.expected] = val.src + nc.src[key.expected] = val.src + } + } + cases = append(cases, nc) + for _, val := range vals { + keyIndex += 1 + if keyIndex >= len(keys) { + keys = constructResourceName(NoNil | NoZero | NoNotEqual) + keyIndex = 0 + } + key := keys[keyIndex] + nc = Case[map[v1.ResourceName]resource.Quantity]{} + nc.expected = make(map[v1.ResourceName]resource.Quantity) + nc.dst = make(map[v1.ResourceName]resource.Quantity) + nc.src = 
make(map[v1.ResourceName]resource.Quantity) + nc.expected[key.expected] = val.src + nc.src[key.expected] = val.src + } + cases = append(cases, nc) + for _, val := range vals { + keyIndex += 1 + if keyIndex >= len(keys) { + keys = constructResourceName(NoNil | NoZero | NoNotEqual) + keyIndex = 0 + } + key := keys[keyIndex] + nc = Case[map[v1.ResourceName]resource.Quantity]{} + nc.expected = make(map[v1.ResourceName]resource.Quantity) + nc.dst = make(map[v1.ResourceName]resource.Quantity) + nc.src = make(map[v1.ResourceName]resource.Quantity) + nc.expected[key.expected] = val.dst + nc.dst[key.expected] = val.dst + } + cases = append(cases, nc) + return cases +} +func constructPointerPersistentVolumeMode(p Policy) []Case[*v1.PersistentVolumeMode] { + cases := []Case[*v1.PersistentVolumeMode]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPersistentVolumeMode(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PersistentVolumeMode]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPersistentVolumeMode(p Policy) []Case[v1.PersistentVolumeMode] { + cases := []Case[v1.PersistentVolumeMode]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.PersistentVolumeMode]{ + expected: v1.PersistentVolumeMode(c.expected), + dst: v1.PersistentVolumeMode(c.dst), + src: v1.PersistentVolumeMode(c.src), + }) + } + return cases +} +func constructAtomicPointerTypedLocalObjectReference(p Policy) []Case[*v1.TypedLocalObjectReference] { + cases := []Case[*v1.TypedLocalObjectReference]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicTypedLocalObjectReference(p) + for _, c := range cs { + cases = append(cases, Case[*v1.TypedLocalObjectReference]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAtomicTypedLocalObjectReference(p Policy) []Case[v1.TypedLocalObjectReference] { + cases := 
[]Case[v1.TypedLocalObjectReference]{} + cs0 := constructPointerString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.TypedLocalObjectReference]{} + if i/len(cs0) > k0 { + cs0 = constructPointerString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.APIGroup = c0.src + nc.dst.APIGroup = c0.dst + nc.src.APIGroup = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Kind = c1.src + nc.dst.Kind = c1.dst + nc.src.Kind = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Name = c2.src + nc.dst.Name = c2.dst + nc.src.Name = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerTypedObjectReference(p Policy) []Case[*v1.TypedObjectReference] { + cases := []Case[*v1.TypedObjectReference]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructTypedObjectReference(p) + for _, c := range cs { + cases = append(cases, Case[*v1.TypedObjectReference]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructTypedObjectReference(p Policy) []Case[v1.TypedObjectReference] { + cases := []Case[v1.TypedObjectReference]{} + cs0 := constructPointerString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.TypedObjectReference]{} + if i/len(cs0) > k0 { + cs0 = constructPointerString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.APIGroup = c0.expected + nc.dst.APIGroup = c0.dst + nc.src.APIGroup = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 
:= &cs1[i%len(cs1)] + nc.expected.Kind = c1.expected + nc.dst.Kind = c1.dst + nc.src.Kind = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Name = c2.expected + nc.dst.Name = c2.dst + nc.src.Name = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Namespace = c3.expected + nc.dst.Namespace = c3.dst + nc.src.Namespace = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerImageVolumeSource(p Policy) []Case[*v1.ImageVolumeSource] { + cases := []Case[*v1.ImageVolumeSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructImageVolumeSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ImageVolumeSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructImageVolumeSource(p Policy) []Case[v1.ImageVolumeSource] { + cases := []Case[v1.ImageVolumeSource]{} + cs0 := constructString(NoLimit) + cs1 := constructPullPolicy(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.ImageVolumeSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Reference = c0.expected + nc.dst.Reference = c0.dst + nc.src.Reference = c0.src + if i/len(cs1) > k1 { + cs1 = constructPullPolicy(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.PullPolicy = c1.expected + nc.dst.PullPolicy = c1.dst + nc.src.PullPolicy = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPullPolicy(p Policy) []Case[v1.PullPolicy] { + cases := []Case[v1.PullPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.PullPolicy]{ + expected: v1.PullPolicy(c.expected), + dst: v1.PullPolicy(c.dst), + src: v1.PullPolicy(c.src), + }) + } + return cases +} +func 
constructMapSliceContainer(p Policy) []Case[[]v1.Container] { + cases := []Case[[]v1.Container]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructContainerIgnore_name(NoLimit) + var nc Case[[]v1.Container] + nc = Case[[]v1.Container]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.Container]{} + srcs := []v1.Container{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.Container]{} + srcs = []v1.Container{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.Container]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.Container]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructContainerIgnore_name(p Policy) []Case[v1.Container] { + cases := []Case[v1.Container]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + cs1 := constructString(NoLimit) + cs2 := constructAtomicSliceString(NoLimit) + cs3 := constructAtomicSliceString(NoLimit) + cs4 := constructString(NoLimit) + cs5 := constructMapSliceContainerPort(NoLimit) + cs6 := constructAtomicSliceEnvFromSource(NoLimit) + cs7 := constructMapSliceEnvVar(NoLimit) + cs8 := constructResourceRequirements(NoLimit) + cs9 := constructAtomicSliceContainerResizePolicy(NoLimit) + cs10 := constructPointerContainerRestartPolicy(NoLimit) + cs11 := constructMapSliceVolumeMount(NoLimit) + cs12 := constructMapSliceVolumeDevice(NoLimit) + cs13 := constructPointerProbe(NoLimit) + cs14 := constructPointerProbe(NoLimit) + cs15 := constructPointerProbe(NoLimit) + cs16 := constructPointerLifecycle(NoLimit) + cs17 := constructString(NoLimit) + cs18 := constructTerminationMessagePolicy(NoLimit) + cs19 := constructPullPolicy(NoLimit) + cs20 := constructPointerSecurityContext(NoLimit) + cs21 := constructBool(NoLimit) + cs22 := constructBool(NoLimit) + cs23 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + len(cs10), + len(cs11), + len(cs12), + len(cs13), + len(cs14), + len(cs15), + len(cs16), + len(cs17), + len(cs18), + len(cs19), + len(cs20), + len(cs21), + len(cs22), + len(cs23), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + k10 := 0 + k11 := 0 + k12 := 0 + 
k13 := 0 + k14 := 0 + k15 := 0 + k16 := 0 + k17 := 0 + k18 := 0 + k19 := 0 + k20 := 0 + k21 := 0 + k22 := 0 + k23 := 0 + for i := range maxCount { + nc := Case[v1.Container]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Image = c1.expected + nc.dst.Image = c1.dst + nc.src.Image = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicSliceString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Command = c2.expected + nc.dst.Command = c2.dst + nc.src.Command = c2.src + if i/len(cs3) > k3 { + cs3 = constructAtomicSliceString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Args = c3.expected + nc.dst.Args = c3.dst + nc.src.Args = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.WorkingDir = c4.expected + nc.dst.WorkingDir = c4.dst + nc.src.WorkingDir = c4.src + if i/len(cs5) > k5 { + cs5 = constructMapSliceContainerPort(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.Ports = c5.expected + nc.dst.Ports = c5.dst + nc.src.Ports = c5.src + if i/len(cs6) > k6 { + cs6 = constructAtomicSliceEnvFromSource(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.EnvFrom = c6.expected + nc.dst.EnvFrom = c6.dst + nc.src.EnvFrom = c6.src + if i/len(cs7) > k7 { + cs7 = constructMapSliceEnvVar(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.Env = c7.expected + nc.dst.Env = c7.dst + nc.src.Env = c7.src + if i/len(cs8) > k8 { + cs8 = constructResourceRequirements(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.Resources = c8.expected + nc.dst.Resources = c8.dst + nc.src.Resources = c8.src + if i/len(cs9) > k9 { + cs9 = constructAtomicSliceContainerResizePolicy(NoLimit) + k9 += 1 + } + c9 := 
&cs9[i%len(cs9)] + nc.expected.ResizePolicy = c9.expected + nc.dst.ResizePolicy = c9.dst + nc.src.ResizePolicy = c9.src + if i/len(cs10) > k10 { + cs10 = constructPointerContainerRestartPolicy(NoLimit) + k10 += 1 + } + c10 := &cs10[i%len(cs10)] + nc.expected.RestartPolicy = c10.expected + nc.dst.RestartPolicy = c10.dst + nc.src.RestartPolicy = c10.src + if i/len(cs11) > k11 { + cs11 = constructMapSliceVolumeMount(NoLimit) + k11 += 1 + } + c11 := &cs11[i%len(cs11)] + nc.expected.VolumeMounts = c11.expected + nc.dst.VolumeMounts = c11.dst + nc.src.VolumeMounts = c11.src + if i/len(cs12) > k12 { + cs12 = constructMapSliceVolumeDevice(NoLimit) + k12 += 1 + } + c12 := &cs12[i%len(cs12)] + nc.expected.VolumeDevices = c12.expected + nc.dst.VolumeDevices = c12.dst + nc.src.VolumeDevices = c12.src + if i/len(cs13) > k13 { + cs13 = constructPointerProbe(NoLimit) + k13 += 1 + } + c13 := &cs13[i%len(cs13)] + nc.expected.LivenessProbe = c13.expected + nc.dst.LivenessProbe = c13.dst + nc.src.LivenessProbe = c13.src + if i/len(cs14) > k14 { + cs14 = constructPointerProbe(NoLimit) + k14 += 1 + } + c14 := &cs14[i%len(cs14)] + nc.expected.ReadinessProbe = c14.expected + nc.dst.ReadinessProbe = c14.dst + nc.src.ReadinessProbe = c14.src + if i/len(cs15) > k15 { + cs15 = constructPointerProbe(NoLimit) + k15 += 1 + } + c15 := &cs15[i%len(cs15)] + nc.expected.StartupProbe = c15.expected + nc.dst.StartupProbe = c15.dst + nc.src.StartupProbe = c15.src + if i/len(cs16) > k16 { + cs16 = constructPointerLifecycle(NoLimit) + k16 += 1 + } + c16 := &cs16[i%len(cs16)] + nc.expected.Lifecycle = c16.expected + nc.dst.Lifecycle = c16.dst + nc.src.Lifecycle = c16.src + if i/len(cs17) > k17 { + cs17 = constructString(NoLimit) + k17 += 1 + } + c17 := &cs17[i%len(cs17)] + nc.expected.TerminationMessagePath = c17.expected + nc.dst.TerminationMessagePath = c17.dst + nc.src.TerminationMessagePath = c17.src + if i/len(cs18) > k18 { + cs18 = constructTerminationMessagePolicy(NoLimit) + k18 += 1 + } + c18 := 
&cs18[i%len(cs18)] + nc.expected.TerminationMessagePolicy = c18.expected + nc.dst.TerminationMessagePolicy = c18.dst + nc.src.TerminationMessagePolicy = c18.src + if i/len(cs19) > k19 { + cs19 = constructPullPolicy(NoLimit) + k19 += 1 + } + c19 := &cs19[i%len(cs19)] + nc.expected.ImagePullPolicy = c19.expected + nc.dst.ImagePullPolicy = c19.dst + nc.src.ImagePullPolicy = c19.src + if i/len(cs20) > k20 { + cs20 = constructPointerSecurityContext(NoLimit) + k20 += 1 + } + c20 := &cs20[i%len(cs20)] + nc.expected.SecurityContext = c20.expected + nc.dst.SecurityContext = c20.dst + nc.src.SecurityContext = c20.src + if i/len(cs21) > k21 { + cs21 = constructBool(NoLimit) + k21 += 1 + } + c21 := &cs21[i%len(cs21)] + nc.expected.Stdin = c21.expected + nc.dst.Stdin = c21.dst + nc.src.Stdin = c21.src + if i/len(cs22) > k22 { + cs22 = constructBool(NoLimit) + k22 += 1 + } + c22 := &cs22[i%len(cs22)] + nc.expected.StdinOnce = c22.expected + nc.dst.StdinOnce = c22.dst + nc.src.StdinOnce = c22.src + if i/len(cs23) > k23 { + cs23 = constructBool(NoLimit) + k23 += 1 + } + c23 := &cs23[i%len(cs23)] + nc.expected.TTY = c23.expected + nc.dst.TTY = c23.dst + nc.src.TTY = c23.src + cases = append(cases, nc) + } + return cases +} +func constructMapSliceContainerPort(p Policy) []Case[[]v1.ContainerPort] { + cases := []Case[[]v1.ContainerPort]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructContainerPortIgnore_containerPort_protocol(NoLimit) + var nc Case[[]v1.ContainerPort] + nc = Case[[]v1.ContainerPort]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.ContainerPort]{} + srcs := []v1.ContainerPort{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = 
append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.ContainerPort]{} + srcs = []v1.ContainerPort{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.ContainerPort]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.ContainerPort]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructContainerPortIgnore_containerPort_protocol(p Policy) []Case[v1.ContainerPort] { + cases := []Case[v1.ContainerPort]{} + cs0 := constructString(NoLimit) + cs1 := constructInt32(NoLimit) + cs2 := constructInt32(NoNotEqual | NoZero | NoNil) + cs3 := constructProtocol(NoNotEqual | NoZero | NoNil) + cs4 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := Case[v1.ContainerPort]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructInt32(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.HostPort = c1.expected + nc.dst.HostPort = c1.dst + nc.src.HostPort = c1.src + if i/len(cs2) > k2 { + cs2 = constructInt32(NoNotEqual | NoZero | NoNil) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ContainerPort = c2.expected + nc.dst.ContainerPort = c2.dst + nc.src.ContainerPort = c2.src + if 
i/len(cs3) > k3 { + cs3 = constructProtocol(NoNotEqual | NoZero | NoNil) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Protocol = c3.expected + nc.dst.Protocol = c3.dst + nc.src.Protocol = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.HostIP = c4.expected + nc.dst.HostIP = c4.dst + nc.src.HostIP = c4.src + cases = append(cases, nc) + } + return cases +} +func constructProtocol(p Policy) []Case[v1.Protocol] { + cases := []Case[v1.Protocol]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.Protocol]{ + expected: v1.Protocol(c.expected), + dst: v1.Protocol(c.dst), + src: v1.Protocol(c.src), + }) + } + return cases +} +func constructAtomicSliceEnvFromSource(p Policy) []Case[[]v1.EnvFromSource] { + cases := []Case[[]v1.EnvFromSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructEnvFromSource(NoLimit) + var nc Case[[]v1.EnvFromSource] + nc = Case[[]v1.EnvFromSource]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.EnvFromSource]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.EnvFromSource{} + cases = append(cases, nc) + return cases +} +func constructEnvFromSource(p Policy) []Case[v1.EnvFromSource] { + cases := []Case[v1.EnvFromSource]{} + cs0 := constructString(NoLimit) + cs1 := constructPointerConfigMapEnvSource(NoLimit) + cs2 := constructPointerSecretEnvSource(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.EnvFromSource]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Prefix = c0.expected + nc.dst.Prefix = c0.dst + nc.src.Prefix = c0.src + if i/len(cs1) > k1 { + cs1 = 
constructPointerConfigMapEnvSource(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ConfigMapRef = c1.expected + nc.dst.ConfigMapRef = c1.dst + nc.src.ConfigMapRef = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerSecretEnvSource(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.SecretRef = c2.expected + nc.dst.SecretRef = c2.dst + nc.src.SecretRef = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerConfigMapEnvSource(p Policy) []Case[*v1.ConfigMapEnvSource] { + cases := []Case[*v1.ConfigMapEnvSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructConfigMapEnvSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ConfigMapEnvSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructConfigMapEnvSource(p Policy) []Case[v1.ConfigMapEnvSource] { + cases := []Case[v1.ConfigMapEnvSource]{} + cs0 := constructAtomicLocalObjectReference(NoLimit) + cs1 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.ConfigMapEnvSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicLocalObjectReference(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LocalObjectReference = c0.expected + nc.dst.LocalObjectReference = c0.dst + nc.src.LocalObjectReference = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerBool(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Optional = c1.expected + nc.dst.Optional = c1.dst + nc.src.Optional = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerSecretEnvSource(p Policy) []Case[*v1.SecretEnvSource] { + cases := []Case[*v1.SecretEnvSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSecretEnvSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SecretEnvSource]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + 
return cases +} +func constructSecretEnvSource(p Policy) []Case[v1.SecretEnvSource] { + cases := []Case[v1.SecretEnvSource]{} + cs0 := constructAtomicLocalObjectReference(NoLimit) + cs1 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.SecretEnvSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicLocalObjectReference(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LocalObjectReference = c0.expected + nc.dst.LocalObjectReference = c0.dst + nc.src.LocalObjectReference = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerBool(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Optional = c1.expected + nc.dst.Optional = c1.dst + nc.src.Optional = c1.src + cases = append(cases, nc) + } + return cases +} +func constructMapSliceEnvVar(p Policy) []Case[[]v1.EnvVar] { + cases := []Case[[]v1.EnvVar]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructEnvVarIgnore_name(NoLimit) + var nc Case[[]v1.EnvVar] + nc = Case[[]v1.EnvVar]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.EnvVar]{} + srcs := []v1.EnvVar{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.EnvVar]{} + srcs = []v1.EnvVar{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.EnvVar]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.EnvVar]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructEnvVarIgnore_name(p Policy) []Case[v1.EnvVar] { + cases := []Case[v1.EnvVar]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + cs1 := constructString(NoLimit) + cs2 := constructPointerEnvVarSource(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.EnvVar]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Value = c1.expected + nc.dst.Value = c1.dst + nc.src.Value = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerEnvVarSource(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ValueFrom = c2.expected + nc.dst.ValueFrom = c2.dst + nc.src.ValueFrom = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerEnvVarSource(p Policy) []Case[*v1.EnvVarSource] { + cases := []Case[*v1.EnvVarSource]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructEnvVarSource(p) + for _, c := range cs { + cases = append(cases, Case[*v1.EnvVarSource]{ + expected: &c.expected, + dst: 
&c.dst, + src: &c.src, + }) + } + return cases +} +func constructEnvVarSource(p Policy) []Case[v1.EnvVarSource] { + cases := []Case[v1.EnvVarSource]{} + cs0 := constructAtomicPointerObjectFieldSelector(NoLimit) + cs1 := constructAtomicPointerResourceFieldSelector(NoLimit) + cs2 := constructAtomicPointerConfigMapKeySelector(NoLimit) + cs3 := constructAtomicPointerSecretKeySelector(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.EnvVarSource]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicPointerObjectFieldSelector(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.FieldRef = c0.expected + nc.dst.FieldRef = c0.dst + nc.src.FieldRef = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicPointerResourceFieldSelector(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ResourceFieldRef = c1.expected + nc.dst.ResourceFieldRef = c1.dst + nc.src.ResourceFieldRef = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicPointerConfigMapKeySelector(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ConfigMapKeyRef = c2.expected + nc.dst.ConfigMapKeyRef = c2.dst + nc.src.ConfigMapKeyRef = c2.src + if i/len(cs3) > k3 { + cs3 = constructAtomicPointerSecretKeySelector(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.SecretKeyRef = c3.expected + nc.dst.SecretKeyRef = c3.dst + nc.src.SecretKeyRef = c3.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicPointerConfigMapKeySelector(p Policy) []Case[*v1.ConfigMapKeySelector] { + cases := []Case[*v1.ConfigMapKeySelector]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicConfigMapKeySelector(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ConfigMapKeySelector]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAtomicConfigMapKeySelector(p Policy) 
[]Case[v1.ConfigMapKeySelector] { + cases := []Case[v1.ConfigMapKeySelector]{} + cs0 := constructAtomicLocalObjectReference(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.ConfigMapKeySelector]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicLocalObjectReference(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LocalObjectReference = c0.src + nc.dst.LocalObjectReference = c0.dst + nc.src.LocalObjectReference = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Key = c1.src + nc.dst.Key = c1.dst + nc.src.Key = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Optional = c2.src + nc.dst.Optional = c2.dst + nc.src.Optional = c2.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicPointerSecretKeySelector(p Policy) []Case[*v1.SecretKeySelector] { + cases := []Case[*v1.SecretKeySelector]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicSecretKeySelector(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SecretKeySelector]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAtomicSecretKeySelector(p Policy) []Case[v1.SecretKeySelector] { + cases := []Case[v1.SecretKeySelector]{} + cs0 := constructAtomicLocalObjectReference(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.SecretKeySelector]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicLocalObjectReference(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LocalObjectReference = c0.src + nc.dst.LocalObjectReference = c0.dst + 
nc.src.LocalObjectReference = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Key = c1.src + nc.dst.Key = c1.dst + nc.src.Key = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerBool(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Optional = c2.src + nc.dst.Optional = c2.dst + nc.src.Optional = c2.src + cases = append(cases, nc) + } + return cases +} +func constructResourceRequirements(p Policy) []Case[v1.ResourceRequirements] { + cases := []Case[v1.ResourceRequirements]{} + cs0 := constructResourceList(NoLimit) + cs1 := constructResourceList(NoLimit) + cs2 := constructMapSliceResourceClaim(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.ResourceRequirements]{} + if i/len(cs0) > k0 { + cs0 = constructResourceList(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Limits = c0.expected + nc.dst.Limits = c0.dst + nc.src.Limits = c0.src + if i/len(cs1) > k1 { + cs1 = constructResourceList(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Requests = c1.expected + nc.dst.Requests = c1.dst + nc.src.Requests = c1.src + if i/len(cs2) > k2 { + cs2 = constructMapSliceResourceClaim(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Claims = c2.expected + nc.dst.Claims = c2.dst + nc.src.Claims = c2.src + cases = append(cases, nc) + } + return cases +} +func constructMapSliceResourceClaim(p Policy) []Case[[]v1.ResourceClaim] { + cases := []Case[[]v1.ResourceClaim]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructResourceClaimIgnore_name(NoLimit) + var nc Case[[]v1.ResourceClaim] + nc = Case[[]v1.ResourceClaim]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.ResourceClaim]{} + srcs := []v1.ResourceClaim{} + 
for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.ResourceClaim]{} + srcs = []v1.ResourceClaim{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.ResourceClaim]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.ResourceClaim]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructResourceClaimIgnore_name(p Policy) []Case[v1.ResourceClaim] { + cases := []Case[v1.ResourceClaim]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.ResourceClaim]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Request = c1.expected + nc.dst.Request = c1.dst + nc.src.Request = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceContainerResizePolicy(p Policy) []Case[[]v1.ContainerResizePolicy] { + cases := []Case[[]v1.ContainerResizePolicy]{ 
+ { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructContainerResizePolicy(NoLimit) + var nc Case[[]v1.ContainerResizePolicy] + nc = Case[[]v1.ContainerResizePolicy]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.ContainerResizePolicy]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.ContainerResizePolicy{} + cases = append(cases, nc) + return cases +} +func constructContainerResizePolicy(p Policy) []Case[v1.ContainerResizePolicy] { + cases := []Case[v1.ContainerResizePolicy]{} + cs0 := constructResourceName(NoLimit) + cs1 := constructResourceResizeRestartPolicy(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.ContainerResizePolicy]{} + if i/len(cs0) > k0 { + cs0 = constructResourceName(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.ResourceName = c0.expected + nc.dst.ResourceName = c0.dst + nc.src.ResourceName = c0.src + if i/len(cs1) > k1 { + cs1 = constructResourceResizeRestartPolicy(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.RestartPolicy = c1.expected + nc.dst.RestartPolicy = c1.dst + nc.src.RestartPolicy = c1.src + cases = append(cases, nc) + } + return cases +} +func constructResourceName(p Policy) []Case[v1.ResourceName] { + cases := []Case[v1.ResourceName]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.ResourceName]{ + expected: v1.ResourceName(c.expected), + dst: v1.ResourceName(c.dst), + src: v1.ResourceName(c.src), + }) + } + return cases +} +func constructResourceResizeRestartPolicy(p Policy) []Case[v1.ResourceResizeRestartPolicy] { + cases := []Case[v1.ResourceResizeRestartPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, 
Case[v1.ResourceResizeRestartPolicy]{ + expected: v1.ResourceResizeRestartPolicy(c.expected), + dst: v1.ResourceResizeRestartPolicy(c.dst), + src: v1.ResourceResizeRestartPolicy(c.src), + }) + } + return cases +} +func constructPointerContainerRestartPolicy(p Policy) []Case[*v1.ContainerRestartPolicy] { + cases := []Case[*v1.ContainerRestartPolicy]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructContainerRestartPolicy(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ContainerRestartPolicy]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructContainerRestartPolicy(p Policy) []Case[v1.ContainerRestartPolicy] { + cases := []Case[v1.ContainerRestartPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.ContainerRestartPolicy]{ + expected: v1.ContainerRestartPolicy(c.expected), + dst: v1.ContainerRestartPolicy(c.dst), + src: v1.ContainerRestartPolicy(c.src), + }) + } + return cases +} +func constructMapSliceVolumeMount(p Policy) []Case[[]v1.VolumeMount] { + cases := []Case[[]v1.VolumeMount]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructVolumeMountIgnore_mountPath(NoLimit) + var nc Case[[]v1.VolumeMount] + nc = Case[[]v1.VolumeMount]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.VolumeMount]{} + srcs := []v1.VolumeMount{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.VolumeMount]{} + srcs = []v1.VolumeMount{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.VolumeMount]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.VolumeMount]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructVolumeMountIgnore_mountPath(p Policy) []Case[v1.VolumeMount] { + cases := []Case[v1.VolumeMount]{} + cs0 := constructString(NoLimit) + cs1 := constructBool(NoLimit) + cs2 := constructPointerRecursiveReadOnlyMode(NoLimit) + cs3 := constructString(NoNotEqual | NoZero | NoNil) + cs4 := constructString(NoLimit) + cs5 := constructPointerMountPropagationMode(NoLimit) + cs6 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + for i := range maxCount { + nc := Case[v1.VolumeMount]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructBool(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ReadOnly = c1.expected + nc.dst.ReadOnly = c1.dst + nc.src.ReadOnly = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerRecursiveReadOnlyMode(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.RecursiveReadOnly = c2.expected + nc.dst.RecursiveReadOnly = c2.dst + nc.src.RecursiveReadOnly = c2.src + if i/len(cs3) > k3 { + cs3 = 
constructString(NoNotEqual | NoZero | NoNil) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.MountPath = c3.expected + nc.dst.MountPath = c3.dst + nc.src.MountPath = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.SubPath = c4.expected + nc.dst.SubPath = c4.dst + nc.src.SubPath = c4.src + if i/len(cs5) > k5 { + cs5 = constructPointerMountPropagationMode(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.MountPropagation = c5.expected + nc.dst.MountPropagation = c5.dst + nc.src.MountPropagation = c5.src + if i/len(cs6) > k6 { + cs6 = constructString(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.SubPathExpr = c6.expected + nc.dst.SubPathExpr = c6.dst + nc.src.SubPathExpr = c6.src + cases = append(cases, nc) + } + return cases +} +func constructPointerRecursiveReadOnlyMode(p Policy) []Case[*v1.RecursiveReadOnlyMode] { + cases := []Case[*v1.RecursiveReadOnlyMode]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructRecursiveReadOnlyMode(p) + for _, c := range cs { + cases = append(cases, Case[*v1.RecursiveReadOnlyMode]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructRecursiveReadOnlyMode(p Policy) []Case[v1.RecursiveReadOnlyMode] { + cases := []Case[v1.RecursiveReadOnlyMode]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.RecursiveReadOnlyMode]{ + expected: v1.RecursiveReadOnlyMode(c.expected), + dst: v1.RecursiveReadOnlyMode(c.dst), + src: v1.RecursiveReadOnlyMode(c.src), + }) + } + return cases +} +func constructPointerMountPropagationMode(p Policy) []Case[*v1.MountPropagationMode] { + cases := []Case[*v1.MountPropagationMode]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructMountPropagationMode(p) + for _, c := range cs { + cases = append(cases, Case[*v1.MountPropagationMode]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } 
+ return cases +} +func constructMountPropagationMode(p Policy) []Case[v1.MountPropagationMode] { + cases := []Case[v1.MountPropagationMode]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.MountPropagationMode]{ + expected: v1.MountPropagationMode(c.expected), + dst: v1.MountPropagationMode(c.dst), + src: v1.MountPropagationMode(c.src), + }) + } + return cases +} +func constructMapSliceVolumeDevice(p Policy) []Case[[]v1.VolumeDevice] { + cases := []Case[[]v1.VolumeDevice]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructVolumeDeviceIgnore_devicePath(NoLimit) + var nc Case[[]v1.VolumeDevice] + nc = Case[[]v1.VolumeDevice]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.VolumeDevice]{} + srcs := []v1.VolumeDevice{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.VolumeDevice]{} + srcs = []v1.VolumeDevice{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.VolumeDevice]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.VolumeDevice]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructVolumeDeviceIgnore_devicePath(p Policy) []Case[v1.VolumeDevice] { + cases := []Case[v1.VolumeDevice]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoNotEqual | NoZero | NoNil) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.VolumeDevice]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoNotEqual | NoZero | NoNil) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.DevicePath = c1.expected + nc.dst.DevicePath = c1.dst + nc.src.DevicePath = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerProbe(p Policy) []Case[*v1.Probe] { + cases := []Case[*v1.Probe]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructProbe(p) + for _, c := range cs { + cases = append(cases, Case[*v1.Probe]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructProbe(p Policy) []Case[v1.Probe] { + cases := []Case[v1.Probe]{} + cs0 := constructProbeHandler(NoLimit) + cs1 := constructInt32(NoLimit) + cs2 := constructInt32(NoLimit) + cs3 := constructInt32(NoLimit) + cs4 := constructInt32(NoLimit) + cs5 := constructInt32(NoLimit) + cs6 := constructPointerInt64(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + for i := range maxCount { + nc 
:= Case[v1.Probe]{} + if i/len(cs0) > k0 { + cs0 = constructProbeHandler(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.ProbeHandler = c0.expected + nc.dst.ProbeHandler = c0.dst + nc.src.ProbeHandler = c0.src + if i/len(cs1) > k1 { + cs1 = constructInt32(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.InitialDelaySeconds = c1.expected + nc.dst.InitialDelaySeconds = c1.dst + nc.src.InitialDelaySeconds = c1.src + if i/len(cs2) > k2 { + cs2 = constructInt32(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.TimeoutSeconds = c2.expected + nc.dst.TimeoutSeconds = c2.dst + nc.src.TimeoutSeconds = c2.src + if i/len(cs3) > k3 { + cs3 = constructInt32(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.PeriodSeconds = c3.expected + nc.dst.PeriodSeconds = c3.dst + nc.src.PeriodSeconds = c3.src + if i/len(cs4) > k4 { + cs4 = constructInt32(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.SuccessThreshold = c4.expected + nc.dst.SuccessThreshold = c4.dst + nc.src.SuccessThreshold = c4.src + if i/len(cs5) > k5 { + cs5 = constructInt32(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.FailureThreshold = c5.expected + nc.dst.FailureThreshold = c5.dst + nc.src.FailureThreshold = c5.src + if i/len(cs6) > k6 { + cs6 = constructPointerInt64(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.TerminationGracePeriodSeconds = c6.expected + nc.dst.TerminationGracePeriodSeconds = c6.dst + nc.src.TerminationGracePeriodSeconds = c6.src + cases = append(cases, nc) + } + return cases +} +func constructProbeHandler(p Policy) []Case[v1.ProbeHandler] { + cases := []Case[v1.ProbeHandler]{} + cs0 := constructPointerExecAction(NoLimit) + cs1 := constructPointerHTTPGetAction(NoLimit) + cs2 := constructPointerTCPSocketAction(NoLimit) + cs3 := constructPointerGRPCAction(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + 
nc := Case[v1.ProbeHandler]{} + if i/len(cs0) > k0 { + cs0 = constructPointerExecAction(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Exec = c0.expected + nc.dst.Exec = c0.dst + nc.src.Exec = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerHTTPGetAction(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.HTTPGet = c1.expected + nc.dst.HTTPGet = c1.dst + nc.src.HTTPGet = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerTCPSocketAction(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.TCPSocket = c2.expected + nc.dst.TCPSocket = c2.dst + nc.src.TCPSocket = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerGRPCAction(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.GRPC = c3.expected + nc.dst.GRPC = c3.dst + nc.src.GRPC = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerExecAction(p Policy) []Case[*v1.ExecAction] { + cases := []Case[*v1.ExecAction]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructExecAction(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ExecAction]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructExecAction(p Policy) []Case[v1.ExecAction] { + cases := []Case[v1.ExecAction]{} + cs0 := constructAtomicSliceString(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.ExecAction]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Command = c0.expected + nc.dst.Command = c0.dst + nc.src.Command = c0.src + cases = append(cases, nc) + } + return cases +} +func constructPointerHTTPGetAction(p Policy) []Case[*v1.HTTPGetAction] { + cases := []Case[*v1.HTTPGetAction]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructHTTPGetAction(p) + for _, c := range cs { + cases = append(cases, Case[*v1.HTTPGetAction]{ + expected: &c.expected, + dst: &c.dst, + 
src: &c.src, + }) + } + return cases +} +func constructHTTPGetAction(p Policy) []Case[v1.HTTPGetAction] { + cases := []Case[v1.HTTPGetAction]{} + cs0 := constructString(NoLimit) + cs1 := constructIntOrString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructURIScheme(NoLimit) + cs4 := constructAtomicSliceHTTPHeader(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + for i := range maxCount { + nc := Case[v1.HTTPGetAction]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Path = c0.expected + nc.dst.Path = c0.dst + nc.src.Path = c0.src + if i/len(cs1) > k1 { + cs1 = constructIntOrString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Port = c1.expected + nc.dst.Port = c1.dst + nc.src.Port = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Host = c2.expected + nc.dst.Host = c2.dst + nc.src.Host = c2.src + if i/len(cs3) > k3 { + cs3 = constructURIScheme(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Scheme = c3.expected + nc.dst.Scheme = c3.dst + nc.src.Scheme = c3.src + if i/len(cs4) > k4 { + cs4 = constructAtomicSliceHTTPHeader(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.HTTPHeaders = c4.expected + nc.dst.HTTPHeaders = c4.dst + nc.src.HTTPHeaders = c4.src + cases = append(cases, nc) + } + return cases +} +func constructIntOrString(p Policy) []Case[intstr.IntOrString] { + cases := []Case[intstr.IntOrString]{} + cs0 := constructType(NoLimit) + cs1 := constructInt32(NoLimit) + cs2 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[intstr.IntOrString]{} + if i/len(cs0) > k0 { + cs0 = constructType(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Type = c0.expected + nc.dst.Type = 
c0.dst + nc.src.Type = c0.src + if i/len(cs1) > k1 { + cs1 = constructInt32(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.IntVal = c1.expected + nc.dst.IntVal = c1.dst + nc.src.IntVal = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.StrVal = c2.expected + nc.dst.StrVal = c2.dst + nc.src.StrVal = c2.src + cases = append(cases, nc) + } + return cases +} +func constructType(p Policy) []Case[intstr.Type] { + cases := []Case[intstr.Type]{} + cs := constructInt64(p) + for _, c := range cs { + cases = append(cases, Case[intstr.Type]{ + expected: intstr.Type(c.expected), + dst: intstr.Type(c.dst), + src: intstr.Type(c.src), + }) + } + return cases +} +func constructURIScheme(p Policy) []Case[v1.URIScheme] { + cases := []Case[v1.URIScheme]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.URIScheme]{ + expected: v1.URIScheme(c.expected), + dst: v1.URIScheme(c.dst), + src: v1.URIScheme(c.src), + }) + } + return cases +} +func constructAtomicSliceHTTPHeader(p Policy) []Case[[]v1.HTTPHeader] { + cases := []Case[[]v1.HTTPHeader]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructHTTPHeader(NoLimit) + var nc Case[[]v1.HTTPHeader] + nc = Case[[]v1.HTTPHeader]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.HTTPHeader]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.HTTPHeader{} + cases = append(cases, nc) + return cases +} +func constructHTTPHeader(p Policy) []Case[v1.HTTPHeader] { + cases := []Case[v1.HTTPHeader]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.HTTPHeader]{} + if i/len(cs0) > k0 { + 
cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Value = c1.expected + nc.dst.Value = c1.dst + nc.src.Value = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerTCPSocketAction(p Policy) []Case[*v1.TCPSocketAction] { + cases := []Case[*v1.TCPSocketAction]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructTCPSocketAction(p) + for _, c := range cs { + cases = append(cases, Case[*v1.TCPSocketAction]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructTCPSocketAction(p Policy) []Case[v1.TCPSocketAction] { + cases := []Case[v1.TCPSocketAction]{} + cs0 := constructIntOrString(NoLimit) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.TCPSocketAction]{} + if i/len(cs0) > k0 { + cs0 = constructIntOrString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Port = c0.expected + nc.dst.Port = c0.dst + nc.src.Port = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Host = c1.expected + nc.dst.Host = c1.dst + nc.src.Host = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerGRPCAction(p Policy) []Case[*v1.GRPCAction] { + cases := []Case[*v1.GRPCAction]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructGRPCAction(p) + for _, c := range cs { + cases = append(cases, Case[*v1.GRPCAction]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructGRPCAction(p Policy) []Case[v1.GRPCAction] { + cases := []Case[v1.GRPCAction]{} + cs0 := constructInt32(NoLimit) + cs1 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), 
+ len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.GRPCAction]{} + if i/len(cs0) > k0 { + cs0 = constructInt32(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Port = c0.expected + nc.dst.Port = c0.dst + nc.src.Port = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Service = c1.expected + nc.dst.Service = c1.dst + nc.src.Service = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerLifecycle(p Policy) []Case[*v1.Lifecycle] { + cases := []Case[*v1.Lifecycle]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructLifecycle(p) + for _, c := range cs { + cases = append(cases, Case[*v1.Lifecycle]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructLifecycle(p Policy) []Case[v1.Lifecycle] { + cases := []Case[v1.Lifecycle]{} + cs0 := constructPointerLifecycleHandler(NoLimit) + cs1 := constructPointerLifecycleHandler(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.Lifecycle]{} + if i/len(cs0) > k0 { + cs0 = constructPointerLifecycleHandler(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.PostStart = c0.expected + nc.dst.PostStart = c0.dst + nc.src.PostStart = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerLifecycleHandler(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.PreStop = c1.expected + nc.dst.PreStop = c1.dst + nc.src.PreStop = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerLifecycleHandler(p Policy) []Case[*v1.LifecycleHandler] { + cases := []Case[*v1.LifecycleHandler]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructLifecycleHandler(p) + for _, c := range cs { + cases = append(cases, Case[*v1.LifecycleHandler]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func 
constructLifecycleHandler(p Policy) []Case[v1.LifecycleHandler] { + cases := []Case[v1.LifecycleHandler]{} + cs0 := constructPointerExecAction(NoLimit) + cs1 := constructPointerHTTPGetAction(NoLimit) + cs2 := constructPointerTCPSocketAction(NoLimit) + cs3 := constructPointerSleepAction(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.LifecycleHandler]{} + if i/len(cs0) > k0 { + cs0 = constructPointerExecAction(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Exec = c0.expected + nc.dst.Exec = c0.dst + nc.src.Exec = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerHTTPGetAction(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.HTTPGet = c1.expected + nc.dst.HTTPGet = c1.dst + nc.src.HTTPGet = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerTCPSocketAction(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.TCPSocket = c2.expected + nc.dst.TCPSocket = c2.dst + nc.src.TCPSocket = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerSleepAction(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Sleep = c3.expected + nc.dst.Sleep = c3.dst + nc.src.Sleep = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerSleepAction(p Policy) []Case[*v1.SleepAction] { + cases := []Case[*v1.SleepAction]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSleepAction(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SleepAction]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructSleepAction(p Policy) []Case[v1.SleepAction] { + cases := []Case[v1.SleepAction]{} + cs0 := constructInt64(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.SleepAction]{} + if i/len(cs0) > k0 { + cs0 = constructInt64(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Seconds = 
c0.expected + nc.dst.Seconds = c0.dst + nc.src.Seconds = c0.src + cases = append(cases, nc) + } + return cases +} +func constructTerminationMessagePolicy(p Policy) []Case[v1.TerminationMessagePolicy] { + cases := []Case[v1.TerminationMessagePolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.TerminationMessagePolicy]{ + expected: v1.TerminationMessagePolicy(c.expected), + dst: v1.TerminationMessagePolicy(c.dst), + src: v1.TerminationMessagePolicy(c.src), + }) + } + return cases +} +func constructPointerSecurityContext(p Policy) []Case[*v1.SecurityContext] { + cases := []Case[*v1.SecurityContext]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSecurityContext(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SecurityContext]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructSecurityContext(p Policy) []Case[v1.SecurityContext] { + cases := []Case[v1.SecurityContext]{} + cs0 := constructPointerCapabilities(NoLimit) + cs1 := constructPointerBool(NoLimit) + cs2 := constructPointerSELinuxOptions(NoLimit) + cs3 := constructPointerWindowsSecurityContextOptions(NoLimit) + cs4 := constructPointerInt64(NoLimit) + cs5 := constructPointerInt64(NoLimit) + cs6 := constructPointerBool(NoLimit) + cs7 := constructPointerBool(NoLimit) + cs8 := constructPointerBool(NoLimit) + cs9 := constructPointerProcMountType(NoLimit) + cs10 := constructPointerSeccompProfile(NoLimit) + cs11 := constructPointerAppArmorProfile(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + len(cs10), + len(cs11), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + k10 := 0 + k11 := 0 + for i := range maxCount { + nc := Case[v1.SecurityContext]{} + if i/len(cs0) > k0 { + cs0 = constructPointerCapabilities(NoLimit) + k0 += 1 + } + c0 := 
&cs0[i%len(cs0)] + nc.expected.Capabilities = c0.expected + nc.dst.Capabilities = c0.dst + nc.src.Capabilities = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerBool(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Privileged = c1.expected + nc.dst.Privileged = c1.dst + nc.src.Privileged = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerSELinuxOptions(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.SELinuxOptions = c2.expected + nc.dst.SELinuxOptions = c2.dst + nc.src.SELinuxOptions = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerWindowsSecurityContextOptions(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.WindowsOptions = c3.expected + nc.dst.WindowsOptions = c3.dst + nc.src.WindowsOptions = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerInt64(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.RunAsUser = c4.expected + nc.dst.RunAsUser = c4.dst + nc.src.RunAsUser = c4.src + if i/len(cs5) > k5 { + cs5 = constructPointerInt64(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.RunAsGroup = c5.expected + nc.dst.RunAsGroup = c5.dst + nc.src.RunAsGroup = c5.src + if i/len(cs6) > k6 { + cs6 = constructPointerBool(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.RunAsNonRoot = c6.expected + nc.dst.RunAsNonRoot = c6.dst + nc.src.RunAsNonRoot = c6.src + if i/len(cs7) > k7 { + cs7 = constructPointerBool(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.ReadOnlyRootFilesystem = c7.expected + nc.dst.ReadOnlyRootFilesystem = c7.dst + nc.src.ReadOnlyRootFilesystem = c7.src + if i/len(cs8) > k8 { + cs8 = constructPointerBool(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.AllowPrivilegeEscalation = c8.expected + nc.dst.AllowPrivilegeEscalation = c8.dst + nc.src.AllowPrivilegeEscalation = c8.src + if i/len(cs9) > k9 { + cs9 = constructPointerProcMountType(NoLimit) + k9 += 1 + } + c9 := &cs9[i%len(cs9)] + nc.expected.ProcMount = c9.expected + 
nc.dst.ProcMount = c9.dst + nc.src.ProcMount = c9.src + if i/len(cs10) > k10 { + cs10 = constructPointerSeccompProfile(NoLimit) + k10 += 1 + } + c10 := &cs10[i%len(cs10)] + nc.expected.SeccompProfile = c10.expected + nc.dst.SeccompProfile = c10.dst + nc.src.SeccompProfile = c10.src + if i/len(cs11) > k11 { + cs11 = constructPointerAppArmorProfile(NoLimit) + k11 += 1 + } + c11 := &cs11[i%len(cs11)] + nc.expected.AppArmorProfile = c11.expected + nc.dst.AppArmorProfile = c11.dst + nc.src.AppArmorProfile = c11.src + cases = append(cases, nc) + } + return cases +} +func constructPointerCapabilities(p Policy) []Case[*v1.Capabilities] { + cases := []Case[*v1.Capabilities]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructCapabilities(p) + for _, c := range cs { + cases = append(cases, Case[*v1.Capabilities]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructCapabilities(p Policy) []Case[v1.Capabilities] { + cases := []Case[v1.Capabilities]{} + cs0 := constructAtomicSliceCapability(NoLimit) + cs1 := constructAtomicSliceCapability(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.Capabilities]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceCapability(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Add = c0.expected + nc.dst.Add = c0.dst + nc.src.Add = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceCapability(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Drop = c1.expected + nc.dst.Drop = c1.dst + nc.src.Drop = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceCapability(p Policy) []Case[[]v1.Capability] { + cases := []Case[[]v1.Capability]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructCapability(NoLimit) + var nc Case[[]v1.Capability] + nc = Case[[]v1.Capability]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + 
nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.Capability]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.Capability{} + cases = append(cases, nc) + return cases +} +func constructCapability(p Policy) []Case[v1.Capability] { + cases := []Case[v1.Capability]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.Capability]{ + expected: v1.Capability(c.expected), + dst: v1.Capability(c.dst), + src: v1.Capability(c.src), + }) + } + return cases +} +func constructPointerSELinuxOptions(p Policy) []Case[*v1.SELinuxOptions] { + cases := []Case[*v1.SELinuxOptions]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSELinuxOptions(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SELinuxOptions]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructSELinuxOptions(p Policy) []Case[v1.SELinuxOptions] { + cases := []Case[v1.SELinuxOptions]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.SELinuxOptions]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.User = c0.expected + nc.dst.User = c0.dst + nc.src.User = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Role = c1.expected + nc.dst.Role = c1.dst + nc.src.Role = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Type = c2.expected + nc.dst.Type = c2.dst + nc.src.Type = c2.src + if i/len(cs3) > k3 { + cs3 = constructString(NoLimit) + k3 += 1 + } + c3 := 
&cs3[i%len(cs3)] + nc.expected.Level = c3.expected + nc.dst.Level = c3.dst + nc.src.Level = c3.src + cases = append(cases, nc) + } + return cases +} +func constructPointerWindowsSecurityContextOptions(p Policy) []Case[*v1.WindowsSecurityContextOptions] { + cases := []Case[*v1.WindowsSecurityContextOptions]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructWindowsSecurityContextOptions(p) + for _, c := range cs { + cases = append(cases, Case[*v1.WindowsSecurityContextOptions]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructWindowsSecurityContextOptions(p Policy) []Case[v1.WindowsSecurityContextOptions] { + cases := []Case[v1.WindowsSecurityContextOptions]{} + cs0 := constructPointerString(NoLimit) + cs1 := constructPointerString(NoLimit) + cs2 := constructPointerString(NoLimit) + cs3 := constructPointerBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + for i := range maxCount { + nc := Case[v1.WindowsSecurityContextOptions]{} + if i/len(cs0) > k0 { + cs0 = constructPointerString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.GMSACredentialSpecName = c0.expected + nc.dst.GMSACredentialSpecName = c0.dst + nc.src.GMSACredentialSpecName = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.GMSACredentialSpec = c1.expected + nc.dst.GMSACredentialSpec = c1.dst + nc.src.GMSACredentialSpec = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.RunAsUserName = c2.expected + nc.dst.RunAsUserName = c2.dst + nc.src.RunAsUserName = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerBool(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.HostProcess = c3.expected + nc.dst.HostProcess = c3.dst + nc.src.HostProcess = c3.src + cases = append(cases, nc) + } + return cases 
+} +func constructPointerProcMountType(p Policy) []Case[*v1.ProcMountType] { + cases := []Case[*v1.ProcMountType]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructProcMountType(p) + for _, c := range cs { + cases = append(cases, Case[*v1.ProcMountType]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructProcMountType(p Policy) []Case[v1.ProcMountType] { + cases := []Case[v1.ProcMountType]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.ProcMountType]{ + expected: v1.ProcMountType(c.expected), + dst: v1.ProcMountType(c.dst), + src: v1.ProcMountType(c.src), + }) + } + return cases +} +func constructPointerSeccompProfile(p Policy) []Case[*v1.SeccompProfile] { + cases := []Case[*v1.SeccompProfile]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSeccompProfile(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SeccompProfile]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructSeccompProfile(p Policy) []Case[v1.SeccompProfile] { + cases := []Case[v1.SeccompProfile]{} + cs0 := constructSeccompProfileType(NoLimit) + cs1 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.SeccompProfile]{} + if i/len(cs0) > k0 { + cs0 = constructSeccompProfileType(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Type = c0.expected + nc.dst.Type = c0.dst + nc.src.Type = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.LocalhostProfile = c1.expected + nc.dst.LocalhostProfile = c1.dst + nc.src.LocalhostProfile = c1.src + cases = append(cases, nc) + } + return cases +} +func constructSeccompProfileType(p Policy) []Case[v1.SeccompProfileType] { + cases := []Case[v1.SeccompProfileType]{} + cs := constructString(p) + for _, c := range 
cs { + cases = append(cases, Case[v1.SeccompProfileType]{ + expected: v1.SeccompProfileType(c.expected), + dst: v1.SeccompProfileType(c.dst), + src: v1.SeccompProfileType(c.src), + }) + } + return cases +} +func constructPointerAppArmorProfile(p Policy) []Case[*v1.AppArmorProfile] { + cases := []Case[*v1.AppArmorProfile]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAppArmorProfile(p) + for _, c := range cs { + cases = append(cases, Case[*v1.AppArmorProfile]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAppArmorProfile(p Policy) []Case[v1.AppArmorProfile] { + cases := []Case[v1.AppArmorProfile]{} + cs0 := constructAppArmorProfileType(NoLimit) + cs1 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.AppArmorProfile]{} + if i/len(cs0) > k0 { + cs0 = constructAppArmorProfileType(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Type = c0.expected + nc.dst.Type = c0.dst + nc.src.Type = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.LocalhostProfile = c1.expected + nc.dst.LocalhostProfile = c1.dst + nc.src.LocalhostProfile = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAppArmorProfileType(p Policy) []Case[v1.AppArmorProfileType] { + cases := []Case[v1.AppArmorProfileType]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.AppArmorProfileType]{ + expected: v1.AppArmorProfileType(c.expected), + dst: v1.AppArmorProfileType(c.dst), + src: v1.AppArmorProfileType(c.src), + }) + } + return cases +} +func constructMapSliceEphemeralContainer(p Policy) []Case[[]v1.EphemeralContainer] { + cases := []Case[[]v1.EphemeralContainer]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructEphemeralContainerIgnore_name(NoLimit) + var nc 
Case[[]v1.EphemeralContainer] + nc = Case[[]v1.EphemeralContainer]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.EphemeralContainer]{} + srcs := []v1.EphemeralContainer{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.EphemeralContainer]{} + srcs = []v1.EphemeralContainer{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.EphemeralContainer]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.EphemeralContainer]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructEphemeralContainerIgnore_name(p Policy) []Case[v1.EphemeralContainer] { + cases := []Case[v1.EphemeralContainer]{} + cs0 := constructEphemeralContainerCommonIgnore_name(NoNotEqual | NoZero | NoNil) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.EphemeralContainer]{} + if i/len(cs0) > k0 { + cs0 = constructEphemeralContainerCommonIgnore_name(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.EphemeralContainerCommon = c0.expected + nc.dst.EphemeralContainerCommon = c0.dst + nc.src.EphemeralContainerCommon = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.TargetContainerName = c1.expected + nc.dst.TargetContainerName = c1.dst + nc.src.TargetContainerName = c1.src + cases = append(cases, nc) + } + return cases +} +func constructEphemeralContainerCommonIgnore_name(p Policy) []Case[v1.EphemeralContainerCommon] { + cases := []Case[v1.EphemeralContainerCommon]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + cs1 := constructString(NoLimit) + cs2 := constructAtomicSliceString(NoLimit) + cs3 := constructAtomicSliceString(NoLimit) + cs4 := constructString(NoLimit) + cs5 := constructMapSliceContainerPort(NoLimit) + cs6 := constructAtomicSliceEnvFromSource(NoLimit) + cs7 := constructMapSliceEnvVar(NoLimit) + cs8 := constructResourceRequirements(NoLimit) + cs9 := constructAtomicSliceContainerResizePolicy(NoLimit) + cs10 := constructPointerContainerRestartPolicy(NoLimit) + cs11 
:= constructMapSliceVolumeMount(NoLimit) + cs12 := constructMapSliceVolumeDevice(NoLimit) + cs13 := constructPointerProbe(NoLimit) + cs14 := constructPointerProbe(NoLimit) + cs15 := constructPointerProbe(NoLimit) + cs16 := constructPointerLifecycle(NoLimit) + cs17 := constructString(NoLimit) + cs18 := constructTerminationMessagePolicy(NoLimit) + cs19 := constructPullPolicy(NoLimit) + cs20 := constructPointerSecurityContext(NoLimit) + cs21 := constructBool(NoLimit) + cs22 := constructBool(NoLimit) + cs23 := constructBool(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + len(cs10), + len(cs11), + len(cs12), + len(cs13), + len(cs14), + len(cs15), + len(cs16), + len(cs17), + len(cs18), + len(cs19), + len(cs20), + len(cs21), + len(cs22), + len(cs23), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + k10 := 0 + k11 := 0 + k12 := 0 + k13 := 0 + k14 := 0 + k15 := 0 + k16 := 0 + k17 := 0 + k18 := 0 + k19 := 0 + k20 := 0 + k21 := 0 + k22 := 0 + k23 := 0 + for i := range maxCount { + nc := Case[v1.EphemeralContainerCommon]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Image = c1.expected + nc.dst.Image = c1.dst + nc.src.Image = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicSliceString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Command = c2.expected + nc.dst.Command = c2.dst + nc.src.Command = c2.src + if i/len(cs3) > k3 { + cs3 = constructAtomicSliceString(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Args = c3.expected + nc.dst.Args = c3.dst + nc.src.Args = c3.src + if i/len(cs4) > k4 { + cs4 = constructString(NoLimit) + k4 += 
1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.WorkingDir = c4.expected + nc.dst.WorkingDir = c4.dst + nc.src.WorkingDir = c4.src + if i/len(cs5) > k5 { + cs5 = constructMapSliceContainerPort(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.Ports = c5.expected + nc.dst.Ports = c5.dst + nc.src.Ports = c5.src + if i/len(cs6) > k6 { + cs6 = constructAtomicSliceEnvFromSource(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.EnvFrom = c6.expected + nc.dst.EnvFrom = c6.dst + nc.src.EnvFrom = c6.src + if i/len(cs7) > k7 { + cs7 = constructMapSliceEnvVar(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.Env = c7.expected + nc.dst.Env = c7.dst + nc.src.Env = c7.src + if i/len(cs8) > k8 { + cs8 = constructResourceRequirements(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.Resources = c8.expected + nc.dst.Resources = c8.dst + nc.src.Resources = c8.src + if i/len(cs9) > k9 { + cs9 = constructAtomicSliceContainerResizePolicy(NoLimit) + k9 += 1 + } + c9 := &cs9[i%len(cs9)] + nc.expected.ResizePolicy = c9.expected + nc.dst.ResizePolicy = c9.dst + nc.src.ResizePolicy = c9.src + if i/len(cs10) > k10 { + cs10 = constructPointerContainerRestartPolicy(NoLimit) + k10 += 1 + } + c10 := &cs10[i%len(cs10)] + nc.expected.RestartPolicy = c10.expected + nc.dst.RestartPolicy = c10.dst + nc.src.RestartPolicy = c10.src + if i/len(cs11) > k11 { + cs11 = constructMapSliceVolumeMount(NoLimit) + k11 += 1 + } + c11 := &cs11[i%len(cs11)] + nc.expected.VolumeMounts = c11.expected + nc.dst.VolumeMounts = c11.dst + nc.src.VolumeMounts = c11.src + if i/len(cs12) > k12 { + cs12 = constructMapSliceVolumeDevice(NoLimit) + k12 += 1 + } + c12 := &cs12[i%len(cs12)] + nc.expected.VolumeDevices = c12.expected + nc.dst.VolumeDevices = c12.dst + nc.src.VolumeDevices = c12.src + if i/len(cs13) > k13 { + cs13 = constructPointerProbe(NoLimit) + k13 += 1 + } + c13 := &cs13[i%len(cs13)] + nc.expected.LivenessProbe = c13.expected + nc.dst.LivenessProbe = c13.dst + 
nc.src.LivenessProbe = c13.src + if i/len(cs14) > k14 { + cs14 = constructPointerProbe(NoLimit) + k14 += 1 + } + c14 := &cs14[i%len(cs14)] + nc.expected.ReadinessProbe = c14.expected + nc.dst.ReadinessProbe = c14.dst + nc.src.ReadinessProbe = c14.src + if i/len(cs15) > k15 { + cs15 = constructPointerProbe(NoLimit) + k15 += 1 + } + c15 := &cs15[i%len(cs15)] + nc.expected.StartupProbe = c15.expected + nc.dst.StartupProbe = c15.dst + nc.src.StartupProbe = c15.src + if i/len(cs16) > k16 { + cs16 = constructPointerLifecycle(NoLimit) + k16 += 1 + } + c16 := &cs16[i%len(cs16)] + nc.expected.Lifecycle = c16.expected + nc.dst.Lifecycle = c16.dst + nc.src.Lifecycle = c16.src + if i/len(cs17) > k17 { + cs17 = constructString(NoLimit) + k17 += 1 + } + c17 := &cs17[i%len(cs17)] + nc.expected.TerminationMessagePath = c17.expected + nc.dst.TerminationMessagePath = c17.dst + nc.src.TerminationMessagePath = c17.src + if i/len(cs18) > k18 { + cs18 = constructTerminationMessagePolicy(NoLimit) + k18 += 1 + } + c18 := &cs18[i%len(cs18)] + nc.expected.TerminationMessagePolicy = c18.expected + nc.dst.TerminationMessagePolicy = c18.dst + nc.src.TerminationMessagePolicy = c18.src + if i/len(cs19) > k19 { + cs19 = constructPullPolicy(NoLimit) + k19 += 1 + } + c19 := &cs19[i%len(cs19)] + nc.expected.ImagePullPolicy = c19.expected + nc.dst.ImagePullPolicy = c19.dst + nc.src.ImagePullPolicy = c19.src + if i/len(cs20) > k20 { + cs20 = constructPointerSecurityContext(NoLimit) + k20 += 1 + } + c20 := &cs20[i%len(cs20)] + nc.expected.SecurityContext = c20.expected + nc.dst.SecurityContext = c20.dst + nc.src.SecurityContext = c20.src + if i/len(cs21) > k21 { + cs21 = constructBool(NoLimit) + k21 += 1 + } + c21 := &cs21[i%len(cs21)] + nc.expected.Stdin = c21.expected + nc.dst.Stdin = c21.dst + nc.src.Stdin = c21.src + if i/len(cs22) > k22 { + cs22 = constructBool(NoLimit) + k22 += 1 + } + c22 := &cs22[i%len(cs22)] + nc.expected.StdinOnce = c22.expected + nc.dst.StdinOnce = c22.dst + nc.src.StdinOnce 
= c22.src + if i/len(cs23) > k23 { + cs23 = constructBool(NoLimit) + k23 += 1 + } + c23 := &cs23[i%len(cs23)] + nc.expected.TTY = c23.expected + nc.dst.TTY = c23.dst + nc.src.TTY = c23.src + cases = append(cases, nc) + } + return cases +} +func constructRestartPolicy(p Policy) []Case[v1.RestartPolicy] { + cases := []Case[v1.RestartPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.RestartPolicy]{ + expected: v1.RestartPolicy(c.expected), + dst: v1.RestartPolicy(c.dst), + src: v1.RestartPolicy(c.src), + }) + } + return cases +} +func constructDNSPolicy(p Policy) []Case[v1.DNSPolicy] { + cases := []Case[v1.DNSPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.DNSPolicy]{ + expected: v1.DNSPolicy(c.expected), + dst: v1.DNSPolicy(c.dst), + src: v1.DNSPolicy(c.src), + }) + } + return cases +} +func constructAtomicMapStringToString(p Policy) []Case[map[string]string] { + cases := []Case[map[string]string]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + keys := constructString(NoNil | NoZero | NoNotEqual) + vals := constructString(NoLimit) + keyIndex := 0 + var nc Case[map[string]string] + for _, val := range vals { + keyIndex += 1 + if keyIndex >= len(keys) { + keys = constructString(NoNil | NoZero | NoNotEqual) + keyIndex = 0 + } + key := keys[keyIndex] + nc = Case[map[string]string]{} + nc.expected = make(map[string]string) + nc.dst = make(map[string]string) + nc.src = make(map[string]string) + nc.expected[key.expected] = val.src + nc.dst[key.expected] = val.dst + nc.src[key.expected] = val.src + } + cases = append(cases, nc) + for _, val := range vals { + keyIndex += 1 + if keyIndex >= len(keys) { + keys = constructString(NoNil | NoZero | NoNotEqual) + keyIndex = 0 + } + key := keys[keyIndex] + nc = Case[map[string]string]{} + nc.expected = make(map[string]string) + nc.dst = make(map[string]string) + nc.src = make(map[string]string) + nc.expected[key.expected] = val.dst + 
nc.dst[key.expected] = val.dst + } + cases = append(cases, nc) + return cases +} +func constructPointerPodSecurityContext(p Policy) []Case[*v1.PodSecurityContext] { + cases := []Case[*v1.PodSecurityContext]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodSecurityContext(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PodSecurityContext]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPodSecurityContext(p Policy) []Case[v1.PodSecurityContext] { + cases := []Case[v1.PodSecurityContext]{} + cs0 := constructPointerSELinuxOptions(NoLimit) + cs1 := constructPointerWindowsSecurityContextOptions(NoLimit) + cs2 := constructPointerInt64(NoLimit) + cs3 := constructPointerInt64(NoLimit) + cs4 := constructPointerBool(NoLimit) + cs5 := constructAtomicSliceInt64(NoLimit) + cs6 := constructPointerSupplementalGroupsPolicy(NoLimit) + cs7 := constructPointerInt64(NoLimit) + cs8 := constructAtomicSliceSysctl(NoLimit) + cs9 := constructPointerPodFSGroupChangePolicy(NoLimit) + cs10 := constructPointerSeccompProfile(NoLimit) + cs11 := constructPointerAppArmorProfile(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + len(cs8), + len(cs9), + len(cs10), + len(cs11), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + k8 := 0 + k9 := 0 + k10 := 0 + k11 := 0 + for i := range maxCount { + nc := Case[v1.PodSecurityContext]{} + if i/len(cs0) > k0 { + cs0 = constructPointerSELinuxOptions(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.SELinuxOptions = c0.expected + nc.dst.SELinuxOptions = c0.dst + nc.src.SELinuxOptions = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerWindowsSecurityContextOptions(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.WindowsOptions = c1.expected + nc.dst.WindowsOptions = c1.dst + nc.src.WindowsOptions = c1.src + if i/len(cs2) > k2 { + cs2 = 
constructPointerInt64(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.RunAsUser = c2.expected + nc.dst.RunAsUser = c2.dst + nc.src.RunAsUser = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerInt64(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.RunAsGroup = c3.expected + nc.dst.RunAsGroup = c3.dst + nc.src.RunAsGroup = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerBool(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.RunAsNonRoot = c4.expected + nc.dst.RunAsNonRoot = c4.dst + nc.src.RunAsNonRoot = c4.src + if i/len(cs5) > k5 { + cs5 = constructAtomicSliceInt64(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.SupplementalGroups = c5.expected + nc.dst.SupplementalGroups = c5.dst + nc.src.SupplementalGroups = c5.src + if i/len(cs6) > k6 { + cs6 = constructPointerSupplementalGroupsPolicy(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.SupplementalGroupsPolicy = c6.expected + nc.dst.SupplementalGroupsPolicy = c6.dst + nc.src.SupplementalGroupsPolicy = c6.src + if i/len(cs7) > k7 { + cs7 = constructPointerInt64(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.FSGroup = c7.expected + nc.dst.FSGroup = c7.dst + nc.src.FSGroup = c7.src + if i/len(cs8) > k8 { + cs8 = constructAtomicSliceSysctl(NoLimit) + k8 += 1 + } + c8 := &cs8[i%len(cs8)] + nc.expected.Sysctls = c8.expected + nc.dst.Sysctls = c8.dst + nc.src.Sysctls = c8.src + if i/len(cs9) > k9 { + cs9 = constructPointerPodFSGroupChangePolicy(NoLimit) + k9 += 1 + } + c9 := &cs9[i%len(cs9)] + nc.expected.FSGroupChangePolicy = c9.expected + nc.dst.FSGroupChangePolicy = c9.dst + nc.src.FSGroupChangePolicy = c9.src + if i/len(cs10) > k10 { + cs10 = constructPointerSeccompProfile(NoLimit) + k10 += 1 + } + c10 := &cs10[i%len(cs10)] + nc.expected.SeccompProfile = c10.expected + nc.dst.SeccompProfile = c10.dst + nc.src.SeccompProfile = c10.src + if i/len(cs11) > k11 { + cs11 = constructPointerAppArmorProfile(NoLimit) + k11 += 1 + } + c11 
:= &cs11[i%len(cs11)] + nc.expected.AppArmorProfile = c11.expected + nc.dst.AppArmorProfile = c11.dst + nc.src.AppArmorProfile = c11.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceInt64(p Policy) []Case[[]int64] { + cases := []Case[[]int64]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructInt64(NoLimit) + var nc Case[[]int64] + nc = Case[[]int64]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]int64]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []int64{} + cases = append(cases, nc) + return cases +} +func constructPointerSupplementalGroupsPolicy(p Policy) []Case[*v1.SupplementalGroupsPolicy] { + cases := []Case[*v1.SupplementalGroupsPolicy]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSupplementalGroupsPolicy(p) + for _, c := range cs { + cases = append(cases, Case[*v1.SupplementalGroupsPolicy]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructSupplementalGroupsPolicy(p Policy) []Case[v1.SupplementalGroupsPolicy] { + cases := []Case[v1.SupplementalGroupsPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.SupplementalGroupsPolicy]{ + expected: v1.SupplementalGroupsPolicy(c.expected), + dst: v1.SupplementalGroupsPolicy(c.dst), + src: v1.SupplementalGroupsPolicy(c.src), + }) + } + return cases +} +func constructAtomicSliceSysctl(p Policy) []Case[[]v1.Sysctl] { + cases := []Case[[]v1.Sysctl]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructSysctl(NoLimit) + var nc Case[[]v1.Sysctl] + nc = Case[[]v1.Sysctl]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + 
nc = Case[[]v1.Sysctl]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.Sysctl{} + cases = append(cases, nc) + return cases +} +func constructSysctl(p Policy) []Case[v1.Sysctl] { + cases := []Case[v1.Sysctl]{} + cs0 := constructString(NoLimit) + cs1 := constructString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.Sysctl]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Value = c1.expected + nc.dst.Value = c1.dst + nc.src.Value = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerPodFSGroupChangePolicy(p Policy) []Case[*v1.PodFSGroupChangePolicy] { + cases := []Case[*v1.PodFSGroupChangePolicy]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodFSGroupChangePolicy(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PodFSGroupChangePolicy]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPodFSGroupChangePolicy(p Policy) []Case[v1.PodFSGroupChangePolicy] { + cases := []Case[v1.PodFSGroupChangePolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.PodFSGroupChangePolicy]{ + expected: v1.PodFSGroupChangePolicy(c.expected), + dst: v1.PodFSGroupChangePolicy(c.dst), + src: v1.PodFSGroupChangePolicy(c.src), + }) + } + return cases +} +func constructMapSliceLocalObjectReference(p Policy) []Case[[]v1.LocalObjectReference] { + cases := []Case[[]v1.LocalObjectReference]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicLocalObjectReferenceIgnore_name(NoLimit) + var nc Case[[]v1.LocalObjectReference] + nc = 
Case[[]v1.LocalObjectReference]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.LocalObjectReference]{} + srcs := []v1.LocalObjectReference{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.LocalObjectReference]{} + srcs = []v1.LocalObjectReference{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.LocalObjectReference]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.LocalObjectReference]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructAtomicLocalObjectReferenceIgnore_name(p Policy) []Case[v1.LocalObjectReference] { + cases := []Case[v1.LocalObjectReference]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.LocalObjectReference]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.src + nc.dst.Name = c0.dst + nc.src.Name = c0.src + cases = append(cases, nc) + } + return cases +} +func constructPointerAffinity(p Policy) []Case[*v1.Affinity] { + cases := []Case[*v1.Affinity]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAffinity(p) + for _, c := range cs { + cases = append(cases, Case[*v1.Affinity]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAffinity(p Policy) []Case[v1.Affinity] { + cases := []Case[v1.Affinity]{} + cs0 := constructPointerNodeAffinity(NoLimit) + cs1 := constructPointerPodAffinity(NoLimit) + cs2 := constructPointerPodAntiAffinity(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.Affinity]{} + if i/len(cs0) > k0 { + cs0 = constructPointerNodeAffinity(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.NodeAffinity = c0.expected + nc.dst.NodeAffinity = c0.dst + nc.src.NodeAffinity = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerPodAffinity(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.PodAffinity = 
c1.expected + nc.dst.PodAffinity = c1.dst + nc.src.PodAffinity = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerPodAntiAffinity(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.PodAntiAffinity = c2.expected + nc.dst.PodAntiAffinity = c2.dst + nc.src.PodAntiAffinity = c2.src + cases = append(cases, nc) + } + return cases +} +func constructPointerNodeAffinity(p Policy) []Case[*v1.NodeAffinity] { + cases := []Case[*v1.NodeAffinity]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructNodeAffinity(p) + for _, c := range cs { + cases = append(cases, Case[*v1.NodeAffinity]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructNodeAffinity(p Policy) []Case[v1.NodeAffinity] { + cases := []Case[v1.NodeAffinity]{} + cs0 := constructAtomicPointerNodeSelector(NoLimit) + cs1 := constructAtomicSlicePreferredSchedulingTerm(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.NodeAffinity]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicPointerNodeSelector(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.RequiredDuringSchedulingIgnoredDuringExecution = c0.expected + nc.dst.RequiredDuringSchedulingIgnoredDuringExecution = c0.dst + nc.src.RequiredDuringSchedulingIgnoredDuringExecution = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSlicePreferredSchedulingTerm(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.PreferredDuringSchedulingIgnoredDuringExecution = c1.expected + nc.dst.PreferredDuringSchedulingIgnoredDuringExecution = c1.dst + nc.src.PreferredDuringSchedulingIgnoredDuringExecution = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicPointerNodeSelector(p Policy) []Case[*v1.NodeSelector] { + cases := []Case[*v1.NodeSelector]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicNodeSelector(p) + for _, c := range cs { + cases = append(cases, 
Case[*v1.NodeSelector]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructAtomicNodeSelector(p Policy) []Case[v1.NodeSelector] { + cases := []Case[v1.NodeSelector]{} + cs0 := constructAtomicSliceNodeSelectorTerm(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.NodeSelector]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceNodeSelectorTerm(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.NodeSelectorTerms = c0.src + nc.dst.NodeSelectorTerms = c0.dst + nc.src.NodeSelectorTerms = c0.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceNodeSelectorTerm(p Policy) []Case[[]v1.NodeSelectorTerm] { + cases := []Case[[]v1.NodeSelectorTerm]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructAtomicNodeSelectorTerm(NoLimit) + var nc Case[[]v1.NodeSelectorTerm] + nc = Case[[]v1.NodeSelectorTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.NodeSelectorTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.NodeSelectorTerm{} + cases = append(cases, nc) + return cases +} +func constructAtomicNodeSelectorTerm(p Policy) []Case[v1.NodeSelectorTerm] { + cases := []Case[v1.NodeSelectorTerm]{} + cs0 := constructAtomicSliceNodeSelectorRequirement(NoLimit) + cs1 := constructAtomicSliceNodeSelectorRequirement(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.NodeSelectorTerm]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceNodeSelectorRequirement(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.MatchExpressions = c0.src + nc.dst.MatchExpressions = c0.dst + nc.src.MatchExpressions = c0.src + if i/len(cs1) > k1 { + cs1 = 
constructAtomicSliceNodeSelectorRequirement(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.MatchFields = c1.src + nc.dst.MatchFields = c1.dst + nc.src.MatchFields = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceNodeSelectorRequirement(p Policy) []Case[[]v1.NodeSelectorRequirement] { + cases := []Case[[]v1.NodeSelectorRequirement]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructNodeSelectorRequirement(NoLimit) + var nc Case[[]v1.NodeSelectorRequirement] + nc = Case[[]v1.NodeSelectorRequirement]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.NodeSelectorRequirement]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.NodeSelectorRequirement{} + cases = append(cases, nc) + return cases +} +func constructNodeSelectorRequirement(p Policy) []Case[v1.NodeSelectorRequirement] { + cases := []Case[v1.NodeSelectorRequirement]{} + cs0 := constructString(NoLimit) + cs1 := constructNodeSelectorOperator(NoLimit) + cs2 := constructAtomicSliceString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.NodeSelectorRequirement]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Key = c0.expected + nc.dst.Key = c0.dst + nc.src.Key = c0.src + if i/len(cs1) > k1 { + cs1 = constructNodeSelectorOperator(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Operator = c1.expected + nc.dst.Operator = c1.dst + nc.src.Operator = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicSliceString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Values = c2.expected + nc.dst.Values = c2.dst + nc.src.Values = c2.src + cases = append(cases, nc) + } + 
return cases +} +func constructNodeSelectorOperator(p Policy) []Case[v1.NodeSelectorOperator] { + cases := []Case[v1.NodeSelectorOperator]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.NodeSelectorOperator]{ + expected: v1.NodeSelectorOperator(c.expected), + dst: v1.NodeSelectorOperator(c.dst), + src: v1.NodeSelectorOperator(c.src), + }) + } + return cases +} +func constructAtomicSlicePreferredSchedulingTerm(p Policy) []Case[[]v1.PreferredSchedulingTerm] { + cases := []Case[[]v1.PreferredSchedulingTerm]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPreferredSchedulingTerm(NoLimit) + var nc Case[[]v1.PreferredSchedulingTerm] + nc = Case[[]v1.PreferredSchedulingTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PreferredSchedulingTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.PreferredSchedulingTerm{} + cases = append(cases, nc) + return cases +} +func constructPreferredSchedulingTerm(p Policy) []Case[v1.PreferredSchedulingTerm] { + cases := []Case[v1.PreferredSchedulingTerm]{} + cs0 := constructInt32(NoLimit) + cs1 := constructAtomicNodeSelectorTerm(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.PreferredSchedulingTerm]{} + if i/len(cs0) > k0 { + cs0 = constructInt32(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Weight = c0.expected + nc.dst.Weight = c0.dst + nc.src.Weight = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicNodeSelectorTerm(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Preference = c1.expected + nc.dst.Preference = c1.dst + nc.src.Preference = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerPodAffinity(p Policy) 
[]Case[*v1.PodAffinity] { + cases := []Case[*v1.PodAffinity]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodAffinity(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PodAffinity]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPodAffinity(p Policy) []Case[v1.PodAffinity] { + cases := []Case[v1.PodAffinity]{} + cs0 := constructAtomicSlicePodAffinityTerm(NoLimit) + cs1 := constructAtomicSliceWeightedPodAffinityTerm(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.PodAffinity]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSlicePodAffinityTerm(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.RequiredDuringSchedulingIgnoredDuringExecution = c0.expected + nc.dst.RequiredDuringSchedulingIgnoredDuringExecution = c0.dst + nc.src.RequiredDuringSchedulingIgnoredDuringExecution = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceWeightedPodAffinityTerm(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.PreferredDuringSchedulingIgnoredDuringExecution = c1.expected + nc.dst.PreferredDuringSchedulingIgnoredDuringExecution = c1.dst + nc.src.PreferredDuringSchedulingIgnoredDuringExecution = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSlicePodAffinityTerm(p Policy) []Case[[]v1.PodAffinityTerm] { + cases := []Case[[]v1.PodAffinityTerm]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodAffinityTerm(NoLimit) + var nc Case[[]v1.PodAffinityTerm] + nc = Case[[]v1.PodAffinityTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PodAffinityTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.PodAffinityTerm{} + cases = append(cases, 
nc) + return cases +} +func constructPodAffinityTerm(p Policy) []Case[v1.PodAffinityTerm] { + cases := []Case[v1.PodAffinityTerm]{} + cs0 := constructPointerLabelSelector(NoLimit) + cs1 := constructAtomicSliceString(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructPointerLabelSelector(NoLimit) + cs4 := constructAtomicSliceString(NoLimit) + cs5 := constructAtomicSliceString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + for i := range maxCount { + nc := Case[v1.PodAffinityTerm]{} + if i/len(cs0) > k0 { + cs0 = constructPointerLabelSelector(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.LabelSelector = c0.expected + nc.dst.LabelSelector = c0.dst + nc.src.LabelSelector = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Namespaces = c1.expected + nc.dst.Namespaces = c1.dst + nc.src.Namespaces = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.TopologyKey = c2.expected + nc.dst.TopologyKey = c2.dst + nc.src.TopologyKey = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerLabelSelector(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.NamespaceSelector = c3.expected + nc.dst.NamespaceSelector = c3.dst + nc.src.NamespaceSelector = c3.src + if i/len(cs4) > k4 { + cs4 = constructAtomicSliceString(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.MatchLabelKeys = c4.expected + nc.dst.MatchLabelKeys = c4.dst + nc.src.MatchLabelKeys = c4.src + if i/len(cs5) > k5 { + cs5 = constructAtomicSliceString(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.MismatchLabelKeys = c5.expected + nc.dst.MismatchLabelKeys = c5.dst + nc.src.MismatchLabelKeys = c5.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceWeightedPodAffinityTerm(p 
Policy) []Case[[]v1.WeightedPodAffinityTerm] { + cases := []Case[[]v1.WeightedPodAffinityTerm]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructWeightedPodAffinityTerm(NoLimit) + var nc Case[[]v1.WeightedPodAffinityTerm] + nc = Case[[]v1.WeightedPodAffinityTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.WeightedPodAffinityTerm]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.WeightedPodAffinityTerm{} + cases = append(cases, nc) + return cases +} +func constructWeightedPodAffinityTerm(p Policy) []Case[v1.WeightedPodAffinityTerm] { + cases := []Case[v1.WeightedPodAffinityTerm]{} + cs0 := constructInt32(NoLimit) + cs1 := constructPodAffinityTerm(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.WeightedPodAffinityTerm]{} + if i/len(cs0) > k0 { + cs0 = constructInt32(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Weight = c0.expected + nc.dst.Weight = c0.dst + nc.src.Weight = c0.src + if i/len(cs1) > k1 { + cs1 = constructPodAffinityTerm(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.PodAffinityTerm = c1.expected + nc.dst.PodAffinityTerm = c1.dst + nc.src.PodAffinityTerm = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerPodAntiAffinity(p Policy) []Case[*v1.PodAntiAffinity] { + cases := []Case[*v1.PodAntiAffinity]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodAntiAffinity(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PodAntiAffinity]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPodAntiAffinity(p Policy) []Case[v1.PodAntiAffinity] { + cases := []Case[v1.PodAntiAffinity]{} + cs0 := 
constructAtomicSlicePodAffinityTerm(NoLimit) + cs1 := constructAtomicSliceWeightedPodAffinityTerm(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.PodAntiAffinity]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSlicePodAffinityTerm(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.RequiredDuringSchedulingIgnoredDuringExecution = c0.expected + nc.dst.RequiredDuringSchedulingIgnoredDuringExecution = c0.dst + nc.src.RequiredDuringSchedulingIgnoredDuringExecution = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceWeightedPodAffinityTerm(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.PreferredDuringSchedulingIgnoredDuringExecution = c1.expected + nc.dst.PreferredDuringSchedulingIgnoredDuringExecution = c1.dst + nc.src.PreferredDuringSchedulingIgnoredDuringExecution = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSliceToleration(p Policy) []Case[[]v1.Toleration] { + cases := []Case[[]v1.Toleration]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructToleration(NoLimit) + var nc Case[[]v1.Toleration] + nc = Case[[]v1.Toleration]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.Toleration]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.Toleration{} + cases = append(cases, nc) + return cases +} +func constructToleration(p Policy) []Case[v1.Toleration] { + cases := []Case[v1.Toleration]{} + cs0 := constructString(NoLimit) + cs1 := constructTolerationOperator(NoLimit) + cs2 := constructString(NoLimit) + cs3 := constructTaintEffect(NoLimit) + cs4 := constructPointerInt64(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + 
for i := range maxCount { + nc := Case[v1.Toleration]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Key = c0.expected + nc.dst.Key = c0.dst + nc.src.Key = c0.src + if i/len(cs1) > k1 { + cs1 = constructTolerationOperator(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Operator = c1.expected + nc.dst.Operator = c1.dst + nc.src.Operator = c1.src + if i/len(cs2) > k2 { + cs2 = constructString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Value = c2.expected + nc.dst.Value = c2.dst + nc.src.Value = c2.src + if i/len(cs3) > k3 { + cs3 = constructTaintEffect(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.Effect = c3.expected + nc.dst.Effect = c3.dst + nc.src.Effect = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerInt64(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.TolerationSeconds = c4.expected + nc.dst.TolerationSeconds = c4.dst + nc.src.TolerationSeconds = c4.src + cases = append(cases, nc) + } + return cases +} +func constructTolerationOperator(p Policy) []Case[v1.TolerationOperator] { + cases := []Case[v1.TolerationOperator]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.TolerationOperator]{ + expected: v1.TolerationOperator(c.expected), + dst: v1.TolerationOperator(c.dst), + src: v1.TolerationOperator(c.src), + }) + } + return cases +} +func constructTaintEffect(p Policy) []Case[v1.TaintEffect] { + cases := []Case[v1.TaintEffect]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.TaintEffect]{ + expected: v1.TaintEffect(c.expected), + dst: v1.TaintEffect(c.dst), + src: v1.TaintEffect(c.src), + }) + } + return cases +} +func constructMapSliceHostAlias(p Policy) []Case[[]v1.HostAlias] { + cases := []Case[[]v1.HostAlias]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructHostAliasIgnore_ip(NoLimit) + var nc Case[[]v1.HostAlias] + nc = 
Case[[]v1.HostAlias]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.HostAlias]{} + srcs := []v1.HostAlias{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.HostAlias]{} + srcs = []v1.HostAlias{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.HostAlias]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.HostAlias]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructHostAliasIgnore_ip(p Policy) []Case[v1.HostAlias] { + cases := []Case[v1.HostAlias]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + cs1 := constructAtomicSliceString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.HostAlias]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.IP = c0.expected + nc.dst.IP = c0.dst + nc.src.IP = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Hostnames = c1.expected + 
nc.dst.Hostnames = c1.dst + nc.src.Hostnames = c1.src + cases = append(cases, nc) + } + return cases +} +func constructPointerPodDNSConfig(p Policy) []Case[*v1.PodDNSConfig] { + cases := []Case[*v1.PodDNSConfig]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodDNSConfig(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PodDNSConfig]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPodDNSConfig(p Policy) []Case[v1.PodDNSConfig] { + cases := []Case[v1.PodDNSConfig]{} + cs0 := constructAtomicSliceString(NoLimit) + cs1 := constructAtomicSliceString(NoLimit) + cs2 := constructAtomicSlicePodDNSConfigOption(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.PodDNSConfig]{} + if i/len(cs0) > k0 { + cs0 = constructAtomicSliceString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Nameservers = c0.expected + nc.dst.Nameservers = c0.dst + nc.src.Nameservers = c0.src + if i/len(cs1) > k1 { + cs1 = constructAtomicSliceString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Searches = c1.expected + nc.dst.Searches = c1.dst + nc.src.Searches = c1.src + if i/len(cs2) > k2 { + cs2 = constructAtomicSlicePodDNSConfigOption(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.Options = c2.expected + nc.dst.Options = c2.dst + nc.src.Options = c2.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSlicePodDNSConfigOption(p Policy) []Case[[]v1.PodDNSConfigOption] { + cases := []Case[[]v1.PodDNSConfigOption]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodDNSConfigOption(NoLimit) + var nc Case[[]v1.PodDNSConfigOption] + nc = Case[[]v1.PodDNSConfigOption]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = 
Case[[]v1.PodDNSConfigOption]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.PodDNSConfigOption{} + cases = append(cases, nc) + return cases +} +func constructPodDNSConfigOption(p Policy) []Case[v1.PodDNSConfigOption] { + cases := []Case[v1.PodDNSConfigOption]{} + cs0 := constructString(NoLimit) + cs1 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + ) + k0 := 0 + k1 := 0 + for i := range maxCount { + nc := Case[v1.PodDNSConfigOption]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.Value = c1.expected + nc.dst.Value = c1.dst + nc.src.Value = c1.src + cases = append(cases, nc) + } + return cases +} +func constructAtomicSlicePodReadinessGate(p Policy) []Case[[]v1.PodReadinessGate] { + cases := []Case[[]v1.PodReadinessGate]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodReadinessGate(NoLimit) + var nc Case[[]v1.PodReadinessGate] + nc = Case[[]v1.PodReadinessGate]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PodReadinessGate]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + nc.src = []v1.PodReadinessGate{} + cases = append(cases, nc) + return cases +} +func constructPodReadinessGate(p Policy) []Case[v1.PodReadinessGate] { + cases := []Case[v1.PodReadinessGate]{} + cs0 := constructPodConditionType(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.PodReadinessGate]{} + if i/len(cs0) > k0 { + cs0 = constructPodConditionType(NoLimit) + k0 += 1 + } + c0 := 
&cs0[i%len(cs0)] + nc.expected.ConditionType = c0.expected + nc.dst.ConditionType = c0.dst + nc.src.ConditionType = c0.src + cases = append(cases, nc) + } + return cases +} +func constructPodConditionType(p Policy) []Case[v1.PodConditionType] { + cases := []Case[v1.PodConditionType]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.PodConditionType]{ + expected: v1.PodConditionType(c.expected), + dst: v1.PodConditionType(c.dst), + src: v1.PodConditionType(c.src), + }) + } + return cases +} +func constructPointerPreemptionPolicy(p Policy) []Case[*v1.PreemptionPolicy] { + cases := []Case[*v1.PreemptionPolicy]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPreemptionPolicy(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PreemptionPolicy]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPreemptionPolicy(p Policy) []Case[v1.PreemptionPolicy] { + cases := []Case[v1.PreemptionPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.PreemptionPolicy]{ + expected: v1.PreemptionPolicy(c.expected), + dst: v1.PreemptionPolicy(c.dst), + src: v1.PreemptionPolicy(c.src), + }) + } + return cases +} +func constructMapSliceTopologySpreadConstraint(p Policy) []Case[[]v1.TopologySpreadConstraint] { + cases := []Case[[]v1.TopologySpreadConstraint]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructTopologySpreadConstraintIgnore_topologyKey_whenUnsatisfiable(NoLimit) + var nc Case[[]v1.TopologySpreadConstraint] + nc = Case[[]v1.TopologySpreadConstraint]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.TopologySpreadConstraint]{} + srcs := []v1.TopologySpreadConstraint{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + 
nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.TopologySpreadConstraint]{} + srcs = []v1.TopologySpreadConstraint{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.TopologySpreadConstraint]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.TopologySpreadConstraint]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructTopologySpreadConstraintIgnore_topologyKey_whenUnsatisfiable(p Policy) []Case[v1.TopologySpreadConstraint] { + cases := []Case[v1.TopologySpreadConstraint]{} + cs0 := constructInt32(NoLimit) + cs1 := constructString(NoNotEqual | NoZero | NoNil) + cs2 := constructUnsatisfiableConstraintAction(NoNotEqual | NoZero | NoNil) + cs3 := constructPointerLabelSelector(NoLimit) + cs4 := constructPointerInt32(NoLimit) + cs5 := constructPointerNodeInclusionPolicy(NoLimit) + cs6 := constructPointerNodeInclusionPolicy(NoLimit) + cs7 := constructAtomicSliceString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + len(cs3), + len(cs4), + len(cs5), + len(cs6), + len(cs7), + ) + k0 := 0 + k1 := 0 + k2 := 0 + k3 := 0 + k4 := 0 + k5 := 0 + k6 := 0 + k7 := 0 + for i := range maxCount { + nc := Case[v1.TopologySpreadConstraint]{} + if i/len(cs0) > k0 { + cs0 = constructInt32(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + 
nc.expected.MaxSkew = c0.expected + nc.dst.MaxSkew = c0.dst + nc.src.MaxSkew = c0.src + if i/len(cs1) > k1 { + cs1 = constructString(NoNotEqual | NoZero | NoNil) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.TopologyKey = c1.expected + nc.dst.TopologyKey = c1.dst + nc.src.TopologyKey = c1.src + if i/len(cs2) > k2 { + cs2 = constructUnsatisfiableConstraintAction(NoNotEqual | NoZero | NoNil) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.WhenUnsatisfiable = c2.expected + nc.dst.WhenUnsatisfiable = c2.dst + nc.src.WhenUnsatisfiable = c2.src + if i/len(cs3) > k3 { + cs3 = constructPointerLabelSelector(NoLimit) + k3 += 1 + } + c3 := &cs3[i%len(cs3)] + nc.expected.LabelSelector = c3.expected + nc.dst.LabelSelector = c3.dst + nc.src.LabelSelector = c3.src + if i/len(cs4) > k4 { + cs4 = constructPointerInt32(NoLimit) + k4 += 1 + } + c4 := &cs4[i%len(cs4)] + nc.expected.MinDomains = c4.expected + nc.dst.MinDomains = c4.dst + nc.src.MinDomains = c4.src + if i/len(cs5) > k5 { + cs5 = constructPointerNodeInclusionPolicy(NoLimit) + k5 += 1 + } + c5 := &cs5[i%len(cs5)] + nc.expected.NodeAffinityPolicy = c5.expected + nc.dst.NodeAffinityPolicy = c5.dst + nc.src.NodeAffinityPolicy = c5.src + if i/len(cs6) > k6 { + cs6 = constructPointerNodeInclusionPolicy(NoLimit) + k6 += 1 + } + c6 := &cs6[i%len(cs6)] + nc.expected.NodeTaintsPolicy = c6.expected + nc.dst.NodeTaintsPolicy = c6.dst + nc.src.NodeTaintsPolicy = c6.src + if i/len(cs7) > k7 { + cs7 = constructAtomicSliceString(NoLimit) + k7 += 1 + } + c7 := &cs7[i%len(cs7)] + nc.expected.MatchLabelKeys = c7.expected + nc.dst.MatchLabelKeys = c7.dst + nc.src.MatchLabelKeys = c7.src + cases = append(cases, nc) + } + return cases +} +func constructUnsatisfiableConstraintAction(p Policy) []Case[v1.UnsatisfiableConstraintAction] { + cases := []Case[v1.UnsatisfiableConstraintAction]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.UnsatisfiableConstraintAction]{ + expected: 
v1.UnsatisfiableConstraintAction(c.expected), + dst: v1.UnsatisfiableConstraintAction(c.dst), + src: v1.UnsatisfiableConstraintAction(c.src), + }) + } + return cases +} +func constructPointerNodeInclusionPolicy(p Policy) []Case[*v1.NodeInclusionPolicy] { + cases := []Case[*v1.NodeInclusionPolicy]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructNodeInclusionPolicy(p) + for _, c := range cs { + cases = append(cases, Case[*v1.NodeInclusionPolicy]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructNodeInclusionPolicy(p Policy) []Case[v1.NodeInclusionPolicy] { + cases := []Case[v1.NodeInclusionPolicy]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.NodeInclusionPolicy]{ + expected: v1.NodeInclusionPolicy(c.expected), + dst: v1.NodeInclusionPolicy(c.dst), + src: v1.NodeInclusionPolicy(c.src), + }) + } + return cases +} +func constructPointerPodOS(p Policy) []Case[*v1.PodOS] { + cases := []Case[*v1.PodOS]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodOS(p) + for _, c := range cs { + cases = append(cases, Case[*v1.PodOS]{ + expected: &c.expected, + dst: &c.dst, + src: &c.src, + }) + } + return cases +} +func constructPodOS(p Policy) []Case[v1.PodOS] { + cases := []Case[v1.PodOS]{} + cs0 := constructOSName(NoLimit) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.PodOS]{} + if i/len(cs0) > k0 { + cs0 = constructOSName(NoLimit) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + cases = append(cases, nc) + } + return cases +} +func constructOSName(p Policy) []Case[v1.OSName] { + cases := []Case[v1.OSName]{} + cs := constructString(p) + for _, c := range cs { + cases = append(cases, Case[v1.OSName]{ + expected: v1.OSName(c.expected), + dst: v1.OSName(c.dst), + src: v1.OSName(c.src), + }) + } + return cases +} +func 
constructMapSlicePodSchedulingGate(p Policy) []Case[[]v1.PodSchedulingGate] { + cases := []Case[[]v1.PodSchedulingGate]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodSchedulingGateIgnore_name(NoLimit) + var nc Case[[]v1.PodSchedulingGate] + nc = Case[[]v1.PodSchedulingGate]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PodSchedulingGate]{} + srcs := []v1.PodSchedulingGate{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.PodSchedulingGate]{} + srcs = []v1.PodSchedulingGate{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.PodSchedulingGate]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PodSchedulingGate]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructPodSchedulingGateIgnore_name(p Policy) []Case[v1.PodSchedulingGate] { + cases := []Case[v1.PodSchedulingGate]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + maxCount := max( + len(cs0), + ) + k0 := 0 + for i := range maxCount { + nc := Case[v1.PodSchedulingGate]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + cases = append(cases, nc) + } + return cases +} +func constructMapSlicePodResourceClaim(p Policy) []Case[[]v1.PodResourceClaim] { + cases := []Case[[]v1.PodResourceClaim]{ + { + expected: nil, + dst: nil, + src: nil, + }, + } + cs := constructPodResourceClaimIgnore_name(NoLimit) + var nc Case[[]v1.PodResourceClaim] + nc = Case[[]v1.PodResourceClaim]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PodResourceClaim]{} + srcs := []v1.PodResourceClaim{} + for i, c := range cs { + switch i % 3 { + case 0: + nc.expected = append(nc.expected, c.expected) + nc.dst = append(nc.dst, c.dst) + nc.src = append(nc.src, c.src) + case 1: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 2: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) 
+ cases = append(cases, nc) + nc = Case[[]v1.PodResourceClaim]{} + srcs = []v1.PodResourceClaim{} + for i, c := range cs { + switch i % 2 { + case 0: + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + case 1: + srcs = append(srcs, c.src) + nc.src = append(nc.src, c.src) + } + } + nc.expected = append(nc.expected, srcs...) + cases = append(cases, nc) + nc = Case[[]v1.PodResourceClaim]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.src) + nc.src = append(nc.src, c.src) + } + cases = append(cases, nc) + nc = Case[[]v1.PodResourceClaim]{} + for _, c := range cs { + nc.expected = append(nc.expected, c.dst) + nc.dst = append(nc.dst, c.dst) + } + cases = append(cases, nc) + return cases +} +func constructPodResourceClaimIgnore_name(p Policy) []Case[v1.PodResourceClaim] { + cases := []Case[v1.PodResourceClaim]{} + cs0 := constructString(NoNotEqual | NoZero | NoNil) + cs1 := constructPointerString(NoLimit) + cs2 := constructPointerString(NoLimit) + maxCount := max( + len(cs0), + len(cs1), + len(cs2), + ) + k0 := 0 + k1 := 0 + k2 := 0 + for i := range maxCount { + nc := Case[v1.PodResourceClaim]{} + if i/len(cs0) > k0 { + cs0 = constructString(NoNotEqual | NoZero | NoNil) + k0 += 1 + } + c0 := &cs0[i%len(cs0)] + nc.expected.Name = c0.expected + nc.dst.Name = c0.dst + nc.src.Name = c0.src + if i/len(cs1) > k1 { + cs1 = constructPointerString(NoLimit) + k1 += 1 + } + c1 := &cs1[i%len(cs1)] + nc.expected.ResourceClaimName = c1.expected + nc.dst.ResourceClaimName = c1.dst + nc.src.ResourceClaimName = c1.src + if i/len(cs2) > k2 { + cs2 = constructPointerString(NoLimit) + k2 += 1 + } + c2 := &cs2[i%len(cs2)] + nc.expected.ResourceClaimTemplateName = c2.expected + nc.dst.ResourceClaimTemplateName = c2.dst + nc.src.ResourceClaimTemplateName = c2.src + cases = append(cases, nc) + } + return cases +} diff --git a/pkg/pdapi/pd/duration.go b/pkg/pdapi/pd/duration.go new file mode 100644 index 00000000000..d7cc8c5963c --- /dev/null +++ 
b/pkg/pdapi/pd/duration.go @@ -0,0 +1,64 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is copied from https://github.com/tikv/pd/blob/v8.1.0/pkg/utils/typeutil/duration.go + +package pd + +import ( + "fmt" + "strconv" + "time" +) + +// Duration is a wrapper of time.Duration for TOML and JSON. +type Duration struct { + time.Duration +} + +// NewDuration creates a Duration from time.Duration. +func NewDuration(duration time.Duration) Duration { + return Duration{Duration: duration} +} + +// MarshalJSON returns the duration as a JSON string. +func (d *Duration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%q"`, d.String())), nil +} + +// UnmarshalJSON parses a JSON string into the duration. +func (d *Duration) UnmarshalJSON(text []byte) error { + s, err := strconv.Unquote(string(text)) + if err != nil { + return err + } + duration, err := time.ParseDuration(s) + if err != nil { + return err + } + d.Duration = duration + return nil +} + +// UnmarshalText parses a TOML string into the duration. +func (d *Duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} + +// MarshalText returns the duration as a JSON string. 
+func (d Duration) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} diff --git a/pkg/pdapi/pd/size.go b/pkg/pdapi/pd/size.go new file mode 100644 index 00000000000..3cc9a575792 --- /dev/null +++ b/pkg/pdapi/pd/size.go @@ -0,0 +1,66 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is copied from https://github.com/tikv/pd/blob/v8.1.0/pkg/utils/typeutil/size.go + +package pd + +import ( + "strconv" + + "github.com/docker/go-units" +) + +// ByteSize is a retype uint64 for TOML and JSON. +type ByteSize uint64 + +// ParseMBFromText parses MB from text. +func ParseMBFromText(text string, value uint64) uint64 { + b := ByteSize(0) + err := b.UnmarshalText([]byte(text)) + if err != nil { + return value + } + return uint64(b / units.MiB) +} + +// MarshalJSON returns the size as a JSON string. +func (b ByteSize) MarshalJSON() ([]byte, error) { + return []byte(`"` + units.BytesSize(float64(b)) + `"`), nil +} + +// UnmarshalJSON parses a JSON string into the byte size. +func (b *ByteSize) UnmarshalJSON(text []byte) error { + s, err := strconv.Unquote(string(text)) + if err != nil { + return err + } + v, err := units.RAMInBytes(s) + if err != nil { + return err + } + //nolint:gosec // expected type conversion + *b = ByteSize(v) + return nil +} + +// UnmarshalText parses a Toml string into the byte size. 
// UnmarshalText parses a Toml string into the byte size.
func (b *ByteSize) UnmarshalText(text []byte) error {
	v, err := units.RAMInBytes(string(text))
	if err != nil {
		return err
	}
	*b = ByteSize(v) //nolint:gosec // expected type conversion
	return nil
}

const (
	// Name of the PD scheduler that evicts all region leaders from a store.
	evictSchedulerLeader = "evict-leader-scheduler"
	// Suffix of the raw body PD returns when the TiKV cluster has not been
	// bootstrapped yet; compared against err.Error() in GetStores.
	// NOTE(review): the trailing double quote appears intentional (it matches
	// the raw, non-JSON response text) — confirm against the target PD version.
	tiKVNotBootstrapped = `TiKV cluster not bootstrapped, please start TiKV first"`
)

// PDWriter defines write api call of pd
// TODO: move all Get api call to PDClient
type PDWriter interface {
	// GetHealth returns the health of PD's members.
	GetHealth(ctx context.Context) (*HealthInfo, error)
	// GetConfig returns PD's config.
	GetConfig(ctx context.Context) (*PDConfigFromAPI, error)
	// GetCluster returns the cluster information.
	GetCluster(ctx context.Context) (*metapb.Cluster, error)
	// GetMembers returns all PD members of the cluster.
	GetMembers(ctx context.Context) (*MembersInfo, error)
	// GetMSMembers returns all PD members service-addr from cluster by specific Micro Service.
	GetMSMembers(ctx context.Context, service string) ([]string, error)
	// SetStoreLabels sets the labels for a store.
	SetStoreLabels(ctx context.Context, storeID uint64, labels map[string]string) (bool, error)
	// DeleteStore deletes a TiKV/TiFlash store from the cluster.
	DeleteStore(ctx context.Context, storeID string) error
	// DeleteMember deletes a PD member from the cluster.
	DeleteMember(ctx context.Context, name string) error
	// DeleteMemberByID deletes a PD member from the cluster
	DeleteMemberByID(ctx context.Context, memberID uint64) error
	// UpdateReplicationConfig updates the replication config.
	UpdateReplicationConfig(ctx context.Context, config PDReplicationConfig) error

	// BeginEvictLeader initiates leader eviction for a store.
	BeginEvictLeader(ctx context.Context, storeID string) error
	// EndEvictLeader removes the leader eviction scheduler for a store.
	EndEvictLeader(ctx context.Context, storeID string) error
	// GetEvictLeaderSchedulers gets schedulers of leader eviction.
	GetEvictLeaderSchedulers(ctx context.Context) ([]string, error)
	// GetEvictLeaderScheduler gets leader eviction schedulers for stores.
	GetEvictLeaderScheduler(ctx context.Context, storeID string) (string, error)

	// GetPDLeader returns the PD leader.
	GetPDLeader(ctx context.Context) (*pdpb.Member, error)
	// TransferPDLeader transfers PD leader to specified member.
	TransferPDLeader(ctx context.Context, name string) error
}

// PDClient provides PD server's APIs used by TiDB Operator.
type PDClient interface {
	// GetStores lists all TiKV/TiFlash stores of the cluster.
	GetStores(ctx context.Context) (*StoresInfo, error)
	// GetTombStoneStores lists all tombstone stores of the cluster.
	// GetTombStoneStores() (*StoresInfo, error)
	// GetStore gets a TiKV/TiFlash store for a specific store id of the cluster.
	GetStore(ctx context.Context, storeID string) (*StoreInfo, error)

	PDWriter
}

// URL path prefixes of the PD HTTP API, joined to the client's base URL.
var (
	healthPrefix    = "pd/api/v1/health"
	configPrefix    = "pd/api/v1/config"
	clusterIDPrefix = "pd/api/v1/cluster"

	membersPrefix      = "pd/api/v1/members"
	MicroServicePrefix = "pd/api/v2/ms"

	storesPrefix = "pd/api/v1/stores"
	storePrefix  = "pd/api/v1/store"

	pdReplicationPrefix = "pd/api/v1/config/replicate"

	schedulersPrefix                 = "pd/api/v1/schedulers"
	pdLeaderPrefix                   = "pd/api/v1/leader"
	pdLeaderTransferPrefix           = "pd/api/v1/leader/transfer"
	evictLeaderSchedulerConfigPrefix = "pd/api/v1/scheduler-config/evict-leader-scheduler/list"
	// Micro Service
)

// pdClient is the default implementation of PDClient.
type pdClient struct {
	// base URL of the PD server, e.g. "https://pd:2379".
	url string
	// shared HTTP client; carries the request timeout and TLS configuration.
	httpClient *http.Client
}

// NewPDClient returns a new PDClient talking to the PD server at url.
// timeout applies to every request; tlsConfig may be nil for plain HTTP.
func NewPDClient(url string, timeout time.Duration, tlsConfig *tls.Config) PDClient {
	return &pdClient{
		url: url,
		httpClient: &http.Client{
			Timeout:   timeout,
			Transport: &http.Transport{TLSClientConfig: tlsConfig},
		},
	}
}

// GetHealth returns the health of PD's members.
func (c *pdClient) GetHealth(ctx context.Context) (*HealthInfo, error) {
	apiURL := fmt.Sprintf("%s/%s", c.url, healthPrefix)
	body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL)
	if err != nil {
		return nil, err
	}
	var healths []MemberHealth
	err = json.Unmarshal(body, &healths)
	if err != nil {
		return nil, err
	}
	return &HealthInfo{
		healths,
	}, nil
}

// GetConfig returns PD's config.
func (c *pdClient) GetConfig(ctx context.Context) (*PDConfigFromAPI, error) {
	apiURL := fmt.Sprintf("%s/%s", c.url, configPrefix)
	body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL)
	if err != nil {
		return nil, err
	}
	config := &PDConfigFromAPI{}
	err = json.Unmarshal(body, config)
	if err != nil {
		return nil, err
	}
	return config, nil
}
(*metapb.Cluster, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, clusterIDPrefix) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + cluster := &metapb.Cluster{} + err = json.Unmarshal(body, cluster) + if err != nil { + return nil, err + } + return cluster, nil +} + +func (c *pdClient) GetMembers(ctx context.Context) (*MembersInfo, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, membersPrefix) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + members := &MembersInfo{} + err = json.Unmarshal(body, members) + if err != nil { + return nil, err + } + return members, nil +} + +func (c *pdClient) GetMSMembers(ctx context.Context, service string) ([]string, error) { + apiURL := fmt.Sprintf("%s/%s/members/%s", c.url, MicroServicePrefix, service) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + var members []ServiceRegistryEntry + err = json.Unmarshal(body, &members) + if err != nil { + return nil, err + } + var addrs []string + for _, member := range members { + addrs = append(addrs, member.ServiceAddr) + } + return addrs, nil +} + +func (c *pdClient) GetStores(ctx context.Context) (*StoresInfo, error) { + storesInfo, err := c.getStores(ctx, fmt.Sprintf("%s/%s", c.url, storesPrefix)) + if err != nil { + if strings.HasSuffix(err.Error(), tiKVNotBootstrapped+"\n") { + //nolint:govet // expected + err = TiKVNotBootstrappedErrorf(err.Error()) + } + return nil, err + } + return storesInfo, nil +} + +func (c *pdClient) GetTombStoneStores(ctx context.Context) (*StoresInfo, error) { + return c.getStores(ctx, fmt.Sprintf("%s/%s?state=%d", c.url, storesPrefix, metapb.StoreState_Tombstone)) +} + +func (c *pdClient) GetStore(ctx context.Context, storeID string) (*StoreInfo, error) { + apiURL := fmt.Sprintf("%s/%s/%s", c.url, storePrefix, storeID) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + 
return nil, err + } + storeInfo := &StoreInfo{} + err = json.Unmarshal(body, storeInfo) + if err != nil { + return nil, err + } + return storeInfo, nil +} + +func (c *pdClient) getStores(ctx context.Context, apiURL string) (*StoresInfo, error) { + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + storesInfo := &StoresInfo{} + err = json.Unmarshal(body, storesInfo) + if err != nil { + return nil, err + } + return storesInfo, nil +} + +func (c *pdClient) SetStoreLabels(ctx context.Context, storeID uint64, labels map[string]string) (bool, error) { + apiURL := fmt.Sprintf("%s/%s/%d/label", c.url, storePrefix, storeID) + data, err := json.Marshal(labels) + if err != nil { + return false, err + } + if _, err := httputil.PostBodyOK(ctx, c.httpClient, apiURL, bytes.NewBuffer(data)); err != nil { + return false, fmt.Errorf("failed to set store labels: %w", err) + } + return true, nil +} + +func (c *pdClient) DeleteStore(ctx context.Context, storeID string) error { + apiURL := fmt.Sprintf("%s/%s/%s", c.url, storePrefix, storeID) + req, err := http.NewRequestWithContext(ctx, "DELETE", apiURL, http.NoBody) + if err != nil { + return err + } + + //nolint:bodyclose // has been handled + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer httputil.DeferClose(res.Body) + + // Remove an offline store should return http.StatusOK + if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { + return nil + } + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to delete store %s: %v", storeID, string(body)) +} + +func (c *pdClient) DeleteMember(ctx context.Context, name string) error { + var exist bool + members, err := c.GetMembers(ctx) + if err != nil { + return err + } + for _, member := range members.Members { + if member.Name == name { + exist = true + break + } + } + if !exist { + return nil + } + apiURL := fmt.Sprintf("%s/%s/name/%s", 
c.url, membersPrefix, name) + req, err := http.NewRequest("DELETE", apiURL, http.NoBody) + if err != nil { + return err + } + //nolint:bodyclose // has been handled + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer httputil.DeferClose(res.Body) + if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { + return nil + } + err2 := httputil.ReadErrorBody(res.Body) + return fmt.Errorf("failed %v to delete member %s: %w", res.StatusCode, name, err2) +} + +func (c *pdClient) DeleteMemberByID(ctx context.Context, memberID uint64) error { + var exist bool + members, err := c.GetMembers(ctx) + if err != nil { + return err + } + for _, member := range members.Members { + if member.MemberId == memberID { + exist = true + break + } + } + if !exist { + return nil + } + apiURL := fmt.Sprintf("%s/%s/id/%d", c.url, membersPrefix, memberID) + req, err := http.NewRequestWithContext(ctx, "DELETE", apiURL, http.NoBody) + if err != nil { + return err + } + //nolint:bodyclose // has been handled + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer httputil.DeferClose(res.Body) + if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { + return nil + } + err2 := httputil.ReadErrorBody(res.Body) + return fmt.Errorf("failed %v to delete member %d: %w", res.StatusCode, memberID, err2) +} + +func (c *pdClient) UpdateReplicationConfig(ctx context.Context, config PDReplicationConfig) error { + apiURL := fmt.Sprintf("%s/%s", c.url, pdReplicationPrefix) + data, err := json.Marshal(config) + if err != nil { + return err + } + if _, err := httputil.PostBodyOK(ctx, c.httpClient, apiURL, bytes.NewBuffer(data)); err != nil { + return fmt.Errorf("failed to update replication: %w", err) + } + return nil +} + +func (c *pdClient) BeginEvictLeader(ctx context.Context, storeID string) error { + leaderEvictInfo, err := getLeaderEvictSchedulerInfo(storeID) + if err != nil { + return err + } + apiURL := 
fmt.Sprintf("%s/%s", c.url, schedulersPrefix) + data, err := json.Marshal(leaderEvictInfo) + if err != nil { + return err + } + if _, err = httputil.PostBodyOK(ctx, c.httpClient, apiURL, bytes.NewBuffer(data)); err == nil { + return nil + } + + // pd will return an error with the body contains "scheduler existed" if the scheduler already exists + // this is not the standard response. + // so these lines are just a workaround for now: + // - make a new request to get all schedulers + // - return nil if the scheduler already exists + // + // when PD returns standard json response, we should get rid of this verbose code. + evictLeaderSchedulers, err2 := c.GetEvictLeaderSchedulers(ctx) + if err2 != nil { + return err2 + } + for _, s := range evictLeaderSchedulers { + if s == getLeaderEvictSchedulerStr(storeID) { + return nil + } + } + + return fmt.Errorf("failed to begin evict leader of store:[%s], error: %w", storeID, err) +} + +func (c *pdClient) EndEvictLeader(ctx context.Context, storeID string) error { + sName := getLeaderEvictSchedulerStr(storeID) + apiURL := fmt.Sprintf("%s/%s/%s", c.url, schedulersPrefix, sName) + req, err := http.NewRequestWithContext(ctx, "DELETE", apiURL, http.NoBody) + if err != nil { + return err + } + //nolint:bodyclose // has been handled + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer httputil.DeferClose(res.Body) + if res.StatusCode == http.StatusNotFound { + return nil + } else if res.StatusCode != http.StatusOK { + err2 := httputil.ReadErrorBody(res.Body) + return fmt.Errorf("failed %v to end leader evict scheduler of store:[%s], error: %w", res.StatusCode, storeID, err2) + } + + // pd will return an error with the body contains "scheduler not found" if the scheduler is not found + // this is not the standard response. 
+ // so these lines are just a workaround for now: + // - make a new request to get all schedulers + // - return nil if the scheduler is not found + // + // when PD returns standard json response, we should get rid of this verbose code. + evictLeaderSchedulers, err := c.GetEvictLeaderSchedulers(ctx) + if err != nil { + return err + } + for _, s := range evictLeaderSchedulers { + if s == sName { + return fmt.Errorf("end leader evict scheduler failed, the store:[%s]'s leader evict scheduler is still exist", storeID) + } + } + + return nil +} + +func (c *pdClient) GetEvictLeaderSchedulers(ctx context.Context) ([]string, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, schedulersPrefix) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + var schedulers []string + err = json.Unmarshal(body, &schedulers) + if err != nil { + return nil, err + } + var evicts []string + for _, scheduler := range schedulers { + if strings.HasPrefix(scheduler, evictSchedulerLeader) { + evicts = append(evicts, scheduler) + } + } + evictSchedulers, err := c.filterLeaderEvictScheduler(ctx, evicts) + if err != nil { + return nil, err + } + return evictSchedulers, nil +} + +func (c *pdClient) GetEvictLeaderScheduler(ctx context.Context, storeID string) (string, error) { + schedulers, err := c.GetEvictLeaderSchedulers(ctx) + if err != nil { + return "", err + } + + for _, scheduler := range schedulers { + sName := getLeaderEvictSchedulerStr(storeID) + if scheduler == sName { + return scheduler, nil + } + } + + return "", nil +} + +// This method is to make compatible between old pdapi version and versions after 3.1/4.0. 
+// To get more detail, see: +// - https://github.com/pingcap/tidb-operator/pull/1831 +// - https://github.com/pingcap/pd/issues/2550 +func (c *pdClient) filterLeaderEvictScheduler(ctx context.Context, evictLeaderSchedulers []string) ([]string, error) { + if len(evictLeaderSchedulers) == 1 && evictLeaderSchedulers[0] == evictSchedulerLeader { + var schedulerIds []string + // If there is only one evcit scehduler entry without store ID postfix. + // We should get the store IDs via scheduler config API and append them + // to provide consistent results. + c, err := c.getEvictLeaderSchedulerConfig(ctx) + if err != nil { + return nil, err + } + for k := range c.StoreIDWithRanges { + schedulerIds = append(schedulerIds, fmt.Sprintf("%s-%v", evictSchedulerLeader, k)) + } + + return schedulerIds, nil + } + + return evictLeaderSchedulers, nil +} + +// getEvictLeaderSchedulerConfig gets the config of PD scheduler "evict-leader-scheduler" +// It's available since PD 3.1.0. +// In the previous versions, PD API returns 404 and this function will return an error. 
+func (c *pdClient) getEvictLeaderSchedulerConfig(ctx context.Context) (*EvictLeaderSchedulerConfig, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, evictLeaderSchedulerConfigPrefix) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + config := &EvictLeaderSchedulerConfig{} + err = json.Unmarshal(body, config) + if err != nil { + return nil, err + } + return config, nil +} + +func getLeaderEvictSchedulerInfo(storeID string) (*SchedulerInfo, error) { + id, err := strconv.ParseUint(storeID, 10, 64) + if err != nil { + return nil, err + } + return &SchedulerInfo{"evict-leader-scheduler", id}, nil +} + +func getLeaderEvictSchedulerStr(storeID string) string { + return fmt.Sprintf("%s-%s", "evict-leader-scheduler", storeID) +} + +func (c *pdClient) GetPDLeader(ctx context.Context) (*pdpb.Member, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, pdLeaderPrefix) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + leader := &pdpb.Member{} + err = json.Unmarshal(body, leader) + if err != nil { + return nil, err + } + return leader, nil +} + +func (c *pdClient) TransferPDLeader(ctx context.Context, memberName string) error { + apiURL := fmt.Sprintf("%s/%s/%s", c.url, pdLeaderTransferPrefix, memberName) + req, err := http.NewRequestWithContext(ctx, "POST", apiURL, http.NoBody) + if err != nil { + return err + } + //nolint:bodyclose // has been handled + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer httputil.DeferClose(res.Body) + if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { + return nil + } + err2 := httputil.ReadErrorBody(res.Body) + return fmt.Errorf("failed %v to transfer pd leader to %s, error: %w", res.StatusCode, memberName, err2) +} diff --git a/pkg/pdapi/v1/pd_config.go b/pkg/pdapi/v1/pd_config.go new file mode 100644 index 00000000000..926651886bf --- /dev/null +++ b/pkg/pdapi/v1/pd_config.go @@ -0,0 
// PDConfigFromAPI is the subset of the PD configuration exposed by the PD
// config API that the operator consumes.
// +k8s:openapi-gen=true
type PDConfigFromAPI struct {
	// Log related config.
	Log *PDLogConfig `toml:"log,omitempty" json:"log,omitempty"`

	// Immutable, change should be made through pd-ctl after cluster creation
	Schedule *PDScheduleConfig `toml:"schedule,omitempty" json:"schedule,omitempty"`

	// Immutable, change should be made through pd-ctl after cluster creation
	Replication *PDReplicationConfig `toml:"replication,omitempty" json:"replication,omitempty"`
}

// PDLogConfig serializes log related config in toml/json.
// +k8s:openapi-gen=true
type PDLogConfig struct {
	// Log level.
	// Optional: Defaults to info
	Level string `toml:"level,omitempty" json:"level,omitempty"`
	// Log format. one of json, text, or console.
	Format string `toml:"format,omitempty" json:"format,omitempty"`
	// Disable automatic timestamps in output.
	DisableTimestamp *bool `toml:"disable-timestamp,omitempty" json:"disable-timestamp,omitempty"`
	// File log config.
	File *FileLogConfig `toml:"file,omitempty" json:"file,omitempty"`
	// Development puts the logger in development mode, which changes the
	// behavior of DPanicLevel and takes stacktraces more liberally.
	Development *bool `toml:"development,omitempty" json:"development,omitempty"`
	// DisableCaller stops annotating logs with the calling function's file
	// name and line number. By default, all logs are annotated.
	DisableCaller *bool `toml:"disable-caller,omitempty" json:"disable-caller,omitempty"`
	// DisableStacktrace completely disables automatic stacktrace capturing. By
	// default, stacktraces are captured for WarnLevel and above logs in
	// development and ErrorLevel and above in production.
	DisableStacktrace *bool `toml:"disable-stacktrace,omitempty" json:"disable-stacktrace,omitempty"`
	// DisableErrorVerbose stops annotating logs with the full verbose error
	// message.
	DisableErrorVerbose *bool `toml:"disable-error-verbose,omitempty" json:"disable-error-verbose,omitempty"`
}

// PDReplicationConfig is the replication configuration.
// +k8s:openapi-gen=true
type PDReplicationConfig struct {
	// MaxReplicas is the number of replicas for each region.
	// Immutable, change should be made through pd-ctl after cluster creation
	// Optional: Defaults to 3
	MaxReplicas *uint64 `toml:"max-replicas,omitempty" json:"max-replicas,omitempty"`

	// The label keys specified the location of a store.
	// The placement priorities is implied by the order of label keys.
	// For example, ["zone", "rack"] means that we should place replicas to
	// different zones first, then to different racks if we don't have enough zones.
	// Immutable, change should be made through pd-ctl after cluster creation
	// +k8s:openapi-gen=false
	LocationLabels StringSlice `toml:"location-labels,omitempty" json:"location-labels,omitempty"`
	// StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocationLabels.
	// Immutable, change should be made through pd-ctl after cluster creation.
	// Imported from v3.1.0
	StrictlyMatchLabel *bool `toml:"strictly-match-label,omitempty" json:"strictly-match-label,string,omitempty"`

	// When PlacementRules feature is enabled. MaxReplicas and LocationLabels are not used anymore.
	EnablePlacementRules *bool `toml:"enable-placement-rules" json:"enable-placement-rules,string,omitempty"`
}
+ // Immutable, change should be made through pd-ctl after cluster creation + // Optional: Defaults to 1h + SplitMergeInterval string `toml:"split-merge-interval,omitempty" json:"split-merge-interval,omitempty"` + // PatrolRegionInterval is the interval for scanning region during patrol. + // Immutable, change should be made through pd-ctl after cluster creation + PatrolRegionInterval string `toml:"patrol-region-interval,omitempty" json:"patrol-region-interval,omitempty"` + // MaxStoreDownTime is the max duration after which + // a store will be considered to be down if it hasn't reported heartbeats. + // Immutable, change should be made through pd-ctl after cluster creation + // Optional: Defaults to 30m + MaxStoreDownTime string `toml:"max-store-down-time,omitempty" json:"max-store-down-time,omitempty"` + // LeaderScheduleLimit is the max coexist leader schedules. + // Immutable, change should be made through pd-ctl after cluster creation. + // Optional: Defaults to 4. + // Imported from v3.1.0 + LeaderScheduleLimit *uint64 `toml:"leader-schedule-limit,omitempty" json:"leader-schedule-limit,omitempty"` + // RegionScheduleLimit is the max coexist region schedules. + // Immutable, change should be made through pd-ctl after cluster creation + // Optional: Defaults to 2048 + RegionScheduleLimit *uint64 `toml:"region-schedule-limit,omitempty" json:"region-schedule-limit,omitempty"` + // ReplicaScheduleLimit is the max coexist replica schedules. + // Immutable, change should be made through pd-ctl after cluster creation + // Optional: Defaults to 64 + ReplicaScheduleLimit *uint64 `toml:"replica-schedule-limit,omitempty" json:"replica-schedule-limit,omitempty"` + // MergeScheduleLimit is the max coexist merge schedules. 
+	// Immutable, change should be made through pd-ctl after cluster creation
+	// Optional: Defaults to 8
+	MergeScheduleLimit *uint64 `toml:"merge-schedule-limit,omitempty" json:"merge-schedule-limit,omitempty"`
+	// HotRegionScheduleLimit is the max coexist hot region schedules.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	// Optional: Defaults to 4
+	HotRegionScheduleLimit *uint64 `toml:"hot-region-schedule-limit,omitempty" json:"hot-region-schedule-limit,omitempty"`
+	// HotRegionCacheHitsThreshold is the cache hits threshold of the hot region.
+	// If the number of times a region hits the hot cache is greater than this
+	// threshold, it is considered a hot region.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	HotRegionCacheHitsThreshold *uint64 `toml:"hot-region-cache-hits-threshold,omitempty" json:"hot-region-cache-hits-threshold,omitempty"`
+	// TolerantSizeRatio is the ratio of buffer size for balance scheduler.
+	// Immutable, change should be made through pd-ctl after cluster creation.
+	// Imported from v3.1.0
+	TolerantSizeRatio *float64 `toml:"tolerant-size-ratio,omitempty" json:"tolerant-size-ratio,omitempty"`
+	//
+	//      high space stage         transition stage           low space stage
+	//   |--------------------|-----------------------------|-------------------------|
+	//   ^                    ^                             ^                         ^
+	//   0       HighSpaceRatio * capacity       LowSpaceRatio * capacity          capacity
+	//
+	// LowSpaceRatio is the lowest usage ratio of store which is regarded as low space.
+	// When in low space, store region score increases to very large and varies inversely with available size.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	LowSpaceRatio *float64 `toml:"low-space-ratio,omitempty" json:"low-space-ratio,omitempty"`
+	// HighSpaceRatio is the highest usage ratio of store which is regarded as high space.
+	// High space means there is a lot of spare capacity, and store region score varies directly with used size.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	HighSpaceRatio *float64 `toml:"high-space-ratio,omitempty" json:"high-space-ratio,omitempty"`
+	// DisableLearner is the option to disable using AddLearnerNode instead of AddNode
+	// Immutable, change should be made through pd-ctl after cluster creation
+	DisableLearner *bool `toml:"disable-raft-learner,omitempty" json:"disable-raft-learner,string,omitempty"`
+
+	// DisableRemoveDownReplica is the option to prevent replica checker from
+	// removing down replicas.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	DisableRemoveDownReplica *bool `toml:"disable-remove-down-replica,omitempty" json:"disable-remove-down-replica,string,omitempty"`
+	// DisableReplaceOfflineReplica is the option to prevent replica checker from
+	// replacing offline replicas.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	//nolint:lll // too long name
+	DisableReplaceOfflineReplica *bool `toml:"disable-replace-offline-replica,omitempty" json:"disable-replace-offline-replica,string,omitempty"`
+	// DisableMakeUpReplica is the option to prevent replica checker from making up
+	// replicas when replica count is less than expected.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	DisableMakeUpReplica *bool `toml:"disable-make-up-replica,omitempty" json:"disable-make-up-replica,string,omitempty"`
+	// DisableRemoveExtraReplica is the option to prevent replica checker from
+	// removing extra replicas.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	DisableRemoveExtraReplica *bool `toml:"disable-remove-extra-replica,omitempty" json:"disable-remove-extra-replica,string,omitempty"`
+	// DisableLocationReplacement is the option to prevent replica checker from
+	// moving replica to a better location.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	DisableLocationReplacement *bool `toml:"disable-location-replacement,omitempty" json:"disable-location-replacement,string,omitempty"`
+	// DisableNamespaceRelocation is the option to prevent namespace checker
+	// from moving replica to the target namespace.
+	// Immutable, change should be made through pd-ctl after cluster creation
+	DisableNamespaceRelocation *bool `toml:"disable-namespace-relocation,omitempty" json:"disable-namespace-relocation,string,omitempty"`
+
+	// Schedulers support for loading customized schedulers
+	// Immutable, change should be made through pd-ctl after cluster creation
+	// json v2 is for the sake of compatible upgrade
+	Schedulers *PDSchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2,omitempty"`
+
+	// Only used to display
+	SchedulersPayload map[string]any `toml:"schedulers-payload" json:"schedulers-payload,omitempty"`
+
+	// EnableOneWayMerge is the option to enable one way merge. This means a Region can only be merged into the next region of it.
+	// Imported from v3.1.0
+	EnableOneWayMerge *bool `toml:"enable-one-way-merge" json:"enable-one-way-merge,string,omitempty"`
+	// EnableCrossTableMerge is the option to enable cross table merge. This means two Regions can be merged with different table IDs.
+	// This option only works when key type is "table".
+ // Imported from v3.1.0 + EnableCrossTableMerge *bool `toml:"enable-cross-table-merge" json:"enable-cross-table-merge,string,omitempty"` +} + +type PDSchedulerConfigs []PDSchedulerConfig + +// PDSchedulerConfig is customized scheduler configuration +// +k8s:openapi-gen=true +type PDSchedulerConfig struct { + // Immutable, change should be made through pd-ctl after cluster creation + Type string `toml:"type,omitempty" json:"type,omitempty"` + // Immutable, change should be made through pd-ctl after cluster creation + Args []string `toml:"args,omitempty" json:"args,omitempty"` + // Immutable, change should be made through pd-ctl after cluster creation + Disable *bool `toml:"disable,omitempty" json:"disable,omitempty"` +} + +// +k8s:openapi-gen=true +type FileLogConfig struct { + // Log filename, leave empty to disable file log. + Filename string `toml:"filename,omitempty" json:"filename,omitempty"` + // Is log rotate enabled. + LogRotate bool `toml:"log-rotate,omitempty" json:"log-rotate,omitempty"` + // Max size for a single file, in MB. + MaxSize int `toml:"max-size,omitempty" json:"max-size,omitempty"` + // Max log keep days, default is never deleting. + MaxDays int `toml:"max-days,omitempty" json:"max-days,omitempty"` + // Maximum number of old log files to retain. + MaxBackups int `toml:"max-backups,omitempty" json:"max-backups,omitempty"` +} + +// StringSlice is more friendly to json encode/decode +type StringSlice []string + +// MarshalJSON returns the size as a JSON string. +func (s StringSlice) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(strings.Join(s, ","))), nil +} + +// UnmarshalJSON parses a JSON string into the bytesize. 
+func (s *StringSlice) UnmarshalJSON(text []byte) error { + data, err := strconv.Unquote(string(text)) + if err != nil { + return err + } + if data == "" { + *s = nil + return nil + } + *s = strings.Split(data, ",") + return nil +} + +// EvictLeaderSchedulerConfig holds configuration for evict leader +// https://github.com/pingcap/pd/blob/b21855a3aeb787c71b0819743059e432be217dcd/server/schedulers/evict_leader.go#L81-L86 +// note that we use `json.RawMessage` as the type of value because we don't care +// about the value for now +type EvictLeaderSchedulerConfig struct { + StoreIDWithRanges map[uint64]json.RawMessage `json:"store-id-ranges"` +} diff --git a/pkg/pdapi/v1/types.go b/pkg/pdapi/v1/types.go new file mode 100644 index 00000000000..35363a79ee4 --- /dev/null +++ b/pkg/pdapi/v1/types.go @@ -0,0 +1,115 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pdapi + +import ( + "fmt" + "time" + + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + + "github.com/pingcap/tidb-operator/pkg/pdapi/pd" +) + +// HealthInfo define PD's healthy info. +type HealthInfo struct { + Healths []MemberHealth +} + +// MemberHealth define a PD member's healthy info. +type MemberHealth struct { + Name string `json:"name"` + MemberID uint64 `json:"member_id"` + ClientUrls []string `json:"client_urls"` + Health bool `json:"health"` +} + +// MembersInfo is PD members info returned from PD RESTful interface. 
+type MembersInfo struct { + Header *pdpb.ResponseHeader `json:"header,omitempty"` + Members []*pdpb.Member `json:"members,omitempty"` + Leader *pdpb.Member `json:"leader,omitempty"` + EtcdLeader *pdpb.Member `json:"etcd_leader,omitempty"` +} + +// ServiceRegistryEntry is the registry entry of PD Micro Service. +type ServiceRegistryEntry struct { + ServiceAddr string `json:"service-addr"` + Version string `json:"version"` + GitHash string `json:"git-hash"` + DeployPath string `json:"deploy-path"` + StartTimestamp int64 `json:"start-timestamp"` +} + +// MetaStore is TiKV store status defined in protobuf. +type MetaStore struct { + *metapb.Store + StateName string `json:"state_name"` +} + +// StoreStatus is TiKV store status returned from PD RESTful interface. +type StoreStatus struct { + Capacity pd.ByteSize `json:"capacity"` + Available pd.ByteSize `json:"available"` + LeaderCount int `json:"leader_count"` + RegionCount int `json:"region_count"` + SendingSnapCount uint32 `json:"sending_snap_count"` + ReceivingSnapCount uint32 `json:"receiving_snap_count"` + ApplyingSnapCount uint32 `json:"applying_snap_count"` + IsBusy bool `json:"is_busy"` + + StartTS time.Time `json:"start_ts"` + LastHeartbeatTS time.Time `json:"last_heartbeat_ts"` + Uptime pd.Duration `json:"uptime"` +} + +// StoreInfo is a single store info returned from PD RESTful interface. +type StoreInfo struct { + Store *MetaStore `json:"store"` + Status *StoreStatus `json:"status"` +} + +// StoresInfo is stores info returned from PD RESTful interface +type StoresInfo struct { + Count int `json:"count"` + Stores []*StoreInfo `json:"stores"` +} + +// SchedulerInfo is a single scheduler info returned from PD RESTful interface. +type SchedulerInfo struct { + Name string `json:"name"` + StoreID uint64 `json:"store_id"` +} + +// TiKVNotBootstrappedError represents that TiKV cluster is not bootstrapped yet. 
+type TiKVNotBootstrappedError struct { + s string +} + +func (e *TiKVNotBootstrappedError) Error() string { + return e.s +} + +// TiKVNotBootstrappedErrorf returns a TiKVNotBootstrappedError. +func TiKVNotBootstrappedErrorf(format string, a ...any) error { + return &TiKVNotBootstrappedError{fmt.Sprintf(format, a...)} +} + +// IsTiKVNotBootstrappedError returns whether err is a TiKVNotBootstrappedError. +func IsTiKVNotBootstrappedError(err error) bool { + _, ok := err.(*TiKVNotBootstrappedError) + return ok +} diff --git a/pkg/runtime/group.go b/pkg/runtime/group.go new file mode 100644 index 00000000000..fe6a5be97b1 --- /dev/null +++ b/pkg/runtime/group.go @@ -0,0 +1,30 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +type group interface { + object + + SetReplicas(replicas *int32) + Replicas() *int32 + Cluster() string + Component() string +} + +type Group interface { + group + + *PDGroup | *TiDBGroup | *TiKVGroup | *TiFlashGroup +} diff --git a/pkg/runtime/instance.go b/pkg/runtime/instance.go new file mode 100644 index 00000000000..7db97923a51 --- /dev/null +++ b/pkg/runtime/instance.go @@ -0,0 +1,39 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +type instance interface { + object + + GetTopology() v1alpha1.Topology + SetTopology(topo v1alpha1.Topology) + + GetUpdateRevision() string + IsHealthy() bool + // IsUpToDate means all resources managed by the instance is up to date + // NOTE: It does not mean the instance is updated to the newest revision + // TODO: may be change a more meaningful name? + IsUpToDate() bool +} + +type Instance interface { + instance + + *PD | *TiDB | *TiKV | *TiFlash +} diff --git a/pkg/runtime/object.go b/pkg/runtime/object.go new file mode 100644 index 00000000000..ae3c1697d00 --- /dev/null +++ b/pkg/runtime/object.go @@ -0,0 +1,33 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type object interface { + metav1.Object + + To() client.Object + Conditions() []metav1.Condition +} + +type Object interface { + object + + *PDGroup | *TiDBGroup | *TiKVGroup | *TiFlashGroup | *PD | *TiDB | *TiKV | *TiFlash +} diff --git a/pkg/runtime/pd.go b/pkg/runtime/pd.go new file mode 100644 index 00000000000..e13f15f4e86 --- /dev/null +++ b/pkg/runtime/pd.go @@ -0,0 +1,130 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package runtime
+
+import (
+	"unsafe"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+)
+
+type (
+	PD      v1alpha1.PD
+	PDGroup v1alpha1.PDGroup
+)
+
+func FromPD(pd *v1alpha1.PD) *PD {
+	return (*PD)(pd)
+}
+
+func ToPD(pd *PD) *v1alpha1.PD {
+	return (*v1alpha1.PD)(pd)
+}
+
+func FromPDSlice(pds []*v1alpha1.PD) []*PD {
+	return *(*[]*PD)(unsafe.Pointer(&pds))
+}
+
+func ToPDSlice(pds []*PD) []*v1alpha1.PD {
+	return *(*[]*v1alpha1.PD)(unsafe.Pointer(&pds))
+}
+
+func FromPDGroup(pdg *v1alpha1.PDGroup) *PDGroup {
+	return (*PDGroup)(pdg)
+}
+
+func ToPDGroup(pdg *PDGroup) *v1alpha1.PDGroup {
+	return (*v1alpha1.PDGroup)(pdg)
+}
+
+func FromPDGroupSlice(pdgs []*v1alpha1.PDGroup) []*PDGroup {
+	return *(*[]*PDGroup)(unsafe.Pointer(&pdgs))
+}
+
+func ToPDGroupSlice(pdgs []*PDGroup) []*v1alpha1.PDGroup {
+	return *(*[]*v1alpha1.PDGroup)(unsafe.Pointer(&pdgs))
+}
+
+var _ instance = &PD{}
+
+func (pd *PD) DeepCopyObject() runtime.Object {
+	return (*v1alpha1.PD)(pd).DeepCopyObject()
+}
+
+func (pd *PD) To() client.Object {
+	return ToPD(pd)
+}
+
+func (pd *PD) GetTopology() v1alpha1.Topology {
+	return pd.Spec.Topology
+}
+
+func (pd *PD) SetTopology(t v1alpha1.Topology) {
+	pd.Spec.Topology = t
+}
+
+func (pd *PD) GetUpdateRevision() string {
+	if pd.Labels == nil {
+		return ""
+	}
+	return pd.Labels[v1alpha1.LabelKeyInstanceRevisionHash]
+}
+
+func (pd *PD) IsHealthy() bool {
+	return meta.IsStatusConditionTrue(pd.Status.Conditions, v1alpha1.PDCondHealth)
+}
+
+func (pd *PD) IsUpToDate() bool {
+	return pd.Status.ObservedGeneration == pd.GetGeneration() && pd.GetUpdateRevision() == pd.Status.CurrentRevision
+}
+
+func (pd *PD) Conditions() []metav1.Condition {
+	return pd.Status.Conditions
+}
+
+var _ group = &PDGroup{}
+
+func (pdg *PDGroup) DeepCopyObject() runtime.Object {
+	return (*v1alpha1.PDGroup)(pdg).DeepCopyObject()
+}
+
+func (pdg *PDGroup) To() client.Object {
+	return ToPDGroup(pdg)
+}
+
+func (pdg *PDGroup) SetReplicas(replicas *int32) {
+	pdg.Spec.Replicas = replicas
+}
+
+func (pdg *PDGroup) Replicas() *int32 {
+	return pdg.Spec.Replicas
+}
+
+func (pdg *PDGroup) Cluster() string {
+	return pdg.Spec.Cluster.Name
+}
+
+func (*PDGroup) Component() string {
+	return v1alpha1.LabelValComponentPD
+}
+
+func (pdg *PDGroup) Conditions() []metav1.Condition {
+	return pdg.Status.Conditions
+}
diff --git a/pkg/runtime/tidb.go b/pkg/runtime/tidb.go
new file mode 100644
index 00000000000..3773d67dcba
--- /dev/null
+++ b/pkg/runtime/tidb.go
@@ -0,0 +1,76 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+	"unsafe"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+)
+
+type (
+	TiDB      v1alpha1.TiDB
+	TiDBGroup v1alpha1.TiDBGroup
+)
+
+func FromTiDB(db *v1alpha1.TiDB) *TiDB {
+	return (*TiDB)(db)
+}
+
+func FromTiDBSlice(dbs []*v1alpha1.TiDB) []*TiDB {
+	return *(*[]*TiDB)(unsafe.Pointer(&dbs))
+}
+
+var _ instance = &TiDB{}
+
+func (db *TiDB) DeepCopyObject() runtime.Object {
+	return (*v1alpha1.TiDB)(db).DeepCopyObject()
+}
+
+func (db *TiDB) To() client.Object {
+	return (*v1alpha1.TiDB)(db)
+}
+
+func (db *TiDB) GetTopology() v1alpha1.Topology {
+	return db.Spec.Topology
+}
+
+func (db *TiDB) SetTopology(t v1alpha1.Topology) {
+	db.Spec.Topology = t
+}
+
+func (db *TiDB) GetUpdateRevision() string {
+	if db.Labels == nil {
+		return ""
+	}
+	return db.Labels[v1alpha1.LabelKeyInstanceRevisionHash]
+}
+
+func (db *TiDB) IsHealthy() bool {
+	return meta.IsStatusConditionTrue(db.Status.Conditions, v1alpha1.TiDBCondHealth)
+}
+
+func (db *TiDB) IsUpToDate() bool {
+	return db.Status.ObservedGeneration == db.GetGeneration() && db.GetUpdateRevision() == db.Status.CurrentRevision
+}
+
+func (db *TiDB) Conditions() []metav1.Condition {
+	return db.Status.Conditions
+}
diff --git a/pkg/runtime/tiflash.go b/pkg/runtime/tiflash.go
new file mode 100644
index 00000000000..8305f594e5e
--- /dev/null
+++ b/pkg/runtime/tiflash.go
@@ -0,0 +1,76 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "unsafe" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +type ( + TiFlash v1alpha1.TiFlash + TiFlashGroup v1alpha1.TiFlashGroup +) + +func FromTiFlash(f *v1alpha1.TiFlash) *TiFlash { + return (*TiFlash)(f) +} + +func FromTiFlashSlice(fs []*v1alpha1.TiFlash) []*TiFlash { + return *(*[]*TiFlash)(unsafe.Pointer(&fs)) +} + +var _ instance = &TiFlash{} + +func (f *TiFlash) DeepCopyObject() runtime.Object { + return (*v1alpha1.TiFlash)(f).DeepCopyObject() +} + +func (f *TiFlash) To() client.Object { + return (*v1alpha1.TiFlash)(f) +} + +func (f *TiFlash) GetTopology() v1alpha1.Topology { + return f.Spec.Topology +} + +func (f *TiFlash) SetTopology(t v1alpha1.Topology) { + f.Spec.Topology = t +} + +func (f *TiFlash) GetUpdateRevision() string { + if f.Labels == nil { + return "" + } + return f.Labels[v1alpha1.LabelKeyInstanceRevisionHash] +} + +func (f *TiFlash) IsHealthy() bool { + return meta.IsStatusConditionTrue(f.Status.Conditions, v1alpha1.TiFlashCondHealth) +} + +func (f *TiFlash) IsUpToDate() bool { + return f.Status.ObservedGeneration == f.GetGeneration() && f.GetUpdateRevision() == f.Status.CurrentRevision +} + +func (f *TiFlash) Conditions() []metav1.Condition { + return f.Status.Conditions +} diff --git a/pkg/runtime/tikv.go b/pkg/runtime/tikv.go new file mode 100644 index 00000000000..9c968735b10 --- /dev/null +++ 
b/pkg/runtime/tikv.go @@ -0,0 +1,76 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "unsafe" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +type ( + TiKV v1alpha1.TiKV + TiKVGroup v1alpha1.TiKVGroup +) + +func FromTiKV(kv *v1alpha1.TiKV) *TiKV { + return (*TiKV)(kv) +} + +func FromTiKVSlice(kvs []*v1alpha1.TiKV) []*TiKV { + return *(*[]*TiKV)(unsafe.Pointer(&kvs)) +} + +var _ instance = &TiKV{} + +func (kv *TiKV) DeepCopyObject() runtime.Object { + return (*v1alpha1.TiKV)(kv).DeepCopyObject() +} + +func (kv *TiKV) To() client.Object { + return (*v1alpha1.TiKV)(kv) +} + +func (kv *TiKV) GetTopology() v1alpha1.Topology { + return kv.Spec.Topology +} + +func (kv *TiKV) SetTopology(t v1alpha1.Topology) { + kv.Spec.Topology = t +} + +func (kv *TiKV) GetUpdateRevision() string { + if kv.Labels == nil { + return "" + } + return kv.Labels[v1alpha1.LabelKeyInstanceRevisionHash] +} + +func (kv *TiKV) IsHealthy() bool { + return meta.IsStatusConditionTrue(kv.Status.Conditions, v1alpha1.TiKVCondHealth) +} + +func (kv *TiKV) IsUpToDate() bool { + return kv.Status.ObservedGeneration == kv.GetGeneration() && kv.GetUpdateRevision() == kv.Status.CurrentRevision +} + +func (kv *TiKV) Conditions() []metav1.Condition { + 
return kv.Status.Conditions +} diff --git a/pkg/scheme/scheme.go b/pkg/scheme/scheme.go new file mode 100644 index 00000000000..b88994a3f77 --- /dev/null +++ b/pkg/scheme/scheme.go @@ -0,0 +1,35 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheme + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +// Scheme is used by client to visit kubernetes API. +var ( + Scheme = runtime.NewScheme() + Codecs = serializer.NewCodecFactory(Scheme) +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(Scheme)) + utilruntime.Must(v1alpha1.Install(Scheme)) +} diff --git a/pkg/tidbapi/v1/client.go b/pkg/tidbapi/v1/client.go new file mode 100644 index 00000000000..2df96c61e3a --- /dev/null +++ b/pkg/tidbapi/v1/client.go @@ -0,0 +1,107 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tidbapi + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "net" + "net/http" + "time" + + httputil "github.com/pingcap/tidb-operator/pkg/utils/http" +) + +const ( + statusPath = "status" + infoPath = "info" + labelsPath = "labels" +) + +// TiDBClient provides TiDB server's APIs used by TiDB Operator. +type TiDBClient interface { + // GetHealth gets the health status of this TiDB server. + GetHealth(ctx context.Context) (bool, error) + // GetInfo gets the information of this TiDB server. + GetInfo(ctx context.Context) (*ServerInfo, error) + // SetServerLabels sets the labels of this TiDB server. + SetServerLabels(ctx context.Context, labels map[string]string) error +} + +// tidbClient is the default implementation of TiDBClient. +type tidbClient struct { + url string + httpClient *http.Client +} + +// NewTiDBClient returns a new TiDBClient. 
+func NewTiDBClient(url string, timeout time.Duration, tlsConfig *tls.Config) TiDBClient { + var disableKeepalive bool + if tlsConfig != nil { + disableKeepalive = true + } + return &tidbClient{ + url: url, + httpClient: &http.Client{ + Timeout: timeout, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + DisableKeepAlives: disableKeepalive, + ResponseHeaderTimeout: 10 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + DialContext: (&net.Dialer{ + Timeout: 10 * time.Second, + }).DialContext, + }, + }, + } +} + +func (c *tidbClient) GetHealth(ctx context.Context) (bool, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, statusPath) + _, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + // NOTE: we don't check the response body here. + return err == nil, err +} + +func (c *tidbClient) GetInfo(ctx context.Context) (*ServerInfo, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, infoPath) + // NOTE: in TiDB Operator v1, we use "POST" to get the info. + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return nil, err + } + + info := ServerInfo{} + err = json.Unmarshal(body, &info) + if err != nil { + return nil, err + } + return &info, nil +} + +func (c *tidbClient) SetServerLabels(ctx context.Context, labels map[string]string) error { + buffer := bytes.NewBuffer(nil) + if err := json.NewEncoder(buffer).Encode(labels); err != nil { + return fmt.Errorf("encode labels to json failed, error: %w", err) + } + + apiURL := fmt.Sprintf("%s/%s", c.url, labelsPath) + _, err := httputil.PostBodyOK(ctx, c.httpClient, apiURL, buffer) + return err +} diff --git a/pkg/tidbapi/v1/control.go b/pkg/tidbapi/v1/control.go new file mode 100644 index 00000000000..403ac017092 --- /dev/null +++ b/pkg/tidbapi/v1/control.go @@ -0,0 +1,82 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tidbapi
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"time"
+
+	corelisterv1 "k8s.io/client-go/listers/core/v1"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	tlsutil "github.com/pingcap/tidb-operator/pkg/utils/tls"
+)
+
+var (
+	defaultTiDBClientTimeout = 5 * time.Second
+	defaultTiDBStatusPort    = 10080
+)
+
+// TiDBControlInterface is the interface that knows how to manage and get client for TiDB.
+type TiDBControlInterface interface {
+	// GetTiDBPodClient provides TiDBClient of a TiDB pod.
+	GetTiDBPodClient(ctx context.Context, cli client.Client,
+		namespace, tcName, podName, clusterDomain string, tlsEnabled bool) (TiDBClient, error)
+}
+
+// defaultTiDBControl is the default implementation of TiDBControlInterface.
+type defaultTiDBControl struct {
+	secretLister corelisterv1.SecretLister
+}
+
+// NewDefaultTiDBControl returns a defaultTiDBControl instance.
+func NewDefaultTiDBControl(secretLister corelisterv1.SecretLister) TiDBControlInterface {
+	return &defaultTiDBControl{secretLister: secretLister}
+}
+
+// GetTiDBPodClient provides TiDBClient of a TiDB pod.
+// NOTE: add cache for TiDBClient if necessary.
+func (*defaultTiDBControl) GetTiDBPodClient(ctx context.Context, cli client.Client, + namespace, tcName, podName, clusterDomain string, tlsEnabled bool, +) (TiDBClient, error) { + var tlsConfig *tls.Config + var err error + scheme := "http" + + if tlsEnabled { + scheme = "https" + tlsConfig, err = tlsutil.GetTLSConfigFromSecret(ctx, cli, namespace, v1alpha1.TLSClusterClientSecretName(tcName)) + if err != nil { + return NewTiDBClient(TiDBPodClientURL(namespace, tcName, podName, clusterDomain, scheme), defaultTiDBClientTimeout, tlsConfig), + fmt.Errorf("unable to get tls config for TiDB cluster %q, tidb client may not work: %w", tcName, err) + } + return NewTiDBClient(TiDBPodClientURL(namespace, tcName, podName, clusterDomain, scheme), defaultTiDBClientTimeout, tlsConfig), nil + } + + return NewTiDBClient(TiDBPodClientURL(namespace, tcName, podName, clusterDomain, scheme), defaultTiDBClientTimeout, tlsConfig), nil +} + +// TiDBPodClientURL builds the URL of a tidb pod client. +func TiDBPodClientURL(namespace, clusterName, podName, clusterDomain, scheme string) string { + if clusterDomain != "" { + return fmt.Sprintf("%s://%s.%s-tidb-peer.%s.svc.%s:%d", + scheme, podName, clusterName, namespace, clusterDomain, defaultTiDBStatusPort) + } + return fmt.Sprintf("%s://%s.%s-tidb-peer.%s:%d", + scheme, podName, clusterName, namespace, defaultTiDBStatusPort) +} diff --git a/pkg/tidbapi/v1/types.go b/pkg/tidbapi/v1/types.go new file mode 100644 index 00000000000..61295765d9e --- /dev/null +++ b/pkg/tidbapi/v1/types.go @@ -0,0 +1,21 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tidbapi

// ServerInfo is the information of a TiDB server.
// ref https://github.com/pingcap/tidb/blob/v8.1.0/pkg/server/handler/tikvhandler/tikv_handler.go#L1696
type ServerInfo struct {
	// IsOwner reports whether this server is the owner (presumably the DDL
	// owner — confirm against the referenced TiDB handler).
	IsOwner bool `json:"is_owner"`
}
diff --git a/pkg/tiflashapi/v1/client.go b/pkg/tiflashapi/v1/client.go
new file mode 100644
index 00000000000..5e774ceb8b9
--- /dev/null
+++ b/pkg/tiflashapi/v1/client.go
@@ -0,0 +1,81 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tiflashapi

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"time"

	httputil "github.com/pingcap/tidb-operator/pkg/utils/http"
)

const (
	// storeStatusPath is the HTTP path of the TiFlash store-status endpoint.
	storeStatusPath = "tiflash/store-status"
)

// Status represents the status of a TiFlash store.
type Status string

// Store status values returned by the TiFlash store-status endpoint.
const (
	Idle       Status = "Idle"
	Ready      Status = "Ready"
	Running    Status = "Running"
	Stopping   Status = "Stopping"
	Terminated Status = "Terminated"
)

// TiFlashClient provides TiFlash server's APIs used by TiDB Operator.
+type TiFlashClient interface { + // GetStoreStatus gets the status of this TiFlash store. + GetStoreStatus(ctx context.Context) (Status, error) +} + +// tiflashClient is the default implementation of TiFlashClient. +type tiflashClient struct { + url string + httpClient *http.Client +} + +// NewTiFlashClient returns a new TiFlashClient. +func NewTiFlashClient(url string, timeout time.Duration, tlsConfig *tls.Config, disableKeepalive bool) TiFlashClient { + return &tiflashClient{ + url: url, + httpClient: &http.Client{ + Timeout: timeout, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + DisableKeepAlives: disableKeepalive, + ResponseHeaderTimeout: 10 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + DialContext: (&net.Dialer{ + Timeout: 10 * time.Second, + }).DialContext, + }, + }, + } +} + +func (c *tiflashClient) GetStoreStatus(ctx context.Context) (Status, error) { + apiURL := fmt.Sprintf("%s/%s", c.url, storeStatusPath) + body, err := httputil.GetBodyOK(ctx, c.httpClient, apiURL) + if err != nil { + return "", err + } + return Status(body), nil +} diff --git a/pkg/tiflashapi/v1/control.go b/pkg/tiflashapi/v1/control.go new file mode 100644 index 00000000000..33d0eb566aa --- /dev/null +++ b/pkg/tiflashapi/v1/control.go @@ -0,0 +1,78 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package tiflashapi

import (
	"context"
	"crypto/tls"
	"fmt"
	"time"

	corelisterv1 "k8s.io/client-go/listers/core/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	tlsutil "github.com/pingcap/tidb-operator/pkg/utils/tls"
)

// Defaults for TiFlash status-API clients.
const (
	defaultTiFlashClientTimeout = 5 * time.Second
	// defaultTiFlashProxyStatusPort is the TiFlash proxy status port the
	// pod client talks to.
	defaultTiFlashProxyStatusPort = 20292
)

// TiFlashControlInterface is an interface that knows how to manage and get client for TiFlash.
type TiFlashControlInterface interface {
	// GetTiFlashPodClient provides TiFlashClient of a TiFlash pod.
	GetTiFlashPodClient(ctx context.Context, cli client.Client,
		namespace, tcName, podName string, tlsEnabled bool) (TiFlashClient, error)
}

// defaultTiFlashControl is the default implementation of TiFlashControlInterface.
type defaultTiFlashControl struct {
	// secretLister is not read by GetTiFlashPodClient (unnamed receiver);
	// TLS material is fetched via the client.Client instead.
	secretLister corelisterv1.SecretLister
}

// NewDefaultTiFlashControl returns a defaultTiFlashControl instance.
func NewDefaultTiFlashControl(secretLister corelisterv1.SecretLister) TiFlashControlInterface {
	return &defaultTiFlashControl{secretLister: secretLister}
}

// GetTiFlashPodClient provides TiFlashClient of a TiFlash pod.
// NOTE: add cache for TiFlashClient if necessary.
+func (*defaultTiFlashControl) GetTiFlashPodClient(ctx context.Context, cli client.Client, + namespace, tcName, podName string, tlsEnabled bool, +) (TiFlashClient, error) { + var tlsConfig *tls.Config + var err error + scheme := "http" + + if tlsEnabled { + scheme = "https" + tlsConfig, err = tlsutil.GetTLSConfigFromSecret(ctx, cli, namespace, v1alpha1.TLSClusterClientSecretName(tcName)) + if err != nil { + return NewTiFlashClient(TiFlashPodClientURL(namespace, tcName, podName, scheme), defaultTiFlashClientTimeout, tlsConfig, true), + fmt.Errorf("unable to get tls config for TiDB cluster %q, tiflash client may not work: %w", tcName, err) + } + return NewTiFlashClient(TiFlashPodClientURL(namespace, tcName, podName, scheme), defaultTiFlashClientTimeout, tlsConfig, true), nil + } + + return NewTiFlashClient(TiFlashPodClientURL(namespace, tcName, podName, scheme), defaultTiFlashClientTimeout, tlsConfig, true), nil +} + +// TiFlashPodClientURL builds the URL of a TiFlash pod client. +func TiFlashPodClientURL(namespace, clusterName, podName, scheme string) string { + return fmt.Sprintf("%s://%s.%s-tiflash-peer.%s:%d", + scheme, podName, clusterName, namespace, defaultTiFlashProxyStatusPort) +} diff --git a/pkg/tikvapi/v1/client.go b/pkg/tikvapi/v1/client.go new file mode 100644 index 00000000000..d06d332c6c6 --- /dev/null +++ b/pkg/tikvapi/v1/client.go @@ -0,0 +1,100 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tikvapi + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "strconv" + "time" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prom2json" +) + +const ( + metricsPath = "metrics" + metricNameRegionCount = "tikv_raftstore_region_count" + metricLabelNameLeaderCount = "leader" + + metricChanSize = 1024 +) + +// TiKVClient provides TiKV server's APIs used by TiDB Operator. +type TiKVClient interface { + // GetLeaderCount gets region leader count of this TiKV store. + GetLeaderCount() (int, error) +} + +// tikvClient is the default implementation of TiKVClient. +type tikvClient struct { + url string + httpClient *http.Client +} + +// NewTiKVClient returns a new TiKVClient +func NewTiKVClient(url string, timeout time.Duration, tlsConfig *tls.Config, disableKeepalive bool) TiKVClient { + return &tikvClient{ + url: url, + httpClient: &http.Client{ + Timeout: timeout, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + DisableKeepAlives: disableKeepalive, + ResponseHeaderTimeout: 10 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + DialContext: (&net.Dialer{ + Timeout: 10 * time.Second, + }).DialContext, + }, + }, + } +} + +func (c *tikvClient) GetLeaderCount() (int, error) { + // we need to get the region leader count via the metrics API. 
+ apiURL := fmt.Sprintf("%s/%s", c.url, metricsPath) + transport := c.httpClient.Transport + mfChan := make(chan *dto.MetricFamily, metricChanSize) + + var fetchErr error + go func() { + if err := prom2json.FetchMetricFamilies(apiURL, mfChan, transport); err != nil { + fetchErr = fmt.Errorf("fail to fetch metric families from %s, error: %w", apiURL, err) + } + }() + + fms := []*prom2json.Family{} + for mfc := range mfChan { + fm := prom2json.NewFamily(mfc) + fms = append(fms, fm) + } + for _, fm := range fms { + if fm.Name == metricNameRegionCount { + for _, m := range fm.Metrics { + if m, ok := m.(prom2json.Metric); ok && m.Labels["type"] == metricLabelNameLeaderCount { + return strconv.Atoi(m.Value) + } + } + } + } + + if fetchErr != nil { + return 0, fetchErr + } + return 0, fmt.Errorf("metric %s{type=\"%s\"} not found for %s", metricNameRegionCount, metricLabelNameLeaderCount, apiURL) +} diff --git a/pkg/tikvapi/v1/control.go b/pkg/tikvapi/v1/control.go new file mode 100644 index 00000000000..731a0999ef9 --- /dev/null +++ b/pkg/tikvapi/v1/control.go @@ -0,0 +1,82 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package tikvapi

import (
	"context"
	"crypto/tls"
	"fmt"
	"time"

	corelisterv1 "k8s.io/client-go/listers/core/v1"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	tlsutil "github.com/pingcap/tidb-operator/pkg/utils/tls"
)

// Defaults for TiKV status-API clients.
const (
	defaultTiKVClientTimeout = 5 * time.Second
	// defaultTiKVStatusPort is the TiKV status server port the pod client
	// talks to.
	defaultTiKVStatusPort = 20180
)

// TiKVControlInterface is an interface that knows how to manage and get client for TiKV.
type TiKVControlInterface interface {
	// GetTiKVPodClient provides TiKVClient of a TiKV pod.
	GetTiKVPodClient(ctx context.Context, cli client.Client,
		namespace, tcName, podName, clusterDomain string, tlsEnabled bool) (TiKVClient, error)
}

// defaultTiKVControl is the default implementation of TiKVControlInterface.
type defaultTiKVControl struct {
	// secretLister is not read by GetTiKVPodClient (unnamed receiver);
	// TLS material is fetched via the client.Client instead.
	secretLister corelisterv1.SecretLister
}

// NewDefaultTiKVControl returns a defaultTiKVControl instance.
func NewDefaultTiKVControl(secretLister corelisterv1.SecretLister) TiKVControlInterface {
	return &defaultTiKVControl{secretLister: secretLister}
}

// GetTiKVPodClient provides TiKVClient of a TiKV pod.
// NOTE: add cache for TiKVClient if necessary.
+func (*defaultTiKVControl) GetTiKVPodClient(ctx context.Context, cli client.Client, + namespace, tcName, podName, clusterDomain string, tlsEnabled bool, +) (TiKVClient, error) { + var tlsConfig *tls.Config + var err error + scheme := "http" + + if tlsEnabled { + scheme = "https" + tlsConfig, err = tlsutil.GetTLSConfigFromSecret(ctx, cli, namespace, v1alpha1.TLSClusterClientSecretName(tcName)) + if err != nil { + return NewTiKVClient(TiKVPodClientURL(namespace, tcName, podName, scheme, clusterDomain), defaultTiKVClientTimeout, tlsConfig, true), + fmt.Errorf("unable to get tls config for TiDB cluster %q, tikv client may not work: %w", tcName, err) + } + return NewTiKVClient(TiKVPodClientURL(namespace, tcName, podName, scheme, clusterDomain), defaultTiKVClientTimeout, tlsConfig, true), nil + } + + return NewTiKVClient(TiKVPodClientURL(namespace, tcName, podName, scheme, clusterDomain), defaultTiKVClientTimeout, tlsConfig, true), nil +} + +// TiKVPodClientURL builds the URL of a tikv pod client. +func TiKVPodClientURL(namespace, clusterName, podName, scheme, clusterDomain string) string { + if clusterDomain != "" { + return fmt.Sprintf("%s://%s.%s-tikv-peer.%s.svc.%s:%d", + scheme, podName, clusterName, namespace, clusterDomain, defaultTiKVStatusPort) + } + return fmt.Sprintf("%s://%s.%s-tikv-peer.%s:%d", + scheme, podName, clusterName, namespace, defaultTiKVStatusPort) +} diff --git a/pkg/timanager/apis/pd/v1/doc.go b/pkg/timanager/apis/pd/v1/doc.go new file mode 100644 index 00000000000..72bd143eab4 --- /dev/null +++ b/pkg/timanager/apis/pd/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +groupName=pd.pingcap.com +// +versionName=v1 +// +k8s:deepcopy-gen=package +package v1 diff --git a/pkg/timanager/apis/pd/v1/types.go b/pkg/timanager/apis/pd/v1/types.go new file mode 100644 index 00000000000..e4253a5bfaf --- /dev/null +++ b/pkg/timanager/apis/pd/v1/types.go @@ -0,0 +1,130 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true

// StoreList is the list of store
type StoreList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []Store `json:"items"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true

// Store is the object of store
type Store struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Invalid means pd svc is unavailable and store info is untrusted
	Invalid bool `json:"invalid,omitempty"`

	ID      string `json:"id,omitempty"`
	Address string `json:"address,omitempty"`
	Version string `json:"version,omitempty"`
	// PeerAddress string `json:"peer_address,omitempty"`
	// StatusAddress string `json:"status_address,omitempty"`
	// GitHash string `json:"git_hash,omitempty"`
	// DeployPath string `json:"deploy_path,omitempty"`
	PhysicallyDestroyed bool `json:"physically_destroyed,omitempty"`

	State     StoreState `json:"state,omitempty"`
	NodeState NodeState  `json:"node_state,omitempty"`

	StartTimestamp int64 `json:"start_timestamp,omitempty"`
	// LastHeartbeat int64 `json:"last_heartbeat,omitempty"`

	LeaderCount int `json:"leader_count"`
	RegionCount int `json:"region_count"`
	// SendingSnapCount uint32 `json:"sending_snap_count"`
	// ReceivingSnapCount uint32 `json:"receiving_snap_count"`
	// ApplyingSnapCount uint32 `json:"applying_snap_count"`
	// IsBusy bool `json:"is_busy"`
}

// Engine returns the store engine derived from the "engine" label;
// a store without that label is treated as a TiKV store.
// Safe to call on a nil receiver (returns "").
func (s *Store) Engine() StoreEngine {
	if s == nil {
		return ""
	}
	if e, ok := s.Labels["engine"]; ok {
		return StoreEngine(e)
	}
	return StoreEngineTiKV
}

// StoreEngine is the engine type of a store.
type StoreEngine string

const (
	StoreEngineTiKV    StoreEngine = "tikv"
	StoreEngineTiFlash StoreEngine = "tiflash"
)

// StoreState is the PD-reported state of a store.
type StoreState string

const (
	StoreStateUp      StoreState = "Up"
	StoreStateOffline StoreState = "Offline"
	// NOTE(review): the identifier says "Tombstore" while the value is
	// "Tombstone"; renaming the exported constant would break callers, so
	// it is flagged here rather than fixed — consider a follow-up rename.
	StoreStateTombstore StoreState = "Tombstone"
)

// NodeState is the PD-reported node state of a store.
type NodeState string

const (
	NodeStatePreparing NodeState = "Preparing"
	NodeStateServing   NodeState = "Serving"
	NodeStateRemoving  NodeState = "Removing"
	NodeStateRemoved   NodeState = "Removed"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true

// MemberList is the list of pd members
type MemberList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []Member `json:"items"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true

// Member is the object of pd member
type Member struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Invalid means pd svc is unavailable and store info is untrusted
	Invalid bool `json:"invalid,omitempty"`

	ID             string   `json:"id"`
	PeerUrls       []string `json:"peer_urls,omitempty"`
	ClientUrls     []string `json:"client_urls,omitempty"`
	LeaderPriority int32    `json:"leader_priority,omitempty"`

	IsLeader     bool `json:"is_leader"`
	IsEtcdLeader bool `json:"is_etcd_leader"`
	Health       bool `json:"health"`
}
diff --git a/pkg/timanager/apis/pd/v1/zz_generated.deepcopy.go b/pkg/timanager/apis/pd/v1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..e0eb775257c
--- /dev/null
+++ b/pkg/timanager/apis/pd/v1/zz_generated.deepcopy.go
@@ -0,0 +1,152 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Member) DeepCopyInto(out *Member) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PeerUrls != nil { + in, out := &in.PeerUrls, &out.PeerUrls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClientUrls != nil { + in, out := &in.ClientUrls, &out.ClientUrls + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Member. +func (in *Member) DeepCopy() *Member { + if in == nil { + return nil + } + out := new(Member) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Member) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemberList) DeepCopyInto(out *MemberList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Member, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberList. +func (in *MemberList) DeepCopy() *MemberList { + if in == nil { + return nil + } + out := new(MemberList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MemberList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Store) DeepCopyInto(out *Store) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Store. +func (in *Store) DeepCopy() *Store { + if in == nil { + return nil + } + out := new(Store) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Store) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StoreList) DeepCopyInto(out *StoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Store, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreList. +func (in *StoreList) DeepCopy() *StoreList { + if in == nil { + return nil + } + out := new(StoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/pkg/timanager/apis/pd/v1/zz_generated.register.go b/pkg/timanager/apis/pd/v1/zz_generated.register.go new file mode 100644 index 00000000000..84e9f9d2d85 --- /dev/null +++ b/pkg/timanager/apis/pd/v1/zz_generated.register.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by register-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. 
+const GroupName = "pd.pingcap.com" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Deprecated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Member{}, + &MemberList{}, + &Store{}, + &StoreList{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/timanager/informer.go b/pkg/timanager/informer.go new file mode 100644 index 00000000000..599f4f39eb3 --- /dev/null +++ b/pkg/timanager/informer.go @@ -0,0 +1,324 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timanager + +import ( + "context" + "reflect" + "sync" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +// SharedInformerFactory is modified from k8s.io/client-go/informers/factory.go +// to support poll from tidb clusters. +type SharedInformerFactory[UnderlayClient any] interface { + Start(stopCh <-chan struct{}) + Shutdown() + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + InformerFor(obj runtime.Object) cache.SharedIndexInformer + // Refresh will poll once immediately + Refresh(obj runtime.Object) +} + +func NewSharedInformerFactory[UnderlayClient any]( + name string, + logger logr.Logger, + scheme *runtime.Scheme, + c UnderlayClient, + newPollerFuncMap map[reflect.Type]NewPollerFunc[UnderlayClient], + resyncPeriod time.Duration, +) SharedInformerFactory[UnderlayClient] { + return &factory[UnderlayClient]{ + logger: logger, + name: name, + resyncPeriod: resyncPeriod, + pollers: map[reflect.Type]Poller{}, + informers: map[reflect.Type]cache.SharedIndexInformer{}, + startedInformers: map[reflect.Type]bool{}, + scheme: scheme, + c: c, + newPollerFuncMap: newPollerFuncMap, + } +} + +type factory[UnderlayClient any] struct { + logger logr.Logger + + name string + + c UnderlayClient + + lock sync.Mutex + + wg sync.WaitGroup + + shuttingDown bool + + resyncPeriod 
time.Duration + + pollers map[reflect.Type]Poller + + informers map[reflect.Type]cache.SharedIndexInformer + startedInformers map[reflect.Type]bool + + scheme *runtime.Scheme + + newPollerFuncMap map[reflect.Type]NewPollerFunc[UnderlayClient] +} + +func (f *factory[UnderlayClient]) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *factory[UnderlayClient]) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *factory[UnderlayClient]) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +func (f *factory[UnderlayClient]) InformerFor(obj runtime.Object) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + poller, ok := f.pollers[informerType] + if !ok { + newPollerFunc, ok := f.newPollerFuncMap[informerType] + if !ok { + // TODO: fix it + panic("unrecognized 
type") + } + poller = newPollerFunc(f.name, f.logger, f.c) + } + + lw := NewListerWatcher[UnderlayClient](f.logger, poller) + + informer = cache.NewSharedIndexInformer( + lw, + obj, + f.resyncPeriod, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + ) + f.informers[informerType] = informer + + return informer +} + +func (f *factory[UnderlayClient]) Refresh(obj runtime.Object) { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + poller, ok := f.pollers[informerType] + if !ok { + return + } + poller.Refresh() +} + +type ListerWatcher[UnderlayClient any] interface { + cache.ListerWatcher +} + +type listerWatcher[UnderlayClient any] struct { + logger logr.Logger + p Poller +} + +func NewListerWatcher[UnderlayClient any]( + logger logr.Logger, + p Poller, +) ListerWatcher[UnderlayClient] { + return &listerWatcher[UnderlayClient]{ + logger: logger, + p: p, + } +} + +// List implements the ListerWatcher interface. +// +//nolint:gocritic // implements an interface +func (lw *listerWatcher[UnderlayClient]) List(_ metav1.ListOptions) (runtime.Object, error) { + //nolint:mnd // refactor to use a constant if necessary + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + list, err := lw.p.Sync(ctx) + if err != nil { + return nil, err + } + + return list, nil +} + +// Watch implements the ListerWatcher interface. 
+// +//nolint:gocritic // implements an interface +func (lw *listerWatcher[UnderlayClient]) Watch(_ metav1.ListOptions) (watch.Interface, error) { + ctx, cancel := context.WithCancel(context.Background()) + + resultCh := make(chan watch.Event, bufSize) + w := watch.NewProxyWatcher(resultCh) + + go func() { + <-w.StopChan() + cancel() + close(resultCh) + }() + + go lw.p.Run(ctx, resultCh) + + return w, nil +} + +type GlobalCacheLister[T any, PT Object[T]] interface { + ByCluster(cluster string) CacheLister[T, PT] +} + +type CacheLister[T any, PT Object[T]] interface { + List(selector labels.Selector) ([]PT, error) + Get(name string) (PT, error) +} + +type globalCacheLister[T any, PT Object[T]] struct { + indexer cache.Indexer + gr schema.GroupResource +} + +type cacheLister[T any, PT Object[T]] struct { + indexer cache.Indexer + gr schema.GroupResource + ns string +} + +func (s *globalCacheLister[T, PT]) ByCluster(cluster string) CacheLister[T, PT] { + return &cacheLister[T, PT]{ + indexer: s.indexer, + ns: cluster, + gr: s.gr, + } +} + +func (s *cacheLister[T, PT]) List(selector labels.Selector) (ret []PT, _ error) { + if err := cache.ListAllByNamespace(s.indexer, s.ns, selector, func(m any) { + ret = append(ret, m.(PT)) + }); err != nil { + return nil, err + } + return ret, nil +} + +func (s *cacheLister[T, PT]) Get(name string) (PT, error) { + obj, exists, err := s.indexer.GetByKey(s.ns + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.gr, name) + } + return obj.(PT), nil +} + +func NewGlobalCacheLister[T any, PT Object[T]](indexer cache.Indexer, gr schema.GroupResource) GlobalCacheLister[T, PT] { + return &globalCacheLister[T, PT]{ + indexer: indexer, + gr: gr, + } +} + +type RefreshableCacheLister[T any, PT Object[T]] interface { + CacheLister[T, PT] + Refresher +} + +type Refresher interface { + Refresh() +} + +type RefreshFunc func() + +func (f RefreshFunc) Refresh() { + f() +} + +type 
refreshableCacheLister[T any, PT Object[T]] struct { + CacheLister[T, PT] + Refresher +} + +func CacheWithRefresher[T any, PT Object[T]](c CacheLister[T, PT], r Refresher) RefreshableCacheLister[T, PT] { + return &refreshableCacheLister[T, PT]{ + CacheLister: c, + Refresher: r, + } +} diff --git a/pkg/timanager/manager.go b/pkg/timanager/manager.go new file mode 100644 index 00000000000..73507457db9 --- /dev/null +++ b/pkg/timanager/manager.go @@ -0,0 +1,370 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package timanager + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/pingcap/tidb-operator/pkg/client" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" +) + +const ( + bufSize = 100 +) + +// NewPollerFunc is the function to new a poller by underlay client +type NewPollerFunc[UnderlayClient any] func(name string, logger logr.Logger, c UnderlayClient) Poller + +// NewUnderlayClientFunc is the func to new a underlay client, for example, pdapi.PDClient +type NewUnderlayClientFunc[Object client.Object, UnderlayClient any] func(obj Object) (UnderlayClient, error) + +// NewClientFunc is the func to new an external client with the cache layer, for example, timanager.PDClient +type NewClientFunc[Object client.Object, UnderlayClient, Client any] func( + string, UnderlayClient, SharedInformerFactory[UnderlayClient]) Client + +type CacheKeysFunc[Object client.Object] func(obj Object) ([]string, error) + +type Manager[Object client.Object, Client any] interface { + Register(obj Object) error + Deregister(key string) + Get(key string) (Client, bool) + + Source(obj runtime.Object, h handler.EventHandler) source.Source + Start(ctx context.Context) +} + +type ManagerBuilder[Object client.Object, UnderlayClient, Client any] interface { + WithLogger(logger logr.Logger) ManagerBuilder[Object, UnderlayClient, Client] + WithCacheKeysFunc(f CacheKeysFunc[Object]) ManagerBuilder[Object, UnderlayClient, Client] + WithNewUnderlayClientFunc(f NewUnderlayClientFunc[Object, UnderlayClient]) ManagerBuilder[Object, UnderlayClient, Client] + WithNewClientFunc(f NewClientFunc[Object, UnderlayClient, Client]) ManagerBuilder[Object, 
UnderlayClient, Client] + WithNewPollerFunc(obj runtime.Object, f NewPollerFunc[UnderlayClient]) ManagerBuilder[Object, UnderlayClient, Client] + Build() Manager[Object, Client] +} + +type builder[Object client.Object, UnderlayClient, Client any] struct { + logger logr.Logger + newUnderlayClientFunc NewUnderlayClientFunc[Object, UnderlayClient] + newClientFunc NewClientFunc[Object, UnderlayClient, Client] + cacheKeysFunc CacheKeysFunc[Object] + + newPollerFuncMap map[reflect.Type]NewPollerFunc[UnderlayClient] + + exampleObjs []runtime.Object +} + +func NewManagerBuilder[Object client.Object, UnderlayClient, Client any]() ManagerBuilder[Object, UnderlayClient, Client] { + return &builder[Object, UnderlayClient, Client]{ + newPollerFuncMap: map[reflect.Type]NewPollerFunc[UnderlayClient]{}, + } +} + +func (b *builder[Object, UnderlayClient, Client]) WithLogger(logger logr.Logger) ManagerBuilder[Object, UnderlayClient, Client] { + b.logger = logger + return b +} + +func (b *builder[Object, UnderlayClient, Client]) WithCacheKeysFunc( + f CacheKeysFunc[Object]) ManagerBuilder[Object, UnderlayClient, Client] { + b.cacheKeysFunc = f + return b +} + +func (b *builder[Object, UnderlayClient, Client]) WithNewUnderlayClientFunc( + f NewUnderlayClientFunc[Object, UnderlayClient]) ManagerBuilder[Object, UnderlayClient, Client] { + b.newUnderlayClientFunc = f + return b +} + +func (b *builder[Object, UnderlayClient, Client]) WithNewClientFunc( + f NewClientFunc[Object, UnderlayClient, Client]) ManagerBuilder[Object, UnderlayClient, Client] { + b.newClientFunc = f + return b +} + +func (b *builder[Object, UnderlayClient, Client]) WithNewPollerFunc( + obj runtime.Object, f NewPollerFunc[UnderlayClient]) ManagerBuilder[Object, UnderlayClient, Client] { + b.exampleObjs = append(b.exampleObjs, obj) + t := reflect.TypeOf(obj) + b.newPollerFuncMap[t] = f + + return b +} + +func (b *builder[Object, UnderlayClient, Client]) Build() Manager[Object, Client] { + s := runtime.NewScheme() + if 
err := pdv1.Install(s); err != nil { + panic(err) + } + + return &clientManager[Object, UnderlayClient, Client]{ + logger: b.logger, + scheme: s, + newUnderlayClientFunc: b.newUnderlayClientFunc, + newClientFunc: b.newClientFunc, + cacheKeysFunc: b.cacheKeysFunc, + newPollerFuncMap: b.newPollerFuncMap, + sources: map[reflect.Type][]EventSource{}, + exampleObjs: b.exampleObjs, + } +} + +type clientManager[Object client.Object, UnderlayClient, Client any] struct { + logger logr.Logger + scheme *runtime.Scheme + + newUnderlayClientFunc NewUnderlayClientFunc[Object, UnderlayClient] + newClientFunc NewClientFunc[Object, UnderlayClient, Client] + cacheKeysFunc CacheKeysFunc[Object] + + cs Map[string, Cache[Client, UnderlayClient]] + + newPollerFuncMap map[reflect.Type]NewPollerFunc[UnderlayClient] + sources map[reflect.Type][]EventSource + + exampleObjs []runtime.Object + + ctx context.Context + started bool +} + +func (m *clientManager[Object, UnderlayClient, Client]) Register(obj Object) error { + keys, err := m.cacheKeysFunc(obj) + if err != nil { + return err + } + cacheObj, ok := m.cs.Load(keys[0]) + if ok { + if reflect.DeepEqual(keys, cacheObj.Keys()) { + m.logger.Info("hit client cache", "obj", client.ObjectKeyFromObject(obj)) + return nil + } + + m.logger.Info("renew client", "obj", client.ObjectKeyFromObject(obj)) + + if m.cs.CompareAndDelete(keys[0], cacheObj) { + cacheObj.Stop() + } + } else { + m.logger.Info("register client", "obj", client.ObjectKeyFromObject(obj)) + } + + underlay, err := m.newUnderlayClientFunc(obj) + if err != nil { + return err + } + + f := NewSharedInformerFactory(keys[0], m.logger, m.scheme, underlay, m.newPollerFuncMap, time.Hour) + for _, obj := range m.exampleObjs { + f.InformerFor(obj) + } + c := m.newClientFunc(keys[0], underlay, f) + + cacheObj = NewCache[Client, UnderlayClient](keys, c, f) + m.cs.Store(keys[0], cacheObj) + go func() { + cacheObj.Start(m.ctx) + cacheObj.InformerFactory().WaitForCacheSync(m.ctx.Done()) + for _, 
obj := range m.exampleObjs { + informer := f.InformerFor(obj) + t := reflect.TypeOf(obj) + ss := m.sources[t] + for _, s := range ss { + if err := s.For(keys[0], informer); err != nil { + m.logger.Error(err, "cannot add event handler into informer") + } + } + } + }() + return nil +} + +func (m *clientManager[Object, UnderlayClient, Client]) Get(primaryKey string) (c Client, ok bool) { + cacheObj, found := m.cs.Load(primaryKey) + if !found { + return + } + + return cacheObj.Client(), true +} + +func (m *clientManager[Object, UnderlayClient, Client]) Deregister(primaryKey string) { + m.logger.Info("deregister client", "key", primaryKey) + c, ok := m.cs.LoadAndDelete(primaryKey) + if ok { + c.Stop() + m.logger.Info("client is successfully stopped", "key", primaryKey) + } +} + +func (m *clientManager[Object, UnderlayClient, Client]) InformerFor(key string, obj runtime.Object) cache.SharedIndexInformer { + c, ok := m.cs.Load(key) + if !ok { + return nil + } + + return c.InformerFactory().InformerFor(obj) +} + +func (m *clientManager[Object, UnderlayClient, Client]) Source(obj runtime.Object, h handler.EventHandler) source.Source { + if m.started { + // TODO: optimize the panic + panic("cannot add source after manager is started") + } + t := reflect.TypeOf(obj) + _, ok := m.newPollerFuncMap[t] + if !ok { + // TODO: optimize the panic + panic("cannot get source of type " + t.Name()) + } + + s := NewEventSource(h) + ss := m.sources[t] + ss = append(ss, s) + + m.sources[t] = ss + + return s +} + +func (m *clientManager[Object, UnderlayClient, Client]) Start(ctx context.Context) { + m.started = true + m.ctx = ctx +} + +type EventSource interface { + source.Source + For(key string, f cache.SharedIndexInformer) error + HasSynced(key string) bool +} + +type eventSource struct { + h handler.EventHandler + + ctx context.Context + rh cache.ResourceEventHandler + + synced Map[string, cache.InformerSynced] +} + +func (s *eventSource) Start(ctx context.Context, queue 
workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + s.ctx = ctx + s.rh = NewResourceEventHandler(s.ctx, s.h, queue) + + return nil +} + +func NewEventSource(h handler.EventHandler) EventSource { + return &eventSource{ + h: h, + } +} + +func (s *eventSource) For(key string, f cache.SharedIndexInformer) error { + res, err := f.AddEventHandler(s.rh) + s.synced.Store(key, res.HasSynced) + return err +} + +func (s *eventSource) HasSynced(key string) bool { + hasSynced, ok := s.synced.Load(key) + if !ok { + return false + } + return hasSynced() +} + +// See "sigs.k8s.io/controller-runtime/pkg/internal/source.NewEventHandler" +func NewResourceEventHandler[O client.Object, R comparable]( + ctx context.Context, h handler.TypedEventHandler[O, R], + q workqueue.TypedRateLimitingInterface[R]) cache.ResourceEventHandler { + logger := logr.FromContextOrDiscard(ctx) + + return cache.ResourceEventHandlerDetailedFuncs{ + AddFunc: func(obj any, _ bool) { + e := event.TypedCreateEvent[O]{} + + if o, ok := obj.(O); ok { + e.Object = o + } else { + logger.Error(nil, "OnAdd missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + h.Create(ctx, e, q) + }, + UpdateFunc: func(oldObj, newObj any) { + e := event.TypedUpdateEvent[O]{} + + if o, ok := oldObj.(O); ok { + e.ObjectOld = o + } else { + logger.Error(nil, "OnUpdate missing ObjectOld", + "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) + return + } + + if o, ok := newObj.(O); ok { + e.ObjectNew = o + } else { + logger.Error(nil, "OnUpdate missing ObjectNew", + "object", newObj, "type", fmt.Sprintf("%T", newObj)) + return + } + h.Update(ctx, e, q) + }, + DeleteFunc: func(obj any) { + e := event.TypedDeleteEvent[O]{} + + var ok bool + if _, ok = obj.(client.Object); !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + logger.Error(nil, "Error decoding objects. 
Expected cache.DeletedFinalStateUnknown", + "type", fmt.Sprintf("%T", obj), + "object", obj) + return + } + + e.DeleteStateUnknown = true + + obj = tombstone.Obj + } + + if o, ok := obj.(O); ok { + e.Object = o + } else { + logger.Error(nil, "OnDelete missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + + h.Delete(ctx, e, q) + }, + } +} diff --git a/pkg/timanager/manager_test.go b/pkg/timanager/manager_test.go new file mode 100644 index 00000000000..1997d1c4fa3 --- /dev/null +++ b/pkg/timanager/manager_test.go @@ -0,0 +1,351 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package timanager + +import ( + "cmp" + "context" + "slices" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pingcap/tidb-operator/pkg/client" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +func TestClientManager(t *testing.T) { + cases := []struct { + desc string + obj client.Object + updateFunc func(obj client.Object) client.Object + changed bool + }{ + { + desc: "not changed", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + updateFunc: func(obj client.Object) client.Object { return obj }, + changed: false, + }, + { + desc: "change ns", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + updateFunc: func(obj client.Object) client.Object { + obj.SetNamespace("test") + return obj + }, + changed: true, + }, + { + desc: "change uid", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + updateFunc: func(obj client.Object) client.Object { + obj.SetUID("xxxx") + return obj + }, + changed: true, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + count := 0 + cm := NewManagerBuilder[client.Object, int, int](). + WithNewUnderlayClientFunc(func(client.Object) (int, error) { + // add count for each time the underlay client is newed + count += 1 + return count, nil + }). + WithCacheKeysFunc(func(obj client.Object) ([]string, error) { + return []string{obj.GetName(), obj.GetNamespace(), string(obj.GetUID())}, nil + }). 
+ WithNewClientFunc(func(key string, underlay int, _ SharedInformerFactory[int]) int { + // check underlay client is newed by NewUnderlayClientFunc + assert.Equal(tt, count, underlay) + // key is equal with the primary key returned by cache keys + assert.Equal(tt, c.obj.GetName(), key) + return count + }). + Build() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cm.Start(ctx) + + // client is not registered + _, ok := cm.Get(c.obj.GetName()) + assert.False(tt, ok) + + // client can be get after registered + require.NoError(tt, cm.Register(c.obj)) + clientObj, ok := cm.Get(c.obj.GetName()) + assert.True(tt, ok) + + // update obj, client will be updated only when cache keys are changed + updated := c.updateFunc(c.obj) + require.NoError(tt, cm.Register(updated)) + updateClient, ok := cm.Get(c.obj.GetName()) + assert.True(tt, ok) + + if !c.changed { + assert.Equal(tt, clientObj, updateClient) + } else { + assert.NotEqual(tt, clientObj, updateClient) + } + + // Deregister obj + cm.Deregister(updated.GetName()) + _, ok2 := cm.Get(updated.GetName()) + assert.False(tt, ok2) + }) + } +} + +func TestClientManagerSource(t *testing.T) { + cases := []struct { + desc string + previous []pdv1.Store + updated []pdv1.Store + + expectedCreateEvents []event.TypedCreateEvent[client.Object] + expectedUpdateEvents []event.TypedUpdateEvent[client.Object] + expectedDeleteEvents []event.TypedDeleteEvent[client.Object] + }{ + { + desc: "no update", + previous: []pdv1.Store{ + *fake.FakeObj[pdv1.Store]("aa"), + *fake.FakeObj[pdv1.Store]("bb"), + }, + updated: []pdv1.Store{ + *fake.FakeObj[pdv1.Store]("aa"), + *fake.FakeObj[pdv1.Store]("bb"), + }, + expectedCreateEvents: []event.TypedCreateEvent[client.Object]{ + { + Object: fake.FakeObj[pdv1.Store]("aa"), + }, + { + Object: fake.FakeObj[pdv1.Store]("bb"), + }, + }, + }, + { + desc: "add new obj", + previous: []pdv1.Store{ + *fake.FakeObj[pdv1.Store]("aa"), + }, + updated: []pdv1.Store{ + 
*fake.FakeObj[pdv1.Store]("aa"), + *fake.FakeObj[pdv1.Store]("bb"), + }, + expectedCreateEvents: []event.TypedCreateEvent[client.Object]{ + { + Object: fake.FakeObj[pdv1.Store]("aa"), + }, + { + Object: fake.FakeObj[pdv1.Store]("bb"), + }, + }, + }, + { + desc: "del existing obj", + previous: []pdv1.Store{ + *fake.FakeObj[pdv1.Store]("aa"), + *fake.FakeObj[pdv1.Store]("bb"), + }, + updated: []pdv1.Store{ + *fake.FakeObj[pdv1.Store]("aa"), + }, + expectedCreateEvents: []event.TypedCreateEvent[client.Object]{ + { + Object: fake.FakeObj[pdv1.Store]("aa"), + }, + { + Object: fake.FakeObj[pdv1.Store]("bb"), + }, + }, + expectedDeleteEvents: []event.TypedDeleteEvent[client.Object]{ + { + Object: fake.FakeObj[pdv1.Store]("bb"), + }, + }, + }, + { + desc: "update existing obj", + previous: []pdv1.Store{ + *fake.FakeObj[pdv1.Store]("aa"), + *fake.FakeObj[pdv1.Store]("bb"), + }, + updated: []pdv1.Store{ + *fake.FakeObj[pdv1.Store]("aa"), + *fake.FakeObj("bb", func(obj *pdv1.Store) *pdv1.Store { + obj.Labels = map[string]string{"test": "test"} + return obj + }), + }, + expectedCreateEvents: []event.TypedCreateEvent[client.Object]{ + { + Object: fake.FakeObj[pdv1.Store]("aa"), + }, + { + Object: fake.FakeObj[pdv1.Store]("bb"), + }, + }, + expectedUpdateEvents: []event.TypedUpdateEvent[client.Object]{ + { + ObjectOld: fake.FakeObj[pdv1.Store]("bb"), + ObjectNew: fake.FakeObj("bb", func(obj *pdv1.Store) *pdv1.Store { + obj.Labels = map[string]string{"test": "test"} + return obj + }), + }, + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + lister := FakeLister[pdv1.Store, *pdv1.Store]{ + L: List[pdv1.Store, *pdv1.Store]{ + Items: c.previous, + }, + } + cm := NewManagerBuilder[client.Object, int, int](). + WithNewUnderlayClientFunc(func(client.Object) (int, error) { + return 0, nil + }). + WithCacheKeysFunc(func(obj client.Object) ([]string, error) { + return []string{obj.GetName()}, nil + }). 
+ WithNewClientFunc(func(_ string, _ int, _ SharedInformerFactory[int]) int { + return 0 + }). + WithNewPollerFunc(&pdv1.Store{}, func(name string, logger logr.Logger, _ int) Poller { + return NewPoller(name, logger, &lister, NewDeepEquality[pdv1.Store](), time.Millisecond*200) + }). + Build() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + done := make(chan struct{}) + total := 0 + var createEvents []event.TypedCreateEvent[client.Object] + var updateEvents []event.TypedUpdateEvent[client.Object] + var deleteEvents []event.TypedDeleteEvent[client.Object] + + s := cm.Source(&pdv1.Store{}, handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(_ context.Context, event event.TypedCreateEvent[client.Object], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) { + _, ok := event.Object.(*pdv1.Store) + assert.True(tt, ok) + + createEvents = append(createEvents, event) + + total += 1 + if total == len(c.expectedCreateEvents)+len(c.expectedUpdateEvents)+len(c.expectedDeleteEvents) { + close(done) + } + }, + + UpdateFunc: func(_ context.Context, event event.TypedUpdateEvent[client.Object], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) { + _, ok1 := event.ObjectOld.(*pdv1.Store) + assert.True(tt, ok1) + + _, ok2 := event.ObjectNew.(*pdv1.Store) + assert.True(tt, ok2) + + updateEvents = append(updateEvents, event) + + total += 1 + if total == len(c.expectedCreateEvents)+len(c.expectedUpdateEvents)+len(c.expectedDeleteEvents) { + close(done) + } + }, + DeleteFunc: func(_ context.Context, event event.TypedDeleteEvent[client.Object], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) { + _, ok := event.Object.(*pdv1.Store) + assert.True(tt, ok) + + deleteEvents = append(deleteEvents, event) + + total += 1 + if total == len(c.expectedCreateEvents)+len(c.expectedUpdateEvents)+len(c.expectedDeleteEvents) { + close(done) + } + }, + }) + + cm.Start(ctx) + assert.NoError(tt, s.Start(ctx, 
workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]()))) + + assert.NoError(tt, cm.Register(fake.FakeObj[corev1.Pod]("aa"))) + + es, ok := s.(EventSource) + assert.True(tt, ok) + + synced := cache.WaitForCacheSync(ctx.Done(), func() bool { + return es.HasSynced("aa") + }) + assert.True(tt, synced) + + lister.L.Items = c.updated + + select { + case <-ctx.Done(): + assert.Fail(tt, "wait events timeout") + case <-done: + } + + slices.SortFunc(createEvents, func(a, b event.TypedCreateEvent[client.Object]) int { + return cmp.Compare(a.Object.GetName(), b.Object.GetName()) + }) + slices.SortFunc(updateEvents, func(a, b event.TypedUpdateEvent[client.Object]) int { + return cmp.Compare(a.ObjectNew.GetName(), b.ObjectNew.GetName()) + }) + slices.SortFunc(deleteEvents, func(a, b event.TypedDeleteEvent[client.Object]) int { + return cmp.Compare(a.Object.GetName(), b.Object.GetName()) + }) + + assert.Equal(tt, c.expectedCreateEvents, createEvents) + assert.Equal(tt, c.expectedUpdateEvents, updateEvents) + assert.Equal(tt, c.expectedDeleteEvents, deleteEvents) + }) + } +} diff --git a/pkg/timanager/pd/member.go b/pkg/timanager/pd/member.go new file mode 100644 index 00000000000..ffbc3dca997 --- /dev/null +++ b/pkg/timanager/pd/member.go @@ -0,0 +1,141 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pd + +import ( + "cmp" + "context" + "fmt" + "slices" + "strconv" + "time" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/pingcap/tidb-operator/pkg/pdapi/v1" + "github.com/pingcap/tidb-operator/pkg/timanager" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" +) + +const ( + defaultPollInterval = 30 * time.Second +) + +type ( + MemberCache = timanager.RefreshableCacheLister[pdv1.Member, *pdv1.Member] +) + +func NewMemberCache(cluster string, informerFactory timanager.SharedInformerFactory[pdapi.PDClient]) MemberCache { + informer := informerFactory.InformerFor(&pdv1.Member{}) + lister := timanager.NewGlobalCacheLister[pdv1.Member]( + informer.GetIndexer(), + schema.GroupResource{ + Group: pdv1.GroupName, + Resource: "members", + }, + ).ByCluster(cluster) + return timanager.CacheWithRefresher(lister, timanager.RefreshFunc(func() { + informerFactory.Refresh(&pdv1.Member{}) + })) +} + +func NewMemberPoller(name string, logger logr.Logger, c pdapi.PDClient) timanager.Poller { + lister := NewMemberLister(name, c) + + // TODO: change interval + return timanager.NewPoller(name, logger, lister, timanager.NewDeepEquality[pdv1.Member](), defaultPollInterval) +} + +type memberLister struct { + cluster string + c pdapi.PDClient +} + +func NewMemberLister(cluster string, c pdapi.PDClient) timanager.Lister[pdv1.Member, *pdv1.Member, *pdv1.MemberList] { + return &memberLister{ + cluster: cluster, + c: c, + } +} + +func (l *memberLister) List(ctx context.Context) (*pdv1.MemberList, error) { + info, err := l.c.GetMembers(ctx) + if err != nil { + return nil, err + } + + health, err := l.c.GetHealth(ctx) + if err != nil { + return nil, err + } + + mm := map[uint64]*pdv1.Member{} + + for _, m := range info.Members { + mm[m.MemberId] = &pdv1.Member{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name, + Namespace: l.cluster, + }, + ID: strconv.FormatUint(m.MemberId, 10), + 
PeerUrls: m.PeerUrls, + ClientUrls: m.ClientUrls, + LeaderPriority: m.LeaderPriority, + + IsLeader: m.MemberId == info.Leader.MemberId, + IsEtcdLeader: m.MemberId == info.EtcdLeader.MemberId, + } + } + + for _, h := range health.Healths { + m, ok := mm[h.MemberID] + if !ok { + return nil, fmt.Errorf("member %s(%v) doesn't exist but return health info", h.Name, h.MemberID) + } + m.Health = h.Health + mm[h.MemberID] = m + } + + list := pdv1.MemberList{} + + for _, m := range mm { + list.Items = append(list.Items, *m) + } + + slices.SortFunc(list.Items, func(a, b pdv1.Member) int { + return cmp.Compare(a.Name, b.Name) + }) + + return &list, nil +} + +func (*memberLister) GetItems(list *pdv1.MemberList) []*pdv1.Member { + objs := make([]*pdv1.Member, 0, len(list.Items)) + for i := range list.Items { + objs = append(objs, &list.Items[i]) + } + + return objs +} + +func (*memberLister) MarkAsInvalid(m *pdv1.Member) bool { + if !m.Invalid { + m.Invalid = true + return true + } + return false +} diff --git a/pkg/timanager/pd/pd.go b/pkg/timanager/pd/pd.go new file mode 100644 index 00000000000..b7ac37cecd6 --- /dev/null +++ b/pkg/timanager/pd/pd.go @@ -0,0 +1,161 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pd + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/go-logr/logr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/pdapi/v1" + "github.com/pingcap/tidb-operator/pkg/timanager" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" + tlsutil "github.com/pingcap/tidb-operator/pkg/utils/tls" +) + +const ( + pdRequestTimeout = 10 * time.Second +) + +type PDClientManager = timanager.Manager[*v1alpha1.PDGroup, PDClient] + +type PDClient interface { + HasSynced() bool + Stores() StoreCache + Members() MemberCache + // TODO: only returns write interface + Underlay() pdapi.PDClient +} + +type pdClient struct { + underlay pdapi.PDClient + + stores StoreCache + members MemberCache + + hasSynced []func() bool +} + +func (c *pdClient) Stores() StoreCache { + return c.stores +} + +func (c *pdClient) Members() MemberCache { + return c.members +} + +func (c *pdClient) Underlay() pdapi.PDClient { + return c.underlay +} + +func (c *pdClient) HasSynced() bool { + for _, f := range c.hasSynced { + if !f() { + return false + } + } + + return true +} + +func NewClient(key string, underlay pdapi.PDClient, informerFactory timanager.SharedInformerFactory[pdapi.PDClient]) PDClient { + storeInformer := informerFactory.InformerFor(&pdv1.Store{}) + memberInformer := informerFactory.InformerFor(&pdv1.Member{}) + stores := NewStoreCache(key, informerFactory) + members := NewMemberCache(key, informerFactory) + + return &pdClient{ + underlay: underlay, + stores: stores, + members: members, + hasSynced: []func() bool{ + storeInformer.HasSynced, + memberInformer.HasSynced, + }, + } +} + +func PrimaryKey(ns, cluster string) string { + return ns + ":" + cluster +} + +func SplitPrimaryKey(key string) (ns, cluster string) { + keys := strings.SplitN(key, ":", 2) + if len(keys) < 2 { + return keys[0], "" + } + return keys[0], keys[1] +} + +// If any keys are changed, 
client will be renewed +// The first key is primary key to get client from manager +func CacheKeys(pdg *v1alpha1.PDGroup) ([]string, error) { + keys := []string{} + + keys = append(keys, + PrimaryKey(pdg.Namespace, pdg.Spec.Cluster.Name), // cluster name as primary key + pdg.Name, + string(pdg.GetUID())) + // TODO: support reload tls config + + return keys, nil +} + +func NewUnderlayClientFunc(c client.Client) timanager.NewUnderlayClientFunc[*v1alpha1.PDGroup, pdapi.PDClient] { + return func(pdg *v1alpha1.PDGroup) (pdapi.PDClient, error) { + ctx := context.Background() + var cluster v1alpha1.Cluster + if err := c.Get(ctx, client.ObjectKey{ + Name: pdg.Spec.Cluster.Name, + Namespace: pdg.Namespace, + }, &cluster); err != nil { + return nil, fmt.Errorf("cannot find cluster %s: %w", pdg.Spec.Cluster.Name, err) + } + + host := fmt.Sprintf("%s-%s.%s:%d", pdg.Spec.Cluster.Name, pdg.Name, pdg.Namespace, pdg.GetClientPort()) + + if cluster.IsTLSClusterEnabled() { + tlsConfig, err := tlsutil.GetTLSConfigFromSecret(ctx, c, + cluster.Namespace, v1alpha1.TLSClusterClientSecretName(cluster.Name)) + if err != nil { + return nil, fmt.Errorf("cannot get tls config from secret: %w", err) + } + + addr := "https://" + host + return pdapi.NewPDClient(addr, pdRequestTimeout, tlsConfig), nil + } + + addr := "http://" + host + pc := pdapi.NewPDClient(addr, pdRequestTimeout, nil) + return pc, nil + } +} + +func NewPDClientManager(_ logr.Logger, c client.Client) PDClientManager { + m := timanager.NewManagerBuilder[*v1alpha1.PDGroup, pdapi.PDClient, PDClient](). + WithNewUnderlayClientFunc(NewUnderlayClientFunc(c)). + WithNewClientFunc(NewClient). + WithCacheKeysFunc(CacheKeys). + WithNewPollerFunc(&pdv1.Store{}, NewStorePoller). + WithNewPollerFunc(&pdv1.Member{}, NewMemberPoller). 
+ Build() + + return m +} diff --git a/pkg/timanager/pd/store.go b/pkg/timanager/pd/store.go new file mode 100644 index 00000000000..aa79cd81772 --- /dev/null +++ b/pkg/timanager/pd/store.go @@ -0,0 +1,127 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pd + +import ( + "context" + "strconv" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/pingcap/tidb-operator/pkg/pdapi/v1" + "github.com/pingcap/tidb-operator/pkg/timanager" + pdv1 "github.com/pingcap/tidb-operator/pkg/timanager/apis/pd/v1" +) + +type ( + StoreCache = timanager.RefreshableCacheLister[pdv1.Store, *pdv1.Store] +) + +func NewStoreCache(cluster string, informerFactory timanager.SharedInformerFactory[pdapi.PDClient]) StoreCache { + informer := informerFactory.InformerFor(&pdv1.Store{}) + lister := timanager.NewGlobalCacheLister[pdv1.Store]( + informer.GetIndexer(), + schema.GroupResource{ + Group: pdv1.GroupName, + Resource: "stores", + }, + ).ByCluster(cluster) + return timanager.CacheWithRefresher(lister, timanager.RefreshFunc(func() { + informerFactory.Refresh(&pdv1.Store{}) + })) +} + +func NewStorePoller(name string, logger logr.Logger, c pdapi.PDClient) timanager.Poller { + lister := NewStoreLister(name, c) + + // TODO: change interval + return timanager.NewPoller(name, logger, lister, timanager.NewDeepEquality[pdv1.Store](), defaultPollInterval) +} + +type 
storeLister struct { + cluster string + c pdapi.PDClient +} + +func NewStoreLister(cluster string, c pdapi.PDClient) timanager.Lister[pdv1.Store, *pdv1.Store, *pdv1.StoreList] { + return &storeLister{ + cluster: cluster, + c: c, + } +} + +func (l *storeLister) List(ctx context.Context) (*pdv1.StoreList, error) { + ss, err := l.c.GetStores(ctx) + if err != nil { + if pdapi.IsTiKVNotBootstrappedError(err) { + return &pdv1.StoreList{}, nil + } + return nil, err + } + + list := pdv1.StoreList{} + for _, s := range ss.Stores { + obj := l.convert(l.cluster, s) + list.Items = append(list.Items, *obj) + } + return &list, nil +} + +func (*storeLister) GetItems(list *pdv1.StoreList) []*pdv1.Store { + objs := make([]*pdv1.Store, 0, len(list.Items)) + for i := range list.Items { + objs = append(objs, &list.Items[i]) + } + + return objs +} + +func (*storeLister) MarkAsInvalid(m *pdv1.Store) bool { + if !m.Invalid { + m.Invalid = true + return true + } + return false +} + +func (*storeLister) convert(cluster string, s *pdapi.StoreInfo) *pdv1.Store { + ls := map[string]string{} + for _, label := range s.Store.Labels { + if label != nil { + ls[label.Key] = label.Value + } + } + + return &pdv1.Store{ + ObjectMeta: metav1.ObjectMeta{ + Labels: ls, + Name: s.Store.Address, + Namespace: cluster, + }, + ID: strconv.FormatUint(s.Store.Id, 10), + // Address: s.Store.Address, + Version: s.Store.Version, + PhysicallyDestroyed: s.Store.PhysicallyDestroyed, + State: pdv1.StoreState(s.Store.GetState().String()), + NodeState: pdv1.NodeState(s.Store.NodeState.String()), + StartTimestamp: s.Store.StartTimestamp, + // LastHeartbeat: s.Store.LastHeartbeat, + + LeaderCount: s.Status.LeaderCount, + RegionCount: s.Status.RegionCount, + } +} diff --git a/pkg/timanager/poller.go b/pkg/timanager/poller.go new file mode 100644 index 00000000000..2bbdd8d69e6 --- /dev/null +++ b/pkg/timanager/poller.go @@ -0,0 +1,255 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timanager + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + + "github.com/pingcap/tidb-operator/pkg/client" +) + +// Poller polls from tidb and sends watch event to channel +type Poller interface { + // Sync will initialize the state of poller + // It will poll and make sure state is available + // Sync must be called before Run + Sync(ctx context.Context) (runtime.Object, error) + // IF Run is called multiple times, the previous one will be stopped + // immediately and start a new one to push event to new channel + Run(ctx context.Context, ch chan<- watch.Event) + // Poll once immediately + Refresh() +} + +type Lister[T any, PT Object[T], L client.ObjectList] interface { + List(ctx context.Context) (L, error) + GetItems(l L) []PT + MarkAsInvalid(PT) bool +} + +type Equality[T any, PT Object[T]] interface { + Equal(prev, cur PT) bool +} + +func NewPoller[T any, PT Object[T], L client.ObjectList]( + name string, + _ logr.Logger, + lister Lister[T, PT, L], + eq Equality[T, PT], + interval time.Duration, +) Poller { + return &poller[T, PT, L]{ + name: name, + interval: interval, + lister: lister, + equality: eq, + } +} + +type poller[T any, PT Object[T], L client.ObjectList] struct { + name string + logger logr.Logger + + lock sync.Mutex + started bool + + resultCh chan watch.Event + + 
cancel context.CancelFunc + interval time.Duration + + refreshCh chan struct{} + + state map[string]PT + + lister Lister[T, PT, L] + equality Equality[T, PT] +} + +func (p *poller[T, PT, L]) renew(ctx context.Context) context.Context { + p.stop() + p.resultCh = make(chan watch.Event, bufSize) + + nctx, cancel := context.WithCancel(ctx) + p.cancel = cancel + + return nctx +} + +func (p *poller[T, PT, L]) Sync(ctx context.Context) (runtime.Object, error) { + p.lock.Lock() + defer p.lock.Unlock() + + if p.started { + return nil, fmt.Errorf("poller has started") + } + + list, err := p.lister.List(ctx) + if err != nil { + return nil, err + } + + items := p.lister.GetItems(list) + p.updateState(ctx, p.newState(items), false) + + return list, nil +} + +func (p *poller[T, PT, L]) stop() { + if p.cancel != nil { + p.cancel() + close(p.resultCh) + p.cancel = nil + } +} + +func (p *poller[T, PT, L]) Stop() { + p.stop() + p.state = nil +} + +func (p *poller[T, PT, L]) Run(ctx context.Context, ch chan<- watch.Event) { + p.lock.Lock() + p.started = true + p.lock.Unlock() + + nctx := p.renew(ctx) + defer p.Stop() + + go func() { + for { + select { + case event := <-p.resultCh: + ch <- event + case <-nctx.Done(): + return + } + } + }() + + timer := time.NewTicker(p.interval) + defer timer.Stop() + for { + select { + case <-nctx.Done(): + p.logger.Info("poller is stopped", "cluster", p.name, "type", new(T)) + return + case <-p.refreshCh: + p.poll(ctx) + case <-timer.C: + p.poll(ctx) + } + timer.Reset(p.interval) + } +} + +func (p *poller[T, PT, L]) Refresh() { + p.refreshCh <- struct{}{} +} + +func (p *poller[T, PT, L]) poll(ctx context.Context) { + list, err := p.lister.List(ctx) + if err != nil { + p.logger.Error(err, "poll err", "cluster", p.name, "type", new(T)) + p.markStateInvalid(ctx) + } + objs := p.lister.GetItems(list) + + p.updateState(ctx, p.newState(objs), true) +} + +func (*poller[T, PT, L]) newState(objs []PT) map[string]PT { + s := map[string]PT{} + for _, obj := 
range objs { + s[obj.GetName()] = obj + } + + return s +} + +func (p *poller[T, PT, L]) markStateInvalid(ctx context.Context) { + for _, v := range p.state { + if p.lister.MarkAsInvalid(v) { + p.sendEvent(ctx, &watch.Event{ + Type: watch.Modified, + Object: v, + }) + } + } +} + +func (p *poller[T, PT, L]) updateState(ctx context.Context, newState map[string]PT, sendEvent bool) { + oldState := p.state + p.state = newState + + if sendEvent { + p.generateEvents(ctx, oldState, newState) + } +} + +func (p *poller[T, PT, L]) generateEvents(ctx context.Context, prevState, curState map[string]PT) { + for _, obj := range curState { + if preObj, ok := prevState[obj.GetName()]; ok { + if p.equality.Equal(preObj, obj) { + continue + } + p.sendEvent(ctx, &watch.Event{ + Type: watch.Modified, + Object: obj, + }) + } else { + p.sendEvent(ctx, &watch.Event{ + Type: watch.Added, + Object: obj, + }) + } + } + + for name := range prevState { + if _, ok := curState[name]; !ok { + p.sendEvent(ctx, &watch.Event{ + Type: watch.Deleted, + Object: prevState[name], + }) + } + } +} + +func (p *poller[T, PT, L]) sendEvent(ctx context.Context, e *watch.Event) { + select { + case p.resultCh <- *e: + //nolint:mnd // refactor to use a constant if necessary + p.logger.V(4).Info("send event", "type", e.Type, "object", e.Object) + case <-ctx.Done(): + } +} + +type deepEquality[T any, PT Object[T]] struct{} + +func (*deepEquality[T, PT]) Equal(preObj, curObj PT) bool { + return reflect.DeepEqual(preObj, curObj) +} + +func NewDeepEquality[T any, PT Object[T]]() Equality[T, PT] { + return &deepEquality[T, PT]{} +} diff --git a/pkg/timanager/poller_test.go b/pkg/timanager/poller_test.go new file mode 100644 index 00000000000..6a899e61486 --- /dev/null +++ b/pkg/timanager/poller_test.go @@ -0,0 +1,227 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timanager + +import ( + "cmp" + "context" + "slices" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +type FakeLister[T any, PT Object[T]] struct { + L List[T, PT] +} + +func (l *FakeLister[T, PT]) List(_ context.Context) (*List[T, PT], error) { + return &l.L, nil +} + +func (l *FakeLister[T, PT]) GetItems(_ *List[T, PT]) []PT { + objs := make([]PT, 0, len(l.L.Items)) + for i := range l.L.Items { + objs = append(objs, &l.L.Items[i]) + } + return objs +} + +func (*FakeLister[T, PT]) MarkAsInvalid(PT) bool { + return false +} + +func TestPoller(t *testing.T) { + cases := []struct { + desc string + previous []corev1.Pod + updated []corev1.Pod + + expectedList runtime.Object + expectedEvents []watch.Event + }{ + { + desc: "no update", + previous: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + updated: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + + expectedList: &List[corev1.Pod, *corev1.Pod]{ + Items: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + }, + expectedEvents: []watch.Event{}, + }, + { + desc: "add new obj", + previous: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + }, + updated: []corev1.Pod{ + 
*fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + + expectedList: &List[corev1.Pod, *corev1.Pod]{ + Items: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + }, + }, + expectedEvents: []watch.Event{ + { + Type: watch.Added, + Object: fake.FakeObj[corev1.Pod]("bb"), + }, + }, + }, + { + desc: "del existing obj", + previous: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + updated: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + }, + + expectedList: &List[corev1.Pod, *corev1.Pod]{ + Items: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + }, + expectedEvents: []watch.Event{ + { + Type: watch.Deleted, + Object: fake.FakeObj[corev1.Pod]("bb"), + }, + }, + }, + { + desc: "update existing obj", + previous: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + updated: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj("bb", func(obj *corev1.Pod) *corev1.Pod { + obj.Labels = map[string]string{"test": "test"} + return obj + }), + }, + + expectedList: &List[corev1.Pod, *corev1.Pod]{ + Items: []corev1.Pod{ + *fake.FakeObj[corev1.Pod]("aa"), + *fake.FakeObj[corev1.Pod]("bb"), + }, + }, + expectedEvents: []watch.Event{ + { + Type: watch.Modified, + Object: fake.FakeObj("bb", func(obj *corev1.Pod) *corev1.Pod { + obj.Labels = map[string]string{"test": "test"} + return obj + }), + }, + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + lister := FakeLister[corev1.Pod, *corev1.Pod]{ + L: List[corev1.Pod, *corev1.Pod]{ + Items: c.previous, + }, + } + + p := NewPoller(c.desc, logr.Discard(), &lister, NewDeepEquality[corev1.Pod](), time.Millisecond*500) + + ctx, cancel := context.WithCancel(context.Background()) + list, err := p.Sync(ctx) + require.NoError(tt, err) + + assert.Equal(tt, c.expectedList, list) + + lister.L.Items = c.updated + + events := []watch.Event{} + ch := 
make(chan watch.Event, bufSize) + + go p.Run(ctx, ch) + + waited := false + func() { + for { + select { + case event := <-ch: + events = append(events, event) + default: + if waited { + return + } + // sleep at least 2 * interval + time.Sleep(time.Second) + waited = true + } + } + }() + + cancel() + close(ch) + + slices.SortFunc(c.expectedEvents, CompareEvent) + slices.SortFunc(events, CompareEvent) + assert.Equal(tt, c.expectedEvents, events) + }) + } +} + +func CompareObject(a, b runtime.Object) int { + aname := a.(client.Object).GetName() + bname := b.(client.Object).GetName() + + return cmp.Compare(aname, bname) +} + +func CompareEvent(a, b watch.Event) int { + aname := a.Object.(client.Object).GetName() + bname := b.Object.(client.Object).GetName() + + if aname == bname { + return cmp.Compare(a.Type, b.Type) + } + + return cmp.Compare(aname, bname) +} diff --git a/pkg/timanager/util.go b/pkg/timanager/util.go new file mode 100644 index 00000000000..574d8137a20 --- /dev/null +++ b/pkg/timanager/util.go @@ -0,0 +1,170 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package timanager

import (
	"context"
	"sync"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/pingcap/tidb-operator/pkg/client"
)

// Object constrains PT to be a pointer to T that implements client.Object
// plus the generated deepcopy helpers, so generic code can copy and
// identify cached objects.
type Object[T any] interface {
	*T
	client.Object
	DeepCopy() *T
	DeepCopyInto(*T)
}

// NOTE: it's only for test now
//
// List is a generic object list with value-typed Items, mirroring the shape
// of generated k8s list types.
type List[T any, PT Object[T]] struct {
	metav1.TypeMeta
	metav1.ListMeta

	Items []T
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *List[T, PT]) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *List[T, PT]) DeepCopyInto(out *List[T, PT]) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// shadow in/out with the item slices so the element loop below
		// reads from the source and writes into the freshly made copy
		in, out := &in.Items, &out.Items
		*out = make([]T, len(*in))
		for i := range *in {
			var obj PT = &(*in)[i]
			obj.DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
func (in *List[T, PT]) DeepCopy() *List[T, PT] {
	if in == nil {
		return nil
	}
	out := new(List[T, PT])
	in.DeepCopyInto(out)
	return out
}

// Map is a wrapper of sync.Map to avoid type assertion in the outer function
type Map[K comparable, V any] struct {
	sync.Map
}

// Load returns the value stored under k; the bool reports whether it existed.
func (m *Map[K, V]) Load(k K) (_ V, _ bool) {
	val, ok := m.Map.Load(k)
	if !ok {
		return
	}
	return val.(V), true
}

// Store sets the value for k.
func (m *Map[K, V]) Store(k K, v V) {
	m.Map.Store(k, v)
}

// Delete removes the value for k, if any.
func (m *Map[K, V]) Delete(k K) {
	m.Map.Delete(k)
}

// Range calls f for each key/value pair; iteration stops when f returns false.
func (m *Map[K, V]) Range(f func(K, V) bool) {
	m.Map.Range(func(key, val any) bool {
		k := key.(K)
		v := val.(V)
		return f(k, v)
	})
}

// LoadAndDelete removes k and returns its previous value, if present.
func (m *Map[K, V]) LoadAndDelete(k K) (_ V, _ bool) {
	val, ok := m.Map.LoadAndDelete(k)
	if !ok {
		return
	}
	return val.(V), true
}

// Swap stores v under k and returns the previous value.
// NOTE(review): when there was no previous value, the zero V and false are
// returned — callers must check the bool before using the value.
func (m *Map[K, V]) Swap(k K, v V) (_ V, _ bool) {
	val, ok := m.Map.Swap(k, v)
	if !ok {
		return
	}
	return val.(V), true
}

// Cache bundles a component client with the informer factory that caches
// its data, plus the cache keys it serves.
type Cache[Client, UnderlayClient any] interface {
	Client() Client
	InformerFactory() SharedInformerFactory[UnderlayClient]

	Keys() []string

	Start(ctx context.Context)
	Stop()
}

// cached is the default Cache implementation.
type cached[Client, UnderlayClient any] struct {
	c Client
	f SharedInformerFactory[UnderlayClient]

	// cancel stops the informer factory's context; nil until Start is called.
	cancel context.CancelFunc

	cacheKeys []string
}

// NewCache returns a Cache over the given client and informer factory.
func NewCache[Client, UnderlayClient any](keys []string, c Client, f SharedInformerFactory[UnderlayClient]) Cache[Client, UnderlayClient] {
	return &cached[Client, UnderlayClient]{
		c:         c,
		f:         f,
		cacheKeys: keys,
	}
}

func (c *cached[Client, UnderlayClient]) Client() Client {
	return c.c
}

func (c *cached[Client, UnderlayClient]) InformerFactory() SharedInformerFactory[UnderlayClient] {
	return c.f
}

func (c *cached[Client, UnderlayClient]) Keys() []string {
	return c.cacheKeys
}

// Start launches the informer factory under a cancellable child of ctx.
func (c *cached[Client, UnderlayClient]) Start(ctx context.Context) {
	nctx, cancel := context.WithCancel(ctx)
	c.cancel = cancel
	c.f.Start(nctx.Done())
}

// Stop cancels the factory's context (if started) and shuts it down.
func (c *cached[Client, UnderlayClient]) Stop() {
	if c.cancel != nil {
		c.cancel()
	}
	c.f.Shutdown()
}
diff --git a/pkg/updater/actor.go b/pkg/updater/actor.go
new file mode 100644
index 00000000000..6e34710cfbb
--- /dev/null
+++ b/pkg/updater/actor.go
@@ -0,0 +1,163 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package updater

import (
	"context"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/runtime"
)

// NewFactory builds fresh instances for scale-out and update operations.
type NewFactory[PT runtime.Instance] interface {
	New() PT
}

// NewFunc adapts a plain function to the NewFactory interface.
type NewFunc[PT runtime.Instance] func() PT

func (f NewFunc[PT]) New() PT {
	return f()
}

// e.g. for some write once fields(name, topology, etc.)
type UpdateHook[PT runtime.Instance] interface {
	Update(update, outdated PT) PT
}

// e.g.
for topology scheduling +type AddHook[PT runtime.Instance] interface { + Add(update PT) PT +} + +type DelHook[PT runtime.Instance] interface { + Delete(name string) +} + +type UpdateHookFunc[PT runtime.Instance] func(update, outdated PT) PT + +func (f UpdateHookFunc[PT]) Update(update, outdated PT) PT { + return f(update, outdated) +} + +type actor[PT runtime.Instance] struct { + c client.Client + + f NewFactory[PT] + + update State[PT] + outdated State[PT] + + addHooks []AddHook[PT] + updateHooks []UpdateHook[PT] + delHooks []DelHook[PT] + + scaleInSelector Selector[PT] + updateSelector Selector[PT] +} + +func (act *actor[PT]) chooseToUpdate(s []PT) (string, error) { + name := act.updateSelector.Choose(s) + if name == "" { + return "", fmt.Errorf("no instance can be updated") + } + + return name, nil +} + +func (act *actor[PT]) chooseToScaleIn(s []PT) (string, error) { + name := act.scaleInSelector.Choose(s) + if name == "" { + return "", fmt.Errorf("no instance can be scale in") + } + + return name, nil +} + +func (act *actor[PT]) ScaleOut(ctx context.Context) error { + obj := act.f.New() + + for _, hook := range act.addHooks { + obj = hook.Add(obj) + } + + if err := act.c.Apply(ctx, obj.To()); err != nil { + return err + } + + act.update.Add(obj) + + return nil +} + +func (act *actor[PT]) ScaleInUpdate(ctx context.Context) (bool, error) { + name, err := act.chooseToScaleIn(act.update.List()) + if err != nil { + return false, err + } + obj := act.update.Del(name) + + isUnavailable := !obj.IsHealthy() || !obj.IsUpToDate() + + if err := act.c.Delete(ctx, obj.To()); err != nil { + return false, err + } + + for _, hook := range act.delHooks { + hook.Delete(obj.GetName()) + } + + return isUnavailable, nil +} + +func (act *actor[PT]) ScaleInOutdated(ctx context.Context) (bool, error) { + name, err := act.chooseToScaleIn(act.outdated.List()) + if err != nil { + return false, err + } + obj := act.outdated.Del(name) + isUnavailable := !obj.IsHealthy() || !obj.IsUpToDate() 
+ + if err := act.c.Delete(ctx, obj.To()); err != nil { + return false, err + } + + for _, hook := range act.delHooks { + hook.Delete(obj.GetName()) + } + + return isUnavailable, nil +} + +func (act *actor[PT]) Update(ctx context.Context) error { + name, err := act.chooseToUpdate(act.outdated.List()) + if err != nil { + return err + } + outdated := act.outdated.Del(name) + + update := act.f.New() + for _, hook := range act.updateHooks { + update = hook.Update(update, outdated) + } + + if err := act.c.Apply(ctx, update.To()); err != nil { + return err + } + + act.update.Add(update) + + return nil +} diff --git a/pkg/updater/builder.go b/pkg/updater/builder.go new file mode 100644 index 00000000000..da7bbec6b42 --- /dev/null +++ b/pkg/updater/builder.go @@ -0,0 +1,164 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package updater + +import ( + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" +) + +type Builder[PT runtime.Instance] interface { + WithInstances(...PT) Builder[PT] + WithDesired(desired int) Builder[PT] + WithMaxSurge(maxSurge int) Builder[PT] + WithMaxUnavailable(maxUnavailable int) Builder[PT] + WithRevision(rev string) Builder[PT] + WithClient(c client.Client) Builder[PT] + WithNewFactory(NewFactory[PT]) Builder[PT] + WithAddHooks(hooks ...AddHook[PT]) Builder[PT] + WithUpdateHooks(hooks ...UpdateHook[PT]) Builder[PT] + WithDelHooks(hooks ...DelHook[PT]) Builder[PT] + WithScaleInPreferPolicy(ps ...PreferPolicy[PT]) Builder[PT] + WithUpdatePreferPolicy(ps ...PreferPolicy[PT]) Builder[PT] + Build() Executor +} + +type builder[PT runtime.Instance] struct { + instances []PT + desired int + maxSurge int + maxUnavailable int + rev string + + c client.Client + + f NewFactory[PT] + + addHooks []AddHook[PT] + updateHooks []UpdateHook[PT] + delHooks []DelHook[PT] + + scaleInPreferPolicies []PreferPolicy[PT] + updatePreferPolicies []PreferPolicy[PT] +} + +func (b *builder[PT]) Build() Executor { + update, outdated := split(b.instances, b.rev) + updatePolicies := b.updatePreferPolicies + updatePolicies = append(updatePolicies, PreferUnavailable[PT]()) + actor := &actor[PT]{ + c: b.c, + f: b.f, + + update: NewState(update), + outdated: NewState(outdated), + + addHooks: b.addHooks, + updateHooks: b.updateHooks, + delHooks: b.delHooks, + + scaleInSelector: NewSelector(b.scaleInPreferPolicies...), + updateSelector: NewSelector(updatePolicies...), + } + return NewExecutor(actor, len(update), len(outdated), b.desired, + countUnavailable(update), countUnavailable(outdated), b.maxSurge, b.maxUnavailable) +} + +func New[PT runtime.Instance]() Builder[PT] { + return &builder[PT]{} +} + +func (b *builder[PT]) WithInstances(instances ...PT) Builder[PT] { + b.instances = append(b.instances, instances...) 
+ return b +} + +func (b *builder[PT]) WithDesired(desired int) Builder[PT] { + b.desired = desired + return b +} + +func (b *builder[PT]) WithMaxSurge(maxSurge int) Builder[PT] { + b.maxSurge = maxSurge + return b +} + +func (b *builder[PT]) WithMaxUnavailable(maxUnavailable int) Builder[PT] { + b.maxUnavailable = maxUnavailable + return b +} + +func (b *builder[PT]) WithRevision(revision string) Builder[PT] { + b.rev = revision + return b +} + +func (b *builder[PT]) WithClient(c client.Client) Builder[PT] { + b.c = c + return b +} + +func (b *builder[PT]) WithNewFactory(f NewFactory[PT]) Builder[PT] { + b.f = f + return b +} + +func (b *builder[PT]) WithAddHooks(hooks ...AddHook[PT]) Builder[PT] { + b.addHooks = append(b.addHooks, hooks...) + return b +} + +func (b *builder[PT]) WithUpdateHooks(hooks ...UpdateHook[PT]) Builder[PT] { + b.updateHooks = append(b.updateHooks, hooks...) + return b +} + +func (b *builder[PT]) WithDelHooks(hooks ...DelHook[PT]) Builder[PT] { + b.delHooks = append(b.delHooks, hooks...) + return b +} + +func (b *builder[PT]) WithScaleInPreferPolicy(ps ...PreferPolicy[PT]) Builder[PT] { + b.scaleInPreferPolicies = append(b.scaleInPreferPolicies, ps...) + return b +} + +func (b *builder[PT]) WithUpdatePreferPolicy(ps ...PreferPolicy[PT]) Builder[PT] { + b.updatePreferPolicies = append(b.updatePreferPolicies, ps...) 
+ return b +} + +func split[PT runtime.Instance](all []PT, rev string) (update, outdated []PT) { + for _, instance := range all { + if instance.GetUpdateRevision() == rev && instance.GetDeletionTimestamp().IsZero() { + update = append(update, instance) + } else { + outdated = append(outdated, instance) + } + } + + return update, outdated +} + +func countUnavailable[PT runtime.Instance](all []PT) int { + unavailable := 0 + for _, instance := range all { + if !instance.IsHealthy() || !instance.IsUpToDate() { + unavailable += 1 + } + } + + return unavailable +} diff --git a/pkg/updater/executor.go b/pkg/updater/executor.go new file mode 100644 index 00000000000..a4c0b993bbf --- /dev/null +++ b/pkg/updater/executor.go @@ -0,0 +1,177 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package updater

import (
	"context"
)

// Actor abstracts the four primitive operations the executor drives:
// creating a new updated instance, replacing an outdated one, and removing
// an instance from either partition.
type Actor interface {
	ScaleOut(ctx context.Context) error
	Update(ctx context.Context) error
	ScaleInUpdate(ctx context.Context) (unavailable bool, _ error)
	ScaleInOutdated(ctx context.Context) (unavailable bool, _ error)
}

// TODO: return instance list after Do
type Executor interface {
	// Do drives the rollout; the bool reports whether it must wait for
	// instances to become available before making further progress.
	Do(ctx context.Context) (bool, error)
}

// executor tracks rollout progress as plain counters; the Actor mutates
// the real instances while Do keeps the counters in sync with its calls.
type executor struct {
	update              int
	outdated            int
	desired             int
	unavailableUpdate   int
	unavailableOutdated int
	maxSurge            int
	maxUnavailable      int

	act Actor
}

// NewExecutor builds an Executor from the initial counts of updated/outdated
// instances, the desired replicas, the unavailable counts per partition, and
// the rollout limits (maxSurge/maxUnavailable).
func NewExecutor(
	act Actor,
	update,
	outdated,
	desired,
	unavailableUpdate,
	unavailableOutdated,
	maxSurge,
	maxUnavailable int,
) Executor {
	return &executor{
		update:              update,
		outdated:            outdated,
		desired:             desired,
		unavailableUpdate:   unavailableUpdate,
		unavailableOutdated: unavailableOutdated,
		maxSurge:            maxSurge,
		maxUnavailable:      maxUnavailable,
		act:                 act,
	}
}

// TODO: add scale in/out rate limit
//
// Do loops until the updated count equals desired and no outdated instances
// remain, or until progress requires waiting for availability (returns true).
// Per iteration it recomputes:
//   - actual:  total instances (update + outdated)
//   - available: actual minus all unavailable instances
//   - maximum: desired + min(maxSurge, outdated) — surge ceiling
//   - minimum: desired - maxUnavailable — availability floor
// and then scales out (below ceiling), updates in place (at ceiling), or
// scales in (above ceiling), never letting available drop below minimum.
//
//nolint:gocyclo // refactor if possible
func (ex *executor) Do(ctx context.Context) (bool, error) {
	for ex.update != ex.desired || ex.outdated != 0 {
		actual := ex.update + ex.outdated
		available := actual - ex.unavailableUpdate - ex.unavailableOutdated
		maximum := ex.desired + min(ex.maxSurge, ex.outdated)
		minimum := ex.desired - ex.maxUnavailable
		switch {
		case actual < maximum:
			// room to surge: create a new updated instance (counted as
			// unavailable until it becomes ready)
			if err := ex.act.ScaleOut(ctx); err != nil {
				return false, err
			}
			ex.update += 1
			ex.unavailableUpdate += 1

		case actual == maximum:
			if ex.update < ex.desired {
				// update will always prefer unavailable one so available will not changed if there are
				// unavailable and outdated instances
				if ex.unavailableOutdated > 0 {
					if err := ex.act.Update(ctx); err != nil {
						return false, err
					}
					ex.outdated -= 1
					ex.unavailableOutdated -= 1
					ex.update += 1
					ex.unavailableUpdate += 1
				} else {
					// DON'T decrease available if available is less than minimum
					if available <= minimum {
						return true, nil
					}

					if err := ex.act.Update(ctx); err != nil {
						return false, err
					}
					ex.outdated -= 1
					ex.update += 1
					ex.unavailableUpdate += 1
				}
			} else {
				// => ex.update + ex.outdated == ex.desired + min(ex.maxSurge, ex.outdated) and ex.update >= ex.desired
				// => ex.outdated <= min(ex.maxSurge, ex.outdated)
				// => ex.outdated <= ex.maxSurge
				// => ex.outdated = min(ex.maxSurge, ex.outdated)
				// => ex.update + ex.outdated >= ex.desired + ex.outdated
				// => ex.update == ex.desired
				// => ex.outdated != 0 (ex.update != ex.desired || ex.outdated != 0 in for loop condition)
				if available <= minimum {
					return true, nil
				}

				unavailable, err := ex.act.ScaleInOutdated(ctx)
				if err != nil {
					return false, err
				}
				// scale in may not choose an unavailable outdated so just descrease the outdated
				// and assume we always choose an available outdated.
				// And then wait if next available is less than minimum
				if unavailable {
					ex.outdated -= 1
					ex.unavailableOutdated -= 1
				} else {
					ex.outdated -= 1
				}
			}
		case actual > maximum:
			// Scale in op may choose an available instance.
			// Assume we always choose an unavailable one, we will scale once and wait until next reconcile
			checkAvail := false
			if ex.update > ex.desired {
				unavailable, err := ex.act.ScaleInUpdate(ctx)
				if err != nil {
					return false, err
				}
				if unavailable {
					ex.update -= 1
					ex.unavailableUpdate -= 1
				} else {
					ex.update -= 1
					available -= 1
					checkAvail = true
				}
			} else {
				// ex.update + ex.outdated > ex.desired + min(ex.maxSurge, ex.outdated) and ex.update >= ex.desired
				// => ex.outdated > min(ex.maxSurge, ex.outdated)
				// => ex.outdated > 0
				unavailable, err := ex.act.ScaleInOutdated(ctx)
				if err != nil {
					return false, err
				}
				if unavailable {
					ex.outdated -= 1
					ex.unavailableOutdated -= 1
				} else {
					ex.outdated -= 1
					available -= 1
					checkAvail = true
				}
			}
			// Wait if available is less than minimum
			if checkAvail && available <= minimum {
				return true, nil
			}
		}
	}

	return false, nil
}
diff --git a/pkg/updater/executor_test.go b/pkg/updater/executor_test.go
new file mode 100644
index 00000000000..9a7bcb9833b
--- /dev/null
+++ b/pkg/updater/executor_test.go
@@ -0,0 +1,765 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package updater

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// action identifies which Actor primitive was invoked, for call-order assertions.
type action int

const (
	actionScaleOut action = iota
	actionUpdate
	actionScaleInUpdate
	actionScaleInOutdated
)

// FakeActor records the sequence of Actor calls. Its counters mirror the
// executor's view; the ScaleIn* methods report "unavailable" as long as the
// corresponding unavailable counter is non-zero, unless preferAvailable
// simulates a selector that always removes an available instance.
type FakeActor struct {
	Actions []action

	unavailableUpdate   int
	unavailableOutdated int
	update              int
	outdated            int

	preferAvailable bool
}

func (a *FakeActor) ScaleOut(_ context.Context) error {
	a.Actions = append(a.Actions, actionScaleOut)
	return nil
}

func (a *FakeActor) ScaleInOutdated(_ context.Context) (bool, error) {
	a.Actions = append(a.Actions, actionScaleInOutdated)
	a.outdated -= 1
	if a.preferAvailable || a.unavailableOutdated == 0 {
		return false, nil
	}

	a.unavailableOutdated -= 1
	return true, nil
}

func (a *FakeActor) ScaleInUpdate(_ context.Context) (bool, error) {
	a.Actions = append(a.Actions, actionScaleInUpdate)
	a.update -= 1
	if a.preferAvailable || a.unavailableUpdate == 0 {
		return false, nil
	}

	a.unavailableUpdate -= 1
	return true, nil
}

func (a *FakeActor) Update(_ context.Context) error {
	a.Actions = append(a.Actions, actionUpdate)
	return nil
}

func TestExecutor(t *testing.T) {
	cases := []struct {
		desc                string
		update              int
		outdated            int
		desired             int
		unavailableUpdate   int
		unavailableOutdated int
		maxSurge            int
		maxUnavailable      int

		preferAvailable bool

		// expectedActions is the exact sequence of Actor calls Do must make;
		// expectedWait is Do's returned bool (true = wait for availability).
		expectedActions []action
		expectedWait    bool
	}{
		{
			desc:            "do nothing",
			update:          3,
			outdated:        0,
			desired:         3,
			maxSurge:        1,
			maxUnavailable:  1,
			expectedActions: nil,
		},
		{
			desc:           "scale out from 0 with 0 maxSurge",
			update:         0,
			outdated:       0,
			desired:        3,
			maxSurge:       0,
			maxUnavailable: 1,
			expectedActions: []action{
				actionScaleOut,
				actionScaleOut,
				actionScaleOut,
			},
		},
		{
			desc:           "scale out from 0 with 1 maxSurge",
			update:         0,
			outdated:       0,
			desired:        3,
			maxSurge:       1,
			maxUnavailable: 1,
			expectedActions: []action{
				actionScaleOut,
				actionScaleOut,
				actionScaleOut,
			},
		},
		{
			desc:           "scale in to 0",
			update:         3,
			outdated:       0,
			desired:        0,
			maxSurge:       0,
			maxUnavailable: 1,
			expectedActions: []action{
				actionScaleInUpdate,
				actionScaleInUpdate,
				actionScaleInUpdate,
			},
		},
		{
			desc:           "rolling update with 0 maxSurge and 1 maxUnavailable(0)",
			update:         0,
			outdated:       3,
			desired:        3,
			maxSurge:       0,
			maxUnavailable: 1,
			expectedActions: []action{
				actionUpdate,
			},
			expectedWait: true,
		},
		{
			desc:           "rolling update with 0 maxSurge and 1 maxUnavailable(1)",
			update:         1,
			outdated:       2,
			desired:        3,
			maxSurge:       0,
			maxUnavailable: 1,
			expectedActions: []action{
				actionUpdate,
			},
			expectedWait: true,
		},
		{
			desc:           "rolling update with 0 maxSurge and 1 maxUnavailable(2)",
			update:         2,
			outdated:       1,
			desired:        3,
			maxSurge:       0,
			maxUnavailable: 1,
			expectedActions: []action{
				actionUpdate,
			},
		},
		{
			desc:           "rolling update with 1 maxSurge and 0 maxUnavailable(0)",
			update:         0,
			outdated:       3,
			desired:        3,
			maxSurge:       1,
			maxUnavailable: 0,
			expectedActions: []action{
				actionScaleOut,
			},
			expectedWait: true,
		},
		{
			desc:           "rolling update with 1 maxSurge and 0 maxUnavailable(1)",
			update:         1,
			outdated:       3,
			desired:        3,
			maxSurge:       1,
			maxUnavailable: 0,
			expectedActions: []action{
				actionUpdate,
			},
			expectedWait: true,
		},
		{
			desc:           "rolling update with 1 maxSurge and 0 maxUnavailable(2)",
			update:         2,
			outdated:       2,
			desired:        3,
			maxSurge:       1,
			maxUnavailable: 0,
			expectedActions: []action{
				actionUpdate,
			},
			expectedWait: true,
		},
		{
			desc:           "rolling update with 1 maxSurge and 0 maxUnavailable(3)",
			update:         3,
			outdated:       1,
			desired:        3,
			maxSurge:       1,
			maxUnavailable: 0,
			expectedActions: []action{
				actionScaleInOutdated,
			},
		},
		{
			desc:           "rolling update with 1 maxSurge and 1 maxUnavailable(0)",
			update:         0,
			outdated:       3,
			desired:        3,
			maxSurge:       1,
			maxUnavailable: 1,
			expectedActions: []action{
				actionScaleOut,
				actionUpdate,
			},
			expectedWait: true,
		},
		{
			// when 1 update is ready but 1 is not
			desc:              "rolling update with 1 maxSurge and 1 maxUnavailable and 1 unavailableUpdate(0-1)",
			update:            2,
			outdated:          2,
			desired:           3,
			unavailableUpdate: 1,
			maxSurge:          1,
			maxUnavailable:    1,
			expectedActions: []action{
				actionUpdate,
			},
			expectedWait: true,
		},
		{
			// there is still 1 update not ready
			desc:              "rolling update with 1 maxSurge and 1 maxUnavailable and 1 unavailableUpdate(0-2)",
			update:            3,
			outdated:          1,
			desired:           3,
			unavailableUpdate: 1,
			maxSurge:          1,
			maxUnavailable:    1,
			expectedActions: []action{
				actionScaleInOutdated,
			},
		},
		{
			desc:           "rolling update with 1 maxSurge and 1 maxUnavailable(1)",
			update:         2,
			outdated:       2,
			desired:        3,
			maxSurge:       1,
			maxUnavailable: 1,
			expectedActions: []action{
				actionUpdate,
				actionScaleInOutdated,
			},
		},
		{
			desc:           "rolling update with 2 maxSurge and 0 maxUnavailable(0)",
			update:         0,
			outdated:       3,
			desired:        3,
			maxSurge:       2,
			maxUnavailable: 0,
			expectedActions: []action{
				actionScaleOut,
				actionScaleOut,
			},
			expectedWait: true,
		},
		{
			desc:           "rolling update with 2 maxSurge and 0 maxUnavailable(1)",
			update:         2,
			outdated:       3,
			desired:        3,
			maxSurge:       2,
			maxUnavailable: 0,
			expectedActions: []action{
				actionUpdate,
				actionScaleInOutdated,
			},
			expectedWait: true,
		},
		{
			desc:           "rolling update with 2 maxSurge and 0 maxUnavailable(2)",
			update:         3,
			outdated:       1,
			desired:        3,
			maxSurge:       2,
			maxUnavailable: 0,
			expectedActions: []action{
				actionScaleInOutdated,
			},
		},
		{
			desc:           "scale out and rolling update at same time with 0 maxSurge and 1 maxUnavailable(0)",
			update:         0,
			outdated:       3,
			desired:        5,
			maxSurge:       0,
			maxUnavailable: 1,
			expectedActions: []action{
				actionScaleOut,
				actionScaleOut,
			},
			expectedWait: true,
		},
		{
			desc:   "scale out and rolling update at same time with 0 maxSurge and 1 maxUnavailable(1)",
			update: 2,
outdated: 3, + desired: 5, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale out and rolling update at same time with 0 maxSurge and 1 maxUnavailable(2)", + update: 3, + outdated: 2, + desired: 5, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale out and rolling update at same time with 0 maxSurge and 1 maxUnavailable(3)", + update: 4, + outdated: 1, + desired: 5, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + }, + { + desc: "scale out and rolling update at same time with 1 maxSurge and 0 maxUnavailable(0)", + update: 0, + outdated: 3, + desired: 5, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionScaleOut, + actionScaleOut, + actionScaleOut, + }, + expectedWait: true, + }, + { + desc: "scale out and rolling update at same time with 1 maxSurge and 0 maxUnavailable(1)", + update: 3, + outdated: 3, + desired: 5, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale out and rolling update at same time with 1 maxSurge and 0 maxUnavailable(2)", + update: 4, + outdated: 2, + desired: 5, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale out and rolling update at same time with 1 maxSurge and 0 maxUnavailable(3)", + update: 5, + outdated: 1, + desired: 5, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionScaleInOutdated, + }, + }, + { + desc: "scale out and rolling update at same time with 1 maxSurge and 1 maxUnavailable(0)", + update: 0, + outdated: 3, + desired: 5, + maxSurge: 1, + maxUnavailable: 1, + expectedActions: []action{ + actionScaleOut, + actionScaleOut, + actionScaleOut, + }, + expectedWait: true, + }, + { + desc: "scale out and rolling update at same time 
with 1 maxSurge and 1 maxUnavailable(1)", + update: 3, + outdated: 3, + desired: 5, + maxSurge: 1, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale out and rolling update at same time with 1 maxSurge and 1 maxUnavailable(2)", + update: 5, + outdated: 1, + desired: 5, + maxSurge: 1, + maxUnavailable: 1, + expectedActions: []action{ + actionScaleInOutdated, + }, + }, + { + desc: "scale in and rolling update at same time with 0 maxSurge and 1 maxUnavailable(0)", + update: 0, + outdated: 5, + desired: 3, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionScaleInOutdated, + actionScaleInOutdated, + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale in and rolling update at same time with 0 maxSurge and 1 maxUnavailable(1)", + update: 1, + outdated: 2, + desired: 3, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale in and rolling update at same time with 0 maxSurge and 1 maxUnavailable(2)", + update: 2, + outdated: 1, + desired: 3, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + }, + { + desc: "scale in and rolling update at same time with 1 maxSurge and 0 maxUnavailable(0)", + update: 0, + outdated: 5, + desired: 3, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionScaleInOutdated, + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale in and rolling update at same time with 1 maxSurge and 0 maxUnavailable(1)", + update: 1, + outdated: 3, + desired: 3, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale in and rolling update at same time with 1 maxSurge and 0 maxUnavailable(2)", + update: 2, + outdated: 2, + desired: 3, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: 
true, + }, + { + desc: "scale in and rolling update at same time with 1 maxSurge and 0 maxUnavailable(3)", + update: 3, + outdated: 1, + desired: 3, + maxSurge: 1, + maxUnavailable: 0, + expectedActions: []action{ + actionScaleInOutdated, + }, + }, + { + desc: "scale in and rolling update at same time with 1 maxSurge and 1 maxUnavailable(0)", + update: 0, + outdated: 5, + desired: 3, + maxSurge: 1, + maxUnavailable: 1, + expectedActions: []action{ + actionScaleInOutdated, + actionUpdate, + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "scale in and rolling update at same time with 1 maxSurge and 1 maxUnavailable(1)", + update: 2, + outdated: 2, + desired: 3, + maxSurge: 1, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + actionScaleInOutdated, + }, + }, + { + desc: "rolling update with all are unavailable(0)", + update: 0, + outdated: 3, + desired: 3, + unavailableOutdated: 3, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + actionUpdate, + actionUpdate, + }, + }, + { + desc: "scale in when all are unavailable(0)", + update: 5, + outdated: 0, + desired: 3, + unavailableUpdate: 5, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionScaleInUpdate, + actionScaleInUpdate, + }, + }, + { + desc: "complex case(0)", + update: 1, + outdated: 4, + desired: 3, + unavailableOutdated: 2, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionScaleInOutdated, + actionScaleInOutdated, + }, + expectedWait: true, + }, + { + desc: "complex case(1-0)", + update: 1, + outdated: 3, + desired: 3, + unavailableOutdated: 2, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionScaleInOutdated, + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "complex case(1-1)", + update: 1, + outdated: 3, + desired: 3, + unavailableOutdated: 1, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: 
[]action{ + actionScaleInOutdated, + }, + expectedWait: true, + }, + { + desc: "complex case(2-0)", + update: 1, + outdated: 2, + desired: 3, + unavailableOutdated: 2, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + actionUpdate, + }, + }, + { + desc: "complex case(2-1)", + update: 1, + outdated: 2, + desired: 3, + unavailableOutdated: 1, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + expectedWait: true, + }, + { + desc: "complex case(2-2)", + update: 1, + outdated: 2, + desired: 3, + unavailableOutdated: 0, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: nil, + expectedWait: true, + }, + { + desc: "complex case(3-0)", + update: 2, + outdated: 1, + desired: 3, + unavailableOutdated: 1, + unavailableUpdate: 2, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + }, + { + desc: "complex case(3-1)", + update: 2, + outdated: 1, + desired: 3, + unavailableOutdated: 0, + unavailableUpdate: 2, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: nil, + expectedWait: true, + }, + { + desc: "complex case(3-2)", + update: 2, + outdated: 1, + desired: 3, + unavailableOutdated: 1, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + }, + { + desc: "complex case(3-3)", + update: 2, + outdated: 1, + desired: 3, + unavailableOutdated: 0, + unavailableUpdate: 1, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: nil, + expectedWait: true, + }, + { + desc: "complex case(3-4)", + update: 2, + outdated: 1, + desired: 3, + unavailableOutdated: 1, + unavailableUpdate: 0, + maxSurge: 0, + maxUnavailable: 1, + expectedActions: []action{ + actionUpdate, + }, + }, + { + desc: "complex case(3-5)", + update: 2, + outdated: 1, + desired: 3, + unavailableOutdated: 0, + unavailableUpdate: 0, + maxSurge: 0, + maxUnavailable: 1, + 
expectedActions: []action{ + actionUpdate, + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + act := &FakeActor{ + update: c.update, + outdated: c.outdated, + unavailableUpdate: c.unavailableUpdate, + unavailableOutdated: c.unavailableOutdated, + } + e := NewExecutor(act, c.update, c.outdated, c.desired, c.unavailableUpdate, c.unavailableOutdated, c.maxSurge, c.maxUnavailable) + wait, err := e.Do(context.TODO()) + require.NoError(tt, err) + assert.Equal(tt, c.expectedWait, wait, c.desc) + assert.Equal(tt, c.expectedActions, act.Actions, c.desc) + }) + } +} diff --git a/pkg/updater/policy/keep.go b/pkg/updater/policy/keep.go new file mode 100644 index 00000000000..043bc1ed099 --- /dev/null +++ b/pkg/updater/policy/keep.go @@ -0,0 +1,34 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package policy + +import ( + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/updater" +) + +func KeepName[PT runtime.Instance]() updater.UpdateHook[PT] { + return updater.UpdateHookFunc[PT](func(update, outdated PT) PT { + update.SetName(outdated.GetName()) + return update + }) +} + +func KeepTopology[PT runtime.Instance]() updater.UpdateHook[PT] { + return updater.UpdateHookFunc[PT](func(update, outdated PT) PT { + update.SetTopology(outdated.GetTopology()) + return update + }) +} diff --git a/pkg/updater/policy/topology.go b/pkg/updater/policy/topology.go new file mode 100644 index 00000000000..26c75ceca6b --- /dev/null +++ b/pkg/updater/policy/topology.go @@ -0,0 +1,68 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package policy + +import ( + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/updater" + "github.com/pingcap/tidb-operator/pkg/utils/topology" +) + +type topologyPolicy[PT runtime.Instance] struct { + scheduler topology.Scheduler +} + +type TopologyPolicy[PT runtime.Instance] interface { + updater.AddHook[PT] + updater.DelHook[PT] + updater.PreferPolicy[PT] +} + +func NewTopologyPolicy[PT runtime.Instance](ts []v1alpha1.ScheduleTopology) (TopologyPolicy[PT], error) { + s, err := topology.New(ts) + if err != nil { + return nil, err + } + return &topologyPolicy[PT]{ + scheduler: s, + }, nil +} + +func (p *topologyPolicy[PT]) Add(update PT) PT { + topo := p.scheduler.NextAdd() + update.SetTopology(topo) + p.scheduler.Add(update.GetName(), update.GetTopology()) + + return update +} + +func (p *topologyPolicy[PT]) Delete(name string) { + p.scheduler.Del(name) +} + +func (p *topologyPolicy[PT]) Prefer(allowed []PT) []PT { + names := p.scheduler.NextDel() + preferred := make([]PT, 0, len(allowed)) + for _, item := range allowed { + for _, name := range names { + if item.GetName() == name { + preferred = append(preferred, item) + } + } + } + + return preferred +} diff --git a/pkg/updater/selector.go b/pkg/updater/selector.go new file mode 100644 index 00000000000..35c17d1ac37 --- /dev/null +++ b/pkg/updater/selector.go @@ -0,0 +1,121 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package updater + +import ( + "fmt" + + "github.com/pingcap/tidb-operator/pkg/runtime" +) + +const ( + maxPreferPolicy = 30 +) + +type PreferPolicy[PT runtime.Instance] interface { + Prefer([]PT) []PT +} + +type ScoredPreferPolicy[PT runtime.Instance] interface { + Score() Score + Prefer([]PT) []PT +} + +type PreferPolicyFunc[PT runtime.Instance] func([]PT) []PT + +func (f PreferPolicyFunc[PT]) Prefer(in []PT) []PT { + return f(in) +} + +type Selector[PT runtime.Instance] interface { + Choose([]PT) string +} + +type Score uint32 + +type scoredPreferPolicy[PT runtime.Instance] struct { + PreferPolicy[PT] + + score Score +} + +func scored[PT runtime.Instance](s Score, p PreferPolicy[PT]) ScoredPreferPolicy[PT] { + return &scoredPreferPolicy[PT]{ + PreferPolicy: p, + score: s, + } +} + +func (p *scoredPreferPolicy[PT]) Score() Score { + return p.score +} + +type selector[PT runtime.Instance] struct { + ps []ScoredPreferPolicy[PT] +} + +func NewSelector[PT runtime.Instance](ps ...PreferPolicy[PT]) Selector[PT] { + if len(ps) > maxPreferPolicy { + // TODO: use a util to panic for unreachable code + panic(fmt.Sprintf("cannot new selector with too much prefer policy: %d", len(ps))) + } + s := selector[PT]{} + for i, p := range ps { + s.ps = append(s.ps, scored(Score(1<<(i+1)), p)) + } + + return &s +} + +func (s *selector[PT]) Choose(allowed []PT) string { + scores := make(map[string]uint32, len(allowed)) + for _, in := range allowed { + scores[in.GetName()] = 1 + } + for _, p := range s.ps { + preferred := p.Prefer(allowed) + for _, ins := range preferred { + score, ok := scores[ins.GetName()] + if !ok { + score = 0 + } + score += uint32(p.Score()) + scores[ins.GetName()] = score + } + } + + choosed := "" + maximum := uint32(0) + for name, score := range scores { + if score > maximum { + choosed = name + maximum = score + } + } + + return choosed +} + +func 
PreferUnavailable[PT runtime.Instance]() PreferPolicy[PT] { + return PreferPolicyFunc[PT](func(s []PT) []PT { + unavail := []PT{} + for _, in := range s { + if !in.IsUpToDate() || !in.IsHealthy() { + unavail = append(unavail, in) + } + } + return unavail + }) +} diff --git a/pkg/updater/state.go b/pkg/updater/state.go new file mode 100644 index 00000000000..05b82cdb612 --- /dev/null +++ b/pkg/updater/state.go @@ -0,0 +1,67 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package updater + +import ( + "github.com/pingcap/tidb-operator/pkg/runtime" +) + +type State[PT runtime.Instance] interface { + Add(obj PT) + Del(name string) PT + Get(name string) PT + List() []PT + Len() int +} + +type state[PT runtime.Instance] struct { + nameToObj map[string]PT +} + +func NewState[PT runtime.Instance](instances []PT) State[PT] { + nameToObj := make(map[string]PT) + for _, instance := range instances { + nameToObj[instance.GetName()] = instance + } + return &state[PT]{ + nameToObj: nameToObj, + } +} + +func (s *state[PT]) Add(obj PT) { + s.nameToObj[obj.GetName()] = obj +} + +func (s *state[PT]) Del(name string) PT { + obj := s.nameToObj[name] + delete(s.nameToObj, name) + return obj +} + +func (s *state[PT]) Get(name string) PT { + return s.nameToObj[name] +} + +func (s *state[PT]) List() []PT { + l := make([]PT, 0, len(s.nameToObj)) + for _, obj := range s.nameToObj { + l = append(l, obj) + } + return l +} + +func (s *state[PT]) Len() int { + return len(s.nameToObj) +} diff --git a/pkg/utils/fake/fake.go b/pkg/utils/fake/fake.go new file mode 100644 index 00000000000..9a0ec805021 --- /dev/null +++ b/pkg/utils/fake/fake.go @@ -0,0 +1,125 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fake + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +type Object[T any] interface { + client.Object + *T +} + +type Pointer[T any] interface { + *T +} + +type ChangeFunc[T any, PT Pointer[T]] func(obj PT) PT + +func Fake[T any, PT Pointer[T]](changes ...ChangeFunc[T, PT]) PT { + var obj PT = new(T) + for _, change := range changes { + obj = change(obj) + } + + return obj +} + +func FakeObj[T any, PT Object[T]](name string, changes ...ChangeFunc[T, PT]) PT { + obj := Fake(changes...) + obj.SetName(name) + + return obj +} + +func Label[T any, PT Object[T]](k, v string) ChangeFunc[T, PT] { + return func(obj PT) PT { + ls := obj.GetLabels() + if ls == nil { + ls = map[string]string{} + } + ls[k] = v + obj.SetLabels(ls) + + return obj + } +} + +func Annotation[T any, PT Object[T]](k, v string) ChangeFunc[T, PT] { + return func(obj PT) PT { + a := obj.GetAnnotations() + if a == nil { + a = map[string]string{} + } + a[k] = v + obj.SetAnnotations(a) + return obj + } +} + +func SetDeleteTimestamp[T any, PT Object[T]]() ChangeFunc[T, PT] { + return func(obj PT) PT { + now := metav1.Now() + obj.SetDeletionTimestamp(&now) + return obj + } +} + +func AddFinalizer[T any, PT Object[T]]() ChangeFunc[T, PT] { + return func(obj PT) PT { + controllerutil.AddFinalizer(obj, v1alpha1.Finalizer) + return obj + } +} + +func SetGeneration[T any, PT Object[T]](gen int64) ChangeFunc[T, PT] { + return func(obj PT) PT { + obj.SetGeneration(gen) + return obj + } +} + +func SetNamespace[T any, PT Object[T]](ns string) ChangeFunc[T, PT] { + return func(obj PT) PT { + obj.SetNamespace(ns) + return obj + } +} + +func GVK[T any, PT Object[T]](gv schema.GroupVersion) ChangeFunc[T, PT] { + return func(obj PT) PT { + t 
:= reflect.TypeOf(obj).Elem() + obj.GetObjectKind().SetGroupVersionKind(gv.WithKind(t.Name())) + + return obj + } +} + +func UID[T any, PT Object[T]](uid string) ChangeFunc[T, PT] { + return func(obj PT) PT { + obj.SetUID(types.UID(uid)) + + return obj + } +} diff --git a/pkg/utils/http/http.go b/pkg/utils/http/http.go new file mode 100644 index 00000000000..bb40ab6171d --- /dev/null +++ b/pkg/utils/http/http.go @@ -0,0 +1,84 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httputil + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" +) + +// DeferClose captures and prints the error from closing (if an error occurs). +// This is designed to be used in a defer statement. +func DeferClose(c io.Closer) { + if err := c.Close(); err != nil { + fmt.Printf("Error closing: %v\n", err) + } +} + +// ReadErrorBody in the error case ready the body message. +// But return it as an error (or return an error from reading the body). 
+func ReadErrorBody(body io.Reader) (err error) { + bodyBytes, err := io.ReadAll(body) + if err != nil { + return err + } + return errors.New(string(bodyBytes)) +} + +// GetBodyOK returns the body or an error if the response is not okay +func GetBodyOK(ctx context.Context, httpClient *http.Client, apiURL string) ([]byte, error) { + return DoBodyOK(ctx, httpClient, apiURL, "GET", nil) +} + +// PutBodyOK will PUT and returns the body or an error if the response is not okay +func PutBodyOK(ctx context.Context, httpClient *http.Client, apiURL string) ([]byte, error) { + return DoBodyOK(ctx, httpClient, apiURL, "PUT", nil) +} + +// DeleteBodyOK will DELETE and returns the body or an error if the response is not okay +func DeleteBodyOK(ctx context.Context, httpClient *http.Client, apiURL string) ([]byte, error) { + return DoBodyOK(ctx, httpClient, apiURL, "DELETE", nil) +} + +// PostBodyOK will POST and returns the body or an error if the response is not okay +func PostBodyOK(ctx context.Context, httpClient *http.Client, apiURL string, reqBody io.Reader) ([]byte, error) { + return DoBodyOK(ctx, httpClient, apiURL, "POST", reqBody) +} + +// DoBodyOK returns the body or an error if the response is not okay(StatusCode >= 400) +func DoBodyOK(ctx context.Context, httpClient *http.Client, apiURL, method string, reqBody io.Reader) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, method, apiURL, reqBody) + if err != nil { + return nil, err + } + req.Header.Add("Content-Type", "application/json") + res, err := httpClient.Do(req) + if err != nil { + return nil, err + } + defer DeferClose(res.Body) + body, err := io.ReadAll(res.Body) + if err != nil { + return nil, err + } + if res.StatusCode >= http.StatusBadRequest { + errMsg := fmt.Errorf("error response %v URL %s,body response: %s", res.StatusCode, apiURL, string(body)) + return nil, errMsg + } + return body, err +} diff --git a/pkg/utils/http/http_test.go b/pkg/utils/http/http_test.go new file mode 100644 index 
00000000000..0794b926396 --- /dev/null +++ b/pkg/utils/http/http_test.go @@ -0,0 +1,132 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httputil + +import ( + "bytes" + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + "testing/iotest" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReadErrorBody(t *testing.T) { + cases := []struct { + desc string + input io.Reader + want string + }{ + { + desc: "normal", + input: bytes.NewReader([]byte("ok")), + want: "ok", + }, + { + desc: "timeout", + input: iotest.TimeoutReader(bytes.NewReader([]byte("ok"))), + want: "timeout", + }, + } + + for _, tt := range cases { + t.Run(tt.desc, func(t *testing.T) { + err := ReadErrorBody(tt.input) + require.Error(t, err) + assert.Equal(t, tt.want, err.Error()) + }) + } +} + +func TestDoBodyOK(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/ok": + // just echo the body + data, err := io.ReadAll(r.Body) + if err != nil { + // different from the below 500 + w.WriteHeader(403) + return + } + w.WriteHeader(200) + _, err = w.Write(data) + assert.NoError(t, err) + case "/server_error": + w.WriteHeader(500) + return + } + })) + + cases := []struct { + desc string + method func(context.Context, *http.Client, string) ([]byte, error) + path string + want []byte + hasErr bool + }{ + { + desc: 
"GetBodyOK", + method: GetBodyOK, + path: "/ok", + want: []byte(""), + }, + { + desc: "PutBodyOK", + method: PutBodyOK, + path: "/ok", + want: []byte(""), + }, + { + desc: "DeleteBodyOK", + method: DeleteBodyOK, + path: "/ok", + want: []byte(""), + }, + { + desc: "PostBodyOK", + method: func(ctx context.Context, cli *http.Client, url string) ([]byte, error) { + return PostBodyOK(ctx, cli, url, bytes.NewReader([]byte("ok"))) + }, + path: "/ok", + want: []byte("ok"), + }, + { + desc: "error status code", + method: GetBodyOK, + path: "/server_error", + want: nil, + hasErr: true, + }, + } + + for _, tt := range cases { + t.Run(tt.desc, func(t *testing.T) { + cli := ts.Client() + ctx := context.Background() + data, err := tt.method(ctx, cli, ts.URL+tt.path) + if tt.hasErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + assert.Equal(t, tt.want, data) + }) + } +} diff --git a/pkg/utils/k8s/deletion.go b/pkg/utils/k8s/deletion.go new file mode 100644 index 00000000000..7f74e366cca --- /dev/null +++ b/pkg/utils/k8s/deletion.go @@ -0,0 +1,117 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" +) + +// EnsureGroupSubResourceDeleted ensures the sub resources of a group are deleted. 
+// It only deletes the service of the group currently. +func EnsureGroupSubResourceDeleted(ctx context.Context, cli client.Client, + namespace, name string, _ ...client.DeleteOption, +) error { + var needWait bool // wait after we call delete on some resources + var svcList corev1.ServiceList + if err := cli.List(ctx, &svcList, client.InNamespace(namespace), + client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyGroup: name, + }); err != nil { + return fmt.Errorf("failed to list svc %s/%s: %w", namespace, name, err) + } + for i := range svcList.Items { + svc := svcList.Items[i] + if err := cli.Delete(ctx, &svc); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete svc %s/%s: %w", namespace, svc.Name, err) + } + continue + } + needWait = true + } + + if needWait { + return fmt.Errorf("wait for all sub resources of %s/%s being removed", namespace, name) + } + + return nil +} + +// EnsureInstanceSubResourceDeleted ensures the sub resources of an instance are deleted. +// It deletes the pod, pvc and configmap of the instance currently. +// For pod and configmap, the name of the resource is the same as the instance name. +// For pvc, it should contain the instance name as the value of the label "app.kubernetes.io/instance". 
+// TODO: retain policy support +func EnsureInstanceSubResourceDeleted(ctx context.Context, cli client.Client, + namespace, name string, podOpts ...client.DeleteOption, +) error { + var needWait bool // wait after we call delete on some resources + var pod corev1.Pod + if err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &pod); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to get pod %s/%s: %w", namespace, name, err) + } + } else { + if err := cli.Delete(ctx, &pod, podOpts...); err != nil { + return fmt.Errorf("failed to delete pod %s/%s: %w", namespace, name, err) + } + needWait = true + } + + var cm corev1.ConfigMap + if err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to get cm %s/%s: %w", namespace, name, err) + } + } else { + if err := cli.Delete(ctx, &cm); err != nil { + return fmt.Errorf("failed to delete cm %s/%s: %w", namespace, name, err) + } + needWait = true + } + + var pvcList corev1.PersistentVolumeClaimList + if err := cli.List(ctx, &pvcList, client.InNamespace(namespace), + client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyInstance: name, + }); err != nil { + return fmt.Errorf("failed to list pvc %s/%s: %w", namespace, name, err) + } + for i := range pvcList.Items { + pvc := pvcList.Items[i] + if err := cli.Delete(ctx, &pvc); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete pvc %s/%s: %w", namespace, pvc.Name, err) + } + continue + } + needWait = true + } + + if needWait { + return fmt.Errorf("wait for all sub resources of %s/%s being removed", namespace, name) + } + + return nil +} diff --git a/pkg/utils/k8s/finalizer.go b/pkg/utils/k8s/finalizer.go new file mode 100644 index 00000000000..8ad9f7d6f4b --- /dev/null +++ b/pkg/utils/k8s/finalizer.go @@ -0,0 +1,44 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" +) + +// EnsureFinalizer ensures the finalizer is added to the object and updates the object if necessary. +func EnsureFinalizer(ctx context.Context, cli client.Client, obj client.Object) error { + if controllerutil.AddFinalizer(obj, v1alpha1.Finalizer) { + if err := cli.Update(ctx, obj); err != nil { + return err + } + } + return nil +} + +// RemoveFinalizer removes the finalizer from the object and updates the object if necessary. +func RemoveFinalizer(ctx context.Context, cli client.Client, obj client.Object) error { + if controllerutil.RemoveFinalizer(obj, v1alpha1.Finalizer) { + if err := cli.Update(ctx, obj); err != nil { + return err + } + } + return nil +} diff --git a/pkg/utils/k8s/node.go b/pkg/utils/k8s/node.go new file mode 100644 index 00000000000..4fcc90e470f --- /dev/null +++ b/pkg/utils/k8s/node.go @@ -0,0 +1,49 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8s
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// a pre-defined mapping that maps some short label names to K8s well-known labels.
+// PD depends on short label names to gain better performance.
+// See: https://github.com/pingcap/tidb-operator/issues/4678 for more details.
+var shortLabelNameToK8sLabel = map[string][]string{
+	"region": {corev1.LabelTopologyRegion},
+	"zone":   {corev1.LabelTopologyZone},
+	"host":   {corev1.LabelHostname},
+}
+
+// GetNodeLabelsForKeys gets the labels of the node for the specified keys.
+// This function is used to get the labels for setting store & server labels.
+func GetNodeLabelsForKeys(node *corev1.Node, keys []string) map[string]string {
+	labels := make(map[string]string)
+	for _, key := range keys {
+		if value, ok := node.Labels[key]; ok {
+			labels[key] = value
+			continue
+		}
+		if k8sLabels, ok := shortLabelNameToK8sLabel[key]; ok {
+			for _, kl := range k8sLabels {
+				if value, ok := node.Labels[kl]; ok {
+					labels[key] = value
+					break
+				}
+			}
+		}
+	}
+	return labels
+}
diff --git a/pkg/utils/k8s/pod.go b/pkg/utils/k8s/pod.go
new file mode 100644
index 00000000000..0fee81e8f1b
--- /dev/null
+++ b/pkg/utils/k8s/pod.go
@@ -0,0 +1,101 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "fmt" + "hash/fnv" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/rand" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + maputil "github.com/pingcap/tidb-operator/pkg/utils/map" + hashutil "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/util/hash" +) + +// CalculateHashAndSetLabels calculate the hash of pod spec and set it to the pod labels. +func CalculateHashAndSetLabels(pod *corev1.Pod) { + hasher := fnv.New32a() + if pod.Labels == nil { + pod.Labels = map[string]string{} + } + spec := pod.Spec.DeepCopy() + for i := range spec.InitContainers { + c := &spec.InitContainers[i] + // ignores init containers image change to support hot reload image for sidecar + c.Image = "" + } + hashutil.DeepHashObject(hasher, spec) + pod.Labels[v1alpha1.LabelKeyPodSpecHash] = rand.SafeEncodeString(fmt.Sprint(hasher.Sum32())) +} + +type CompareResult int + +const ( + CompareResultEqual CompareResult = iota + CompareResultRecreate + CompareResultUpdate +) + +func (r CompareResult) String() string { + switch r { + case CompareResultEqual: + return "Equal" + case CompareResultRecreate: + return "Recreate" + case CompareResultUpdate: + return "Update" + default: + return "Unknown" + } +} + +// ComparePods compares two pods and returns the result of comparison. +// TODO: add check for changes that can be updated without recreating the pod. 
+func ComparePods(current, expected *corev1.Pod) CompareResult { + if current.GetLabels()[v1alpha1.LabelKeyPodSpecHash] == expected.GetLabels()[v1alpha1.LabelKeyPodSpecHash] { + // The revision hash will always be different when there is a change, so ignore it. + p1, p2 := current.DeepCopy(), expected.DeepCopy() + // We also should update labels of pods even if revisions are not equal + // delete(p1.Labels, v1alpha1.LabelKeyInstanceRevisionHash) + // delete(p2.Labels, v1alpha1.LabelKeyInstanceRevisionHash) + if !maputil.AreEqual(p1.Labels, p2.Labels) || !maputil.AreEqual(p1.Annotations, p2.Annotations) { + // Labels or annotations are different, need to update the pod. + return CompareResultUpdate + } + // No difference found, no need to update the pod. + return CompareResultEqual + } + // Pod spec hash is different, need to recreate the pod. + return CompareResultRecreate +} + +func GetResourceRequirements(req v1alpha1.ResourceRequirements) corev1.ResourceRequirements { + ret := corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{}, + Requests: map[corev1.ResourceName]resource.Quantity{}, + } + if req.CPU != nil { + ret.Requests[corev1.ResourceCPU] = *req.CPU + ret.Limits[corev1.ResourceCPU] = *req.CPU + } + if req.Memory != nil { + ret.Requests[corev1.ResourceMemory] = *req.Memory + ret.Limits[corev1.ResourceMemory] = *req.Memory + } + return ret +} diff --git a/pkg/utils/k8s/pod_test.go b/pkg/utils/k8s/pod_test.go new file mode 100644 index 00000000000..e71d1363dda --- /dev/null +++ b/pkg/utils/k8s/pod_test.go @@ -0,0 +1,124 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +func TestComparePods(t *testing.T) { + tests := []struct { + name string + current *corev1.Pod + expected *corev1.Pod + want CompareResult + }{ + { + name: "test equal", + current: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + ), + expected: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + ), + want: CompareResultEqual, + }, + { + name: "revision should not be ignored", + current: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + fake.Label[corev1.Pod](v1alpha1.LabelKeyInstanceRevisionHash, "v2"), + ), + expected: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + fake.Label[corev1.Pod](v1alpha1.LabelKeyInstanceRevisionHash, "v1"), + ), + want: CompareResultUpdate, + }, + { + name: "only labels different", + current: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + fake.Label[corev1.Pod]("test", "bar"), + fake.Label[corev1.Pod](v1alpha1.LabelKeyInstanceRevisionHash, "v2"), + ), + expected: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + fake.Label[corev1.Pod]("test", "test"), + fake.Label[corev1.Pod](v1alpha1.LabelKeyInstanceRevisionHash, "v1"), + ), + want: CompareResultUpdate, + }, + { + name: "only annotations different", + 
current: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + fake.Label[corev1.Pod]("test", "bar"), + fake.Annotation[corev1.Pod]("k1", "v1"), + ), + expected: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + fake.Label[corev1.Pod]("test", "bar"), + fake.Annotation[corev1.Pod]("k1", "v2"), + ), + want: CompareResultUpdate, + }, + { + name: "test recreate", + current: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "foo"), + ), + expected: fake.FakeObj("pod", + fake.Label[corev1.Pod](v1alpha1.LabelKeyPodSpecHash, "bar"), + ), + want: CompareResultRecreate, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ComparePods(tt.current, tt.expected); got != tt.want { + t.Errorf("ComparePods() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCalculateHashAndSetLabels(t *testing.T) { + p1 := fake.FakeObj("pod", func(p *corev1.Pod) *corev1.Pod { + p.Spec.Containers = []corev1.Container{ + {Name: "test", Image: "test"}, + } + p.Spec.TerminationGracePeriodSeconds = ptr.To(int64(10)) + return p + }) + + p2 := p1.DeepCopy() + if p2.Labels == nil { + p2.Labels = map[string]string{} + } + p2.Labels["foo"] = "bar" + + CalculateHashAndSetLabels(p1) + CalculateHashAndSetLabels(p2) + if p1.Labels[v1alpha1.LabelKeyPodSpecHash] != p2.Labels[v1alpha1.LabelKeyPodSpecHash] { + t.Errorf("CalculateHashAndSetLabels() = %v, want %v", p1.Labels[v1alpha1.LabelKeyPodSpecHash], p2.Labels[v1alpha1.LabelKeyPodSpecHash]) + } +} diff --git a/pkg/utils/k8s/rate_limiter.go b/pkg/utils/k8s/rate_limiter.go new file mode 100644 index 00000000000..81deba5580f --- /dev/null +++ b/pkg/utils/k8s/rate_limiter.go @@ -0,0 +1,31 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "time" + + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" +) + +const ( + controllerInitDelay = 1 * time.Second + controllerMaxInterval = 1 * time.Minute +) + +var RateLimiter = workqueue.NewTypedMaxOfRateLimiter[ctrl.Request]( + workqueue.NewTypedItemExponentialFailureRateLimiter[ctrl.Request](controllerInitDelay, controllerMaxInterval), +) diff --git a/pkg/utils/k8s/revision/controller_revision.go b/pkg/utils/k8s/revision/controller_revision.go new file mode 100644 index 00000000000..faf92174ccb --- /dev/null +++ b/pkg/utils/k8s/revision/controller_revision.go @@ -0,0 +1,212 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package revision + +import ( + "bytes" + "encoding/json" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serializerjson "k8s.io/apimachinery/pkg/runtime/serializer/json" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/scheme" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/statefulset" +) + +const ( + defaultRevisionHistoryLimit = 10 +) + +var encoderMap = map[schema.GroupVersion]runtime.Encoder{} + +// GetCurrentAndUpdate returns the current and update ControllerRevisions. It also +// returns a collision count that records the number of name collisions set saw when creating +// new ControllerRevisions. This count is incremented on every name collision and is used in +// building the ControllerRevision names for name collision avoidance. This method may create +// a new revision, or modify the Revision of an existing revision if an update to ComponentGroup is detected. +// This method expects that revisions is sorted when supplied. 
+func GetCurrentAndUpdate(group client.Object, revisions []*appsv1.ControllerRevision, + cli history.Interface, accessor v1alpha1.ComponentAccessor) ( + currentRevision *appsv1.ControllerRevision, + updateRevision *appsv1.ControllerRevision, + collisionCount int32, + err error, +) { + if accessor.CollisionCount() != nil { + collisionCount = *accessor.CollisionCount() + } + + // create a new revision from the current object + updateRevision, err = newRevision(group, accessor.GVK(), statefulset.NextRevision(revisions), &collisionCount) + if err != nil { + return nil, nil, collisionCount, fmt.Errorf("failed to new a revision: %w", err) + } + + // find any equivalent revisions + equalRevisions := history.FindEqualRevisions(revisions, updateRevision) + equalCount := len(equalRevisions) + + revisionCount := len(revisions) + if equalCount > 0 && history.EqualRevision(revisions[revisionCount-1], equalRevisions[equalCount-1]) { + // if the equivalent revision is immediately prior the update revision has not changed + updateRevision = revisions[revisionCount-1] + } else if equalCount > 0 { + // if the equivalent revision is not immediately prior we will roll back by incrementing the + // Revision of the equivalent revision + updateRevision, err = cli.UpdateControllerRevision(equalRevisions[equalCount-1], updateRevision.Revision) + if err != nil { + return nil, nil, collisionCount, fmt.Errorf("failed to update a revision: %w", err) + } + } else { + // if there is no equivalent revision we create a new one + updateRevision, err = cli.CreateControllerRevision(group, updateRevision, &collisionCount) + if err != nil { + return nil, nil, collisionCount, fmt.Errorf("failed to create a revision: %w", err) + } + } + + // attempt to find the revision that corresponds to the current revision + for i := range revisions { + if revisions[i].Name == accessor.CurrentRevision() { + currentRevision = revisions[i] + break + } + } + + // if the current revision is nil we initialize the history 
by setting it to the update revision + if currentRevision == nil { + currentRevision = updateRevision + } + + return currentRevision, updateRevision, collisionCount, nil +} + +// newRevision creates a new ControllerRevision containing a patch that reapplies the target state of CR. +func newRevision(obj client.Object, gvk schema.GroupVersionKind, + revision int64, collisionCount *int32) (*appsv1.ControllerRevision, error) { + patch, err := getPatch(obj, gvk) + if err != nil { + return nil, fmt.Errorf("failed to get patch: %w", err) + } + cr, err := history.NewControllerRevision(obj, obj.GetLabels(), runtime.RawExtension{Raw: patch}, revision, collisionCount) + if err != nil { + return nil, fmt.Errorf("failed to create a revision: %w", err) + } + // Add this label so that tidb operator will watch this controller revision. + cr.Labels[v1alpha1.LabelKeyManagedBy] = v1alpha1.LabelValManagedByOperator + if err = controllerutil.SetControllerReference(obj, cr, scheme.Scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + return cr, nil +} + +// getPatch returns a merge patch that can be applied to restore a CR to a previous version. +// The current state that we save is just the spec. 
+func getPatch(obj client.Object, gvk schema.GroupVersionKind) ([]byte, error) { + encoder, ok := encoderMap[gvk.GroupVersion()] + if !ok { + encoder = scheme.Codecs.EncoderForVersion( + serializerjson.NewSerializerWithOptions( + serializerjson.DefaultMetaFactory, + scheme.Scheme, + scheme.Scheme, + serializerjson.SerializerOptions{ + Yaml: false, + Pretty: false, + Strict: true, + }), + gvk.GroupVersion(), + ) + } + + buf := bytes.Buffer{} + if err := encoder.Encode(obj, &buf); err != nil { + return nil, fmt.Errorf("failed to encode patch: %w", err) + } + + var raw map[string]any + if err := json.Unmarshal(buf.Bytes(), &raw); err != nil { + return nil, fmt.Errorf("failed to unmarshal data: %w", err) + } + + objCopy := make(map[string]any) + objCopy["$patch"] = "replace" + objCopy["spec"] = raw["spec"].(map[string]any) + return json.Marshal(objCopy) +} + +// TruncateHistory truncates any non-live ControllerRevisions in revisions from group's history. +// The UpdateRevision and CurrentRevision in group's Status are considered to be live. +// Any revisions associated with the Pods in pods are also considered to be live. +// Non-live revisions are deleted, starting with the revision with the lowest Revision, +// until only RevisionHistoryLimit revisions remain. +// This method expects that revisions is sorted when supplied. 
+func TruncateHistory[T v1alpha1.Instance]( + cli history.Interface, + instances []T, + revisions []*appsv1.ControllerRevision, + current *appsv1.ControllerRevision, + update *appsv1.ControllerRevision, + limit *int32) error { + // mark all live revisions + live := make(map[string]bool, len(revisions)) + if current != nil { + live[current.Name] = true + } + if update != nil { + live[update.Name] = true + } + for _, ins := range instances { + if ins.CurrentRevision() != "" { + live[ins.CurrentRevision()] = true + } + if ins.UpdateRevision() != "" { + live[ins.UpdateRevision()] = true + } + } + + // collect live revisions and historic revisions + hist := make([]*appsv1.ControllerRevision, 0, len(revisions)) + for i := range revisions { + if !live[revisions[i].Name] { + hist = append(hist, revisions[i]) + } + } + + historyLimit := defaultRevisionHistoryLimit + if limit != nil { + historyLimit = int(*limit) + } + historyLen := len(hist) + if historyLen <= historyLimit { + return nil + } + + // delete any non-live history to maintain the revision limit. + hist = hist[:(historyLen - historyLimit)] + for i := 0; i < len(hist); i++ { + if err := cli.DeleteControllerRevision(hist[i]); err != nil { + return fmt.Errorf("failed to delete controller revision %s: %w", hist[i].Name, err) + } + } + return nil +} diff --git a/pkg/utils/k8s/revision/controller_revision_test.go b/pkg/utils/k8s/revision/controller_revision_test.go new file mode 100644 index 00000000000..cc456af4d7b --- /dev/null +++ b/pkg/utils/k8s/revision/controller_revision_test.go @@ -0,0 +1,193 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package revision
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/utils/ptr"
+
+	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
+	"github.com/pingcap/tidb-operator/pkg/client"
+	"github.com/pingcap/tidb-operator/pkg/utils/fake"
+	"github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history"
+)
+
+// FakeHistoryClient is a fake implementation of Interface that is useful for testing.
+type FakeHistoryClient struct {
+	Revisions  []*appsv1.ControllerRevision
+	CreateFunc func(parent client.Object, revision *appsv1.ControllerRevision, collisionCount *int32) (*appsv1.ControllerRevision, error)
+}
+
+func (f *FakeHistoryClient) CreateControllerRevision(parent client.Object, revision *appsv1.ControllerRevision, collisionCount *int32) (*appsv1.ControllerRevision, error) {
+	if f.CreateFunc != nil {
+		rev, err := f.CreateFunc(parent, revision, collisionCount)
+		if err == nil {
+			f.Revisions = append(f.Revisions, rev)
+		}
+		return rev, err
+	}
+	return nil, nil
+}
+
+func (f *FakeHistoryClient) DeleteControllerRevision(revision *appsv1.ControllerRevision) error {
+	for i, r := range f.Revisions {
+		if r.Name == revision.Name {
+			f.Revisions = append(f.Revisions[:i], f.Revisions[i+1:]...)
+ return nil + } + } + return nil +} + +func (f *FakeHistoryClient) ListControllerRevisions(_ client.Object, _ labels.Selector) ([]*appsv1.ControllerRevision, error) { + return f.Revisions, nil +} + +func (f *FakeHistoryClient) UpdateControllerRevision(revision *appsv1.ControllerRevision, newRevision int64) (*appsv1.ControllerRevision, error) { + for _, r := range f.Revisions { + if r.Name == revision.Name { + r.Revision = newRevision + return r, nil + } + } + return nil, nil +} + +var _ history.Interface = &FakeHistoryClient{} + +func withRevision(currentRev, updateRev string) fake.ChangeFunc[v1alpha1.TiDB, *v1alpha1.TiDB] { + return func(tidb *v1alpha1.TiDB) *v1alpha1.TiDB { + tidb.Status.CurrentRevision = currentRev + tidb.Status.UpdateRevision = updateRev + return tidb + } +} + +func TestTruncateHistory(t *testing.T) { + tests := []struct { + name string + instances []*v1alpha1.TiDB + revisions []*appsv1.ControllerRevision + current *appsv1.ControllerRevision + update *appsv1.ControllerRevision + limit *int32 + expected []*appsv1.ControllerRevision + }{ + { + name: "no revisions to truncate", + instances: []*v1alpha1.TiDB{}, + revisions: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev1"), + fake.FakeObj[appsv1.ControllerRevision]("rev2"), + }, + current: fake.FakeObj[appsv1.ControllerRevision]("rev1"), + update: fake.FakeObj[appsv1.ControllerRevision]("rev2"), + limit: ptr.To[int32](2), + expected: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev1"), + fake.FakeObj[appsv1.ControllerRevision]("rev2"), + }, + }, + { + name: "truncate one revision", + instances: []*v1alpha1.TiDB{}, + revisions: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev1"), + fake.FakeObj[appsv1.ControllerRevision]("rev2"), + fake.FakeObj[appsv1.ControllerRevision]("rev3"), + fake.FakeObj[appsv1.ControllerRevision]("rev4"), + }, + current: fake.FakeObj[appsv1.ControllerRevision]("rev4"), + update: 
fake.FakeObj[appsv1.ControllerRevision]("rev4"), + limit: ptr.To[int32](2), + expected: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev2"), + fake.FakeObj[appsv1.ControllerRevision]("rev3"), + fake.FakeObj[appsv1.ControllerRevision]("rev4"), + }, + }, + { + name: "truncate multiple revisions", + instances: []*v1alpha1.TiDB{}, + revisions: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev1"), + fake.FakeObj[appsv1.ControllerRevision]("rev2"), + fake.FakeObj[appsv1.ControllerRevision]("rev3"), + fake.FakeObj[appsv1.ControllerRevision]("rev4"), + }, + current: fake.FakeObj[appsv1.ControllerRevision]("rev3"), + update: fake.FakeObj[appsv1.ControllerRevision]("rev4"), + limit: ptr.To[int32](0), + expected: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev3"), + fake.FakeObj[appsv1.ControllerRevision]("rev4"), + }, + }, + { + name: "complex case", + instances: []*v1alpha1.TiDB{ + fake.FakeObj("tidb1", withRevision("rev4", "rev4")), + fake.FakeObj("tidb2", withRevision("rev3", "rev4")), + fake.FakeObj("tidb3", withRevision("rev3", "rev5")), + fake.FakeObj("tidb4", withRevision("rev4", "rev5")), + }, + revisions: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev1"), + fake.FakeObj[appsv1.ControllerRevision]("rev2"), + fake.FakeObj[appsv1.ControllerRevision]("rev3"), + fake.FakeObj[appsv1.ControllerRevision]("rev4"), + fake.FakeObj[appsv1.ControllerRevision]("rev5"), + fake.FakeObj[appsv1.ControllerRevision]("rev6"), + }, + current: fake.FakeObj[appsv1.ControllerRevision]("rev4"), + update: fake.FakeObj[appsv1.ControllerRevision]("rev5"), + limit: ptr.To[int32](1), + expected: []*appsv1.ControllerRevision{ + fake.FakeObj[appsv1.ControllerRevision]("rev3"), + fake.FakeObj[appsv1.ControllerRevision]("rev4"), + fake.FakeObj[appsv1.ControllerRevision]("rev5"), + fake.FakeObj[appsv1.ControllerRevision]("rev6"), + }, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + cli := &FakeHistoryClient{ + Revisions: tt.revisions, + } + err := TruncateHistory(cli, tt.instances, tt.revisions, tt.current, tt.update, tt.limit) + require.NoError(t, err) + + remainingRevisions, err := cli.ListControllerRevisions(nil, labels.Everything()) + require.NoError(t, err) + assert.Equal(t, len(tt.expected), len(remainingRevisions)) + m := make(map[string]struct{}, len(remainingRevisions)) + for _, r := range remainingRevisions { + m[r.Name] = struct{}{} + } + for _, r := range tt.expected { + if _, ok := m[r.Name]; !ok { + t.Errorf("expected revision %s not found", r.Name) + } + } + }) + } +} diff --git a/pkg/utils/kubefeat/feat.go b/pkg/utils/kubefeat/feat.go new file mode 100644 index 00000000000..a56bc50cbb0 --- /dev/null +++ b/pkg/utils/kubefeat/feat.go @@ -0,0 +1,19 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubefeat + +type Feature string + +var VolumeAttributesClass Feature = "VolumeAttributesClass" diff --git a/pkg/utils/kubefeat/gates.go b/pkg/utils/kubefeat/gates.go new file mode 100644 index 00000000000..8dabd724989 --- /dev/null +++ b/pkg/utils/kubefeat/gates.go @@ -0,0 +1,149 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubefeat + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/prometheus/common/expfmt" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +var FeatureGates Gates + +type Gates interface { + Stage(key Feature) StagedFeature +} + +type StagedFeature interface { + Enabled(Stage) bool +} + +type featureGates struct { + feats map[Feature]spec +} + +type Stage int + +const ( + INVAILD Stage = 0 + + ALPHA Stage = 1 << iota + BETA + STABLE + ANY = ALPHA | BETA | STABLE +) + +func stageFromString(s string) Stage { + switch s { + case "ALPHA": + return ALPHA + case "BETA": + return BETA + case "": + return STABLE + } + + return INVAILD +} + +type spec struct { + enabled bool + stage Stage +} + +func (s spec) Enabled(stage Stage) bool { + if s.stage&stage == 0 { + return false + } + return s.enabled +} + +func (g *featureGates) Stage(key Feature) StagedFeature { + return g.feats[key] +} + +func MustInitFeatureGates(cfg *rest.Config) { + gates, err := NewFeatureGates(cfg) + if err != nil { + // TODO: use a common panic util to panic + panic(err) + } + + fmt.Println("init feature gates") + + FeatureGates = gates +} + +func NewFeatureGates(cfg *rest.Config) (Gates, error) { + clientset, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, fmt.Errorf("cannot new client: %w", err) + } + + //nolint:mnd // refactor to a constant if needed + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + metricsPath := "/metrics" + resp, err := 
clientset.RESTClient().Get().RequestURI(metricsPath).DoRaw(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get metrics: %w", err) + } + + // Parse the metrics using Prometheus expfmt + parser := expfmt.TextParser{} + metricFamilies, err := parser.TextToMetricFamilies(bytes.NewReader(resp)) + if err != nil { + return nil, fmt.Errorf("failed to parse metrics: %w", err) + } + + featureGatesMetric, ok := metricFamilies["kubernetes_feature_enabled"] + if !ok { + // this metric is supported after v1.26 + // TODO: fix it if we hope to support the version before v1.26 + return nil, fmt.Errorf("no kubernetes_feature_enabled metric") + } + + gates := &featureGates{ + feats: map[Feature]spec{}, + } + + for _, metric := range featureGatesMetric.GetMetric() { + if metric.GetGauge().GetValue() == 1 { + feat := spec{ + enabled: true, + } + name := "" + for _, label := range metric.GetLabel() { + switch label.GetName() { + case "name": + name = label.GetValue() + case "stage": + feat.stage = stageFromString(label.GetValue()) + } + } + if name != "" { + gates.feats[Feature(name)] = feat + } + } + } + + return gates, nil +} diff --git a/pkg/utils/map/map.go b/pkg/utils/map/map.go new file mode 100644 index 00000000000..74ecccd6bc4 --- /dev/null +++ b/pkg/utils/map/map.go @@ -0,0 +1,65 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package maputil + +// Merge merges all maps to a new one. 
+func Merge[K comparable, V any](maps ...map[K]V) map[K]V { + return MergeTo(nil, maps...) +} + +// MergeTo merges all maps to the original one. +func MergeTo[K comparable, V any](original map[K]V, maps ...map[K]V) map[K]V { + if original == nil { + original = make(map[K]V) + } + for _, m := range maps { + for k, v := range m { + original[k] = v + } + } + return original +} + +// Copy returns a copy of the given map. +func Copy[K comparable, V any](originalMap map[K]V) map[K]V { + if originalMap == nil { + return nil + } + // Create a new map to store the copied key-value pairs with the same capacity as the original map + copiedMap := make(map[K]V, len(originalMap)) + + // Iterate over the original map's key-value pairs + for key, value := range originalMap { + // Add the key-value pair into the new map, since the value is not a reference type, it is safe to copy directly + copiedMap[key] = value + } + + // Return the deep copied map + return copiedMap +} + +// AreEqual checks if two maps are equal. +func AreEqual[K comparable](map1, map2 map[K]string) bool { + if len(map1) != len(map2) { + return false + } + for k, v1 := range map1 { + v2, ok := map2[k] + if !ok || v1 != v2 { + return false + } + } + return true +} diff --git a/pkg/utils/map/map_test.go b/pkg/utils/map/map_test.go new file mode 100644 index 00000000000..0cfaddf84f1 --- /dev/null +++ b/pkg/utils/map/map_test.go @@ -0,0 +1,103 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package maputil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMerge(t *testing.T) { + cases := []struct { + desc string + maps []map[string]string + expected map[string]string + }{ + { + desc: "nil", + maps: nil, + expected: map[string]string{}, + }, + { + desc: "overwrite", + maps: []map[string]string{ + { + "aa": "aa", + }, + { + "bb": "bb", + }, + { + "aa": "cc", + }, + }, + expected: map[string]string{ + "aa": "cc", + "bb": "bb", + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + res := Merge(c.maps...) + assert.Equal(tt, c.expected, res) + }) + } +} + +func TestCopy(t *testing.T) { + cases := []struct { + desc string + original map[string]string + expected map[string]string + }{ + { + desc: "nil", + original: nil, + expected: nil, + }, + { + desc: "empty", + original: map[string]string{}, + expected: map[string]string{}, + }, + { + desc: "normal", + original: map[string]string{ + "aa": "aa", + "bb": "bb", + }, + expected: map[string]string{ + "aa": "aa", + "bb": "bb", + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + res := Copy(c.original) + assert.Equal(tt, c.expected, res) + }) + } +} diff --git a/pkg/utils/random/random.go b/pkg/utils/random/random.go new file mode 100644 index 00000000000..1df176a643e --- /dev/null +++ b/pkg/utils/random/random.go @@ -0,0 +1,57 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package random is copied from +// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-go +package random + +import ( + "math/rand" + "strings" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz1234567890" + +const ( + // 6 bits to represent a letter index. + letterIdxBits = 6 + // All 1-bits, as many as letterIdxBits. + letterIdxMask = 1<= 0; { + if remain == 0 { + //nolint:gosec // no need to use cryptographically secure random number + cache, remain = rand.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + if err := sb.WriteByte(letterBytes[idx]); err != nil { + panic(err) + } + i-- + } + cache >>= letterIdxBits + remain-- + } + + return sb.String() +} diff --git a/pkg/utils/random/random_test.go b/pkg/utils/random/random_test.go new file mode 100644 index 00000000000..67a3f264370 --- /dev/null +++ b/pkg/utils/random/random_test.go @@ -0,0 +1,34 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package random + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRandom(t *testing.T) { + re := regexp.MustCompile("^[" + letterBytes + "]{6}$") + m := map[string]struct{}{} + for i := 0; i < 1000; i++ { + s := Random(6) + assert.Regexp(t, re, s) + _, ok := m[s] + assert.False(t, ok) + m[s] = struct{}{} + } +} diff --git a/pkg/utils/task/mock_generated.go b/pkg/utils/task/mock_generated.go new file mode 100644 index 00000000000..0dbbdb9f113 --- /dev/null +++ b/pkg/utils/task/mock_generated.go @@ -0,0 +1,513 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/utils/task/task.go +// +// Generated by this command: +// +// mockgen --source pkg/utils/task/task.go --destination pkg/utils/task/mock_generated.go --package=task +// + +// Package task is a generated GoMock package. +package task + +import ( + reflect "reflect" + time "time" + + gomock "go.uber.org/mock/gomock" + controller_runtime "sigs.k8s.io/controller-runtime" +) + +// MockContext is a mock of Context interface. +type MockContext[T any] struct { + ctrl *gomock.Controller + recorder *MockContextMockRecorder[T] +} + +// MockContextMockRecorder is the mock recorder for MockContext. +type MockContextMockRecorder[T any] struct { + mock *MockContext[T] +} + +// NewMockContext creates a new mock instance. 
+func NewMockContext[T any](ctrl *gomock.Controller) *MockContext[T] { + mock := &MockContext[T]{ctrl: ctrl} + mock.recorder = &MockContextMockRecorder[T]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockContext[T]) EXPECT() *MockContextMockRecorder[T] { + return m.recorder +} + +// Deadline mocks base method. +func (m *MockContext[T]) Deadline() (time.Time, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Deadline") + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// Deadline indicates an expected call of Deadline. +func (mr *MockContextMockRecorder[T]) Deadline() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Deadline", reflect.TypeOf((*MockContext[T])(nil).Deadline)) +} + +// Done mocks base method. +func (m *MockContext[T]) Done() <-chan struct{} { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Done") + ret0, _ := ret[0].(<-chan struct{}) + return ret0 +} + +// Done indicates an expected call of Done. +func (mr *MockContextMockRecorder[T]) Done() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockContext[T])(nil).Done)) +} + +// Err mocks base method. +func (m *MockContext[T]) Err() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 +} + +// Err indicates an expected call of Err. +func (mr *MockContextMockRecorder[T]) Err() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockContext[T])(nil).Err)) +} + +// Self mocks base method. +func (m *MockContext[T]) Self() *T { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Self") + ret0, _ := ret[0].(*T) + return ret0 +} + +// Self indicates an expected call of Self. 
+func (mr *MockContextMockRecorder[T]) Self() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Self", reflect.TypeOf((*MockContext[T])(nil).Self)) +} + +// Value mocks base method. +func (m *MockContext[T]) Value(key any) any { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Value", key) + ret0, _ := ret[0].(any) + return ret0 +} + +// Value indicates an expected call of Value. +func (mr *MockContextMockRecorder[T]) Value(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Value", reflect.TypeOf((*MockContext[T])(nil).Value), key) +} + +// MockResult is a mock of Result interface. +type MockResult struct { + ctrl *gomock.Controller + recorder *MockResultMockRecorder +} + +// MockResultMockRecorder is the mock recorder for MockResult. +type MockResultMockRecorder struct { + mock *MockResult +} + +// NewMockResult creates a new mock instance. +func NewMockResult(ctrl *gomock.Controller) *MockResult { + mock := &MockResult{ctrl: ctrl} + mock.recorder = &MockResultMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockResult) EXPECT() *MockResultMockRecorder { + return m.recorder +} + +// IsFailed mocks base method. +func (m *MockResult) IsFailed() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsFailed") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsFailed indicates an expected call of IsFailed. +func (mr *MockResultMockRecorder) IsFailed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsFailed", reflect.TypeOf((*MockResult)(nil).IsFailed)) +} + +// Message mocks base method. +func (m *MockResult) Message() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Message") + ret0, _ := ret[0].(string) + return ret0 +} + +// Message indicates an expected call of Message. 
+func (mr *MockResultMockRecorder) Message() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Message", reflect.TypeOf((*MockResult)(nil).Message)) +} + +// RequeueAfter mocks base method. +func (m *MockResult) RequeueAfter() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RequeueAfter") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// RequeueAfter indicates an expected call of RequeueAfter. +func (mr *MockResultMockRecorder) RequeueAfter() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequeueAfter", reflect.TypeOf((*MockResult)(nil).RequeueAfter)) +} + +// ShouldContinue mocks base method. +func (m *MockResult) ShouldContinue() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldContinue") + ret0, _ := ret[0].(bool) + return ret0 +} + +// ShouldContinue indicates an expected call of ShouldContinue. +func (mr *MockResultMockRecorder) ShouldContinue() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldContinue", reflect.TypeOf((*MockResult)(nil).ShouldContinue)) +} + +// MockWithMessage is a mock of WithMessage interface. +type MockWithMessage struct { + ctrl *gomock.Controller + recorder *MockWithMessageMockRecorder +} + +// MockWithMessageMockRecorder is the mock recorder for MockWithMessage. +type MockWithMessageMockRecorder struct { + mock *MockWithMessage +} + +// NewMockWithMessage creates a new mock instance. +func NewMockWithMessage(ctrl *gomock.Controller) *MockWithMessage { + mock := &MockWithMessage{ctrl: ctrl} + mock.recorder = &MockWithMessageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWithMessage) EXPECT() *MockWithMessageMockRecorder { + return m.recorder +} + +// With mocks base method. 
+func (m *MockWithMessage) With(format string, args ...any) Result { + m.ctrl.T.Helper() + varargs := []any{format} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "With", varargs...) + ret0, _ := ret[0].(Result) + return ret0 +} + +// With indicates an expected call of With. +func (mr *MockWithMessageMockRecorder) With(format any, args ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{format}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "With", reflect.TypeOf((*MockWithMessage)(nil).With), varargs...) +} + +// MockBreakableResult is a mock of BreakableResult interface. +type MockBreakableResult struct { + ctrl *gomock.Controller + recorder *MockBreakableResultMockRecorder +} + +// MockBreakableResultMockRecorder is the mock recorder for MockBreakableResult. +type MockBreakableResultMockRecorder struct { + mock *MockBreakableResult +} + +// NewMockBreakableResult creates a new mock instance. +func NewMockBreakableResult(ctrl *gomock.Controller) *MockBreakableResult { + mock := &MockBreakableResult{ctrl: ctrl} + mock.recorder = &MockBreakableResultMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBreakableResult) EXPECT() *MockBreakableResultMockRecorder { + return m.recorder +} + +// Break mocks base method. +func (m *MockBreakableResult) Break() WithMessage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Break") + ret0, _ := ret[0].(WithMessage) + return ret0 +} + +// Break indicates an expected call of Break. +func (mr *MockBreakableResultMockRecorder) Break() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Break", reflect.TypeOf((*MockBreakableResult)(nil).Break)) +} + +// With mocks base method. 
+func (m *MockBreakableResult) With(format string, args ...any) Result { + m.ctrl.T.Helper() + varargs := []any{format} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "With", varargs...) + ret0, _ := ret[0].(Result) + return ret0 +} + +// With indicates an expected call of With. +func (mr *MockBreakableResultMockRecorder) With(format any, args ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{format}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "With", reflect.TypeOf((*MockBreakableResult)(nil).With), varargs...) +} + +// MockContinuableResult is a mock of ContinuableResult interface. +type MockContinuableResult struct { + ctrl *gomock.Controller + recorder *MockContinuableResultMockRecorder +} + +// MockContinuableResultMockRecorder is the mock recorder for MockContinuableResult. +type MockContinuableResultMockRecorder struct { + mock *MockContinuableResult +} + +// NewMockContinuableResult creates a new mock instance. +func NewMockContinuableResult(ctrl *gomock.Controller) *MockContinuableResult { + mock := &MockContinuableResult{ctrl: ctrl} + mock.recorder = &MockContinuableResultMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockContinuableResult) EXPECT() *MockContinuableResultMockRecorder { + return m.recorder +} + +// Continue mocks base method. +func (m *MockContinuableResult) Continue() WithMessage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Continue") + ret0, _ := ret[0].(WithMessage) + return ret0 +} + +// Continue indicates an expected call of Continue. +func (mr *MockContinuableResultMockRecorder) Continue() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Continue", reflect.TypeOf((*MockContinuableResult)(nil).Continue)) +} + +// With mocks base method. 
+func (m *MockContinuableResult) With(format string, args ...any) Result { + m.ctrl.T.Helper() + varargs := []any{format} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "With", varargs...) + ret0, _ := ret[0].(Result) + return ret0 +} + +// With indicates an expected call of With. +func (mr *MockContinuableResultMockRecorder) With(format any, args ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{format}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "With", reflect.TypeOf((*MockContinuableResult)(nil).With), varargs...) +} + +// MockTaskRunner is a mock of TaskRunner interface. +type MockTaskRunner[T any] struct { + ctrl *gomock.Controller + recorder *MockTaskRunnerMockRecorder[T] +} + +// MockTaskRunnerMockRecorder is the mock recorder for MockTaskRunner. +type MockTaskRunnerMockRecorder[T any] struct { + mock *MockTaskRunner[T] +} + +// NewMockTaskRunner creates a new mock instance. +func NewMockTaskRunner[T any](ctrl *gomock.Controller) *MockTaskRunner[T] { + mock := &MockTaskRunner[T]{ctrl: ctrl} + mock.recorder = &MockTaskRunnerMockRecorder[T]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTaskRunner[T]) EXPECT() *MockTaskRunnerMockRecorder[T] { + return m.recorder +} + +// AddTasks mocks base method. +func (m *MockTaskRunner[T]) AddTasks(tasks ...Task[T]) { + m.ctrl.T.Helper() + varargs := []any{} + for _, a := range tasks { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "AddTasks", varargs...) +} + +// AddTasks indicates an expected call of AddTasks. +func (mr *MockTaskRunnerMockRecorder[T]) AddTasks(tasks ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTasks", reflect.TypeOf((*MockTaskRunner[T])(nil).AddTasks), tasks...) +} + +// Run mocks base method. 
+func (m *MockTaskRunner[T]) Run(ctx Context[T]) (controller_runtime.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Run", ctx) + ret0, _ := ret[0].(controller_runtime.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Run indicates an expected call of Run. +func (mr *MockTaskRunnerMockRecorder[T]) Run(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockTaskRunner[T])(nil).Run), ctx) +} + +// MockTask is a mock of Task interface. +type MockTask[T any] struct { + ctrl *gomock.Controller + recorder *MockTaskMockRecorder[T] +} + +// MockTaskMockRecorder is the mock recorder for MockTask. +type MockTaskMockRecorder[T any] struct { + mock *MockTask[T] +} + +// NewMockTask creates a new mock instance. +func NewMockTask[T any](ctrl *gomock.Controller) *MockTask[T] { + mock := &MockTask[T]{ctrl: ctrl} + mock.recorder = &MockTaskMockRecorder[T]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTask[T]) EXPECT() *MockTaskMockRecorder[T] { + return m.recorder +} + +// Name mocks base method. +func (m *MockTask[T]) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. +func (mr *MockTaskMockRecorder[T]) Name() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockTask[T])(nil).Name)) +} + +// Sync mocks base method. +func (m *MockTask[T]) Sync(ctx Context[T]) Result { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Sync", ctx) + ret0, _ := ret[0].(Result) + return ret0 +} + +// Sync indicates an expected call of Sync. 
+func (mr *MockTaskMockRecorder[T]) Sync(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sync", reflect.TypeOf((*MockTask[T])(nil).Sync), ctx) +} + +// MockTaskReporter is a mock of TaskReporter interface. +type MockTaskReporter struct { + ctrl *gomock.Controller + recorder *MockTaskReporterMockRecorder +} + +// MockTaskReporterMockRecorder is the mock recorder for MockTaskReporter. +type MockTaskReporterMockRecorder struct { + mock *MockTaskReporter +} + +// NewMockTaskReporter creates a new mock instance. +func NewMockTaskReporter(ctrl *gomock.Controller) *MockTaskReporter { + mock := &MockTaskReporter{ctrl: ctrl} + mock.recorder = &MockTaskReporterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTaskReporter) EXPECT() *MockTaskReporterMockRecorder { + return m.recorder +} + +// AddResult mocks base method. +func (m *MockTaskReporter) AddResult(name, status, msg string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddResult", name, status, msg) +} + +// AddResult indicates an expected call of AddResult. +func (mr *MockTaskReporterMockRecorder) AddResult(name, status, msg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddResult", reflect.TypeOf((*MockTaskReporter)(nil).AddResult), name, status, msg) +} + +// Summary mocks base method. +func (m *MockTaskReporter) Summary() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Summary") + ret0, _ := ret[0].(string) + return ret0 +} + +// Summary indicates an expected call of Summary. 
+func (mr *MockTaskReporterMockRecorder) Summary() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Summary", reflect.TypeOf((*MockTaskReporter)(nil).Summary)) +} diff --git a/pkg/utils/task/task.go b/pkg/utils/task/task.go new file mode 100644 index 00000000000..daffdae0336 --- /dev/null +++ b/pkg/utils/task/task.go @@ -0,0 +1,267 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package task + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/olekukonko/tablewriter" + ctrl "sigs.k8s.io/controller-runtime" +) + +// Context is a wrapper of any struct which can return its self +// It's defined to avoid calling ctx.Value() +type Context[T any] interface { + context.Context + Self() *T +} + +// Result defines the result of a task +type Result interface { + IsFailed() bool + ShouldContinue() bool + RequeueAfter() time.Duration + Message() string +} + +// WithMessage defines an interface to set message into task result +type WithMessage interface { + With(format string, args ...any) Result +} + +// BreakableResult defines a result which can stop task execution +type BreakableResult interface { + WithMessage + // Break will stop task execution + Break() WithMessage +} + +// ContinuableResult defines a result which can continue task execution +type ContinuableResult interface { + WithMessage + // Continue ignores errs of the current task and will continue task 
execution + Continue() WithMessage +} + +// TaskRunner is an executor to run a series of tasks sequentially +type TaskRunner[T any] interface { + AddTasks(tasks ...Task[T]) + Run(ctx Context[T]) (ctrl.Result, error) +} + +// Task defines a task can be executed by TaskRunner +type Task[T any] interface { + Name() string + Sync(ctx Context[T]) Result +} + +type taskResult struct { + isFailed bool + shouldContinue bool + requeueAfter time.Duration + message string +} + +func (r *taskResult) IsFailed() bool { + return r.isFailed +} + +func (r *taskResult) ShouldContinue() bool { + return r.shouldContinue +} + +func (r *taskResult) RequeueAfter() time.Duration { + return r.requeueAfter +} + +func (r *taskResult) Message() string { + if r.requeueAfter > 0 { + return fmt.Sprintf("%s(requeue after %s)", r.message, r.requeueAfter) + } + return r.message +} + +func (r *taskResult) With(format string, args ...any) Result { + r.message = fmt.Sprintf(format, args...) + return r +} + +func (r *taskResult) Break() WithMessage { + r.shouldContinue = false + return r +} + +func (r *taskResult) Continue() WithMessage { + r.shouldContinue = true + return r +} + +// Complete means complete the current task and run the next one +func Complete() BreakableResult { + return &taskResult{ + isFailed: false, + shouldContinue: true, + } +} + +// Fail means fail the current task and skip all next tasks +func Fail() ContinuableResult { + return &taskResult{ + isFailed: true, + shouldContinue: false, + } +} + +// Retry means continue all next tasks and retry after dur +func Retry(dur time.Duration) BreakableResult { + return &taskResult{ + isFailed: false, + shouldContinue: true, + requeueAfter: dur, + } +} + +// TaskReporter appends results of tasks and output a summary +type TaskReporter interface { + AddResult(name, status, msg string) + Summary() string +} + +type tableReporter struct { + table *tablewriter.Table + builder *strings.Builder +} + +type dummyReporter struct{} + +func 
(*dummyReporter) AddResult(_, _, _ string) {} + +func (*dummyReporter) Summary() string { + return "" +} + +func NewTableTaskReporter() TaskReporter { + builder := strings.Builder{} + table := tablewriter.NewWriter(&builder) + table.SetHeader([]string{"Name", "Status", "Message"}) + return &tableReporter{ + table: table, + builder: &builder, + } +} + +func (t *tableReporter) AddResult(name, status, msg string) { + t.table.Append([]string{name, status, msg}) +} + +func (t *tableReporter) Summary() string { + t.table.Render() + return t.builder.String() +} + +type taskRunner[T any] struct { + reporter TaskReporter + tasks []Task[T] +} + +// There are five status of tasks +// - Complete: means this task is complete and all is expected +// - Failed: means an err occurred +// - Retry: means this task need to wait an interval and retry +// - NotRun: means this task is not run +// - Skip: means this task is skipped +// And five results of reconiling +// 1. All tasks are complete, the key will not be re-added +// 2. Some tasks are failed, return err and wait with backoff +// 3. Some tasks need retry, requeue after an interval +// 4. Some tasks are not run, return err and wait with backoff +// 5. Particular tasks are complete and left are skipped, the key will not be re-added +func NewTaskRunner[T any](reporter TaskReporter) TaskRunner[T] { + if reporter == nil { + reporter = &dummyReporter{} + } + return &taskRunner[T]{ + reporter: reporter, + } +} + +func (r *taskRunner[T]) AddTasks(ts ...Task[T]) { + r.tasks = append(r.tasks, ts...) 
+} + +func (r *taskRunner[T]) Run(ctx Context[T]) (ctrl.Result, error) { + shouldContinue := true + minRequeueAfter := time.Duration(0) + failedTasks := []string{} + for _, t := range r.tasks { + if !shouldContinue { + if len(failedTasks) != 0 { + // write unknown info + r.reporter.AddResult(t.Name(), "NotRun", "") + } else { + r.reporter.AddResult(t.Name(), "Skip", "") + } + continue + } + + res := t.Sync(ctx) + + if res == nil { + panic("please set result with message for " + t.Name()) + } + + if !res.ShouldContinue() { + shouldContinue = false + } + + if res.IsFailed() { + // write fail info + r.reporter.AddResult(t.Name(), "Fail", res.Message()) + failedTasks = append(failedTasks, t.Name()) + continue + } + + dur := res.RequeueAfter() + if dur > 0 { + r.reporter.AddResult(t.Name(), "Retry", res.Message()) + + if minRequeueAfter == 0 { + minRequeueAfter = dur + } else if dur > 0 && dur < minRequeueAfter { + minRequeueAfter = dur + } + } else { + // write complete info + r.reporter.AddResult(t.Name(), "Complete", res.Message()) + } + } + + // some tasks are failed + if len(failedTasks) != 0 { + return ctrl.Result{}, fmt.Errorf("some tasks are failed: %v", failedTasks) + } + + if minRequeueAfter > 0 { + return ctrl.Result{ + Requeue: true, + RequeueAfter: minRequeueAfter, + }, nil + } + + return ctrl.Result{}, nil +} diff --git a/pkg/utils/task/task_test.go b/pkg/utils/task/task_test.go new file mode 100644 index 00000000000..f9c35823beb --- /dev/null +++ b/pkg/utils/task/task_test.go @@ -0,0 +1,268 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package task + +import ( + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + gomock "go.uber.org/mock/gomock" + ctrl "sigs.k8s.io/controller-runtime" +) + +type fakeTask[T any] struct { + name string + res Result +} + +func (t *fakeTask[T]) Name() string { + return t.name +} + +func (t *fakeTask[T]) Sync(_ Context[T]) Result { + return t.res +} + +func NewFakeTask[T any](name string, res Result) Task[T] { + return &fakeTask[T]{ + name: name, + res: res, + } +} + +type task struct { + res WithMessage + status string +} + +func TestTaskRunner(t *testing.T) { + mc := gomock.NewController(t) + + cases := []struct { + desc string + tasks []task + hasErr bool + res ctrl.Result + }{ + { + desc: "empty tasks", + hasErr: false, + }, + { + desc: "two complete tasks", + tasks: []task{ + { + res: Complete(), + status: "Complete", + }, + { + res: Complete(), + status: "Complete", + }, + }, + hasErr: false, + }, + { + desc: "a complete but break task", + tasks: []task{ + { + res: Complete().Break(), + status: "Complete", + }, + { + res: Complete(), + status: "Skip", + }, + }, + hasErr: false, + }, + { + desc: "a fail task", + tasks: []task{ + { + res: Fail(), + status: "Fail", + }, + { + res: Complete(), + status: "NotRun", + }, + }, + hasErr: true, + res: ctrl.Result{}, + }, + { + desc: "a fail but continue task", + tasks: []task{ + { + res: Fail().Continue(), + status: "Fail", + }, + { + res: Complete(), + status: "Complete", + }, + }, + hasErr: true, + }, + { + desc: "a retry task", + tasks: 
[]task{ + { + res: Retry(time.Second), + status: "Retry", + }, + { + res: Complete(), + status: "Complete", + }, + }, + hasErr: false, + res: ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, + }, + { + desc: "many retry tasks", + tasks: []task{ + { + res: Retry(3 * time.Second), + status: "Retry", + }, + { + res: Retry(2 * time.Second), + status: "Retry", + }, + { + res: Retry(1 * time.Second), + status: "Retry", + }, + }, + hasErr: false, + res: ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, + }, + { + desc: "retry + failed tasks", + tasks: []task{ + { + res: Retry(time.Second), + status: "Retry", + }, + { + res: Fail(), + status: "Fail", + }, + }, + hasErr: true, + }, + { + desc: "retry break + failed tasks", + tasks: []task{ + { + res: Retry(time.Second).Break(), + status: "Retry", + }, + { + res: Fail(), + status: "Skip", + }, + }, + hasErr: false, + res: ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, + }, + { + desc: "retry + complete tasks", + tasks: []task{ + { + res: Retry(time.Second), + status: "Retry", + }, + { + res: Complete(), + status: "Complete", + }, + }, + hasErr: false, + res: ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, + }, + { + desc: "retry break + complete tasks", + tasks: []task{ + { + res: Retry(time.Second).Break(), + status: "Retry", + }, + { + res: Complete(), + status: "Skip", + }, + }, + hasErr: false, + res: ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second, + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + r := NewMockTaskReporter(mc) + tr := NewTaskRunner[struct{}](r) + + for i, task := range c.tasks { + name := strconv.Itoa(i) + r.EXPECT().AddResult( + name, + task.status, + // ignore msg + gomock.Any(), + ) + tr.AddTasks(NewFakeTask[struct{}](name, task.res.With(""))) + } + + res, err := tr.Run(nil) + if c.hasErr { + assert.Error(tt, err, c.desc) + } else { + require.NoError(tt, err, 
c.desc) + assert.Equal(tt, c.res, res, c.desc) + } + }) + } + + tr := NewTaskRunner[struct{}](nil) + tr.AddTasks(NewFakeTask[struct{}]("panic", nil)) + assert.Panics(t, func() { + _, err := tr.Run(nil) + assert.NoError(t, err) + }) +} diff --git a/pkg/utils/task/v2/result.go b/pkg/utils/task/v2/result.go new file mode 100644 index 00000000000..cf43ac31273 --- /dev/null +++ b/pkg/utils/task/v2/result.go @@ -0,0 +1,202 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package task + +import ( + "fmt" + "strings" + "time" +) + +type Status int + +const ( + // task is complete and will not be requeue + SComplete Status = iota + // task is unexpectedly failed, runner will be interrupted + SFail + // some preconditions are not met, wait update events to trigger next run + SWait + // retry tasks after specified duration + SRetry +) + +func (s Status) String() string { + switch s { + case SComplete: + return "Complete" + case SFail: + return "Fail" + case SWait: + return "Wait" + case SRetry: + return "Retry" + } + + return "Unknown" +} + +// Result defines the result of a task +type Result interface { + Status() Status + RequeueAfter() time.Duration + Message() string +} + +type NamedResult interface { + Result + Name() string +} + +type AggregateResult interface { + Result + Results() []Result +} + +// WithMessage defines an interface to set message into task result +type WithMessage interface { + With(format string, args ...any) Result +} + +type taskResult struct { + status Status + requeueAfter time.Duration + message string +} + +func (r *taskResult) Status() Status { + return r.status +} + +func (r *taskResult) RequeueAfter() time.Duration { + return r.requeueAfter +} + +func (r *taskResult) Message() string { + if r.requeueAfter > 0 { + return fmt.Sprintf("%s(requeue after %s)", r.message, r.requeueAfter) + } + return r.message +} + +func (r *taskResult) With(format string, args ...any) Result { + r.message = fmt.Sprintf(format, args...) 
+ return r +} + +// Complete means complete the current task and run the next one +func Complete() WithMessage { + return &taskResult{ + status: SComplete, + } +} + +// Fail means fail the current task and skip all next tasks +func Fail() WithMessage { + return &taskResult{ + status: SFail, + } +} + +// Retry means continue all next tasks and retry after dur +func Retry(dur time.Duration) WithMessage { + return &taskResult{ + status: SRetry, + requeueAfter: dur, + } +} + +// Wait means continue all next tasks and wait until next event triggers task run +func Wait() WithMessage { + return &taskResult{ + status: SWait, + } +} + +type namedResult struct { + Result + name string +} + +func AnnotateName(name string, r Result) Result { + if _, ok := r.(AggregateResult); ok { + return r + } + return &namedResult{ + Result: r, + name: name, + } +} + +func (r *namedResult) Name() string { + return r.name +} + +type aggregateResult struct { + rs []Result +} + +func NewAggregateResult(rs ...Result) AggregateResult { + return &aggregateResult{rs: rs} +} + +func (r *aggregateResult) Results() []Result { + return r.rs +} + +func (r *aggregateResult) Status() Status { + needRetry := false + needWait := false + for _, res := range r.rs { + switch res.Status() { + case SFail: + return SFail + case SRetry: + needRetry = true + case SWait: + needWait = true + } + } + + if needRetry { + return SRetry + } + + if needWait { + return SWait + } + + return SComplete +} + +func (r *aggregateResult) RequeueAfter() time.Duration { + var maxDur time.Duration = 0 + for _, res := range r.rs { + if maxDur < res.RequeueAfter() { + maxDur = res.RequeueAfter() + } + } + + return maxDur +} + +func (r *aggregateResult) Message() string { + sb := strings.Builder{} + for _, res := range r.rs { + sb.WriteString(res.Message()) + } + + return sb.String() +} diff --git a/pkg/utils/task/v2/runner.go b/pkg/utils/task/v2/runner.go new file mode 100644 index 00000000000..ca4f866a480 --- /dev/null +++ 
b/pkg/utils/task/v2/runner.go @@ -0,0 +1,69 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package task + +import ( + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" +) + +// TaskRunner is an executor to run a series of tasks sequentially +type TaskRunner[T any] interface { + Run(ctx Context[T]) (ctrl.Result, error) +} + +type taskRunner[T any] struct { + reporter TaskReporter + taskQueue Task[T] +} + +// There are four status of tasks +// - Complete: means this task is complete and all is expected +// - Failed: means an err occurred +// - Retry: means this task need to wait an interval and retry +// - Wait: means this task will wait for next event trigger +// And five results of reconciling +// 1. All tasks are complete, the key will not be re-added +// 2. Some tasks are failed, return err and wait with backoff +// 3. Some tasks need retry, requeue after an interval +// 4. Some tasks are not run, return err and wait with backoff +// 5. 
Particular tasks are complete and left are skipped, the key will not be re-added +func NewTaskRunner[T any](reporter TaskReporter, ts ...Task[T]) TaskRunner[T] { + if reporter == nil { + reporter = &dummyReporter{} + } + return &taskRunner[T]{ + reporter: reporter, + taskQueue: NewTaskQueue(ts...), + } +} + +func (r *taskRunner[T]) Run(ctx Context[T]) (ctrl.Result, error) { + res := r.taskQueue.Sync(ctx) + r.reporter.AddResult(res) + + switch res.Status() { + case SFail: + return ctrl.Result{}, fmt.Errorf("some tasks are failed: %v", res.Message()) + case SRetry: + return ctrl.Result{ + RequeueAfter: res.RequeueAfter(), + }, nil + default: + // SComplete and SWait + return ctrl.Result{}, nil + } +} diff --git a/pkg/utils/task/v2/task.go b/pkg/utils/task/v2/task.go new file mode 100644 index 00000000000..7c5f0752fc8 --- /dev/null +++ b/pkg/utils/task/v2/task.go @@ -0,0 +1,233 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package task + +import ( + "context" + "reflect" + "strings" + + "github.com/olekukonko/tablewriter" +) + +// Context is a wrapper of any struct which can return its self +// It's defined to avoid calling ctx.Value() +type Context[T any] interface { + context.Context + Self() *T +} + +type Task[T any] interface { + Sync(ctx Context[T]) Result +} + +type Condition[T any] interface { + Satisfy(ctx Context[T]) bool +} + +type OptionalTask[T any] interface { + Task[T] + Satisfied(ctx Context[T]) bool +} + +type FinalTask[T any] interface { + Task[T] + IsFinal() bool +} + +type TaskNamer interface { + Name() string +} + +type TaskFunc[T any] func(ctx Context[T]) Result + +func (f TaskFunc[T]) Sync(ctx Context[T]) Result { + return f(ctx) +} + +type namedTask[T any] struct { + Task[T] + name string +} + +func NameTaskFunc[T any](name string, t TaskFunc[T]) Task[T] { + return &namedTask[T]{ + Task: t, + name: name, + } +} + +func (t *namedTask[T]) Name() string { + return t.name +} + +type CondFunc[T any] func(ctx Context[T]) bool + +func (f CondFunc[T]) Satisfy(ctx Context[T]) bool { + return f(ctx) +} + +type optional[T any] struct { + Task[T] + cond Condition[T] +} + +var ( + _ OptionalTask[int] = &optional[int]{} + _ FinalTask[int] = &optional[int]{} +) + +func (t *optional[T]) Satisfied(ctx Context[T]) bool { + return t.cond.Satisfy(ctx) +} + +func (t *optional[T]) IsFinal() bool { + final, ok := t.Task.(FinalTask[T]) + if ok { + return final.IsFinal() + } + return false +} + +func NewOptionalTask[T any](cond Condition[T], ts ...Task[T]) Task[T] { + return &optional[T]{ + Task: NewTaskQueue(ts...), + cond: cond, + } +} + +func NewSwitchTask[T any](cond Condition[T], ts ...Task[T]) Task[T] { + return NewOptionalTask(cond, NewFinalTask(ts...)) +} + +type final[T any] struct { + Task[T] +} + +func (t *final[T]) Satisfied(ctx Context[T]) bool { + optional, ok := t.Task.(OptionalTask[T]) + if ok { + return optional.Satisfied(ctx) + } + return true +} + +func 
(*final[T]) IsFinal() bool { + return true +} + +var ( + _ OptionalTask[int] = &final[int]{} + _ FinalTask[int] = &final[int]{} +) + +func NewFinalTask[T any](ts ...Task[T]) Task[T] { + return &final[T]{Task: NewTaskQueue(ts...)} +} + +type queue[T any] struct { + ts []Task[T] + isFinal bool +} + +func (t *queue[T]) Sync(ctx Context[T]) Result { + rs := []Result{} + for _, tt := range t.ts { + optional, ok := tt.(OptionalTask[T]) + if ok && !optional.Satisfied(ctx) { + continue + } + + r := tt.Sync(ctx) + rs = append(rs, AnnotateName(Name(tt), r)) + if r.Status() == SFail { + break + } + + if final, ok := tt.(FinalTask[T]); ok && final.IsFinal() { + t.isFinal = true + break + } + } + + return NewAggregateResult(rs...) +} + +func (*queue[T]) Satisfied(Context[T]) bool { + return true +} + +func (t *queue[T]) IsFinal() bool { + return t.isFinal +} + +func NewTaskQueue[T any](ts ...Task[T]) Task[T] { + return &queue[T]{ts: ts} +} + +type TaskReporter interface { + AddResult(r Result) + Summary() string +} + +type tableReporter struct { + table *tablewriter.Table + builder *strings.Builder +} + +type dummyReporter struct{} + +func (*dummyReporter) AddResult(Result) {} + +func (*dummyReporter) Summary() string { + return "" +} + +func NewTableTaskReporter() TaskReporter { + builder := strings.Builder{} + table := tablewriter.NewWriter(&builder) + table.SetHeader([]string{"Name", "Status", "Message"}) + return &tableReporter{ + table: table, + builder: &builder, + } +} + +func (t *tableReporter) AddResult(r Result) { + switch underlying := r.(type) { + case AggregateResult: + for _, rr := range underlying.Results() { + t.AddResult(rr) + } + case NamedResult: + t.table.Append([]string{underlying.Name(), r.Status().String(), r.Message()}) + default: + t.table.Append([]string{"", r.Status().String(), r.Message()}) + } +} + +func (t *tableReporter) Summary() string { + t.table.Render() + return t.builder.String() +} + +func Name[T any](t Task[T]) string { + namer, ok := 
t.(TaskNamer) + if ok { + return namer.Name() + } + + return reflect.TypeOf(t).Name() +} diff --git a/pkg/utils/task/v3/result.go b/pkg/utils/task/v3/result.go new file mode 100644 index 00000000000..a19e2435116 --- /dev/null +++ b/pkg/utils/task/v3/result.go @@ -0,0 +1,214 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package task + +import ( + "fmt" + "strings" + "time" +) + +type Status int + +const ( + // task is complete and will not be requeue + SComplete Status = iota + // task is unexpectedly failed, runner will be interrupted + SFail + // some preconditions are not met, wait update events to trigger next run + SWait + // retry tasks after specified duration + SRetry +) + +func (s Status) String() string { + switch s { + case SComplete: + return "Complete" + case SFail: + return "Fail" + case SWait: + return "Wait" + case SRetry: + return "Retry" + } + + return "Unknown" +} + +// Result defines the result of a task +type Result interface { + Status() Status + RequeueAfter() time.Duration + Message() string +} + +type NamedResult interface { + Result + name() string +} + +type AggregateResult interface { + Result + results() []Result +} + +// WithMessage defines an interface to set message into task result +type WithMessage interface { + With(format string, args ...any) Result +} + +type taskResult struct { + status Status + requeueAfter time.Duration + message string +} + +func (r *taskResult) Status() Status 
{ + return r.status +} + +func (r *taskResult) RequeueAfter() time.Duration { + return r.requeueAfter +} + +func (r *taskResult) Message() string { + if r.requeueAfter > 0 { + return fmt.Sprintf("%s(requeue after %s)", r.message, r.requeueAfter) + } + return r.message +} + +func (r *taskResult) With(format string, args ...any) Result { + r.message = fmt.Sprintf(format, args...) + return r +} + +// Complete means complete the current task and run the next one +func Complete() WithMessage { + return &taskResult{ + status: SComplete, + } +} + +// Fail means fail the current task and skip all next tasks +func Fail() WithMessage { + return &taskResult{ + status: SFail, + } +} + +// Retry means continue all next tasks and retry after dur +func Retry(dur time.Duration) WithMessage { + return &taskResult{ + status: SRetry, + requeueAfter: dur, + } +} + +// Wait means continue all next tasks and wait until next event triggers task run +func Wait() WithMessage { + return &taskResult{ + status: SWait, + } +} + +type namedResult struct { + Result + n string +} + +func nameResult(name string, r Result) Result { + return &namedResult{ + Result: r, + n: name, + } +} + +func (r *namedResult) name() string { + return r.n +} + +type aggregateResult struct { + rs []Result +} + +func newAggregate(rs ...Result) AggregateResult { + var nrs []Result + for _, r := range rs { + ar, ok := r.(AggregateResult) + if ok { + nrs = append(nrs, ar.results()...) 
+ } else { + nrs = append(nrs, r) + } + } + return &aggregateResult{rs: nrs} +} + +func (r *aggregateResult) results() []Result { + return r.rs +} + +func (r *aggregateResult) Status() Status { + needRetry := false + needWait := false + for _, res := range r.rs { + switch res.Status() { + case SFail: + return SFail + case SRetry: + needRetry = true + case SWait: + needWait = true + } + } + + if needRetry { + return SRetry + } + + if needWait { + return SWait + } + + return SComplete +} + +func (r *aggregateResult) RequeueAfter() time.Duration { + var maxDur time.Duration = 0 + for _, res := range r.rs { + if maxDur < res.RequeueAfter() { + maxDur = res.RequeueAfter() + } + } + + return maxDur +} + +func (r *aggregateResult) Message() string { + sb := strings.Builder{} + for _, res := range r.rs { + named, ok := res.(NamedResult) + if ok { + sb.WriteString(named.name()) + sb.WriteString(": ") + } + sb.WriteString(res.Message()) + sb.WriteString("\n") + } + + return sb.String() +} diff --git a/pkg/utils/task/v3/result_test.go b/pkg/utils/task/v3/result_test.go new file mode 100644 index 00000000000..71117b4bd56 --- /dev/null +++ b/pkg/utils/task/v3/result_test.go @@ -0,0 +1,169 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package task + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestAggregateResult(t *testing.T) { + cases := []struct { + desc string + rs []Result + expectedResults []Result + expectedStatus Status + expectedRequeueAfter time.Duration + expectedMessage string + }{ + { + desc: "no result", + rs: nil, + expectedResults: nil, + expectedStatus: SComplete, + expectedRequeueAfter: 0, + expectedMessage: "", + }, + { + desc: "only complete result", + rs: []Result{ + Complete().With("success"), + Complete().With("success"), + }, + expectedResults: []Result{ + Complete().With("success"), + Complete().With("success"), + }, + expectedStatus: SComplete, + expectedRequeueAfter: 0, + expectedMessage: "success\nsuccess\n", + }, + { + desc: "contains a fail result", + rs: []Result{ + Complete().With("success"), + Retry(1).With("retry"), + Wait().With("wait"), + Fail().With("fail"), + }, + expectedResults: []Result{ + Complete().With("success"), + Retry(1).With("retry"), + Wait().With("wait"), + Fail().With("fail"), + }, + expectedStatus: SFail, + expectedRequeueAfter: 1, + expectedMessage: "success\nretry(requeue after 1ns)\nwait\nfail\n", + }, + { + desc: "contains a retry result and no fail result", + rs: []Result{ + Complete().With("success"), + Retry(1).With("retry"), + Wait().With("wait"), + }, + expectedResults: []Result{ + Complete().With("success"), + Retry(1).With("retry"), + Wait().With("wait"), + }, + expectedStatus: SRetry, + expectedRequeueAfter: 1, + expectedMessage: "success\nretry(requeue after 1ns)\nwait\n", + }, + { + desc: "contains two retry results", + rs: []Result{ + Retry(1).With("retry"), + Retry(2).With("retry"), + }, + expectedResults: []Result{ + Retry(1).With("retry"), + Retry(2).With("retry"), + }, + expectedStatus: SRetry, + expectedRequeueAfter: 2, + expectedMessage: "retry(requeue after 1ns)\nretry(requeue after 2ns)\n", + }, + { + desc: "contains a wait result and no fail and retry result", + rs: []Result{ + 
Complete().With("success"), + Wait().With("wait"), + Complete().With("success"), + }, + expectedResults: []Result{ + Complete().With("success"), + Wait().With("wait"), + Complete().With("success"), + }, + expectedStatus: SWait, + expectedRequeueAfter: 0, + expectedMessage: "success\nwait\nsuccess\n", + }, + { + desc: "contains an aggregate result", + rs: []Result{ + Complete().With("success"), + newAggregate( + Wait().With("wait"), + Complete().With("success"), + ), + }, + expectedResults: []Result{ + Complete().With("success"), + Wait().With("wait"), + Complete().With("success"), + }, + expectedStatus: SWait, + expectedRequeueAfter: 0, + expectedMessage: "success\nwait\nsuccess\n", + }, + { + desc: "contains a named result", + rs: []Result{ + nameResult("aaa", Complete().With("success")), + newAggregate( + Wait().With("wait"), + Complete().With("success"), + ), + }, + expectedResults: []Result{ + nameResult("aaa", Complete().With("success")), + Wait().With("wait"), + Complete().With("success"), + }, + expectedStatus: SWait, + expectedRequeueAfter: 0, + expectedMessage: "aaa: success\nwait\nsuccess\n", + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + ar := newAggregate(c.rs...) + assert.Equal(tt, c.expectedResults, ar.results(), c.desc) + assert.Equal(tt, c.expectedStatus, ar.Status(), c.desc) + assert.Equal(tt, c.expectedRequeueAfter, ar.RequeueAfter(), c.desc) + assert.Equal(tt, c.expectedMessage, ar.Message(), c.desc) + }) + } +} diff --git a/pkg/utils/task/v3/runner.go b/pkg/utils/task/v3/runner.go new file mode 100644 index 00000000000..ef98327b289 --- /dev/null +++ b/pkg/utils/task/v3/runner.go @@ -0,0 +1,126 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package task + +import ( + "fmt" + "strings" + + "github.com/olekukonko/tablewriter" + ctrl "sigs.k8s.io/controller-runtime" +) + +// TaskRunner is an executor to run a series of tasks sequentially +type TaskRunner interface { + Run() (ctrl.Result, error) +} + +type taskRunner struct { + reporter TaskReporter + task Task +} + +// There are four status of tasks +// - Complete: means this task is complete and all is expected +// - Failed: means an err occurred +// - Retry: means this task need to wait an interval and retry +// - Wait: means this task will wait for next event trigger +// And four results of reconciling +// 1. A task is failed, return err and wait with backoff +// 2. Some tasks need retry and max interval is 0, requeue directly with backoff +// 3. Some tasks need retry and max interval is higher than MinRequeueAfter, requeue after the max interval without backoff +// 4. 
All tasks are complete or wait, the key will not be re-added +func NewTaskRunner(reporter TaskReporter, ts ...Task) TaskRunner { + if reporter == nil { + reporter = &dummyReporter{} + } + return &taskRunner{ + reporter: reporter, + task: Block(ts...), + } +} + +func (r *taskRunner) Run() (ctrl.Result, error) { + res, _ := r.task.sync() + r.reporter.AddResult(res) + + switch res.Status() { + case SFail: + return ctrl.Result{}, fmt.Errorf("some tasks are failed: %v", res.Message()) + case SRetry: + if res.RequeueAfter() == 0 { + return ctrl.Result{ + Requeue: true, + }, nil + } + return ctrl.Result{ + RequeueAfter: res.RequeueAfter(), + }, nil + default: + // SComplete and SWait + return ctrl.Result{}, nil + } +} + +type TaskReporter interface { + AddResult(r Result) + Summary() string +} + +type tableReporter struct { + table *tablewriter.Table + builder *strings.Builder +} + +type dummyReporter struct{} + +func (*dummyReporter) AddResult(Result) {} + +func (*dummyReporter) Summary() string { + return "" +} + +const ( + tableColWidth = 80 +) + +func NewTableTaskReporter() TaskReporter { + builder := strings.Builder{} + table := tablewriter.NewWriter(&builder) + table.SetColWidth(tableColWidth) + table.SetHeader([]string{"Name", "Status", "Message"}) + return &tableReporter{ + table: table, + builder: &builder, + } +} + +func (t *tableReporter) AddResult(r Result) { + switch underlying := r.(type) { + case AggregateResult: + for _, rr := range underlying.results() { + t.AddResult(rr) + } + case NamedResult: + t.table.Append([]string{underlying.name(), r.Status().String(), r.Message()}) + default: + t.table.Append([]string{"", r.Status().String(), r.Message()}) + } +} + +func (t *tableReporter) Summary() string { + t.table.Render() + return t.builder.String() +} diff --git a/pkg/utils/task/v3/runner_test.go b/pkg/utils/task/v3/runner_test.go new file mode 100644 index 00000000000..c7d8d96182f --- /dev/null +++ b/pkg/utils/task/v3/runner_test.go @@ -0,0 +1,180 @@ +// 
Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package task + +import ( + "testing" + + "github.com/stretchr/testify/assert" + ctrl "sigs.k8s.io/controller-runtime" +) + +func TestTaskRunner(t *testing.T) { + cases := []struct { + desc string + ts []Task + hasErr bool + expectedResult ctrl.Result + }{ + { + desc: "a task fail", + ts: []Task{ + NameTaskFunc("aaa", func() Result { + return Complete().With("success") + }), + NameTaskFunc("bbb", func() Result { + return Fail().With("fail") + }), + }, + hasErr: true, + }, + { + desc: "a retry task with 0 interval", + ts: []Task{ + NameTaskFunc("aaa", func() Result { + return Complete().With("success") + }), + NameTaskFunc("bbb", func() Result { + return Retry(0).With("retry") + }), + }, + expectedResult: ctrl.Result{ + Requeue: true, + }, + }, + { + desc: "a retry task with not 0 interval", + ts: []Task{ + NameTaskFunc("aaa", func() Result { + return Complete().With("success") + }), + NameTaskFunc("bbb", func() Result { + return Retry(5).With("retry") + }), + }, + expectedResult: ctrl.Result{ + RequeueAfter: 5, + }, + }, + { + desc: "all tasks are Complete or Wait", + ts: []Task{ + NameTaskFunc("aaa", func() Result { + return Complete().With("success") + }), + NameTaskFunc("bbb", func() Result { + return Wait().With("wait") + }), + NameTaskFunc("ccc", func() Result { + return Complete().With("success") + }), + }, + expectedResult: ctrl.Result{}, + }, + } + + for i := range cases 
{ + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + runner := NewTaskRunner(&dummyReporter{}, c.ts...) + res, err := runner.Run() + if c.hasErr { + assert.Error(tt, err, c.desc) + } else { + assert.Equal(tt, c.expectedResult, res, c.desc) + } + }) + } +} + +func TestTaskReporter(t *testing.T) { + cases := []struct { + desc string + rs []Result + expectedSummary string + }{ + { + desc: "no result", + rs: nil, + expectedSummary: ` ++------+--------+---------+ +| NAME | STATUS | MESSAGE | ++------+--------+---------+ ++------+--------+---------+ +`, + }, + { + desc: "unnamed result", + rs: []Result{ + Complete().With("success"), + }, + expectedSummary: ` ++------+----------+---------+ +| NAME | STATUS | MESSAGE | ++------+----------+---------+ +| | Complete | success | ++------+----------+---------+ +`, + }, + { + desc: "named result", + rs: []Result{ + nameResult("aaa", Complete().With("success")), + }, + expectedSummary: ` ++------+----------+---------+ +| NAME | STATUS | MESSAGE | ++------+----------+---------+ +| aaa | Complete | success | ++------+----------+---------+ +`, + }, + { + desc: "aggregate result", + rs: []Result{ + nameResult("aaa", Complete().With("success")), + newAggregate( + nameResult("bbb", Complete().With("success")), + nameResult("ccc", Complete().With("success")), + ), + }, + expectedSummary: ` ++------+----------+---------+ +| NAME | STATUS | MESSAGE | ++------+----------+---------+ +| aaa | Complete | success | +| bbb | Complete | success | +| ccc | Complete | success | ++------+----------+---------+ +`, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + reporter := NewTableTaskReporter() + for _, r := range c.rs { + reporter.AddResult(r) + } + summary := reporter.Summary() + assert.Equal(tt, c.expectedSummary, "\n"+summary, c.desc) + }) + } +} diff --git a/pkg/utils/task/v3/task.go b/pkg/utils/task/v3/task.go new file mode 100644 index 
00000000000..60596ad0ba0 --- /dev/null +++ b/pkg/utils/task/v3/task.go @@ -0,0 +1,126 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package task + +// Syncer defines an action to sync actual states to desired. +type Syncer interface { + Sync() Result +} + +type SyncFunc func() Result + +func (f SyncFunc) Sync() Result { + return f() +} + +type Condition interface { + Satisfy() bool +} + +type CondFunc func() bool + +func (f CondFunc) Satisfy() bool { + return f() +} + +// Task is a Syncer wrapper, which can be orchestrated using control structures +// such as if and break for conditional logic and flow control. 
+type Task interface { + sync() (_ Result, done bool) +} + +type task struct { + name string + f Syncer +} + +func (e *task) sync() (Result, bool) { + return nameResult(e.name, e.f.Sync()), false +} + +func NameTaskFunc(name string, f SyncFunc) Task { + return &task{ + name: name, + f: f, + } +} + +type optionalTask struct { + Task + cond Condition +} + +func (e *optionalTask) sync() (Result, bool) { + if e.cond.Satisfy() { + return e.Task.sync() + } + + return nil, false +} + +func If(cond Condition, tasks ...Task) Task { + return &optionalTask{ + Task: Block(tasks...), + cond: cond, + } +} + +type breakTask struct { + Task +} + +func (e *breakTask) sync() (Result, bool) { + r, _ := e.Task.sync() + return r, true +} + +func Break(tasks ...Task) Task { + return &breakTask{ + Task: Block(tasks...), + } +} + +func IfBreak(cond Condition, tasks ...Task) Task { + return If(cond, Break(tasks...)) +} + +type blockTask struct { + tasks []Task +} + +func (e *blockTask) sync() (Result, bool) { + var rs []Result + for _, expr := range e.tasks { + r, done := expr.sync() + if r == nil { + continue + } + + rs = append(rs, r) + if r.Status() == SFail { + break + } + + if done { + return newAggregate(rs...), true + } + } + + return newAggregate(rs...), false +} + +func Block(tasks ...Task) Task { + return &blockTask{tasks: tasks} +} diff --git a/pkg/utils/task/v3/task_test.go b/pkg/utils/task/v3/task_test.go new file mode 100644 index 00000000000..5f0f5fa78e0 --- /dev/null +++ b/pkg/utils/task/v3/task_test.go @@ -0,0 +1,290 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package task

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// condition wraps a fixed boolean into a Condition for table-driven tests.
func condition(b bool) Condition {
	return CondFunc(func() bool {
		return b
	})
}

// TestNameTask verifies that a named task attributes its Syncer's result to the
// given name and never marks the enclosing block as done.
func TestNameTask(t *testing.T) {
	cases := []struct {
		desc           string
		name           string
		syncer         SyncFunc
		expectedResult Result
	}{
		{
			desc: "normal",
			name: "aaa",
			syncer: SyncFunc(func() Result {
				return Complete().With("success")
			}),
			expectedResult: nameResult(
				"aaa",
				Complete().With("success"),
			),
		},
	}

	for i := range cases {
		c := &cases[i]
		t.Run(c.desc, func(tt *testing.T) {
			tt.Parallel()

			task := NameTaskFunc(c.name, c.syncer)
			res, done := task.sync()
			assert.Equal(tt, c.expectedResult, res, c.desc)
			assert.False(tt, done, c.desc)
		})
	}
}

// TestIf verifies that a conditional task runs only when the condition holds
// and produces no result (nil) otherwise.
func TestIf(t *testing.T) {
	cases := []struct {
		desc           string
		task           Task
		cond           Condition
		expectedResult Result
	}{
		{
			desc: "cond is true",
			task: NameTaskFunc("aaa", func() Result {
				return Complete().With("success")
			}),
			cond: condition(true),
			expectedResult: newAggregate(
				nameResult("aaa", Complete().With("success")),
			),
		},
		{
			desc: "cond is false",
			task: NameTaskFunc("aaa", func() Result {
				return Complete().With("success")
			}),
			cond:           condition(false),
			expectedResult: nil,
		},
	}

	for i := range cases {
		c := &cases[i]
		t.Run(c.desc, func(tt *testing.T) {
			tt.Parallel()

			task := If(c.cond, c.task)
			res, done := task.sync()
			assert.Equal(tt, c.expectedResult, res, c.desc)
			// If itself never sets done; only Break does.
			assert.False(tt, done, c.desc)
		})
	}
}

// TestBreak verifies that Break runs its tasks and always reports done=true.
func TestBreak(t *testing.T) {
	cases := []struct {
		desc           string
		task           Task
		expectedResult Result
	}{
		{
			desc: "cond is true",
			task: NameTaskFunc("aaa", func() Result {
				return Complete().With("success")
			}),
			expectedResult: newAggregate(
				nameResult("aaa", Complete().With("success")),
			),
		},
	}

	for i := range cases {
		c := &cases[i]
		t.Run(c.desc, func(tt *testing.T) {
			tt.Parallel()

			task := Break(c.task)
			res, done := task.sync()
			assert.Equal(tt, c.expectedResult, res, c.desc)
			assert.True(tt, done, c.desc)
		})
	}
}

// TestIfBreak verifies the combination: done=true only when the condition holds.
func TestIfBreak(t *testing.T) {
	cases := []struct {
		desc           string
		task           Task
		cond           Condition
		expectedResult Result
		expectedDone   bool
	}{
		{
			desc: "cond is true",
			task: NameTaskFunc("aaa", func() Result {
				return Complete().With("success")
			}),
			cond: CondFunc(func() bool { return true }),
			expectedResult: newAggregate(
				nameResult("aaa", Complete().With("success")),
			),
			expectedDone: true,
		},
		{
			desc: "cond is false",
			task: NameTaskFunc("aaa", func() Result {
				return Complete().With("success")
			}),
			cond:           CondFunc(func() bool { return false }),
			expectedResult: nil,
			expectedDone:   false,
		},
	}

	for i := range cases {
		c := &cases[i]
		t.Run(c.desc, func(tt *testing.T) {
			tt.Parallel()

			task := IfBreak(c.cond, c.task)
			res, done := task.sync()
			assert.Equal(tt, c.expectedResult, res, c.desc)
			assert.Equal(tt, c.expectedDone, done, c.desc)
		})
	}
}

// TestBlock verifies sequential execution, aggregation, fail short-circuiting,
// and done propagation from nested Break/IfBreak tasks.
func TestBlock(t *testing.T) {
	cases := []struct {
		desc           string
		tasks          []Task
		expectedResult Result
		expectedDone   bool
	}{
		{
			desc:           "no task",
			tasks:          nil,
			expectedResult: newAggregate(),
			expectedDone:   false,
		},
		{
			desc: "1 complete task",
			tasks: []Task{
				NameTaskFunc("aaa", func() Result {
					return Complete().With("success")
				}),
			},
			expectedResult: newAggregate(
				nameResult("aaa", Complete().With("success")),
			),
			expectedDone: false,
		},
		{
			desc: "2 complete tasks",
			tasks: []Task{
				NameTaskFunc("aaa", func() Result {
					return Complete().With("success")
				}),
				NameTaskFunc("bbb", func() Result {
					return Complete().With("success")
				}),
			},
			expectedResult: newAggregate(
				nameResult("aaa", Complete().With("success")),
				nameResult("bbb", Complete().With("success")),
			),
			expectedDone: false,
		},
		{
			// A failing task stops the block; the second task never runs.
			desc: "2 tasks with 1 fail task",
			tasks: []Task{
				NameTaskFunc("aaa", func() Result {
					return Fail().With("fail")
				}),
				NameTaskFunc("bbb", func() Result {
					return Complete().With("success")
				}),
			},
			expectedResult: newAggregate(
				nameResult("aaa", Fail().With("fail")),
			),
			expectedDone: false,
		},
		{
			// An unsatisfied If contributes nothing to the aggregate.
			desc: "if task",
			tasks: []Task{
				If(condition(false), NameTaskFunc("aaa", func() Result {
					return Fail().With("fail")
				})),
				NameTaskFunc("bbb", func() Result {
					return Complete().With("success")
				}),
			},
			expectedResult: newAggregate(
				nameResult("bbb", Complete().With("success")),
			),
			expectedDone: false,
		},
		{
			desc: "break task",
			tasks: []Task{
				NameTaskFunc("aaa", func() Result {
					return Complete().With("success")
				}),
				Break(NameTaskFunc("bbb", func() Result {
					return Complete().With("success")
				})),
			},
			expectedResult: newAggregate(
				nameResult("aaa", Complete().With("success")),
				nameResult("bbb", Complete().With("success")),
			),
			expectedDone: true,
		},
		{
			desc: "if break task",
			tasks: []Task{
				IfBreak(condition(true), NameTaskFunc("aaa", func() Result {
					return Complete().With("success")
				})),
				NameTaskFunc("bbb", func() Result {
					return Complete().With("success")
				}),
			},
			expectedResult: newAggregate(
				nameResult("aaa", Complete().With("success")),
			),
			expectedDone: true,
		},
	}

	for i := range cases {
		c := &cases[i]
		t.Run(c.desc, func(tt *testing.T) {
			tt.Parallel()

			task := Block(c.tasks...)
			r, done := task.sync()
			assert.Equal(tt, c.expectedResult, r, c.desc)
			assert.Equal(tt, c.expectedDone, done, c.desc)
		})
	}
}
diff --git a/pkg/utils/time/clock.go b/pkg/utils/time/clock.go
new file mode 100644
index 00000000000..29ab8baa246
--- /dev/null
+++ b/pkg/utils/time/clock.go
@@ -0,0 +1,50 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package time

import "time"

// Clock abstracts time lookups so they can be faked in tests.
type Clock interface {
	Now() time.Time
	Since(time.Time) time.Duration
}

// Compile-time checks that both implementations satisfy Clock.
var (
	_ Clock = &RealClock{}
	_ Clock = &FakeClock{}
)

// RealClock delegates to the standard library time package.
type RealClock struct{}

func (RealClock) Since(t time.Time) time.Duration {
	return time.Since(t)
}

func (RealClock) Now() time.Time {
	return time.Now()
}

// FakeClock returns whatever the injected functions produce; intended for tests.
// NOTE(review): calling a method with the corresponding func field left nil panics.
type FakeClock struct {
	NowFunc   func() time.Time
	SinceFunc func(time.Time) time.Duration
}

func (f FakeClock) Now() time.Time {
	return f.NowFunc()
}

func (f FakeClock) Since(t time.Time) time.Duration {
	return f.SinceFunc(t)
}
diff --git a/pkg/utils/tls/tls.go b/pkg/utils/tls/tls.go
new file mode 100644
index 00000000000..91b930ae832
--- /dev/null
+++ b/pkg/utils/tls/tls.go
@@ -0,0 +1,65 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/pkg/client" +) + +// GetTLSConfigFromSecret returns *tls.Config for the given secret. +func GetTLSConfigFromSecret(ctx context.Context, cli client.Client, namespace, secretName string) (*tls.Config, error) { + var secret corev1.Secret + if err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: secretName}, &secret); err != nil { + return nil, fmt.Errorf("failed to get secret %s/%s: %w", namespace, secretName, err) + } + return LoadTLSConfigFromSecret(&secret) +} + +// LoadTlsConfigFromSecret loads *tls.Config from the given secret. 
+// The secret should often contain the following keys: +// - "ca.crt": CA certificate +// - "tls.crt": TLS certificate +// - "tls.key": TLS private key +func LoadTLSConfigFromSecret(secret *corev1.Secret) (*tls.Config, error) { + rootCAs := x509.NewCertPool() + + if !rootCAs.AppendCertsFromPEM(secret.Data[corev1.ServiceAccountRootCAKey]) { + return nil, fmt.Errorf("failed to append ca certs") + } + + clientCert, certExists := secret.Data[corev1.TLSCertKey] + clientKey, keyExists := secret.Data[corev1.TLSPrivateKeyKey] + if !certExists || !keyExists { + return nil, fmt.Errorf("cert or key does not exist in the secret %s/%s", secret.Namespace, secret.Name) + } + tlsCert, err := tls.X509KeyPair(clientCert, clientKey) + if err != nil { + return nil, fmt.Errorf("unable to load certificates from the secret %s/%s: %w", secret.Namespace, secret.Name, err) + } + + //nolint:gosec // we didn't force to use a specific TLS version yet + return &tls.Config{ + RootCAs: rootCAs, + ClientCAs: rootCAs, + Certificates: []tls.Certificate{tlsCert}, + }, nil +} diff --git a/pkg/utils/tls/tls_test.go b/pkg/utils/tls/tls_test.go new file mode 100644 index 00000000000..5423d789bc3 --- /dev/null +++ b/pkg/utils/tls/tls_test.go @@ -0,0 +1,104 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package tlsutil

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
)

// TestLoadTlsConfigFromSecret checks the happy path: a secret holding a fixed
// self-signed CA plus a matching cert/key pair yields a populated tls.Config.
// NOTE(review): Go initialism convention would name this
// TestLoadTLSConfigFromSecret to match the function under test.
func TestLoadTlsConfigFromSecret(t *testing.T) {
	// Static test fixture: CA, leaf certificate, and RSA key generated for "TiDB".
	secret := &corev1.Secret{
		Data: map[string][]byte{
			corev1.ServiceAccountRootCAKey: []byte(
				`-----BEGIN CERTIFICATE-----
MIIC6jCCAdKgAwIBAgIQWCZFlAx6Qqg5VypjCRRDEjANBgkqhkiG9w0BAQsFADAP
MQ0wCwYDVQQDEwRUaURCMB4XDTI0MDIyOTA4NDk0MloXDTM0MDIyNjA4NDk0Mlow
DzENMAsGA1UEAxMEVGlEQjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
ALuJ5+ef6hcALcVGzSUjlKX5CixXbr5To8zImZivjr6IzQVEMeyOLt0xb3SuXL49
nnz/N9s3ET06Sc1Zf3QhyJqxy9wT7vVq352tmZtZwFreR+UfHirh1xoTIJJ7e6ai
SYH2UHi5bUFTamFOqo5/QsueNcK4lQO8WxMgyPBmaUfPfn5b0uuD2gdNlA6yEcCt
7Cr84xeHnpmoevtf7Obk3fv+S5nGahl0AvV0ilXZI0BN6u15fZz8c49JA9rrD6x0
XjdndZPIT3Brz5sRZoZlmdAj9DBXqo1CJ7Z4uezWmaF3+Su/TT/FuoZwnX2GUIxc
VqkYh1QiWSpYI66RQGle1IcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFNiNgN9eiqNZgxWiUKWuW/yTN+XDMA0GCSqG
SIb3DQEBCwUAA4IBAQB0t7MXGI1bwFBNYb1VbpdsI1KdVUT2S3oKKoeq2wMfwIJd
DYeiU4banRjSSgrbsSd38WiY9G249yLViTWC+Q6p4X8jaVZO86b/rA7fx0wD4NRG
K6JusFhZPKI7rHYDQulpbvIASZ1epJmTAiNz99z3hL3DF9EnDipO9TTEqYsempJu
cSWegdrRAWFpU821DF9DWpnN/4QSWAHsdFnpt+F8h1YK2A0tnLyb5zKYMnYLErrX
FkfMxRTUHY1Rz0fHvb4rSjMObpOf7rEjYyHL1KJIPPFXmy5bDBx8ReHxCscMPSQW
zINRz3PfryIPIyjvVEJ6vvA+Eb4hke2YCZmhrWMk
-----END CERTIFICATE-----`),
			corev1.TLSCertKey: []byte(
				`-----BEGIN CERTIFICATE-----
MIIDATCCAemgAwIBAgIRAKXw/bNMzl1mQR1aqeHogOcwDQYJKoZIhvcNAQELBQAw
DzENMAsGA1UEAxMEVGlEQjAeFw0yNDAzMDEwNjExNTlaFw0yNTAzMDEwNjExNTla
MCExEDAOBgNVBAoTB1BpbmdDQVAxDTALBgNVBAMTBFRpREIwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQCkhF/P9qaCdIsF82CLR6ij5UPjjJC8b/n1RdYD
DLsyByKjEe+T1gTfgPWaKU5vykfSyqxj1sHDBwXUnBbHGQDv4blId0HOaxPXll8w
H7F67rR506R1RsYzPzNjcV8bfRlEDkYn/dCtNek5UHXVWwdhwL6+fieC8VUQmovl
jq309imQsHZ3kYcnqRoihPsSpIZdc55C6R7b16XBQ19wNvAa8dPx2aB1855xREZT
xZrn2O+d9bEt6GKc5J883YIzi8VlZobS1BzKvYVS893BpNGAK5A8tQq2rXjOOEAb
/s/lEIs7U3sQbnCjVcf6ZYKjTvoySlvikpVs34IzruAWqfgBAgMBAAGjRjBEMBMG
A1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAU2I2A
316Ko1mDFaJQpa5b/JM35cMwDQYJKoZIhvcNAQELBQADggEBAIjRT4b39XV9mczX
TuTBaXBSfNO5drAYDcjAC6U3/r1OSSmpVGKf/ZSG/MWXkgEtWQHzq0A6ggel6HYX
r2I4I/YXC9H6yXbPph9tC7mSPvMF0/4zMjapPSwZzp83TMdNNaVqXrhaEC7s8LgA
ebUVFsrlgCHAAyBQ+AHIf9AmxWEtl7LDdwuvXh96e5Uv/ZagxkOO58nOuoFH6lFy
64ijiZGM9pIL3Bc3v5M5VhPRzXeoeRQI600KeXko6yIlj3i1Jr6DvnNgNsbqVVu2
Oc+TcFgc1vWQo4HfEFOeGHgPKiOuwX+F8nEtog4b4Fo9G5LSIq9mKruaDXPVxeh6
mrVBKbU=
-----END CERTIFICATE-----`),
			corev1.TLSPrivateKeyKey: []byte(
				`-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEApIRfz/amgnSLBfNgi0eoo+VD44yQvG/59UXWAwy7MgcioxHv
k9YE34D1milOb8pH0sqsY9bBwwcF1JwWxxkA7+G5SHdBzmsT15ZfMB+xeu60edOk
dUbGMz8zY3FfG30ZRA5GJ/3QrTXpOVB11VsHYcC+vn4ngvFVEJqL5Y6t9PYpkLB2
d5GHJ6kaIoT7EqSGXXOeQuke29elwUNfcDbwGvHT8dmgdfOecURGU8Wa59jvnfWx
LehinOSfPN2CM4vFZWaG0tQcyr2FUvPdwaTRgCuQPLUKtq14zjhAG/7P5RCLO1N7
EG5wo1XH+mWCo076Mkpb4pKVbN+CM67gFqn4AQIDAQABAoIBAGOMVg2YyhiWPKlV
I04kBj9mMzY1kD714uIvZ9hgk8Up3COgbr+d+UTk27h01il+1QcP7FBdWtGQJk8I
RCAlWRPOGjdnMkKdOFxzeRW9l78zQbGWByWPtc68p3O83jfb8rXjjUAVrXeh74Xm
0eZQNp9H6iOKYo4xSa/KVGyLcWePs/NxATpwbL2MDWdr2aJjiZwma70gvHKZz3WQ
LWpW9vcw+cl6+D+p0lOZzJFL9s/iamJyH1TjAmyIhN+S7szlC8T2srmjFb+Y38I6
0FBld1farVwDp+par4pGtRq+KslUc3yN6XNma+I25B8C2l4+DksnjCeqrm0bm7EY
JKtyVaECgYEAxNzk0xBjrKNW6/RXMDql+ukFcMISljD1IJFktdfVia5deFlzV3eQ
Nq+IrG7zuNIiSmegjJqEunoGLkT9NwO0y48f1QDYj2FBXlXOWEiLG1pitL8X53ns
MiXulfneemiE6RNg8MvM4Q6Mu6Lsbijgjqsj/saIWXdwCszjHiytK/cCgYEA1fAI
PQWqECL3eWdKCO5tFsrvIkYJwTJBYJNJfrbftpZ2mr2AQD+0bA9Ty5tuGyBykFEO
Hua6UfHxYrbBDzqpAiOFFS5wlPyGVJISWFC6oubRPeGXHx5fFpimoMa/J9YZ7v78
DjXcmEDYgCQybAILzVWIpZ97WOF81ksJy+jozccCgYBiVXR3eVhQg8aHViW3EZSX
II53JHnkS9Al1HpZ2tXvUAmgdA4JQs/mgQfkGgfj6hL214x6rzRdcVZlBlD1igRl
KbjczO9fr1TXqkTIFHRn1V44qrtmBKDW69uhTo6y1kKNqgBiR2qvgHULxPYUkJaa
rSHtwX2aMu7kdjN8fxSBQQKBgQClQlsK0FJTTr9+P4SYK52HKtHY1uN4ItsPwBbY
1GkxwT7zP4lPmCZGBv0C3hkKyWDWDFbtFew9mriNOYEew4CEj22hNBNxczRNJd0X
7ZyOc+CUfavgNPTdHqQws/Y7zo6P6NZKH988mXLkYZG1j0sQnY8F6ZE90kk9vA9g
PZWARwKBgCMmUWaz2KdZS8z3DzxY0E0rP+WIiVXPDJr8h/i2wh9XrQhbvY7OAYXU
RwYYRMKBT/YKLf2phDeHgd8D67p9EQqCD6mMl+TG5BWxiA4CjrHWdhNY7XvjybO8
nyCmZvl1qIEV8/2BE4zUWZqizbhrattyt10YvnJCBt6YuFB6Jcqg
-----END RSA PRIVATE KEY-----`),
		},
	}

	tlsConfig, err := LoadTLSConfigFromSecret(secret)
	require.NoError(t, err)
	assert.NotNil(t, tlsConfig)
	assert.NotNil(t, tlsConfig.RootCAs)
	assert.NotNil(t, tlsConfig.ClientCAs)
	assert.Len(t, tlsConfig.Certificates, 1)
}
diff --git a/pkg/utils/toml/toml.go b/pkg/utils/toml/toml.go
new file mode 100644
index 00000000000..06f68fead8f
--- /dev/null
+++ b/pkg/utils/toml/toml.go
@@ -0,0 +1,170 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package toml + +import ( + "bytes" + "fmt" + "hash/fnv" + "reflect" + + "github.com/mitchellh/mapstructure" + "github.com/pelletier/go-toml/v2" + "k8s.io/apimachinery/pkg/util/rand" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + hashutil "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/util/hash" +) + +type Decoder[T any, PT *T] interface { + Decode(data []byte, obj PT) error +} + +type Encoder[T any, PT *T] interface { + Encode(obj PT) ([]byte, error) +} + +type codec[T any, PT *T] struct { + raw map[string]any +} + +func Codec[T any, PT *T]() (Decoder[T, PT], Encoder[T, PT]) { + c := &codec[T, PT]{} + + return c, c +} + +func (c *codec[T, PT]) Decode(data []byte, obj PT) error { + raw := make(map[string]any) + if err := toml.NewDecoder(bytes.NewReader(data)).Decode(&raw); err != nil { + return err + } + + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + TagName: "toml", + Result: obj, + }) + if err != nil { + return err + } + if err := decoder.Decode(raw); err != nil { + return err + } + + c.raw = raw + + return nil +} + +func (c *codec[T, PT]) Encode(obj PT) ([]byte, error) { + if err := Overwrite(obj, c.raw); err != nil { + return nil, err + } + + buf := bytes.Buffer{} + if err := toml.NewEncoder(&buf).Encode(c.raw); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func Overwrite(obj any, m map[string]any) error { + structVal := reflect.ValueOf(obj).Elem() + fieldTypes := reflect.VisibleFields(structVal.Type()) + for _, fieldType := range fieldTypes { + if !fieldType.IsExported() { + continue + } + + name := fieldType.Tag.Get("toml") + src := structVal.FieldByIndex(fieldType.Index) + + for src.Kind() == reflect.Pointer { + src = src.Elem() + } + + if src.IsZero() { + continue + } + + v, ok := m[name] + if !ok { + m[name] = src.Interface() + continue + } + + val, err := getField(src, v) + if err != nil { + return err + } + m[name] = val + } + + return nil +} + +func getField(src reflect.Value, 
dst any) (any, error) { + switch src.Kind() { + case reflect.Struct: + vm, ok := dst.(map[string]any) + if !ok { + return nil, fmt.Errorf("type mismatched, expected map, actual %T", dst) + } + if err := Overwrite(src.Addr().Interface(), vm); err != nil { + return nil, err + } + + return vm, nil + case reflect.Slice, reflect.Array: + vs, ok := dst.([]any) + if !ok { + return nil, fmt.Errorf("type mismatched, expected array or slice, actual %T", dst) + } + for i := range vs { + if i >= src.Len() { + break + } + srcIndex := src.Index(i) + val, err := getField(srcIndex, vs[i]) + if err != nil { + return nil, err + } + vs[i] = val + } + if len(vs) < src.Len() { + for i := len(vs); i < src.Len(); i++ { + vs = append(vs, src.Index(i).Interface()) + } + } + return vs, nil + default: + return src.Interface(), nil + } +} + +// GenerateHash takes a TOML string as input, unmarshals it into a map, +// and generates a hash of the resulting configuration. The hash is then +// encoded into a safe string format and returned. +// If the order of keys in the TOML string is different, the hash will be the same. +func GenerateHash(tomlStr v1alpha1.ConfigFile) (string, error) { + var config map[string]any + if err := toml.NewDecoder(bytes.NewReader([]byte(tomlStr))).Decode(&config); err != nil { + return "", fmt.Errorf("failed to unmarshal toml string %s: %w", tomlStr, err) + } + hasher := fnv.New32a() + hashutil.DeepHashObject(hasher, config) + return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32())), nil +} diff --git a/pkg/utils/toml/toml_test.go b/pkg/utils/toml/toml_test.go new file mode 100644 index 00000000000..90f425b897e --- /dev/null +++ b/pkg/utils/toml/toml_test.go @@ -0,0 +1,240 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package toml + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +type TestType struct { + String string `toml:"string"` +} + +func TestCodec(t *testing.T) { + cases := []struct { + desc string + input []byte + output []byte + obj TestType + change func(obj *TestType) *TestType + }{ + { + desc: "empty input", + input: []byte(``), + output: []byte(` +string = 'changed' + `), + change: func(obj *TestType) *TestType { + obj.String = "changed" + return obj + }, + }, + { + desc: "reserve unknown field", + input: []byte(` +unknown = 'xxx' +unknown_int = 10 + +[[unknown_arr]] +unknown = 'yyy' + +[[unknown_arr]] +unknown = 'yyy' + +[unknown_double_map] +[unknown_double_map.map] +unknown = 'yyy' + +[unknown_map] +unknown = 'yyy' + `), + output: []byte(` +string = 'mmm' +unknown = 'xxx' +unknown_int = 10 + +[[unknown_arr]] +unknown = 'yyy' + +[[unknown_arr]] +unknown = 'yyy' + +[unknown_double_map] +[unknown_double_map.map] +unknown = 'yyy' + +[unknown_map] +unknown = 'yyy' + `), + change: func(obj *TestType) *TestType { + obj.String = "mmm" + return obj + }, + }, + { + desc: "change existing field", + input: []byte(` +string = 'aaa' +unknown = 'xxx' + `), + output: []byte(` +string = 'yyy' +unknown = 'xxx' + `), + change: func(obj *TestType) *TestType { + obj.String = "yyy" + return obj + }, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + decoder, encoder := 
Codec[TestType]() + require.NoError(t, decoder.Decode(c.input, &c.obj)) + c.change(&c.obj) + res, err := encoder.Encode(&c.obj) + require.NoError(tt, err) + assert.Equal(tt, string(bytes.TrimSpace(c.output)), string(bytes.TrimSpace(res))) + }) + } +} + +func TestGenerateHash(t *testing.T) { + tests := []struct { + name string + tomlStr v1alpha1.ConfigFile + semanticallyEquivalentStr v1alpha1.ConfigFile + wantHash string + wantError bool + }{ + { + name: "Valid TOML string", + tomlStr: v1alpha1.ConfigFile(`foo = 'bar' +[log] +k1 = 'v1' +k2 = 'v2'`), + semanticallyEquivalentStr: v1alpha1.ConfigFile(`foo = 'bar' +[log] +k2 = 'v2' +k1 = 'v1'`), + wantHash: "5dbbcf4574", + wantError: false, + }, + { + name: "Different config value", + tomlStr: v1alpha1.ConfigFile(`foo = 'foo' +[log] +k2 = 'v2' +k1 = 'v1'`), + wantHash: "f5bc46cb9", + wantError: false, + }, + { + name: "multiple sections with blank line", + tomlStr: v1alpha1.ConfigFile(`[a] +k1 = 'v1' +[b] +k2 = 'v2'`), + semanticallyEquivalentStr: v1alpha1.ConfigFile(`[a] +k1 = 'v1' +[b] + +k2 = 'v2'`), + wantHash: "79598d5977", + wantError: false, + }, + { + name: "Empty TOML string", + tomlStr: v1alpha1.ConfigFile(``), + wantHash: "7d6fc488b7", + wantError: false, + }, + { + name: "Invalid TOML string", + tomlStr: v1alpha1.ConfigFile(`key1 = "value1" + key2 = value2`), // Missing quotes around value2 + wantHash: "", + wantError: true, + }, + { + name: "Nested tables", + tomlStr: v1alpha1.ConfigFile(`[parent] +child1 = "value1" +child2 = "value2" +[parent.child] +grandchild1 = "value3" +grandchild2 = "value4"`), + semanticallyEquivalentStr: v1alpha1.ConfigFile(`[parent] +child2 = "value2" +child1 = "value1" +[parent.child] +grandchild2 = "value4" +grandchild1 = "value3"`), + wantHash: "7bf645ccb4", + wantError: false, + }, + { + name: "Array of tables", + tomlStr: v1alpha1.ConfigFile(`[[products]] +name = "Hammer" +sku = 738594937 + +[[products]] +name = "Nail" +sku = 284758393 + +color = "gray"`), + 
semanticallyEquivalentStr: v1alpha1.ConfigFile(`[[products]] +sku = 738594937 +name = "Hammer" + +[[products]] +sku = 284758393 +name = "Nail" + +color = "gray"`), + wantHash: "7549cf87f4", + wantError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotHash, err := GenerateHash(tt.tomlStr) + if tt.wantError { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.wantHash, gotHash) + + if string(tt.semanticallyEquivalentStr) != "" { + reorderedHash, err := GenerateHash(tt.semanticallyEquivalentStr) + require.NoError(t, err) + assert.Equal(t, tt.wantHash, reorderedHash) + } + } + }) + } +} diff --git a/pkg/utils/topology/scheduler.go b/pkg/utils/topology/scheduler.go new file mode 100644 index 00000000000..3e5c2e57c47 --- /dev/null +++ b/pkg/utils/topology/scheduler.go @@ -0,0 +1,215 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package topology + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +// This interface defines a scheduler to choose the next topology +type Scheduler interface { + // Add adds an scheduled instance. + // All scheduled instances should be added before calling Next() + Add(name string, topo v1alpha1.Topology) + // NextAdd returns the topology of the next pending instance + NextAdd() v1alpha1.Topology + // Del removes an scheduled instance. 
+ Del(name string) + // NextDel returns names which can be choosed to del + NextDel() []string +} + +func New(st []v1alpha1.ScheduleTopology) (Scheduler, error) { + s := &topologyScheduler{ + e: NewEncoder(), + hashToIndex: map[string]int{}, + nameToIndex: map[string]int{}, + } + + for index, topo := range st { + hash := s.e.Encode(topo.Topology) + if hash == "" { + return nil, fmt.Errorf("topology of %vth is empty", index) + } + + info := topoInfo{ + topo: topo.Topology, + names: sets.New[string](), + } + if topo.Weight == nil { + info.weight = 1 + } else { + info.weight = *topo.Weight + } + + s.hashToIndex[hash] = index + s.info = append(s.info, info) + s.totalWeight += int64(info.weight) + } + + // append an unknown topo + s.info = append(s.info, topoInfo{ + names: sets.New[string](), + }) + + return s, nil +} + +type topoInfo struct { + topo v1alpha1.Topology + names sets.Set[string] + weight int32 +} + +type topologyScheduler struct { + e Encoder + info []topoInfo + // topo hash to topo index + hashToIndex map[string]int + // instance name to topo index + nameToIndex map[string]int + + totalWeight int64 + totalCount int +} + +func (s *topologyScheduler) Add(name string, t v1alpha1.Topology) { + hash := s.e.Encode(t) + index, ok := s.hashToIndex[hash] + if !ok { + // add into unknown topo + index = len(s.info) - 1 + } else { + // only count instances not in unknown topo + s.totalCount += 1 + } + + s.info[index].names.Insert(name) + s.nameToIndex[name] = index +} + +func (s *topologyScheduler) Del(name string) { + index, ok := s.nameToIndex[name] + if !ok { + // cannot found this name, just return + return + } + + delete(s.nameToIndex, name) + s.info[index].names.Delete(name) + if index != len(s.info)-1 { + // not unknown topo + s.totalCount -= 1 + } +} + +func (s *topologyScheduler) NextAdd() v1alpha1.Topology { + // only unknown topo + if len(s.info) == 1 { + return nil + } + maximum := int64(0) + choosed := 0 + for i, v := range s.info[:len(s.info)-1] { + 
count := v.names.Len() + // avoid some topos are starved + if v.names.Len() == 0 { + // If weight of a topo is very high, countPerWeight*weight ~= totalCount, + // so add len(topologies) score for empty topo. + // len(topologies) will always be bigger than the totalCount at the first round. + // All topos will be scheduled at least one instance at the first round. + count = -len(s.info) + 1 + } + score := int64(v.weight)*int64(s.totalCount) - int64(count)*s.totalWeight + if score > maximum { + maximum = score + choosed = i + } + } + + return s.info[choosed].topo +} + +func (s *topologyScheduler) NextDel() []string { + unknown := s.info[len(s.info)-1] + if unknown.names.Len() != 0 { + return unknown.names.UnsortedList() + } + + maximum := -int64(s.totalCount+len(s.info)-1) * s.totalWeight + choosed := 0 + for i, v := range s.info[:len(s.info)-1] { + count := v.names.Len() + if count == 0 { + continue + } + if v.names.Len() == 1 { + count = -len(s.info) + 1 + } + score := int64(count)*s.totalWeight - int64(s.totalCount)*int64(v.weight) + if score > maximum { + maximum = score + choosed = i + } + } + + return s.info[choosed].names.UnsortedList() +} + +// Encoder is defined to encode a map to a unique string +// This encoder is designed to avoid iter all topologies in policy multiple times +// Assume there are M toplogies and N instances, time complexity will be optimized from O(M*N) to O(M+N) +type Encoder interface { + Encode(topo v1alpha1.Topology) string +} + +func NewEncoder() Encoder { + return &encoder{ + kv: map[string]int{}, + } +} + +type encoder struct { + kv map[string]int + maxIndex int +} + +func (e *encoder) Encode(t v1alpha1.Topology) string { + var hash []byte + for k, v := range t { + count, ok := e.kv[k+":"+v] + if !ok { + count = e.maxIndex + e.maxIndex += 1 + e.kv[k+":"+v] = count + } + a := count / 8 + b := count % 8 + if a >= len(hash) { + for i := 0; i < a-len(hash); i++ { + hash = append(hash, 0) + } + hash = append(hash, 1< maxSize) { + return 
fmt.Errorf("size %d is out of range [%d, %d] for volume type %s", size, minSize, maxSize, desired.Type) + } + } + + if desired.IOPS != nil { + iops := int(*desired.IOPS) + minIops, ok1 := minIOPS[desired.Type] + maxIops, ok2 := maxIOPS[desired.Type] + if !ok1 || !ok2 { + return fmt.Errorf("modifying IOPS for volume type %s is not supported", desired.Type) + } + if iops < minIops || iops > maxIops { + return fmt.Errorf("iops %d is out of range [%d, %d] for volume type %s", iops, minIops, maxIops, desired.Type) + } + } + + if desired.Throughput != nil { + throughput := int(*desired.Throughput) + if throughput < minThroughput || throughput > maxThroughput { + return fmt.Errorf("throughput %d is out of range [%d, %d]", throughput, minThroughput, maxThroughput) + } + } + + return nil +} diff --git a/pkg/volumes/cloud/aws/ebs_modifier_test.go b/pkg/volumes/cloud/aws/ebs_modifier_test.go new file mode 100644 index 00000000000..a699bc746ad --- /dev/null +++ b/pkg/volumes/cloud/aws/ebs_modifier_test.go @@ -0,0 +1,287 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" +) + +func newTestPVC(size string) *corev1.PersistentVolumeClaim { + return &corev1.PersistentVolumeClaim{ + Spec: corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(size), + }, + }, + }, + } +} + +func newTestPV(volID string) *corev1.PersistentVolume { + return &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + VolumeHandle: volID, + }, + }, + }, + } +} + +func newTestStorageClass(provisioner, typ, iops, throughput string) *storagev1.StorageClass { + return &storagev1.StorageClass{ + Provisioner: provisioner, + Parameters: map[string]string{ + paramKeyIOPS: iops, + paramKeyType: typ, + paramKeyThroughput: throughput, + }, + } +} + +func TestModifyVolume(t *testing.T) { + initialPVC := newTestPVC("10Gi") + initialPV := newTestPV("aaa") + initialSC := newTestStorageClass("", "gp3", "3000", "125") + + cases := []struct { + desc string + + pvc *corev1.PersistentVolumeClaim + pv *corev1.PersistentVolume + sc *storagev1.StorageClass + + getState GetVolumeStateFunc + + wait bool + hasErr bool + }{ + { + desc: "volume modification is failed, modify again", + pvc: initialPVC, + pv: initialPV, + sc: initialSC, + + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateFailed + }, + + wait: true, + hasErr: false, + }, + { + desc: "volume modification is optimizing, no need to wait to avoid waiting too long time", + pvc: initialPVC, + pv: initialPV, + sc: initialSC, + + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateOptimizing + }, + + wait: false, + hasErr: false, + }, + { + desc: "volume modification is completed, no 
need to wait", + pvc: initialPVC, + pv: initialPV, + sc: initialSC, + + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateCompleted + }, + + wait: false, + hasErr: false, + }, + { + desc: "volume modification is modifying, wait", + pvc: initialPVC, + pv: initialPV, + sc: initialSC, + + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateModifying + }, + + wait: true, + hasErr: false, + }, + { + desc: "volume has been modified, but size is changed", + pvc: newTestPVC("20Gi"), + pv: initialPV, + sc: initialSC, + + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateCompleted + }, + + wait: true, + hasErr: false, + }, + { + desc: "volume has been modified, but sc is changed", + pvc: initialPVC, + pv: initialPV, + sc: newTestStorageClass("", "io2", "3000", "300"), + + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateCompleted + }, + + wait: true, + hasErr: false, + }, + { + desc: "volume is modifying, but size/sc is changed", + pvc: newTestPVC("20Gi"), + pv: initialPV, + sc: newTestStorageClass("", "gp2", "3000", "300"), + + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateModifying + }, + + wait: false, + hasErr: true, + }, + } + + for _, tt := range cases { + t.Run(tt.desc, func(t *testing.T) { + g := NewGomegaWithT(t) + m := NewFakeEBSModifier(tt.getState) + + wait1, err := m.Modify(context.TODO(), initialPVC, initialPV, initialSC) + g.Expect(err).Should(Succeed(), tt.desc) + g.Expect(wait1).Should(BeTrue(), tt.desc) + + wait2, err := m.Modify(context.TODO(), tt.pvc, tt.pv, tt.sc) + if tt.hasErr { + g.Expect(err).Should(HaveOccurred(), tt.desc) + } else { + g.Expect(err).Should(Succeed(), tt.desc) + } + g.Expect(wait2).Should(Equal(tt.wait), tt.desc) + }) + } +} + +func TestValidate(t *testing.T) { + tests := []struct { + name string + ssc 
*storagev1.StorageClass + dsc *storagev1.StorageClass + wantErr bool + }{ + { + name: "same provisioner, but not ebs", + ssc: newTestStorageClass("foo", "gp3", "1000", "100"), + dsc: newTestStorageClass("foo", "gp3", "2000", "200"), + wantErr: true, + }, + { + name: "different provisioner", + ssc: newTestStorageClass("foo", "gp3", "1000", "100"), + dsc: newTestStorageClass("bar", "gp3", "2000", "200"), + wantErr: true, + }, + { + name: "happy path", + ssc: newTestStorageClass("ebs.csi.aws.com", "gp3", "1000", "100"), + dsc: newTestStorageClass("ebs.csi.aws.com", "gp3", "2000", "200"), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &EBSModifier{logger: logr.Logger{}} + if err := m.Validate(nil, nil, tt.ssc, tt.dsc); (err != nil) != tt.wantErr { + t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_validateVolume(t *testing.T) { + tests := []struct { + name string + desired *Volume + wantErr bool + }{ + { + name: "empty volume type", + desired: &Volume{}, + }, + { + name: "nil volume", + desired: &Volume{}, + }, + { + name: "valid volume", + desired: &Volume{ + Size: ptr.To(int32(10)), + IOPS: ptr.To(int32(3000)), + Throughput: ptr.To(int32(125)), + Type: types.VolumeTypeGp3, + }, + wantErr: false, + }, + { + name: "invalid size", + desired: &Volume{ + Size: ptr.To(int32(100000)), + Type: types.VolumeTypeGp3, + }, + wantErr: true, + }, + { + name: "invalid IOPS", + desired: &Volume{ + IOPS: ptr.To(int32(20000)), + Type: types.VolumeTypeGp3, + }, + wantErr: true, + }, + { + name: "invalid throughput", + desired: &Volume{ + Throughput: ptr.To(int32(2000)), + Type: types.VolumeTypeGp3, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := validateVolume(tt.desired); (err != nil) != tt.wantErr { + t.Errorf("validateVolume() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/pkg/volumes/cloud/aws/fake.go 
b/pkg/volumes/cloud/aws/fake.go new file mode 100644 index 00000000000..0b7a8510644 --- /dev/null +++ b/pkg/volumes/cloud/aws/fake.go @@ -0,0 +1,119 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go" + + "github.com/pingcap/tidb-operator/pkg/volumes/cloud" +) + +func NewFakeEBSModifier(f GetVolumeStateFunc) cloud.VolumeModifier { + return &EBSModifier{ + cli: NewFakeEC2VolumeAPI(f), + } +} + +type GetVolumeStateFunc func(id string) types.VolumeModificationState + +type FakeEC2VolumeAPI struct { + vs []Volume + f GetVolumeStateFunc +} + +func NewFakeEC2VolumeAPI(f GetVolumeStateFunc) *FakeEC2VolumeAPI { + m := &FakeEC2VolumeAPI{ + f: f, + } + + return m +} + +func (m *FakeEC2VolumeAPI) ModifyVolume( + _ context.Context, param *ec2.ModifyVolumeInput, + _ ...func(*ec2.Options)) (*ec2.ModifyVolumeOutput, error) { + for i := range m.vs { + v := &m.vs[i] + if v.VolumeID == *param.VolumeId { + state := m.f(v.VolumeID) + switch state { + // NOTE(liubo02): I'm not sure the behavior to recall the aws api when the last modification + // is in some states + case types.VolumeModificationStateCompleted, types.VolumeModificationStateFailed: + m.vs[i] = Volume{ + VolumeID: *param.VolumeId, + Size: param.Size, + IOPS: param.Iops, + Throughput: param.Throughput, + Type: 
param.VolumeType, + } + + return &ec2.ModifyVolumeOutput{}, nil + } + + return nil, fmt.Errorf("volume %s has been modified or modification is not finished", v.VolumeID) + } + } + + v := Volume{ + VolumeID: *param.VolumeId, + Size: param.Size, + IOPS: param.Iops, + Throughput: param.Throughput, + Type: param.VolumeType, + } + + m.vs = append(m.vs, v) + + return &ec2.ModifyVolumeOutput{}, nil +} + +func (m *FakeEC2VolumeAPI) DescribeVolumesModifications( + _ context.Context, param *ec2.DescribeVolumesModificationsInput, + _ ...func(*ec2.Options)) (*ec2.DescribeVolumesModificationsOutput, error) { + var mods []types.VolumeModification + for _, id := range param.VolumeIds { + for i := range m.vs { + v := m.vs[i] + if v.VolumeID != id { + continue + } + + mods = append(mods, types.VolumeModification{ + VolumeId: &v.VolumeID, + TargetIops: v.IOPS, + TargetSize: v.Size, + TargetThroughput: v.Throughput, + TargetVolumeType: v.Type, + ModificationState: m.f(id), + }) + } + } + + if len(mods) == 0 { + return nil, &smithy.GenericAPIError{ + Code: errCodeNotFound, + } + } + + return &ec2.DescribeVolumesModificationsOutput{ + VolumesModifications: mods, + }, nil +} diff --git a/pkg/volumes/cloud/interface.go b/pkg/volumes/cloud/interface.go new file mode 100644 index 00000000000..fa0c3c8ccf9 --- /dev/null +++ b/pkg/volumes/cloud/interface.go @@ -0,0 +1,62 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloud + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" +) + +type VolumeModifier interface { + // Name returns the name of the volume modifier. + Name() string + + // Modify modifies the underlay volume of pvc to match the args of storageclass. + // If no PV permission (e.g `-cluster-permission-pv=false`), the `pv` may be nil and will return `false, nil`. + Modify(ctx context.Context, pvc *corev1.PersistentVolumeClaim, pv *corev1.PersistentVolume, sc *storagev1.StorageClass) (bool, error) + + MinWaitDuration() time.Duration + + Validate(spvc, dpvc *corev1.PersistentVolumeClaim, ssc, dsc *storagev1.StorageClass) error +} + +// FakeVolumeModifier is a fake implementation of the VolumeModifier interface for unit testing. +type FakeVolumeModifier struct { + name string + modifyResult bool + modifyError error + minWait time.Duration + validateError error +} + +func (f *FakeVolumeModifier) Name() string { + return f.name +} + +func (f *FakeVolumeModifier) Modify(_ context.Context, _ *corev1.PersistentVolumeClaim, + _ *corev1.PersistentVolume, _ *storagev1.StorageClass) (bool, error) { + return f.modifyResult, f.modifyError +} + +func (f *FakeVolumeModifier) MinWaitDuration() time.Duration { + return f.minWait +} + +func (f *FakeVolumeModifier) Validate(_, _ *corev1.PersistentVolumeClaim, _, _ *storagev1.StorageClass) error { + return f.validateError +} diff --git a/pkg/volumes/mock.go b/pkg/volumes/mock.go new file mode 100644 index 00000000000..5bf488279b7 --- /dev/null +++ b/pkg/volumes/mock.go @@ -0,0 +1,85 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/pingcap/tidb-operator/pkg/volumes (interfaces: Modifier) +// +// Generated by this command: +// +// mockgen -destination mock.go -package=volumes github.com/pingcap/tidb-operator/pkg/volumes Modifier +// + +// Package volumes is a generated GoMock package. 
+package volumes + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + v1 "k8s.io/api/core/v1" +) + +// MockModifier is a mock of Modifier interface. +type MockModifier struct { + ctrl *gomock.Controller + recorder *MockModifierMockRecorder + isgomock struct{} +} + +// MockModifierMockRecorder is the mock recorder for MockModifier. +type MockModifierMockRecorder struct { + mock *MockModifier +} + +// NewMockModifier creates a new mock instance. +func NewMockModifier(ctrl *gomock.Controller) *MockModifier { + mock := &MockModifier{ctrl: ctrl} + mock.recorder = &MockModifierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModifier) EXPECT() *MockModifierMockRecorder { + return m.recorder +} + +// GetActualVolume mocks base method. +func (m *MockModifier) GetActualVolume(ctx context.Context, expect, current *v1.PersistentVolumeClaim) (*ActualVolume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActualVolume", ctx, expect, current) + ret0, _ := ret[0].(*ActualVolume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActualVolume indicates an expected call of GetActualVolume. +func (mr *MockModifierMockRecorder) GetActualVolume(ctx, expect, current any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActualVolume", reflect.TypeOf((*MockModifier)(nil).GetActualVolume), ctx, expect, current) +} + +// Modify mocks base method. +func (m *MockModifier) Modify(ctx context.Context, vol *ActualVolume) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Modify", ctx, vol) + ret0, _ := ret[0].(error) + return ret0 +} + +// Modify indicates an expected call of Modify. 
+func (mr *MockModifierMockRecorder) Modify(ctx, vol any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Modify", reflect.TypeOf((*MockModifier)(nil).Modify), ctx, vol) +} + +// ShouldModify mocks base method. +func (m *MockModifier) ShouldModify(ctx context.Context, actual *ActualVolume) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldModify", ctx, actual) + ret0, _ := ret[0].(bool) + return ret0 +} + +// ShouldModify indicates an expected call of ShouldModify. +func (mr *MockModifierMockRecorder) ShouldModify(ctx, actual any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldModify", reflect.TypeOf((*MockModifier)(nil).ShouldModify), ctx, actual) +} diff --git a/pkg/volumes/native_modifer.go b/pkg/volumes/native_modifer.go new file mode 100644 index 00000000000..f40341ba379 --- /dev/null +++ b/pkg/volumes/native_modifer.go @@ -0,0 +1,157 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package volumes + +import ( + "context" + "fmt" + + storagev1 "k8s.io/api/storage/v1" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + + "github.com/pingcap/tidb-operator/pkg/client" +) + +var _ Modifier = &nativeModifier{} + +// nativeModifier modifies volumes via K8s native API - VolumeAttributesClass. 
+type nativeModifier struct { + k8sClient client.Client + logger logr.Logger +} + +func NewNativeModifier(k8sClient client.Client, logger logr.Logger) Modifier { + return &nativeModifier{ + k8sClient: k8sClient, + logger: logger, + } +} + +func (m *nativeModifier) GetActualVolume(ctx context.Context, expect, current *corev1.PersistentVolumeClaim) (*ActualVolume, error) { + pv, err := getBoundPVFromPVC(ctx, m.k8sClient, current) + if err != nil { + return nil, fmt.Errorf("failed to get bound PV from PVC %s/%s: %w", current.Namespace, current.Name, err) + } + + desired := &DesiredVolume{ + Size: getStorageSize(expect.Spec.Resources.Requests), + VACName: expect.Spec.VolumeAttributesClassName, + } + if desired.VACName != nil { + var vac storagev1beta1.VolumeAttributesClass + if err = m.k8sClient.Get(ctx, client.ObjectKey{Name: *desired.VACName}, &vac); err != nil { + return nil, fmt.Errorf("failed to get desired VolumeAttributesClass %s: %w", *desired.VACName, err) + } + desired.VAC = &vac + } + + actual := ActualVolume{ + Desired: desired, + PVC: current, + PV: pv, + VACName: current.Status.CurrentVolumeAttributesClassName, + } + if actual.VACName != nil { + var vac storagev1beta1.VolumeAttributesClass + if err = m.k8sClient.Get(ctx, client.ObjectKey{Name: *actual.VACName}, &vac); err != nil { + return nil, fmt.Errorf("failed to get current VolumeAttributesClass %s: %w", *actual.VACName, err) + } + actual.VAC = &vac + } + if current.Spec.StorageClassName != nil { + var sc storagev1.StorageClass + if err = m.k8sClient.Get(ctx, client.ObjectKey{Name: *current.Spec.StorageClassName}, &sc); err != nil { + return nil, fmt.Errorf("failed to get current StorageClass %s: %w", *current.Spec.StorageClassName, err) + } + actual.StorageClass = &sc + } + return &actual, nil +} + +func (m *nativeModifier) ShouldModify(_ context.Context, actual *ActualVolume) (modify bool) { + if actual.PVC.Status.ModifyVolumeStatus != nil { + m.logger.Info("there is a ModifyVolume operation being 
attempted", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "target", actual.PVC.Status.ModifyVolumeStatus.TargetVolumeAttributesClassName, + "status", actual.PVC.Status.ModifyVolumeStatus.Status) + return false + } + + if isStorageClassChanged(actual.GetStorageClassName(), actual.Desired.GetStorageClassName()) { + m.logger.Info("cannot change storage class", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "current", actual.GetStorageClassName(), "desired", actual.Desired.GetStorageClassName()) + return false + } + + vacChanged := isVolumeAttributesClassChanged(actual) + if vacChanged && actual.Desired.VAC != nil && actual.StorageClass != nil && + actual.Desired.VAC.DriverName != actual.StorageClass.Provisioner { + m.logger.Info("the driver name in VAC should be the same as the provisioner in StorageClass", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "in VAC", actual.Desired.VAC.DriverName, "in StorageClass", actual.StorageClass.Provisioner) + return false + } + + desiredSize := actual.Desired.GetStorageSize() + actualSize := actual.GetStorageSize() + sizeChanged := false + result := desiredSize.Cmp(actualSize) + if result < 0 { + m.logger.Info("can't shrink volume size", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "current", &actualSize, "desired", &desiredSize) + return false + } else if result > 0 { + supported, err := isVolumeExpansionSupported(actual.StorageClass) + if err != nil { + m.logger.Error(err, "volume expansion of storage class may not be supported, but it will be tried", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "storageclass", actual.GetStorageClassName()) + } + if !supported { + m.logger.Info("volume expansion is not supported by storage class", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "storageclass", actual.GetStorageClassName()) + return false + } + sizeChanged = true + } + + defer func() { + // Log PVC conditions if not modified for
debugging + if !modify { + m.logger.Info("PVC not modified", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "size changed", sizeChanged, "vac changed", vacChanged) + for _, cond := range actual.PVC.Status.Conditions { + m.logger.Info("PVC condition", + "namespace", actual.PVC.Namespace, "name", actual.PVC.Name, + "type", cond.Type, "status", cond.Status, "reason", cond.Reason, "message", cond.Message) + } + } + }() + return sizeChanged || vacChanged +} + +func (m *nativeModifier) Modify(ctx context.Context, vol *ActualVolume) error { + desiredPVC := vol.PVC.DeepCopy() + desiredPVC.Spec.Resources.Requests[corev1.ResourceStorage] = vol.Desired.GetStorageSize() + desiredPVC.Spec.VolumeAttributesClassName = vol.Desired.VACName + return m.k8sClient.Update(ctx, desiredPVC) +} diff --git a/pkg/volumes/native_modifer_test.go b/pkg/volumes/native_modifer_test.go new file mode 100644 index 00000000000..3d8a4df6c43 --- /dev/null +++ b/pkg/volumes/native_modifer_test.go @@ -0,0 +1,270 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package volumes + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +func Test_nativeModifier_GetActualVolume(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + current *corev1.PersistentVolumeClaim + expect *corev1.PersistentVolumeClaim + expectFunc func(*WithT, *ActualVolume) + wantErr bool + }{ + { + name: "no changes", + existingObjs: []client.Object{ + fake.FakeObj[corev1.PersistentVolume]("pv-0"), + fake.FakeObj[storagev1.StorageClass]("sc-0"), + }, + current: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", + withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi"), + withPVCStatus("10Gi", nil)), + expect: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", + withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi")), + expectFunc: func(g *WithT, volume *ActualVolume) { + g.Expect(volume).ShouldNot(BeNil()) + g.Expect(volume.Desired).ShouldNot(BeNil()) + g.Expect(volume.Desired.Size).Should(Equal(resource.MustParse("10Gi"))) + g.Expect(volume.Desired.StorageClassName).Should(BeNil()) + g.Expect(volume.Desired.StorageClass).Should(BeNil()) + + g.Expect(volume.PVC).ShouldNot(BeNil()) + g.Expect(volume.PV).ShouldNot(BeNil()) + g.Expect(volume.StorageClass).ShouldNot(BeNil()) + }, + }, + { + name: "set the VolumeAttributesClass", + existingObjs: []client.Object{ + fake.FakeObj[corev1.PersistentVolume]("pv-0"), + fake.FakeObj[storagev1.StorageClass]("sc-0"), + fake.FakeObj[storagev1beta1.VolumeAttributesClass]("vac-0"), + }, + current: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", + withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi"), + withPVCStatus("10Gi", nil)), + expect: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", + withPVCSpec(ptr.To("sc-0"), ptr.To("vac-0"), "pv-0", "10Gi")), + expectFunc: 
func(g *WithT, volume *ActualVolume) { + g.Expect(volume).ShouldNot(BeNil()) + g.Expect(volume.VAC).Should(BeNil()) + g.Expect(volume.Desired).ShouldNot(BeNil()) + g.Expect(volume.Desired.VACName).Should(Equal(ptr.To("vac-0"))) + g.Expect(volume.Desired.VAC).ShouldNot(BeNil()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cli := client.NewFakeClient(tt.existingObjs...) + m := &nativeModifier{ + k8sClient: cli, + logger: logr.Discard(), + } + got, err := m.GetActualVolume(context.TODO(), tt.expect, tt.current) + if (err != nil) != tt.wantErr { + t.Errorf("GetActualVolume() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.expectFunc != nil { + tt.expectFunc(NewGomegaWithT(t), got) + } + }) + } +} + +func Test_nativeModifier_ShouldModify(t *testing.T) { + tests := []struct { + name string + actual *ActualVolume + want bool + }{ + { + name: "pvc is being modified", + actual: &ActualVolume{ + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", + func(pvc *corev1.PersistentVolumeClaim) *corev1.PersistentVolumeClaim { + pvc.Status.ModifyVolumeStatus = &corev1.ModifyVolumeStatus{ + TargetVolumeAttributesClassName: "vac-0", + Status: corev1.PersistentVolumeClaimModifyVolumeInProgress, + } + return pvc + }), + }, + }, + { + name: "try to shrink size", + actual: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("5Gi"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCStatus("10Gi", nil)), + }, + }, + { + name: "try to expand size, but it's not supported", + actual: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("50Gi"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCStatus("10Gi", nil)), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-0", func(sc *storagev1.StorageClass) *storagev1.StorageClass { + sc.AllowVolumeExpansion = ptr.To(false) + return sc + }), + }, + }, + { + name: "try to expand size, and it's supported", + 
actual: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("50Gi"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCStatus("10Gi", nil)), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-0", func(sc *storagev1.StorageClass) *storagev1.StorageClass { + sc.AllowVolumeExpansion = ptr.To(true) + return sc + }), + }, + want: true, + }, + { + name: "try to change sc", + actual: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("50Gi"), + StorageClassName: ptr.To("sc-1"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCStatus("10Gi", nil)), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-0"), + }, + }, + { + name: "modify attributes", + actual: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("10Gi"), + StorageClassName: ptr.To("sc-0"), + VACName: ptr.To("vac-1"), + VAC: fake.FakeObj[storagev1beta1.VolumeAttributesClass]("vac-1"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCSpec(ptr.To("sc-0"), ptr.To("vac-0"), "pv-0", "10Gi"), withPVCStatus("10Gi", nil)), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-0"), + }, + want: true, + }, + { + name: "try to modify attributes with wrong vac", + actual: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("10Gi"), + StorageClassName: ptr.To("sc-0"), + VACName: ptr.To("vac-1"), + VAC: fake.FakeObj[storagev1beta1.VolumeAttributesClass]("vac-1", func(vac *storagev1beta1.VolumeAttributesClass) *storagev1beta1.VolumeAttributesClass { + vac.DriverName = "wrong" + return vac + }), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCSpec(ptr.To("sc-0"), ptr.To("vac-0"), "pv-0", "10Gi"), withPVCStatus("10Gi", nil)), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-0", func(sc *storagev1.StorageClass) *storagev1.StorageClass { + sc.Provisioner = "right" + return sc + }), + }, + }, + { + name: "nothing is changed", + actual: &ActualVolume{ + 
Desired: &DesiredVolume{ + Size: resource.MustParse("10Gi"), + StorageClassName: ptr.To("sc-0"), + VACName: ptr.To("vac-0"), + VAC: fake.FakeObj[storagev1beta1.VolumeAttributesClass]("vac-0"), + }, + VACName: ptr.To("vac-0"), + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", + withPVCSpec(ptr.To("sc-0"), ptr.To("vac-0"), "pv-0", "10Gi"), + withPVCStatus("10Gi", ptr.To("vac-0"))), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-0"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &nativeModifier{logger: logr.Discard()} + if got := m.ShouldModify(context.TODO(), tt.actual); got != tt.want { + t.Errorf("ShouldModify() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_nativeModifier_Modify(t *testing.T) { + tests := []struct { + name string + prePVC *corev1.PersistentVolumeClaim + vol *ActualVolume + wantErr bool + }{ + { + name: "happy path", + prePVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc", + withPVCSpec(ptr.To("sc-0"), ptr.To("vac-0"), "pv-0", "10Gi"), + withPVCStatus("10Gi", ptr.To("vac-0"))), + vol: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("50Gi"), + VACName: ptr.To("vac-1"), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cli := client.NewFakeClient(tt.prePVC) + m := &nativeModifier{ + k8sClient: cli, + logger: logr.Discard(), + } + tt.vol.PVC = tt.prePVC + if err := m.Modify(context.TODO(), tt.vol); (err != nil) != tt.wantErr { + t.Errorf("Modify() error = %v, wantErr %v", err, tt.wantErr) + } + + var pvc corev1.PersistentVolumeClaim + g := NewGomegaWithT(t) + g.Expect(cli.Get(context.TODO(), client.ObjectKey{Namespace: tt.vol.PVC.Namespace, Name: tt.vol.PVC.Name}, &pvc)).To(Succeed()) + g.Expect(pvc.Spec.Resources.Requests[corev1.ResourceStorage]).Should(Equal(tt.vol.Desired.Size)) + g.Expect(pvc.Spec.VolumeAttributesClassName).Should(Equal(tt.vol.Desired.VACName)) + }) + } +} diff --git a/pkg/volumes/raw_modifier.go 
b/pkg/volumes/raw_modifier.go new file mode 100644 index 00000000000..070c854119b --- /dev/null +++ b/pkg/volumes/raw_modifier.go @@ -0,0 +1,274 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package volumes + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + + "github.com/pingcap/tidb-operator/pkg/client" + timeutils "github.com/pingcap/tidb-operator/pkg/utils/time" + "github.com/pingcap/tidb-operator/pkg/volumes/cloud" +) + +var _ Modifier = &rawModifier{} + +// rawModifier modifies volumes by calling cloud provider API. 
+type rawModifier struct { + k8sClient client.Client + logger logr.Logger + volumeModifier cloud.VolumeModifier + clock timeutils.Clock +} + +func NewRawModifier(modifier cloud.VolumeModifier, k8sClient client.Client, logger logr.Logger) Modifier { + return &rawModifier{ + k8sClient: k8sClient, + logger: logger, + clock: &timeutils.RealClock{}, + volumeModifier: modifier, + } +} + +func (m *rawModifier) GetActualVolume(ctx context.Context, expect, current *corev1.PersistentVolumeClaim) (*ActualVolume, error) { + pv, err := getBoundPVFromPVC(ctx, m.k8sClient, current) + if err != nil { + return nil, fmt.Errorf("failed to get bound PV from PVC %s/%s: %w", current.Namespace, current.Name, err) + } + + curSC, err := getStorageClassFromPVC(ctx, m.k8sClient, current) + if err != nil { + return nil, fmt.Errorf("failed to get StorageClass from PVC %s/%s: %w", current.Namespace, current.Name, err) + } + + desired := &DesiredVolume{ + Size: getStorageSize(expect.Spec.Resources.Requests), + StorageClassName: expect.Spec.StorageClassName, + } + if desired.StorageClassName != nil { + var sc storagev1.StorageClass + if err = m.k8sClient.Get(ctx, client.ObjectKey{Name: *desired.StorageClassName}, &sc); err != nil { + return nil, fmt.Errorf("failed to get StorageClass %s: %w", *desired.StorageClassName, err) + } + desired.StorageClass = &sc + } + + actual := ActualVolume{ + Desired: desired, + PVC: current, + PV: pv, + StorageClass: curSC, + } + return &actual, nil +} + +func (m *rawModifier) ShouldModify(_ context.Context, actual *ActualVolume) bool { + actual.Phase = m.getVolumePhase(actual) + return actual.Phase == VolumePhasePreparing || actual.Phase == VolumePhaseModifying +} + +func (m *rawModifier) Modify(ctx context.Context, vol *ActualVolume) error { + m.logger.Info("try to sync volume", "namespace", vol.PVC.Namespace, "name", vol.PVC.Name, "phase", vol.Phase) + + switch vol.Phase { + case VolumePhasePreparing: + if err := m.modifyPVCAnnoSpec(ctx, vol); err != nil { + 
return err + } + + fallthrough + case VolumePhaseModifying: + pvc := vol.PVC.DeepCopy() + pvc.Spec.Resources.Requests[corev1.ResourceStorage] = vol.Desired.Size + wait, err := m.volumeModifier.Modify(ctx, pvc, vol.PV, vol.Desired.StorageClass) + if err != nil { + return err + } + if wait { + return fmt.Errorf("wait for volume %s/%s modification completed", vol.PVC.Namespace, vol.PVC.Name) + } + + // try to resize fs + synced, err := m.syncPVCSize(ctx, vol) + if err != nil { + return err + } + if !synced { + return fmt.Errorf("wait for fs resize completed") + } + if err := m.modifyPVCAnnoStatus(ctx, vol); err != nil { + return err + } + default: + return fmt.Errorf("volume %s/%s is in phase %s, cannot modify", vol.PVC.Namespace, vol.PVC.Name, vol.Phase) + } + return nil +} + +func (m *rawModifier) modifyPVCAnnoStatus(ctx context.Context, vol *ActualVolume) error { + pvc := vol.PVC.DeepCopy() + + if pvc.Annotations == nil { + pvc.Annotations = map[string]string{} + } + + pvc.Annotations[annoKeyPVCStatusRevision] = pvc.Annotations[annoKeyPVCSpecRevision] + if scName := pvc.Annotations[annoKeyPVCSpecStorageClass]; scName != "" { + pvc.Annotations[annoKeyPVCStatusStorageClass] = scName + } + pvc.Annotations[annoKeyPVCStatusStorageSize] = pvc.Annotations[annoKeyPVCSpecStorageSize] + + err := m.k8sClient.Update(ctx, pvc) + if err != nil { + return err + } + + vol.PVC = pvc + return nil +} + +func (m *rawModifier) getVolumePhase(vol *ActualVolume) VolumePhase { + if err := m.validate(vol); err != nil { + m.logger.Info("volume modification is not allowed", "namespace", vol.PVC.Namespace, "name", vol.PVC.Name, "error", err) + return VolumePhaseCannotModify + } + if isPVCRevisionChanged(vol.PVC) { + return VolumePhaseModifying + } + + if !needModify(vol.PVC, vol.Desired) { + return VolumePhaseModified + } + + if m.waitForNextTime(vol.PVC) { + return VolumePhasePending + } + + return VolumePhasePreparing +} + +func (m *rawModifier) validate(vol *ActualVolume) error { + if 
vol.Desired == nil { + return fmt.Errorf("can't match desired volume") + } + desired := vol.Desired.GetStorageSize() + actual := vol.GetStorageSize() + result := desired.Cmp(actual) + if result < 0 { + return fmt.Errorf("can't shrunk size from %s to %s", &actual, &desired) + } + if result > 0 { + supported, err := isVolumeExpansionSupported(vol.StorageClass) + if err != nil { + m.logger.Info("volume expansion may be not supported, but it will be tried", + "namespace", vol.PVC.Namespace, "name", vol.PVC.Name, + "storageclass", vol.GetStorageClassName(), "error", err) + } + if !supported { + return fmt.Errorf("volume expansion is not supported by storageclass %s", vol.StorageClass.Name) + } + } + + // if no pv permission but have sc permission: cannot change sc + if isStorageClassChanged(vol.GetStorageClassName(), vol.Desired.GetStorageClassName()) && vol.PV == nil { + return fmt.Errorf("cannot change storage class (%s to %s), because there is no permission to get persistent volume", + vol.GetStorageClassName(), vol.Desired.GetStorageClassName()) + } + + desiredPVC := vol.PVC.DeepCopy() + desiredPVC.Spec.Resources.Requests[corev1.ResourceStorage] = desired + + return m.volumeModifier.Validate(vol.PVC, desiredPVC, vol.StorageClass, vol.Desired.StorageClass) +} + +func (m *rawModifier) waitForNextTime(pvc *corev1.PersistentVolumeClaim) bool { + str, ok := pvc.Annotations[annoKeyPVCLastTransitionTimestamp] + if !ok { + return false + } + timestamp, err := time.Parse(time.RFC3339, str) + if err != nil { + return false + } + + waitDur := defaultModifyWaitingDuration + if m != nil { + waitDur = m.volumeModifier.MinWaitDuration() + } + + if d := m.clock.Since(timestamp); d < waitDur { + m.logger.Info("volume modification is pending, should wait", + "namespace", pvc.Namespace, "name", pvc.Name, "duration", waitDur-d) + return true + } + + return false +} + +func getStorageClassFromPVC(ctx context.Context, cli client.Client, pvc *corev1.PersistentVolumeClaim) 
(*storagev1.StorageClass, error) { + scName := getStorageClassNameFromPVC(pvc) + if scName == "" { + return nil, fmt.Errorf("StorageClass of pvc %s is not set", pvc.Name) + } + var sc storagev1.StorageClass + if err := cli.Get(ctx, client.ObjectKey{Name: scName}, &sc); err != nil { + return nil, fmt.Errorf("failed to get StorageClass %s: %w", scName, err) + } + return &sc, nil +} + +func (m *rawModifier) syncPVCSize(ctx context.Context, vol *ActualVolume) (bool, error) { + capacity := vol.PVC.Status.Capacity.Storage() + requestSize := vol.PVC.Spec.Resources.Requests.Storage() + if requestSize.Cmp(vol.Desired.Size) == 0 && capacity.Cmp(vol.Desired.Size) == 0 { + return true, nil + } + + if requestSize.Cmp(vol.Desired.Size) == 0 { + return false, nil + } + + pvc := vol.PVC.DeepCopy() + pvc.Spec.Resources.Requests[corev1.ResourceStorage] = vol.Desired.Size + err := m.k8sClient.Update(ctx, pvc) + if err != nil { + return false, err + } + + vol.PVC = pvc + return false, nil +} + +func (m *rawModifier) modifyPVCAnnoSpec(ctx context.Context, vol *ActualVolume) error { + pvc := vol.PVC.DeepCopy() + size := vol.Desired.Size + scName := vol.Desired.GetStorageClassName() + + if isChanged := snapshotStorageClassAndSize(pvc, scName, size); isChanged { + upgradeRevision(pvc) + } + + setLastTransitionTimestamp(pvc) + if err := m.k8sClient.Update(ctx, pvc); err != nil { + return fmt.Errorf("failed to update PVC %s/%s: %w", pvc.Namespace, pvc.Name, err) + } + vol.PVC = pvc + return nil +} diff --git a/pkg/volumes/raw_modifier_test.go b/pkg/volumes/raw_modifier_test.go new file mode 100644 index 00000000000..116926ab74a --- /dev/null +++ b/pkg/volumes/raw_modifier_test.go @@ -0,0 +1,240 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package volumes + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/go-logr/logr" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/time" + "github.com/pingcap/tidb-operator/pkg/volumes/cloud" + "github.com/pingcap/tidb-operator/pkg/volumes/cloud/aws" +) + +func withPVCStatus(size string, curVACName *string) fake.ChangeFunc[corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaim] { + return func(pvc *corev1.PersistentVolumeClaim) *corev1.PersistentVolumeClaim { + pvc.Status.Phase = corev1.ClaimBound + pvc.Status.Capacity = corev1.ResourceList{} + pvc.Status.Capacity[corev1.ResourceStorage] = resource.MustParse(size) + pvc.Status.CurrentVolumeAttributesClassName = curVACName + return pvc + } +} + +func withPVCSpec(scName, vacName *string, vol, size string) fake.ChangeFunc[corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaim] { + return func(pvc *corev1.PersistentVolumeClaim) *corev1.PersistentVolumeClaim { + pvc.Spec.StorageClassName = scName + pvc.Spec.VolumeAttributesClassName = vacName + pvc.Spec.VolumeName = vol + pvc.Spec.Resources.Requests = corev1.ResourceList{} + pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse(size) + return pvc + } +} + +func withParameters(params map[string]string) 
fake.ChangeFunc[storagev1.StorageClass, *storagev1.StorageClass] { + return func(sc *storagev1.StorageClass) *storagev1.StorageClass { + sc.Parameters = params + return sc + } +} + +func getObjectsFromActualVolume(vol *ActualVolume) []client.Object { + var objs []client.Object + if vol != nil { + if vol.Desired != nil && vol.Desired.StorageClass != nil { + objs = append(objs, vol.Desired.StorageClass) + } + if vol.StorageClass != nil { + objs = append(objs, vol.StorageClass) + } + if vol.PVC != nil { + objs = append(objs, vol.PVC) + } + if vol.PV != nil { + objs = append(objs, vol.PV) + } + } + return objs +} + +func Test_rawModifier_GetActualVolume(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + desired *corev1.PersistentVolumeClaim + current *corev1.PersistentVolumeClaim + getState aws.GetVolumeStateFunc + expect func(*WithT, *ActualVolume) + wantErr bool + }{ + { + name: "happy path: no modification", + existingObjs: []client.Object{ + fake.FakeObj[corev1.PersistentVolume]("pv-0"), + fake.FakeObj[storagev1.StorageClass]("sc-0"), + }, + desired: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi")), + current: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCStatus("10Gi", nil), withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi")), + getState: func(_ string) types.VolumeModificationState { + return types.VolumeModificationStateFailed + }, + expect: func(g *WithT, volume *ActualVolume) { + g.Expect(volume).ShouldNot(BeNil()) + g.Expect(volume.Desired).ShouldNot(BeNil()) + g.Expect(volume.Desired.Size).Should(Equal(resource.MustParse("10Gi"))) + g.Expect(volume.Desired.StorageClassName).Should(Equal(ptr.To("sc-0"))) + g.Expect(volume.Desired.StorageClass).ShouldNot(BeNil()) + + g.Expect(volume.PVC).ShouldNot(BeNil()) + g.Expect(volume.PV).ShouldNot(BeNil()) + g.Expect(volume.StorageClass).ShouldNot(BeNil()) + g.Expect(volume.Phase).Should(Equal(VolumePhaseUnknown)) + }, + 
wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cli := client.NewFakeClient(tt.existingObjs...) + m := NewRawModifier(aws.NewFakeEBSModifier(tt.getState), cli, logr.Discard()) + got, err := m.GetActualVolume(context.TODO(), tt.desired, tt.current) + if (err != nil) != tt.wantErr { + t.Errorf("GetActualVolume() error = %v, wantErr %v", err, tt.wantErr) + return + } + g := NewGomegaWithT(t) + if tt.expect != nil { + tt.expect(g, got) + } + }) + } +} + +func Test_rawModifier_getVolumePhase(t *testing.T) { + tests := []struct { + name string + volumeModifier cloud.VolumeModifier + clock time.Clock + vol *ActualVolume + want VolumePhase + }{ + { + name: "no need to modify", + vol: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("10Gi"), + StorageClassName: ptr.To("sc-0"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi"), withPVCStatus("10Gi", nil)), + }, + volumeModifier: &cloud.FakeVolumeModifier{}, + want: VolumePhaseModified, + }, + { + name: "change storage class", + vol: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("10Gi"), + StorageClassName: ptr.To("sc-1"), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-1", withParameters(map[string]string{"iops": "100"})), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi"), withPVCStatus("10Gi", nil)), + StorageClass: fake.FakeObj[storagev1.StorageClass]("sc-0"), + PV: fake.FakeObj[corev1.PersistentVolume]("pv-0"), + }, + volumeModifier: &cloud.FakeVolumeModifier{}, + want: VolumePhasePreparing, + }, + { + name: "increase size", + vol: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("100Gi"), + StorageClassName: ptr.To("sc-0"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", withPVCSpec(ptr.To("sc-0"), nil, "pv-0", "10Gi"), withPVCStatus("10Gi", nil)), + }, + 
volumeModifier: &cloud.FakeVolumeModifier{}, + want: VolumePhasePreparing, + }, + { + name: "decrease size", + vol: &ActualVolume{ + Desired: &DesiredVolume{ + Size: resource.MustParse("1Gi"), + StorageClassName: ptr.To("sc-0"), + }, + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-1", withPVCSpec(ptr.To("sc-0"), nil, "pv-1", "20Gi"), withPVCStatus("20Gi", nil)), + }, + volumeModifier: &cloud.FakeVolumeModifier{}, + want: VolumePhaseCannotModify, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &rawModifier{ + k8sClient: client.NewFakeClient(getObjectsFromActualVolume(tt.vol)...), + logger: logr.Logger{}, + volumeModifier: tt.volumeModifier, + clock: tt.clock, + } + if got := m.getVolumePhase(tt.vol); got != tt.want { + t.Errorf("getVolumePhase() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_rawModifier_Modify(t *testing.T) { + tests := []struct { + name string + vol *ActualVolume + getState aws.GetVolumeStateFunc + wantErr bool + }{ + { + name: "can not modify", + vol: &ActualVolume{ + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0"), + Phase: VolumePhaseModified, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &rawModifier{ + k8sClient: client.NewFakeClient(getObjectsFromActualVolume(tt.vol)...), + logger: logr.Discard(), + volumeModifier: aws.NewFakeEBSModifier(tt.getState), + clock: &time.RealClock{}, + } + if err := m.Modify(context.TODO(), tt.vol); (err != nil) != tt.wantErr { + t.Errorf("Modify() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/pkg/volumes/types.go b/pkg/volumes/types.go new file mode 100644 index 00000000000..c016b8c2cd9 --- /dev/null +++ b/pkg/volumes/types.go @@ -0,0 +1,135 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ${GOBIN}/mockgen -destination mock.go -package=volumes ${GO_MODULE}/pkg/volumes Modifier +package volumes + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type Modifier interface { + GetActualVolume(ctx context.Context, expect, current *corev1.PersistentVolumeClaim) (*ActualVolume, error) + + ShouldModify(ctx context.Context, actual *ActualVolume) bool + + Modify(ctx context.Context, vol *ActualVolume) error +} + +type DesiredVolume struct { + Size resource.Quantity + // it may be nil if there is no permission to get storage class + StorageClass *storagev1.StorageClass + // it is sc name specified by user + // the sc may not exist + StorageClassName *string + // VACName is the name of VolumeAttributesClass specified by user. + // The VAC may not exist. + VACName *string + VAC *storagev1beta1.VolumeAttributesClass +} + +// GetStorageClassName may return empty because SC is unset or no permission to verify the existence of SC. 
+func (v *DesiredVolume) GetStorageClassName() string { + if v.StorageClassName == nil { + return "" + } + return *v.StorageClassName +} + +func (v *DesiredVolume) GetStorageSize() resource.Quantity { + return v.Size +} + +func (v *DesiredVolume) String() string { + return fmt.Sprintf("[Size: %v, StorageClass: %s, VAC: %v]", v.Size, v.GetStorageClassName(), v.VACName) +} + +type ActualVolume struct { + Desired *DesiredVolume + PVC *corev1.PersistentVolumeClaim + Phase VolumePhase + // PV may be nil if there is no permission to get pvc + PV *corev1.PersistentVolume + // StorageClass may be nil if there is no permission to get storage class + StorageClass *storagev1.StorageClass + + VACName *string + VAC *storagev1beta1.VolumeAttributesClass +} + +func (v *ActualVolume) GetStorageClassName() string { + return getStorageClassNameFromPVC(v.PVC) +} + +func (v *ActualVolume) GetStorageSize() resource.Quantity { + return getStorageSize(v.PVC.Status.Capacity) +} + +func (v *ActualVolume) String() string { + return fmt.Sprintf("[PVC: %s/%s, Phase: %s, StorageClass: %s, Size: %v, VAC: %v; desired: %s]", + v.PVC.Namespace, v.PVC.Name, v.Phase, v.GetStorageClassName(), + v.GetStorageSize(), v.VACName, v.Desired.String()) +} + +type VolumePhase int + +const ( + VolumePhaseUnknown VolumePhase = iota + // VolumePhasePending will be set when: + // 1. isPVCRevisionChanged: false + // 2. needModify: true + // 3. waitForNextTime: true + VolumePhasePending + // VolumePhasePreparing will be set when: + // 1. isPVCRevisionChanged: false + // 2. needModify: true + // 3. waitForNextTime: false + VolumePhasePreparing + // VolumePhaseModifying will be set when: + // 1. isPVCRevisionChanged: true + // 2. needModify: true/false + // 3. waitForNextTime: true/false + VolumePhaseModifying + // VolumePhaseModified will be set when: + // 1. isPVCRevisionChanged: false + // 2. needModify: false + // 3. 
waitForNextTime: true/false + VolumePhaseModified + + VolumePhaseCannotModify +) + +func (p VolumePhase) String() string { + switch p { + case VolumePhasePending: + return "Pending" + case VolumePhasePreparing: + return "Preparing" + case VolumePhaseModifying: + return "Modifying" + case VolumePhaseModified: + return "Modified" + case VolumePhaseCannotModify: + return "CannotModify" + default: + return "Unknown" + } +} diff --git a/pkg/volumes/utils.go b/pkg/volumes/utils.go new file mode 100644 index 00000000000..50077ad6fb0 --- /dev/null +++ b/pkg/volumes/utils.go @@ -0,0 +1,280 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package volumes + +import ( + "context" + "fmt" + "strconv" + "time" + + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/volumes/cloud/aws" +) + +const ( + annoKeyPVCSpecRevision = "spec.tidb.pingcap.com/revision" + annoKeyPVCSpecStorageClass = "spec.tidb.pingcap.com/storage-class" + annoKeyPVCSpecStorageSize = "spec.tidb.pingcap.com/storage-size" + + annoKeyPVCStatusRevision = "status.tidb.pingcap.com/revision" + annoKeyPVCStatusStorageClass = "status.tidb.pingcap.com/storage-class" + annoKeyPVCStatusStorageSize = "status.tidb.pingcap.com/storage-size" + + annoKeyPVCLastTransitionTimestamp = "status.tidb.pingcap.com/last-transition-timestamp" + + defaultModifyWaitingDuration = time.Minute * 1 +) + +func getBoundPVFromPVC(ctx context.Context, cli client.Client, pvc *corev1.PersistentVolumeClaim) (*corev1.PersistentVolume, error) { + if pvc.Status.Phase != corev1.ClaimBound { + return nil, fmt.Errorf("pvc %s/%s is not bound", pvc.Namespace, pvc.Name) + } + + name := pvc.Spec.VolumeName + var pv corev1.PersistentVolume + if err := cli.Get(ctx, client.ObjectKey{Name: name}, &pv); err != nil { + return nil, fmt.Errorf("failed to get PV %s: %w", name, err) + } + + return &pv, nil +} + +func getStorageSize(r corev1.ResourceList) resource.Quantity { + return r[corev1.ResourceStorage] +} + +func ignoreNil(s *string) string { + if s == nil { + return "" + } + return *s +} + +func setLastTransitionTimestamp(pvc *corev1.PersistentVolumeClaim) { + if pvc.Annotations == nil { + pvc.Annotations = map[string]string{} + } + + pvc.Annotations[annoKeyPVCLastTransitionTimestamp] = metav1.Now().Format(time.RFC3339) +} + +func upgradeRevision(pvc *corev1.PersistentVolumeClaim) { + rev := 1 + str, 
ok := pvc.Annotations[annoKeyPVCSpecRevision] + if ok { + oldRev, err := strconv.Atoi(str) + if err != nil { + klog.Warningf("revision format err: %v, reset to 0", err) + oldRev = 0 + } + rev = oldRev + 1 + } + + if pvc.Annotations == nil { + pvc.Annotations = map[string]string{} + } + + pvc.Annotations[annoKeyPVCSpecRevision] = strconv.Itoa(rev) +} + +// isPVCSpecMatched checks if the storage class or storage size of the PVC is changed. +func isPVCSpecMatched(pvc *corev1.PersistentVolumeClaim, scName string, size resource.Quantity) bool { + isChanged := false + + oldSc := ignoreNil(pvc.Spec.StorageClassName) + scAnno, ok := pvc.Annotations[annoKeyPVCSpecStorageClass] + if ok && scAnno != "" { + oldSc = scAnno + } + + if scName != "" && oldSc != scName { + isChanged = true + } + + oldSize, ok := pvc.Annotations[annoKeyPVCSpecStorageSize] + if !ok { + quantity := getStorageSize(pvc.Spec.Resources.Requests) + oldSize = quantity.String() + } + if oldSize != size.String() { + isChanged = true + } + + return isChanged +} + +func snapshotStorageClassAndSize(pvc *corev1.PersistentVolumeClaim, scName string, size resource.Quantity) bool { + isChanged := isPVCSpecMatched(pvc, scName, size) + + if pvc.Annotations == nil { + pvc.Annotations = map[string]string{} + } + + if scName != "" { + pvc.Annotations[annoKeyPVCSpecStorageClass] = scName + } + pvc.Annotations[annoKeyPVCSpecStorageSize] = size.String() + + return isChanged +} + +func getStorageClassNameFromPVC(pvc *corev1.PersistentVolumeClaim) string { + sc := ignoreNil(pvc.Spec.StorageClassName) + + scAnno, ok := pvc.Annotations[annoKeyPVCStatusStorageClass] + if ok && scAnno != "" { + sc = scAnno + } + + return sc +} + +func needModify(pvc *corev1.PersistentVolumeClaim, desired *DesiredVolume) bool { + size := desired.Size + scName := desired.GetStorageClassName() + + return isPVCStatusMatched(pvc, scName, size) +} + +func isPVCStatusMatched(pvc *corev1.PersistentVolumeClaim, scName string, size resource.Quantity) bool 
{ + oldSc := getStorageClassNameFromPVC(pvc) + isChanged := isStorageClassChanged(oldSc, scName) + + oldSize, ok := pvc.Annotations[annoKeyPVCStatusStorageSize] + if !ok { + quantity := getStorageSize(pvc.Spec.Resources.Requests) + oldSize = quantity.String() + } + if oldSize != size.String() { + isChanged = true + } + if isChanged { + klog.Infof("volume %s/%s is changed, sc (%s => %s), size (%s => %s)", pvc.Namespace, pvc.Name, oldSc, scName, oldSize, size.String()) + } + + return isChanged +} + +func isStorageClassChanged(pre, cur string) bool { + if cur != "" && pre != cur { + return true + } + return false +} + +func isVolumeAttributesClassChanged(actual *ActualVolume) bool { + return areStringsDifferent(actual.VACName, actual.Desired.VACName) +} + +func areStringsDifferent(pre, cur *string) bool { + if pre == cur { + return false + } + return pre == nil || cur == nil || *pre != *cur +} + +func isPVCRevisionChanged(pvc *corev1.PersistentVolumeClaim) bool { + specRevision, statusRevision := pvc.Annotations[annoKeyPVCSpecRevision], pvc.Annotations[annoKeyPVCStatusRevision] + return specRevision != statusRevision +} + +func isVolumeExpansionSupported(sc *storagev1.StorageClass) (bool, error) { + if sc == nil { + // always assume expansion is supported + return true, fmt.Errorf("expansion cap of volume is unknown") + } + if sc.AllowVolumeExpansion == nil { + return false, nil + } + return *sc.AllowVolumeExpansion, nil +} + +// NewModifier creates a volume modifier. +// TODO: check the feature gate via webhook to decide whether to use the native modifier. 
+func NewModifier(ctx context.Context, logger logr.Logger, cli client.Client) (Modifier, error) { + awsCfg, err := awsconfig.LoadDefaultConfig(ctx) + if err != nil { + logger.Error(err, "failed to load aws config, will use native modifier") + return NewNativeModifier(cli, logger), nil + } + logger.Info("use aws ebs modifier") + return NewRawModifier(aws.NewEBSModifier(&awsCfg, logger), cli, logger), nil +} + +// SyncPVCs gets the actual PVCs and compares them with the expected PVCs. +// If the actual PVCs are different from the expected PVCs, it will update the PVCs. +func SyncPVCs(ctx context.Context, cli client.Client, + expectPVCs []*corev1.PersistentVolumeClaim, vm Modifier, logger logr.Logger, +) (wait bool, err error) { + for _, expectPVC := range expectPVCs { + var actualPVC corev1.PersistentVolumeClaim + if err := cli.Get(ctx, client.ObjectKey{Namespace: expectPVC.Namespace, Name: expectPVC.Name}, &actualPVC); err != nil { + if client.IgnoreNotFound(err) != nil { + return false, fmt.Errorf("can't get expectPVC %s/%s: %w", expectPVC.Namespace, expectPVC.Name, err) + } + + // Create PVC + if e := cli.Apply(ctx, expectPVC); e != nil { + return false, fmt.Errorf("can't create expectPVC %s/%s: %w", expectPVC.Namespace, expectPVC.Name, e) + } + continue + } + + if actualPVC.Status.Phase != corev1.ClaimBound { + // do not try to modify the PVC if it's not bound yet + wait = true + continue + } + + // Set default storage class name if it's not specified and the claim is bound. + // Otherwise, it will be considered as a change and trigger a PVC update. 
+ if expectPVC.Spec.StorageClassName == nil && actualPVC.Status.Phase == corev1.ClaimBound { + expectPVC.Spec.StorageClassName = actualPVC.Spec.StorageClassName + } + + vol, err := vm.GetActualVolume(ctx, expectPVC, &actualPVC) + if err != nil { + return false, fmt.Errorf("failed to get the actual volume: %w", err) + } + if vm.ShouldModify(ctx, vol) { + logger.Info("modifying volume's attributes", "volume", vol.String()) + if e := vm.Modify(ctx, vol); e != nil { + return false, fmt.Errorf("failed to modify volume's attributes %s/%s: %w", expectPVC.Namespace, expectPVC.Name, e) + } + continue + } + + logger.Info("volume's attributes are not changed", "volume", vol.String()) + if expectPVC.Spec.StorageClassName != nil && + actualPVC.Spec.StorageClassName != nil && + *expectPVC.Spec.StorageClassName != *actualPVC.Spec.StorageClassName { + // Avoid updating the storage class name as it's immutable. + expectPVC.Spec.StorageClassName = actualPVC.Spec.StorageClassName + } + if err := cli.Apply(ctx, expectPVC); err != nil { + return false, fmt.Errorf("can't update expectPVC %s/%s: %w", expectPVC.Namespace, expectPVC.Name, err) + } + } + return wait, nil +} diff --git a/pkg/volumes/utils_test.go b/pkg/volumes/utils_test.go new file mode 100644 index 00000000000..7aa054b536e --- /dev/null +++ b/pkg/volumes/utils_test.go @@ -0,0 +1,136 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package volumes + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . "github.com/onsi/gomega" + "go.uber.org/mock/gomock" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" +) + +func Test_areStringsDifferent(t *testing.T) { + tests := []struct { + name string + pre *string + cur *string + want bool + }{ + { + name: "both nil", + pre: nil, + cur: nil, + want: false, + }, + { + name: "both same non-nil", + pre: ptr.To("same"), + cur: ptr.To("same"), + want: false, + }, + { + name: "from nil to non-nil", + pre: nil, + cur: ptr.To("non-nil"), + want: true, + }, + { + name: "from non-nil to nil", + pre: ptr.To("non-nil"), + cur: nil, + want: true, + }, + { + name: "different non-nil", + pre: ptr.To("pre"), + cur: ptr.To("cur"), + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := areStringsDifferent(tt.pre, tt.cur); got != tt.want { + t.Errorf("areStringsDifferent() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestSyncPVCs(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + expectPVCs []*corev1.PersistentVolumeClaim + setup func(*MockModifier) + expectFunc func(*WithT, client.Client) + wantErr bool + }{ + { + name: "create PVC", + expectPVCs: []*corev1.PersistentVolumeClaim{ + fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0"), + }, + }, + { + name: "did not change PVC", + existingObjs: []client.Object{ + fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", func(obj *corev1.PersistentVolumeClaim) *corev1.PersistentVolumeClaim { + obj.Status.Phase = corev1.ClaimBound + return obj + }), + }, + expectPVCs: []*corev1.PersistentVolumeClaim{ + fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0", fake.Label[corev1.PersistentVolumeClaim]("foo", "bar")), + }, + setup: func(vm *MockModifier) { + vm.EXPECT().GetActualVolume(gomock.Any(), gomock.Any(), 
gomock.Any()).Return(&ActualVolume{ + PVC: fake.FakeObj[corev1.PersistentVolumeClaim]("pvc-0"), + Desired: &DesiredVolume{}, + }, nil) + vm.EXPECT().ShouldModify(gomock.Any(), gomock.Any()).Return(false) + }, + expectFunc: func(g *WithT, cli client.Client) { + var pvc corev1.PersistentVolumeClaim + g.Expect(cli.Get(context.TODO(), client.ObjectKey{Name: "pvc-0"}, &pvc)).To(Succeed()) + g.Expect(pvc.Labels).To(HaveKeyWithValue("foo", "bar")) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + vm := NewMockModifier(ctrl) + if tt.setup != nil { + tt.setup(vm) + } + + cli := client.NewFakeClient(tt.existingObjs...) + if _, err := SyncPVCs(context.TODO(), cli, tt.expectPVCs, vm, logr.Discard()); (err != nil) != tt.wantErr { + t.Errorf("SyncPVCs() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.expectFunc != nil { + tt.expectFunc(NewGomegaWithT(t), cli) + } + }) + } +} diff --git a/tests/e2e/cluster/cluster.go b/tests/e2e/cluster/cluster.go new file mode 100644 index 00000000000..6b86389889c --- /dev/null +++ b/tests/e2e/cluster/cluster.go @@ -0,0 +1,1761 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cluster + +import ( + "context" + "database/sql" + "fmt" + "slices" + "strings" + "sync" + "sync/atomic" + "time" + + //nolint: stylecheck // too many changes, refactor later + . 
"github.com/onsi/ginkgo/v2" + //nolint: stylecheck // too many changes, refactor later + . "github.com/onsi/gomega" + + "github.com/Masterminds/semver/v3" + _ "github.com/go-sql-driver/mysql" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + cacheddiscovery "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/tests/e2e/config" + "github.com/pingcap/tidb-operator/tests/e2e/utils/data" + "github.com/pingcap/tidb-operator/tests/e2e/utils/jwt" + "github.com/pingcap/tidb-operator/tests/e2e/utils/k8s" + utiltidb "github.com/pingcap/tidb-operator/tests/e2e/utils/tidb" +) + +const ( + createClusterTimeout = 10 * time.Minute + createClusterPolling = 5 * time.Second + + deleteClusterTimeout = 10 * time.Minute + deleteClusterPolling = 5 * time.Second + + suspendResumePolling = 5 * time.Second + + logLevelConfig = "log.level = 'warn'" +) + +func initK8sClient() (kubernetes.Interface, client.Client, *rest.Config) { + restConfig, err := k8s.LoadConfig() + Expect(err).NotTo(HaveOccurred()) + + clientset, err := kubernetes.NewForConfig(restConfig) + Expect(err).NotTo(HaveOccurred()) + + scheme := runtime.NewScheme() + metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha1.Install(scheme)).To(Succeed()) + + // also init a controller-runtime 
client + k8sClient, err := client.New(restConfig, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + return clientset, k8sClient, restConfig +} + +func LoadClientRawConfig() (clientcmdapi.Config, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides).RawConfig() +} + +var _ = Describe("TiDB Cluster", func() { + var ( + clientSet kubernetes.Interface + k8sClient client.Client + restConfig *rest.Config + fw k8s.PortForwarder + yamlApplier *k8s.YAMLApplier + + ns *corev1.Namespace + tc *v1alpha1.Cluster + ctx = context.Background() + ) + + BeforeEach(func() { + // TODO: only run once + clientRawConfig, err := LoadClientRawConfig() + Expect(err).To(BeNil()) + fw, err = k8s.NewPortForwarder(ctx, config.NewSimpleRESTClientGetter(&clientRawConfig)) + Expect(err).To(BeNil()) + + clientSet, k8sClient, restConfig = initK8sClient() + _ = restConfig + + dynamicClient := dynamic.NewForConfigOrDie(restConfig) + cachedDiscovery := cacheddiscovery.NewMemCacheClient(clientSet.Discovery()) + restmapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery) + restmapper.Reset() + yamlApplier = k8s.NewYAMLApplier(dynamicClient, restmapper) + ns = data.NewNamespace() + tc = data.NewCluster(ns.Name, "tc") + + By(fmt.Sprintf("Creating a Cluster in namespace %s", tc.Namespace)) + Expect(k8sClient.Create(ctx, ns)).To(Succeed()) + Expect(k8sClient.Create(ctx, tc)).To(Succeed()) + + By(fmt.Sprintf("Waiting for the Cluster in namespace %s to be ready", tc.Namespace)) + Eventually(func(g Gomega) { + var tcGet v1alpha1.Cluster + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: tc.Name}, &tcGet)).To(Succeed()) + g.Expect(tcGet.Status.ObservedGeneration).To(Equal(tcGet.Generation)) + 
}).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + DeferCleanup(func() { + // uncomment the following lines to keep the resources for debugging + // if CurrentSpecReport().Failed() { + // return + // } + By(fmt.Sprintf("Deleting the Cluster in namespace %s", tc.Namespace)) + Expect(k8sClient.Delete(ctx, tc)).To(Succeed()) + + By(fmt.Sprintf("Checking the Cluster in namespace %s has been deleted", tc.Namespace)) + Eventually(func(g Gomega) { + err := k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: tc.Name}, tc) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + }).WithTimeout(deleteClusterTimeout).WithPolling(deleteClusterPolling).Should(Succeed()) + + By(fmt.Sprintf("Checking resources have already been deleted in namespace %s", tc.Namespace)) + var podList corev1.PodList + Expect(k8sClient.List(ctx, &podList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + })).To(Succeed()) + Expect(len(podList.Items)).To(Equal(0)) + var cmList corev1.ConfigMapList + Expect(k8sClient.List(ctx, &cmList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + })).To(Succeed()) + Expect(len(cmList.Items)).To(Equal(0)) + var pvcList corev1.PersistentVolumeClaimList + Expect(k8sClient.List(ctx, &pvcList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + })).To(Succeed()) + Expect(len(pvcList.Items)).To(Equal(0)) + + var svcList corev1.ServiceList + Expect(k8sClient.List(ctx, &svcList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + })).To(Succeed()) + 
Expect(len(svcList.Items)).To(Equal(0)) + + By(fmt.Sprintf("Deleting the Namespace %s", ns.Name)) + Expect(k8sClient.Delete(ctx, ns)).To(Succeed()) + + By("Checking namespace can be deleted") + Eventually(func(g Gomega) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ns.Name}, ns) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + }).WithTimeout(deleteClusterTimeout).WithPolling(deleteClusterPolling).Should(Succeed()) + }) + }) + PContext("Evenly scheduling", func() { + It("should create a tidb cluster that all components have 3 nodes", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(3)), func(g *v1alpha1.PDGroup) { + g.Spec.SchedulePolicies = []v1alpha1.SchedulePolicy{ + { + Type: v1alpha1.SchedulePolicyTypeEvenlySpread, + EvenlySpread: &v1alpha1.SchedulePolicyEvenlySpread{ + Topologies: []v1alpha1.ScheduleTopology{ + { + Topology: v1alpha1.Topology{ + "zone": "zone-a", + }, + }, + { + Topology: v1alpha1.Topology{ + "zone": "zone-b", + }, + }, + { + Topology: v1alpha1.Topology{ + "zone": "zone-c", + }, + }, + }, + }, + }, + } + }) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(3)), func(g *v1alpha1.TiKVGroup) { + g.Spec.SchedulePolicies = []v1alpha1.SchedulePolicy{ + { + Type: v1alpha1.SchedulePolicyTypeEvenlySpread, + EvenlySpread: &v1alpha1.SchedulePolicyEvenlySpread{ + Topologies: []v1alpha1.ScheduleTopology{ + { + Topology: v1alpha1.Topology{ + "zone": "zone-a", + }, + }, + { + Topology: v1alpha1.Topology{ + "zone": "zone-b", + }, + }, + { + Topology: v1alpha1.Topology{ + "zone": "zone-c", + }, + }, + }, + }, + }, + } + }) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(3)), func(g *v1alpha1.TiDBGroup) { + g.Spec.SchedulePolicies = []v1alpha1.SchedulePolicy{ + { + Type: v1alpha1.SchedulePolicyTypeEvenlySpread, + EvenlySpread: &v1alpha1.SchedulePolicyEvenlySpread{ + Topologies: []v1alpha1.ScheduleTopology{ + { + Topology: v1alpha1.Topology{ + "zone": "zone-a", + }, + }, + { + Topology: 
v1alpha1.Topology{ + "zone": "zone-b", + }, + }, + { + Topology: v1alpha1.Topology{ + "zone": "zone-c", + }, + }, + }, + }, + }, + } + }) + flashg := data.NewTiFlashGroup(ns.Name, "flashg", tc.Name, ptr.To(int32(3)), func(g *v1alpha1.TiFlashGroup) { + g.Spec.SchedulePolicies = []v1alpha1.SchedulePolicy{ + { + Type: v1alpha1.SchedulePolicyTypeEvenlySpread, + EvenlySpread: &v1alpha1.SchedulePolicyEvenlySpread{ + Topologies: []v1alpha1.ScheduleTopology{ + { + Topology: v1alpha1.Topology{ + "zone": "zone-a", + }, + }, + { + Topology: v1alpha1.Topology{ + "zone": "zone-b", + }, + }, + { + Topology: v1alpha1.Topology{ + "zone": "zone-c", + }, + }, + }, + }, + }, + } + }) + + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + Expect(k8sClient.Create(ctx, flashg)).To(Succeed()) + + nodes := &corev1.NodeList{} + Expect(k8sClient.List(ctx, nodes)).Should(Succeed()) + nodeToZone := map[string]int{} + for _, node := range nodes.Items { + switch node.Labels["zone"] { + case "zone-a": + nodeToZone[node.Name] = 0 + case "zone-b": + nodeToZone[node.Name] = 1 + case "zone-c": + nodeToZone[node.Name] = 2 + } + } + By("Checking the status of the cluster and the connection to the TiDB service") + Eventually(func(g Gomega) { + tcGet, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + // TODO: move cluster status check into `IsClusterReady`? 
+ for _, compStatus := range tcGet.Status.Components { + switch compStatus.Kind { + case v1alpha1.ComponentKindPD, v1alpha1.ComponentKindTiKV, v1alpha1.ComponentKindTiDB, v1alpha1.ComponentKindTiFlash: + g.Expect(compStatus.Replicas).To(Equal(int32(3))) + default: + g.Expect(compStatus.Replicas).To(BeZero()) + } + } + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, + []*v1alpha1.TiFlashGroup{flashg})).To(Succeed()) + + pods := &corev1.PodList{} + Expect(k8sClient.List(ctx, pods, client.InNamespace(tc.Namespace))).Should(Succeed()) + var components [4][3]int + for _, pod := range pods.Items { + index, ok := nodeToZone[pod.Spec.NodeName] + g.Expect(ok).To(BeTrue()) + switch pod.Labels[v1alpha1.LabelKeyComponent] { + case v1alpha1.LabelValComponentPD: + components[0][index] += 1 + case v1alpha1.LabelValComponentTiDB: + components[1][index] += 1 + case v1alpha1.LabelValComponentTiKV: + components[2][index] += 1 + case v1alpha1.LabelValComponentTiFlash: + components[3][index] += 1 + default: + Fail("unexpected component " + pod.Labels[v1alpha1.LabelKeyComponent]) + } + } + g.Expect(components).To(Equal([4][3]int{{1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}})) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + }) + }) + + Context("Basic", func() { + It("should create a minimal tidb cluster", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + flashg := data.NewTiFlashGroup(ns.Name, "flashg", tc.Name, ptr.To(int32(1)), nil) + + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + Expect(k8sClient.Create(ctx, flashg)).To(Succeed()) + + By("Checking the status of the cluster and the 
connection to the TiDB service") + Eventually(func(g Gomega) { + tcGet, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + // TODO: move cluster status check into `IsClusterReady`? + for _, compStatus := range tcGet.Status.Components { + switch compStatus.Kind { + case v1alpha1.ComponentKindPD, v1alpha1.ComponentKindTiKV, v1alpha1.ComponentKindTiDB, v1alpha1.ComponentKindTiFlash: + g.Expect(compStatus.Replicas).To(Equal(int32(1))) + default: + g.Expect(compStatus.Replicas).To(BeZero()) + } + } + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, + []*v1alpha1.TiFlashGroup{flashg})).To(Succeed()) + + g.Expect(utiltidb.IsTiDBConnectable(ctx, k8sClient, fw, + tc.Namespace, tc.Name, dbg.Name, "root", "", "")).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking log tailer sidercar container") + var tidbPodList corev1.PodList + Expect(k8sClient.List(ctx, &tidbPodList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyCluster: tc.Name, + v1alpha1.LabelKeyGroup: dbg.Name, + })).To(Succeed()) + Expect(len(tidbPodList.Items)).To(Equal(1)) + tidbPod := tidbPodList.Items[0] + Expect(tidbPod.Spec.InitContainers).To(HaveLen(1)) // sidercar container in `initContainers` + Expect(tidbPod.Spec.InitContainers[0].Name).To(Equal(v1alpha1.TiDBSlowLogContainerName)) + + var tiflashPodList corev1.PodList + Expect(k8sClient.List(ctx, &tiflashPodList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyCluster: tc.Name, + v1alpha1.LabelKeyGroup: flashg.Name, + })).To(Succeed()) + Expect(len(tiflashPodList.Items)).To(Equal(1)) + tiflashPod := tiflashPodList.Items[0] + Expect(tiflashPod.Spec.InitContainers).To(HaveLen(2)) + Expect(tiflashPod.Spec.InitContainers[0].Name).To(Equal(v1alpha1.TiFlashServerLogContainerName)) + 
Expect(tiflashPod.Spec.InitContainers[1].Name).To(Equal(v1alpha1.TiFlashErrorLogContainerName)) + }) + + It("should suspend and resume the tidb cluster", func() { + checkSuspendCondtion := func(g Gomega, condStatus metav1.ConditionStatus) { + var pdGroup v1alpha1.PDGroup + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: "pdg"}, &pdGroup)).To(Succeed()) + g.Expect(meta.IsStatusConditionPresentAndEqual(pdGroup.Status.Conditions, v1alpha1.PDGroupCondSuspended, condStatus)).To(BeTrue()) + + var tikvGroup v1alpha1.TiKVGroup + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: "kvg"}, &tikvGroup)).To(Succeed()) + g.Expect(meta.IsStatusConditionPresentAndEqual(tikvGroup.Status.Conditions, v1alpha1.TiKVGroupCondSuspended, condStatus)).To(BeTrue()) + + var tidbGroup v1alpha1.TiDBGroup + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: "dbg"}, &tidbGroup)).To(Succeed()) + g.Expect(meta.IsStatusConditionPresentAndEqual(tidbGroup.Status.Conditions, v1alpha1.TiDBGroupCondSuspended, condStatus)).To(BeTrue()) + + var flashGroup v1alpha1.TiFlashGroup + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: "flashg"}, &flashGroup)).To(Succeed()) + g.Expect(meta.IsStatusConditionPresentAndEqual( + flashGroup.Status.Conditions, v1alpha1.TiFlashGroupCondSuspended, condStatus)).To(BeTrue()) + + var pdList v1alpha1.PDList + g.Expect(k8sClient.List(ctx, &pdList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyGroup: pdGroup.Name, + })).To(Succeed()) + g.Expect(len(pdList.Items)).To(Equal(1)) + pd := pdList.Items[0] + g.Expect(pd.Spec.Cluster.Name).To(Equal(tc.Name)) + g.Expect(meta.IsStatusConditionPresentAndEqual(pd.Status.Conditions, v1alpha1.PDCondSuspended, condStatus)).To(BeTrue()) + + var tikvList v1alpha1.TiKVList 
+ g.Expect(k8sClient.List(ctx, &tikvList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + v1alpha1.LabelKeyGroup: tikvGroup.Name, + })).To(Succeed()) + g.Expect(len(tikvList.Items)).To(Equal(1)) + tikv := tikvList.Items[0] + g.Expect(tikv.Spec.Cluster.Name).To(Equal(tc.Name)) + g.Expect(meta.IsStatusConditionPresentAndEqual(tikv.Status.Conditions, v1alpha1.TiKVCondSuspended, condStatus)).To(BeTrue()) + + var tidbList v1alpha1.TiDBList + g.Expect(k8sClient.List(ctx, &tidbList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyGroup: tidbGroup.Name, + })).To(Succeed()) + g.Expect(len(tidbList.Items)).To(Equal(1)) + tidb := tidbList.Items[0] + g.Expect(tidb.Spec.Cluster.Name).To(Equal(tc.Name)) + g.Expect(meta.IsStatusConditionPresentAndEqual(tidb.Status.Conditions, v1alpha1.TiDBCondSuspended, condStatus)).To(BeTrue()) + + var flashList v1alpha1.TiFlashList + g.Expect(k8sClient.List(ctx, &flashList, client.InNamespace(tc.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyCluster: tc.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, + v1alpha1.LabelKeyGroup: flashGroup.Name, + })).To(Succeed()) + g.Expect(len(flashList.Items)).To(Equal(1)) + flash := flashList.Items[0] + g.Expect(flash.Spec.Cluster.Name).To(Equal(tc.Name)) + g.Expect(meta.IsStatusConditionPresentAndEqual(flash.Status.Conditions, v1alpha1.TiFlashCondSuspended, condStatus)).To(BeTrue()) + + var tcGet v1alpha1.Cluster + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: tc.Name}, &tcGet)).To(Succeed()) + 
g.Expect(meta.IsStatusConditionPresentAndEqual(tcGet.Status.Conditions, v1alpha1.ClusterCondSuspended, condStatus)).To(BeTrue()) + } + + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + flashg := data.NewTiFlashGroup(ns.Name, "flashg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + Expect(k8sClient.Create(ctx, flashg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + var tcGet *v1alpha1.Cluster + Eventually(func(g Gomega) { + var ready bool + tcGet, ready = utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, []*v1alpha1.TiFlashGroup{flashg})).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking suspend condition is False") + checkSuspendCondtion(Default, metav1.ConditionFalse) + + By("Suspending the TiDB cluster") + tcGet.Spec.SuspendAction = &v1alpha1.SuspendAction{SuspendCompute: true} + Expect(k8sClient.Update(ctx, tcGet)).To(Succeed()) + + By("Checking suspend condition is True") + Eventually(func(g Gomega) { + checkSuspendCondtion(g, metav1.ConditionTrue) + }).WithTimeout(2 * time.Minute).WithPolling(suspendResumePolling).Should(Succeed()) + + By("Checking all Pods are deleted") + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, metav1.ListOptions{}) + Expect(err).To(BeNil()) + Expect(len(podList.Items)).To(BeZero()) + + By("Resuming the TiDB cluster") + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: tc.Name}, tcGet)).To(Succeed()) + tcGet.Spec.SuspendAction = nil + 
Expect(k8sClient.Update(ctx, tcGet)).To(Succeed()) + + By("Checking suspend condition is False") + Eventually(func(g Gomega) { + checkSuspendCondtion(g, metav1.ConditionFalse) + }).WithTimeout(time.Minute).WithPolling(suspendResumePolling).Should(Succeed()) + + By("Checking the Pods are re-created") + Eventually(func(g Gomega) { + podList, err = clientSet.CoreV1().Pods(tc.Namespace).List(ctx, metav1.ListOptions{}) + g.Expect(err).To(BeNil()) + //nolint:mnd // easy to understand + g.Expect(len(podList.Items)).To(Equal(4)) + }).WithTimeout(time.Minute).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking the cluster is ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, []*v1alpha1.TiFlashGroup{flashg})).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Suspend the TiDB cluster again before deleting it") + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: tc.Name}, tcGet)).To(Succeed()) + tcGet.Spec.SuspendAction = &v1alpha1.SuspendAction{SuspendCompute: true} + Expect(k8sClient.Update(ctx, tcGet)).To(Succeed()) + + By("Checking suspend condition is True") + Eventually(func(g Gomega) { + checkSuspendCondtion(g, metav1.ConditionTrue) + }).WithTimeout(2 * time.Minute).WithPolling(suspendResumePolling).Should(Succeed()) + }) + + It("should be able to scale in/out", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the 
cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Recording the pod's UID") + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name)} + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(podList.Items)).To(Equal(1)) + originalPodName, originalPodUID := podList.Items[0].Name, podList.Items[0].UID + + By("scale out tidb from 1 to 2") + var dbgGet v1alpha1.TiDBGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: dbg.Name}, &dbgGet)).To(Succeed()) + dbgGet.Spec.Replicas = ptr.To(int32(2)) + Expect(k8sClient.Update(ctx, &dbgGet)).To(Succeed()) + + podNameToUID := make(map[string]types.UID, 2) + Eventually(func(g Gomega) { + g.Expect(utiltidb.AreAllTiDBHealthy(k8sClient, &dbgGet)).To(Succeed()) + podList, err = clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + g.Expect(err).To(BeNil()) + g.Expect(len(podList.Items)).To(Equal(2)) + + // Should not recreate the pod + for _, pod := range podList.Items { + podNameToUID[pod.Name] = pod.UID + } + g.Expect(podNameToUID[originalPodName]).To(Equal(originalPodUID)) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("scale in tidb from 2 to 1") + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: dbg.Name}, &dbgGet)).To(Succeed()) + dbgGet.Spec.Replicas = ptr.To(int32(1)) + Expect(k8sClient.Update(ctx, &dbgGet)).To(Succeed()) + + Eventually(func(g Gomega) { + g.Expect(utiltidb.AreAllTiDBHealthy(k8sClient, &dbgGet)).To(Succeed()) + podList, 
err = clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + g.Expect(err).To(BeNil()) + g.Expect(len(podList.Items)).To(Equal(1)) + // Should not recreate the pod + g.Expect(podNameToUID[podList.Items[0].Name]).To(Equal(podList.Items[0].UID)) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + }) + + It("should be able to increase the volume size without restarting", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), func(kvg *v1alpha1.TiKVGroup) { + for i := range kvg.Spec.Template.Spec.Volumes { + vol := &kvg.Spec.Template.Spec.Volumes[i] + if vol.Name == "data" { + vol.Storage = data.StorageSizeGi2quantity(2) + break + } + } + }) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + scList, err := clientSet.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) + Expect(err).To(BeNil()) + for _, sc := range scList.Items { + if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" && + (sc.AllowVolumeExpansion == nil || !*sc.AllowVolumeExpansion) { + Skip("default storage class does not support volume expansion") + } + } + + By("Checking the pvc size") + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, kvg.Name)} + pvcList, err := 
clientSet.CoreV1().PersistentVolumeClaims(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(pvcList.Items)).To(Equal(1)) + Expect(pvcList.Items[0].Status.Capacity.Storage().Equal(data.StorageSizeGi2quantity(2))).To(BeTrue()) + + By("Recording the pod's UID") + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(podList.Items)).To(Equal(1)) + originalPodName, originalPodUID := podList.Items[0].Name, podList.Items[0].UID + + By("Increasing the tikv's volume size from 2Gi to 5Gi") + var kvgGet v1alpha1.TiKVGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: kvg.Name}, &kvgGet)).To(Succeed()) + Expect(len(kvgGet.Spec.Template.Spec.Volumes)).To(Equal(1)) + //nolint:mnd // easy to understand + kvgGet.Spec.Template.Spec.Volumes[0].Storage = data.StorageSizeGi2quantity(5) + Expect(k8sClient.Update(ctx, &kvgGet)).To(Succeed()) + + Eventually(func(g Gomega) { + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + podList, err = clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + g.Expect(err).To(BeNil()) + g.Expect(len(podList.Items)).To(Equal(1)) + g.Expect(podList.Items[0].Name).To(Equal(originalPodName)) + g.Expect(podList.Items[0].UID).To(Equal(originalPodUID)) + + pvcList, err := clientSet.CoreV1().PersistentVolumeClaims(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(pvcList.Items)).To(Equal(1)) + //nolint:mnd // easy to understand + Expect(pvcList.Items[0].Status.Capacity.Storage()).To(Equal(data.StorageSizeGi2quantity(5))) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + }) + + It("should not recreate pods for labels/annotations modification", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + 
dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Recording the pod's UID") + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name)} + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(podList.Items)).To(Equal(1)) + originalPodName, originalPodUID := podList.Items[0].Name, podList.Items[0].UID + + By("Changing labels and annotations") + var dbgGet v1alpha1.TiDBGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: dbg.Name}, &dbgGet)).To(Succeed()) + if dbgGet.Spec.Template.Labels == nil { + dbgGet.Spec.Template.Labels = map[string]string{} + } + dbgGet.Spec.Template.Labels["foo"] = "bar" + if dbgGet.Spec.Template.Annotations == nil { + dbgGet.Spec.Template.Annotations = map[string]string{} + } + dbgGet.Spec.Template.Annotations["foo"] = "bar" + Expect(k8sClient.Update(ctx, &dbgGet)).To(Succeed()) + + By("Checking the pods are not recreated") + Eventually(func(g Gomega) { + var pod corev1.Pod + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: originalPodName}, &pod)).To(Succeed()) + g.Expect(pod.UID).To(Equal(originalPodUID)) + g.Expect(pod.Labels["foo"]).To(Equal("bar")) + g.Expect(pod.Annotations["foo"]).To(Equal("bar")) + g.Expect(pod.Status.Phase).To(Equal(corev1.PodRunning)) + 
}).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + }) + }) + + Context("Rolling Update", func() { + It("tidb: should perform a rolling update for config change", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(3)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking the number of ControllerRevisions, and revision names in tidb group's status") + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name)} + crList, err := clientSet.AppsV1().ControllerRevisions(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(crList.Items)).To(Equal(1)) + crBeforeChange := crList.Items[0] + var dbgGet v1alpha1.TiDBGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: dbg.Name}, &dbgGet)).To(Succeed()) + Expect(dbgGet.Status.CurrentRevision).To(Equal(crBeforeChange.Name)) + Expect(dbgGet.Status.UpdateRevision).To(Equal(crBeforeChange.Name)) + + By("Checking the number of configmaps before changing the config") + cmList, err := clientSet.CoreV1().ConfigMaps(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(cmList.Items)).To(Equal(3)) + + By("Checking the number of pods and their labels before 
changing the config") + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(podList.Items)).To(Equal(3)) + for _, pod := range podList.Items { + Expect(pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash]).To(Equal(crBeforeChange.Name)) + } + + By("collecting the events of pods for verifying rolling update") + watchCtx, cancel := context.WithCancel(ctx) + podWatcher, err := clientSet.CoreV1().Pods(tc.Namespace).Watch(watchCtx, listOpts) + defer cancel() + Expect(err).NotTo(HaveOccurred()) + type podInfo struct { + name string + uid string + creationTime metav1.Time + deletionTime metav1.Time + } + + podMap := map[string]podInfo{} + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer GinkgoRecover() + for { + select { + case <-watchCtx.Done(): + GinkgoWriter.Println("podWatcher is stopped") + podWatcher.Stop() + wg.Done() + return + + case event := <-podWatcher.ResultChan(): + pod, isPod := event.Object.(*corev1.Pod) + if !isPod { + continue + } + info, ok := podMap[string(pod.UID)] + if !ok { + info = podInfo{ + name: pod.Name, + uid: string(pod.UID), + creationTime: pod.CreationTimestamp, + } + } + if !pod.DeletionTimestamp.IsZero() && pod.DeletionGracePeriodSeconds != nil && *pod.DeletionGracePeriodSeconds == 0 { + info.deletionTime = *pod.DeletionTimestamp + } + podMap[string(pod.UID)] = info + } + } + }() + + By("Changing the config of the TiDB cluster") + updateTime := time.Now() + dbgGet.Spec.Template.Spec.Config = logLevelConfig + Expect(k8sClient.Update(ctx, &dbgGet)).To(Succeed()) + + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + + crList, err = clientSet.AppsV1().ControllerRevisions(tc.Namespace).List(ctx, listOpts) + g.Expect(err).To(BeNil()) + 
g.Expect(len(crList.Items)).To(Equal(2)) + var crAfterChange *appsv1.ControllerRevision + for i := range crList.Items { + cr := &crList.Items[i] + if cr.Name != crBeforeChange.Name { + crAfterChange = cr + break + } + } + g.Expect(crAfterChange.Name).ShouldNot(BeEmpty()) + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: dbg.Name}, &dbgGet)).To(Succeed()) + g.Expect(dbgGet.Status.CurrentRevision).To(Equal(crAfterChange.Name)) + g.Expect(dbgGet.Status.UpdateRevision).To(Equal(crAfterChange.Name)) + g.Expect(crAfterChange.Revision).Should(BeNumerically("==", crBeforeChange.Revision+1)) + + cmList, err := clientSet.CoreV1().ConfigMaps(tc.Namespace).List(ctx, listOpts) + g.Expect(err).To(BeNil()) + g.Expect(len(cmList.Items)).To(Equal(3)) + + curPodList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + g.Expect(err).To(BeNil()) + g.Expect(len(curPodList.Items)).To(Equal(3)) + + // Ensure pods are re-created + for _, pod := range curPodList.Items { + g.Expect(pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash]).To(Equal(crAfterChange.Name)) + g.Expect(pod.Status.StartTime).ShouldNot(BeNil()) + g.Expect(pod.Status.StartTime.After(updateTime)).To(BeTrue()) + } + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + cancel() + wg.Wait() + + infos := []podInfo{} + for _, v := range podMap { + infos = append(infos, v) + } + slices.SortFunc(infos, func(a podInfo, b podInfo) int { + if a.deletionTime.IsZero() && b.deletionTime.IsZero() { + return a.creationTime.Compare(b.creationTime.Time) + } + if a.deletionTime.IsZero() { + return a.creationTime.Compare(b.deletionTime.Time) + } + if b.deletionTime.IsZero() { + return a.deletionTime.Compare(b.creationTime.Time) + } + return a.deletionTime.Compare(b.deletionTime.Time) + }) + for _, info := range infos { + if info.deletionTime.IsZero() { + GinkgoWriter.Printf("%v(%v) created at %s\n", info.name, info.uid, info.creationTime) + } else { + 
GinkgoWriter.Printf("%v(%v) created at %s, deleted at %s\n", info.name, info.uid, info.creationTime, info.deletionTime) + } + } + //nolint:mnd // easy to understand + Expect(len(infos)).To(Equal(6)) + Expect(infos[0].name).To(Equal(infos[1].name)) + Expect(infos[2].name).To(Equal(infos[3].name)) + Expect(infos[4].name).To(Equal(infos[5].name)) + }) + + It("tikv: should not perform a rolling update when ConfigUpdateStrategy is InPlace", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(3)), func(tk *v1alpha1.TiKVGroup) { + tk.Spec.ConfigUpdateStrategy = v1alpha1.ConfigUpdateStrategyInPlace + }) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking the config") + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, kvg.Name)} + cms, err := clientSet.CoreV1().ConfigMaps(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(cms.Items)).To(Equal(3)) + Expect(cms.Items[0].Data).ShouldNot(ContainElement(ContainSubstring("level = 'warn'"))) + Expect(cms.Items[1].Data).ShouldNot(ContainElement(ContainSubstring("level = 'warn'"))) + Expect(cms.Items[2].Data).ShouldNot(ContainElement(ContainSubstring("level = 'warn'"))) + + By("Recording tikv pods' UID before changing the config") + podMap := 
map[string]string{} + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(podList.Items)).To(Equal(3)) + for _, pod := range podList.Items { + podMap[pod.Name] = string(pod.GetUID()) + } + + By("Changing the config of the TiKVGroup") + var kvgGet v1alpha1.TiKVGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: kvg.Name}, &kvgGet)).To(Succeed()) + kvgGet.Spec.Template.Spec.Config = logLevelConfig + Expect(k8sClient.Update(ctx, &kvgGet)).To(Succeed()) + + Consistently(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + + // Ensure pods are not re-created + curPodList, err2 := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + g.Expect(err2).To(BeNil()) + g.Expect(len(curPodList.Items)).To(Equal(3)) + exceptedPodMap := map[string]string{} + for _, pod := range curPodList.Items { + exceptedPodMap[pod.Name] = string(pod.GetUID()) + } + g.Expect(podMap).To(Equal(exceptedPodMap)) + }).WithTimeout(3 * time.Minute).WithPolling(createClusterPolling).Should(Succeed()) + + cms, err = clientSet.CoreV1().ConfigMaps(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(cms.Items)).To(Equal(3)) + Expect(cms.Items[0].Data).Should(ContainElement(ContainSubstring("level = 'warn'"))) + Expect(cms.Items[1].Data).Should(ContainElement(ContainSubstring("level = 'warn'"))) + Expect(cms.Items[2].Data).Should(ContainElement(ContainSubstring("level = 'warn'"))) + }) + + It("pd/tikv/tiflash: should perform a rolling update for config change", func() { + pdg := data.NewPDGroup(ns.Name, "pd", tc.Name, ptr.To(int32(3)), nil) + kvg := data.NewTiKVGroup(ns.Name, "tikv", tc.Name, ptr.To(int32(3)), nil) + dbg := data.NewTiDBGroup(ns.Name, "tidb", tc.Name, ptr.To(int32(1)), nil) + 
flashg := data.NewTiFlashGroup(ns.Name, "flash", tc.Name, ptr.To(int32(3)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + Expect(k8sClient.Create(ctx, flashg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, []*v1alpha1.TiFlashGroup{flashg})).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + groupNames := []string{pdg.Name, kvg.Name, flashg.Name} + outerCtx, cancel := context.WithCancel(ctx) + defer cancel() + for _, groupName := range groupNames { + By("Checking the logic of rolling update for " + groupName) + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, groupName)} + + By("collecting the events of pods for verifying rolling update") + watchCtx, cancel := context.WithCancel(outerCtx) + podWatcher, err := clientSet.CoreV1().Pods(tc.Namespace).Watch(watchCtx, listOpts) + Expect(err).NotTo(HaveOccurred()) + type podInfo struct { + name string + uid string + creationTime metav1.Time + deletionTime metav1.Time + } + + podMap := map[string]podInfo{} + wg := sync.WaitGroup{} + wg.Add(1) + + go func() { + defer GinkgoRecover() + for { + select { + case <-watchCtx.Done(): + GinkgoWriter.Println("podWatcher is stopped") + podWatcher.Stop() + wg.Done() + return + + case event := <-podWatcher.ResultChan(): + pod, isPod := event.Object.(*corev1.Pod) + if !isPod { + continue + } + info, ok := podMap[string(pod.UID)] + if !ok { + info = podInfo{ + name: pod.Name, + uid: string(pod.UID), + creationTime: pod.CreationTimestamp, + } + } + if 
!pod.DeletionTimestamp.IsZero() && pod.DeletionGracePeriodSeconds != nil && *pod.DeletionGracePeriodSeconds == 0 { + info.deletionTime = *pod.DeletionTimestamp + } + podMap[string(pod.UID)] = info + } + } + }() + + By("Changing the spec") + cfg := "log.level = 'debug'" + var updateTime time.Time + switch groupName { + case "pd": + var pdgGet v1alpha1.PDGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: groupName}, &pdgGet)).To(Succeed()) + pdgGet.Spec.Template.Spec.Config = v1alpha1.ConfigFile(cfg) + updateTime = time.Now() + Expect(k8sClient.Update(ctx, &pdgGet)).To(Succeed()) + case "tikv": + var kvgGet v1alpha1.TiKVGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: groupName}, &kvgGet)).To(Succeed()) + kvgGet.Spec.Template.Spec.Config = v1alpha1.ConfigFile(cfg) + updateTime = time.Now() + Expect(k8sClient.Update(ctx, &kvgGet)).To(Succeed()) + case "flash": + cfg = "logger.level = 'debug'" + var flashgGet v1alpha1.TiFlashGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: groupName}, &flashgGet)).To(Succeed()) + flashgGet.Spec.Template.Spec.Config = v1alpha1.ConfigFile(cfg) + updateTime = time.Now() + Expect(k8sClient.Update(ctx, &flashgGet)).To(Succeed()) + } + + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, []*v1alpha1.TiFlashGroup{flashg})).To(Succeed()) + + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + g.Expect(err).To(BeNil()) + g.Expect(len(podList.Items)).To(Equal(3)) + for _, pod := range podList.Items { + // Ensure pods are re-created + g.Expect(pod.Status.StartTime).ShouldNot(BeNil()) + g.Expect(pod.Status.StartTime.After(updateTime)).To(BeTrue()) + } + // g.Expect(k8s.CheckRollingRestartLogic(eventSlice)).To(BeTrue()) + 
}).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + cancel() + + wg.Wait() + + infos := []podInfo{} + for _, v := range podMap { + infos = append(infos, v) + } + slices.SortFunc(infos, func(a podInfo, b podInfo) int { + if a.deletionTime.IsZero() && b.deletionTime.IsZero() { + return a.creationTime.Compare(b.creationTime.Time) + } + if a.deletionTime.IsZero() { + return a.creationTime.Compare(b.deletionTime.Time) + } + if b.deletionTime.IsZero() { + return a.deletionTime.Compare(b.creationTime.Time) + } + return a.deletionTime.Compare(b.deletionTime.Time) + }) + for _, info := range infos { + if info.deletionTime.IsZero() { + GinkgoWriter.Printf("%v(%v) created at %s\n", info.name, info.uid, info.creationTime) + } else { + GinkgoWriter.Printf("%v(%v) created at %s, deleted at %s\n", info.name, info.uid, info.creationTime, info.deletionTime) + } + } + //nolint:mnd // easy to understand + Expect(len(infos)).To(Equal(6)) + Expect(infos[0].name).To(Equal(infos[1].name)) + Expect(infos[2].name).To(Equal(infos[3].name)) + Expect(infos[4].name).To(Equal(infos[5].name)) + } + }) + }) + + Context("Version Upgrade", func() { + When("use the default policy", func() { + It("should wait for pd upgrade to complete before upgrading tikv", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(3)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(3)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, 
[]*v1alpha1.TiFlashGroup{})).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking the version of tikv group") + var kvgGet v1alpha1.TiKVGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: kvg.Name}, &kvgGet)).To(Succeed()) + oldVersion := kvgGet.Spec.Version + Expect(oldVersion).NotTo(BeEmpty()) + Expect(kvgGet.Status.Version).To(Equal(oldVersion)) + v, err := semver.NewVersion(oldVersion) + Expect(err).To(BeNil()) + newVersion := "v" + v.IncMinor().String() + + By(fmt.Sprintf("Updating the version of the tikv group from %s to %s", oldVersion, newVersion)) + kvgGet.Spec.Version = newVersion + Expect(k8sClient.Update(ctx, &kvgGet)).To(Succeed()) + + Consistently(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, []*v1alpha1.TiFlashGroup{})).To(Succeed()) + + // tikv should not be upgraded + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: kvg.Name}, &kvgGet)).To(Succeed()) + g.Expect(kvgGet.Spec.Version).To(Equal(newVersion)) + g.Expect(kvgGet.Status.Version).To(Equal(oldVersion)) + }).WithTimeout(1 * time.Minute).WithPolling(createClusterPolling).Should(Succeed()) + + By("Pause the reconciliation") + var tcGet v1alpha1.Cluster + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: tc.Name}, &tcGet)).To(Succeed()) + tcGet.Spec.Paused = true + Expect(k8sClient.Update(ctx, &tcGet)).To(Succeed()) + + By("Upgrading the pd group") + var pdgGet v1alpha1.PDGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: pdg.Name}, &pdgGet)).To(Succeed()) + Expect(pdgGet.Spec.Version).To(Equal(oldVersion)) + Expect(pdgGet.Status.Version).To(Equal(oldVersion)) + pdgGet.Spec.Version = newVersion + Expect(k8sClient.Update(ctx, 
&pdgGet)).To(Succeed()) + + By("Checking the version of pd group when paused") + Consistently(func(g Gomega) { + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: pdg.Name}, &pdgGet)).To(Succeed()) + g.Expect(pdgGet.Spec.Version).To(Equal(newVersion)) + g.Expect(pdgGet.Status.Version).To(Equal(oldVersion)) + }).WithTimeout(1 * time.Minute).WithPolling(createClusterPolling).Should(Succeed()) + + By("Resume the reconciliation") + tcGet.Spec.Paused = false + Expect(k8sClient.Update(ctx, &tcGet)).To(Succeed()) + + By("Checking the version of pd and tikv group after resuming") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, []*v1alpha1.TiFlashGroup{})).To(Succeed()) + + // pd and tikv should be upgraded + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: pdg.Name}, &pdgGet)).To(Succeed()) + g.Expect(pdgGet.Spec.Version).To(Equal(newVersion)) + g.Expect(pdgGet.Status.Version).To(Equal(newVersion)) + var pdPods corev1.PodList + g.Expect(k8sClient.List(ctx, &pdPods, client.InNamespace(tc.Namespace), + client.MatchingLabels{v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD})) + for _, pod := range pdPods.Items { + for _, container := range pod.Spec.Containers { + if container.Name == v1alpha1.ContainerNamePD { + g.Expect(strings.Contains(container.Image, newVersion)).To(BeTrue()) + } + } + } + + g.Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: kvg.Name}, &kvgGet)).To(Succeed()) + g.Expect(kvgGet.Spec.Version).To(Equal(newVersion)) + g.Expect(kvgGet.Status.Version).To(Equal(newVersion)) + g.Expect(k8sClient.List(ctx, &pdPods, client.InNamespace(tc.Namespace), + client.MatchingLabels{v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV})) + for _, pod := range pdPods.Items { + for _, 
container := range pod.Spec.Containers { + if container.Name == v1alpha1.ContainerNameTiKV { + g.Expect(strings.Contains(container.Image, newVersion)).To(BeTrue()) + } + } + } + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + }) + }) + }) + + Context("TLS", func() { + It("should enable TLS for MySQL Client and between TiDB components", func() { + By("Installing the certificates") + Expect(installTiDBIssuer(ctx, yamlApplier, ns.Name, tc.Name)).To(Succeed()) + Expect(installTiDBCertificates(ctx, yamlApplier, ns.Name, tc.Name, "dbg")).To(Succeed()) + Expect(installTiDBComponentsCertificates(ctx, yamlApplier, ns.Name, tc.Name, "pdg", "kvg", "dbg", "flashg")).To(Succeed()) + + By("Enabling TLS for TiDB components") + var tcGet v1alpha1.Cluster + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: tc.Name}, &tcGet)).To(Succeed()) + tcGet.Spec.TLSCluster = &v1alpha1.TLSCluster{Enabled: true} + Expect(k8sClient.Update(ctx, &tcGet)).To(Succeed()) + + By("Creating the components with TLS client enabled") + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), func(group *v1alpha1.PDGroup) { + group.Spec.MountClusterClientSecret = ptr.To(true) + }) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), func(group *v1alpha1.TiKVGroup) { + group.Spec.MountClusterClientSecret = ptr.To(true) + }) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), func(group *v1alpha1.TiDBGroup) { + group.Spec.TLSClient = &v1alpha1.TiDBTLSClient{Enabled: true} + }) + flashg := data.NewTiFlashGroup(ns.Name, "flashg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + Expect(k8sClient.Create(ctx, flashg)).To(Succeed()) + + By("Checking the status of the cluster and the connection to the TiDB service") + Eventually(func(g Gomega) { + tcGet, ready := 
utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(len(tcGet.Status.PD)).NotTo(BeZero()) + for _, compStatus := range tcGet.Status.Components { + switch compStatus.Kind { + case v1alpha1.ComponentKindPD, v1alpha1.ComponentKindTiKV, v1alpha1.ComponentKindTiDB, v1alpha1.ComponentKindTiFlash: + g.Expect(compStatus.Replicas).To(Equal(int32(1))) + default: + g.Expect(compStatus.Replicas).To(BeZero()) + } + } + + checkComponent := func(groupName, componentName string, expectedReplicas *int32) { + listOptions := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, groupName)} + podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOptions) + g.Expect(err).To(BeNil()) + g.Expect(len(podList.Items)).To(Equal(int(*expectedReplicas))) + for _, pod := range podList.Items { + g.Expect(pod.Status.Phase).To(Equal(corev1.PodRunning)) + + // check for mTLS + g.Expect(pod.Spec.Volumes).To(ContainElement(corev1.Volume{ + Name: fmt.Sprintf("%s%s-tls", v1alpha1.NamePrefix, componentName), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: tc.TLSClusterSecretName(groupName), + //nolint:mnd // easy to understand + DefaultMode: ptr.To(int32(420)), + }, + }, + })) + g.Expect(pod.Spec.Containers[0].VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: fmt.Sprintf("%s%s-tls", v1alpha1.NamePrefix, componentName), + MountPath: fmt.Sprintf("/var/lib/%s-tls", componentName), + ReadOnly: true, + })) + + switch componentName { + case v1alpha1.LabelValComponentPD, v1alpha1.LabelValComponentTiKV: + // check for `mountClusterClientSecret` + g.Expect(pod.Spec.Volumes).To(ContainElement(corev1.Volume{ + Name: v1alpha1.ClusterTLSClientVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: tc.ClusterClientTLSSecretName(), + //nolint:mnd // easy to understand + DefaultMode: 
ptr.To(int32(420)), + }, + }, + })) + g.Expect(pod.Spec.Containers[0].VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: v1alpha1.ClusterTLSClientVolumeName, + MountPath: v1alpha1.ClusterTLSClientMountPath, + ReadOnly: true, + })) + case v1alpha1.LabelValComponentTiDB: + // check for TiDB server & mysql client TLS + g.Expect(pod.Spec.Volumes).To(ContainElement(corev1.Volume{ + Name: v1alpha1.TiDBServerTLSVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-%s-server-secret", tc.Name, groupName), + //nolint:mnd // easy to understand + DefaultMode: ptr.To(int32(420)), + }, + }, + })) + g.Expect(pod.Spec.Containers[0].VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: v1alpha1.TiDBServerTLSVolumeName, + MountPath: v1alpha1.TiDBServerTLSMountPath, + ReadOnly: true, + })) + } + } + } + checkComponent(pdg.Name, v1alpha1.LabelValComponentPD, pdg.Spec.Replicas) + checkComponent(kvg.Name, v1alpha1.LabelValComponentTiKV, kvg.Spec.Replicas) + checkComponent(dbg.Name, v1alpha1.LabelValComponentTiDB, dbg.Spec.Replicas) + checkComponent(flashg.Name, v1alpha1.LabelValComponentTiFlash, flashg.Spec.Replicas) + + g.Expect(utiltidb.IsTiDBConnectable(ctx, k8sClient, fw, + tc.Namespace, tc.Name, dbg.Name, "root", "", dbg.TiDBClientTLSSecretName())).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + // TODO: version upgrade test + }) + }) + + Context("TiDB Feature", func() { + It("should init a cluster with bootstrap SQL specified", func() { + By("Creating a ConfigMap with bootstrap SQL") + bsqlCm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bootstrap-sql", + Namespace: ns.Name, + }, + Data: map[string]string{ + v1alpha1.BootstrapSQLConfigMapKey: "SET PASSWORD FOR 'root'@'%' = 'pingcap';", + }, + } + Expect(k8sClient.Create(ctx, &bsqlCm)).To(Succeed()) + + By("Creating the components with bootstrap SQL") + pdg := 
data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), func(group *v1alpha1.TiDBGroup) { + group.Spec.BootstrapSQLConfigMapName = &bsqlCm.Name + }) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Checking the status of the cluster and the connection to the TiDB service") + Eventually(func(g Gomega) { + tcGet, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(len(tcGet.Status.PD)).NotTo(BeZero()) + for _, compStatus := range tcGet.Status.Components { + switch compStatus.Kind { + case v1alpha1.ComponentKindPD, v1alpha1.ComponentKindTiKV, v1alpha1.ComponentKindTiDB: + g.Expect(compStatus.Replicas).To(Equal(int32(1))) + default: + g.Expect(compStatus.Replicas).To(BeZero()) + } + } + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + + // connect with the password set in bootstrap SQL + g.Expect(utiltidb.IsTiDBConnectable(ctx, k8sClient, fw, + tc.Namespace, tc.Name, dbg.Name, "root", "pingcap", "")).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + }) + + It("should connect to the TiDB cluster with JWT authentication", func() { + const ( + kid = "the-key-id-0" + sub = "user@pingcap.com" + email = "user@pingcap.com" + iss = "issuer-abc" + ) + token, err := jwt.GenerateJWT(kid, sub, email, iss) + if err != nil { + Skip(fmt.Sprintf("failed to generate JWT token: %v", err)) + } + + // use Bootstrap SQL to create a user with JWT authentication + bsqlCm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bootstrap-sql", + Namespace: ns.Name, + }, + Data: map[string]string{ + 
v1alpha1.BootstrapSQLConfigMapKey: fmt.Sprintf( + `CREATE USER '%s' IDENTIFIED WITH 'tidb_auth_token' REQUIRE TOKEN_ISSUER '%s' ATTRIBUTE '{"email": "%s"}'; +GRANT ALL PRIVILEGES ON *.* TO '%s'@'%s';`, sub, iss, email, sub, "%"), + }, + } + + // The `tidb_auth_token` authentication method requires clients to support + // the `mysql_clear_password` plugin to send the token to TiDB in plain text. + // Therefore, it's better to enable TLS between clients and servers before using `tidb_auth_token`. + By("Installing the certificates for conmutication between MySQL client and TiDB server") + Expect(installTiDBIssuer(ctx, yamlApplier, ns.Name, tc.Name)).To(Succeed()) + Expect(installTiDBCertificates(ctx, yamlApplier, ns.Name, tc.Name, "dbg")).To(Succeed()) + + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), func(group *v1alpha1.TiDBGroup) { + group.Spec.TLSClient = &v1alpha1.TiDBTLSClient{Enabled: true} + group.Spec.BootstrapSQLConfigMapName = &bsqlCm.Name + group.Spec.TiDBAuthToken = &v1alpha1.TiDBAuthToken{ + Enabled: true, + } + }) + + By("Creating the JWKS secret") + jwksSecret := jwt.GenerateJWKSSecret(dbg.Namespace, dbg.TiDBAuthTokenJWKSSecretName()) + Expect(k8sClient.Create(ctx, &jwksSecret)).To(Succeed()) + + By("Creating the ConfigMap with a user created by Bootstrap SQL") + Expect(k8sClient.Create(ctx, &bsqlCm)).To(Succeed()) + + By("Creating the components with JWT authentication") + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Checking the status of the cluster and the connection to the TiDB service") + Eventually(func(g Gomega) { + tcGet, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(len(tcGet.Status.PD)).NotTo(BeZero()) 
+ for _, compStatus := range tcGet.Status.Components { + switch compStatus.Kind { + case v1alpha1.ComponentKindPD, v1alpha1.ComponentKindTiKV, v1alpha1.ComponentKindTiDB: + g.Expect(compStatus.Replicas).To(Equal(int32(1))) + default: + g.Expect(compStatus.Replicas).To(BeZero()) + } + } + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + + // connect with the JWT token + g.Expect(utiltidb.IsTiDBConnectable(ctx, k8sClient, fw, + tc.Namespace, tc.Name, dbg.Name, sub, token, dbg.TiDBClientTLSSecretName())).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + }) + + It("should set store labels for TiKV and TiFlash, set server lables for TiDB", func() { + By("Creating the components with location labels") + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), func(group *v1alpha1.PDGroup) { + group.Spec.Template.Spec.Config = `[replication] +location-labels = ["region", "zone", "host"]` + }) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + flashg := data.NewTiFlashGroup(ns.Name, "flashg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + Expect(k8sClient.Create(ctx, flashg)).To(Succeed()) + + By("Checking the status of the cluster and the connection to the TiDB service") + Eventually(func(g Gomega) { + tcGet, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(len(tcGet.Status.PD)).NotTo(BeZero()) + for _, compStatus := range tcGet.Status.Components { + switch compStatus.Kind { + case v1alpha1.ComponentKindPD, v1alpha1.ComponentKindTiKV, v1alpha1.ComponentKindTiDB, v1alpha1.ComponentKindTiFlash: + 
g.Expect(compStatus.Replicas).To(Equal(int32(1))) + default: + g.Expect(compStatus.Replicas).To(BeZero()) + } + } + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, []*v1alpha1.TiFlashGroup{flashg})).To(Succeed()) + g.Expect(utiltidb.IsTiDBConnectable(ctx, k8sClient, fw, + tc.Namespace, tc.Name, dbg.Name, "root", "", "")).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking the store labels and server labels") + svcName := fmt.Sprintf("%s-%s", tc.Name, dbg.Name) + dsn, cancel, err := utiltidb.PortForwardAndGetTiDBDSN(fw, tc.Namespace, svcName, "root", "", "test", "charset=utf8mb4") + Expect(err).To(BeNil()) + defer cancel() + db, err := sql.Open("mysql", dsn) + Expect(err).To(BeNil()) + defer db.Close() + + Eventually(func(g Gomega) { // retry as the labels may need some time to be set + plRows, err := db.QueryContext(ctx, "SHOW PLACEMENT LABELS") + g.Expect(err).To(BeNil()) + defer plRows.Close() + plKeyToValues := make(map[string]string) + for plRows.Next() { + var key, values string + err = plRows.Scan(&key, &values) + g.Expect(err).To(BeNil()) + plKeyToValues[key] = values + } + g.Expect(plKeyToValues).To(HaveKey("zone")) // short name for `topology.kubernetes.io/zone` + g.Expect(plKeyToValues).To(HaveKey("host")) // short name for `kubernetes.io/hostname` + + storeRows, err := db.QueryContext(ctx, "SELECT store_id,label FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS") + g.Expect(err).To(BeNil()) + defer storeRows.Close() + storeKeyToValues := make(map[int]string) + for storeRows.Next() { + var storeID int + var label string + err = storeRows.Scan(&storeID, &label) + g.Expect(err).To(BeNil()) + storeKeyToValues[storeID] = label + } + g.Expect(storeKeyToValues).To(HaveLen(2)) // TiKV and TiFlash + for _, v := range storeKeyToValues { + g.Expect(v).To(ContainSubstring(`"key": "zone"`)) + g.Expect(v).To(ContainSubstring(`"key": 
"host"`)) + } + + serverRows, err := db.QueryContext(ctx, "SELECT IP,LABELS FROM INFORMATION_SCHEMA.TIDB_SERVERS_INFO") + g.Expect(err).To(BeNil()) + defer serverRows.Close() + serverKeyToValues := make(map[string]string) + for serverRows.Next() { + var ip, labels string + err = serverRows.Scan(&ip, &labels) + g.Expect(err).To(BeNil()) + serverKeyToValues[ip] = labels + } + g.Expect(serverKeyToValues).To(HaveLen(1)) + for _, v := range serverKeyToValues { + g.Expect(v).To(ContainSubstring(`zone=`)) + g.Expect(v).To(ContainSubstring(`host=`)) + } + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) //nolint:mnd // easy to understand + }) + + It("should enable readiness probe for PD, TiKV and TiFlash", func() { + By("Creating the components with readiness probe enabled") + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Checking the status of the cluster and the connection to the TiDB service") + Eventually(func(g Gomega) { + tcGet, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(len(tcGet.Status.PD)).NotTo(BeZero()) + for _, compStatus := range tcGet.Status.Components { + switch compStatus.Kind { + case v1alpha1.ComponentKindPD, v1alpha1.ComponentKindTiKV, v1alpha1.ComponentKindTiDB: + g.Expect(compStatus.Replicas).To(Equal(int32(1))) + default: + g.Expect(compStatus.Replicas).To(BeZero()) + } + } + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + g.Expect(utiltidb.IsTiDBConnectable(ctx, k8sClient, fw, + tc.Namespace, tc.Name, dbg.Name, "root", "", "")).To(Succeed()) + 
}).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Checking the readiness probe in the Pods") + var podList corev1.PodList + Expect(k8sClient.List(ctx, &podList, client.InNamespace(tc.Namespace))).To(Succeed()) + for _, pod := range podList.Items { + switch pod.Labels[v1alpha1.LabelKeyComponent] { + case v1alpha1.LabelValComponentTiDB: // TiDB enables Readiness probe by default + Expect(pod.Spec.Containers).To(HaveLen(1)) + Expect(pod.Spec.Containers[0].ReadinessProbe).NotTo(BeNil()) + Expect(pod.Spec.Containers[0].ReadinessProbe.TimeoutSeconds).To(Equal(int32(1))) + Expect(pod.Spec.Containers[0].ReadinessProbe.FailureThreshold).To(Equal(int32(3))) + Expect(pod.Spec.Containers[0].ReadinessProbe.SuccessThreshold).To(Equal(int32(1))) + Expect(pod.Spec.Containers[0].ReadinessProbe.InitialDelaySeconds).To(Equal(int32(10))) //nolint:mnd // default value in Operator + Expect(pod.Spec.Containers[0].ReadinessProbe.PeriodSeconds).To(Equal(int32(10))) //nolint:mnd // easy to understand + Expect(pod.Spec.Containers[0].ReadinessProbe.TCPSocket).NotTo(BeNil()) + } + } + }) + + It("should be able to gracefully shutdown tidb", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(3)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + g.Expect(utiltidb.IsTiDBConnectable(ctx, k8sClient, fw, + tc.Namespace, tc.Name, dbg.Name, "root", "", 
"")).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Connect to the TiDB cluster to run transactions") + svcName := fmt.Sprintf("%s-%s", tc.Name, dbg.Name) + dsn, cancel, err := utiltidb.PortForwardAndGetTiDBDSN(fw, tc.Namespace, svcName, "root", "", "test", "charset=utf8mb4") + Expect(err).To(BeNil()) + defer cancel() + db, err := sql.Open("mysql", dsn) + Expect(err).To(BeNil()) + defer db.Close() + maxConn := 30 + db.SetMaxIdleConns(maxConn) + db.SetMaxOpenConns(maxConn) + + table := "test.e2e_test" + str := fmt.Sprintf("create table if not exists %s(id int primary key auto_increment, v int);", table) + _, err = db.Exec(str) + Expect(err).To(BeNil()) + + var totalCount, failCount atomic.Uint64 + var wg sync.WaitGroup + clientCtx, cancel2 := context.WithCancel(ctx) + defer cancel2() + for i := 0; i < maxConn; i++ { + id := i + wg.Add(1) + go func(db *sql.DB) { + defer wg.Done() + for { + select { + case <-clientCtx.Done(): + return + default: + err := utiltidb.ExecuteSimpleTransaction(db, id, table) + totalCount.Add(1) + if err != nil { + failCount.Add(1) + } + time.Sleep(50 * time.Millisecond) //nolint:mnd // easy to understand + } + } + }(db) + } + + By("Rolling restart TiDB") + var dbgGet v1alpha1.TiDBGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: dbg.Name}, &dbgGet)).To(Succeed()) + dbgGet.Spec.Template.Spec.Config = logLevelConfig + Expect(k8sClient.Update(ctx, &dbgGet)).To(Succeed()) + + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + + g.Expect(totalCount.Load()).To(BeNumerically(">", 0)) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + GinkgoWriter.Printf("total count: %d, fail count: %d\n", 
totalCount.Load(), failCount.Load()) + Expect(failCount.Load()).To(BeZero()) + cancel2() + wg.Wait() + }) + }) + + Context("Overlay", func() { + It("should be able to overlay the pod's terminationGracePeriodSeconds", func() { + pdg := data.NewPDGroup(ns.Name, "pdg", tc.Name, ptr.To(int32(1)), nil) + kvg := data.NewTiKVGroup(ns.Name, "kvg", tc.Name, ptr.To(int32(1)), nil) + dbg := data.NewTiDBGroup(ns.Name, "dbg", tc.Name, ptr.To(int32(1)), nil) + Expect(k8sClient.Create(ctx, pdg)).To(Succeed()) + Expect(k8sClient.Create(ctx, kvg)).To(Succeed()) + Expect(k8sClient.Create(ctx, dbg)).To(Succeed()) + + By("Waiting for the cluster to be ready") + Eventually(func(g Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) + + By("Recording the terminationGracePeriodSeconds before overlay") + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name)} + pods, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(pods.Items)).To(Equal(1)) + beforeTermSeconds := pods.Items[0].Spec.TerminationGracePeriodSeconds + Expect(beforeTermSeconds).NotTo(BeNil()) + beforeUID := pods.Items[0].UID + + By("Overlaying the terminationGracePeriodSeconds") + var dbgGet v1alpha1.TiDBGroup + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: tc.Namespace, Name: dbg.Name}, &dbgGet)).To(Succeed()) + overlaySeconds := *beforeTermSeconds + 5 + dbgGet.Spec.Template.Spec.Overlay = &v1alpha1.Overlay{ + Pod: &v1alpha1.PodOverlay{ + Spec: &corev1.PodSpec{TerminationGracePeriodSeconds: &overlaySeconds}, + }, + } + Expect(k8sClient.Update(ctx, &dbgGet)).To(Succeed()) + + Eventually(func(g 
Gomega) { + _, ready := utiltidb.IsClusterReady(k8sClient, tc.Name, tc.Namespace) + g.Expect(ready).To(BeTrue()) + g.Expect(utiltidb.AreAllInstancesReady(k8sClient, pdg, + []*v1alpha1.TiKVGroup{kvg}, []*v1alpha1.TiDBGroup{dbg}, nil)).To(Succeed()) + + pods, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) + Expect(err).To(BeNil()) + Expect(len(pods.Items)).To(Equal(1)) + Expect(pods.Items[0].UID).NotTo(Equal(beforeUID)) + Expect(pods.Items[0].Spec.TerminationGracePeriodSeconds).NotTo(BeNil()) + Expect(*pods.Items[0].Spec.TerminationGracePeriodSeconds).To(Equal(overlaySeconds)) + }) + }) + }) +}) diff --git a/tests/e2e/cluster/tls.go b/tests/e2e/cluster/tls.go new file mode 100644 index 00000000000..6ace719ae81 --- /dev/null +++ b/tests/e2e/cluster/tls.go @@ -0,0 +1,305 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cluster + +import ( + "bytes" + "context" + "fmt" + "text/template" + + "github.com/pingcap/tidb-operator/tests/e2e/utils/k8s" +) + +var tidbIssuerTmpl = ` +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ .ClusterName }}-selfsigned-ca-issuer + namespace: {{ .Namespace }} +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-ca + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-ca-secret + commonName: "TiDB CA" + isCA: true + issuerRef: + name: {{ .ClusterName }}-selfsigned-ca-issuer + kind: Issuer +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ .ClusterName }}-tidb-issuer + namespace: {{ .Namespace }} +spec: + ca: + secretName: {{ .ClusterName }}-ca-secret +` + +var tidbCertificatesTmpl = ` +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-{{ .TiDBGroupName}}-server-secret + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-{{ .TiDBGroupName}}-server-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB Server" + usages: + - server auth + dnsNames: + - "{{ .ClusterName }}-{{ .TiDBGroupName}}" + - "{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}" + - "{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}.svc" + - "*.{{ .ClusterName }}-{{ .TiDBGroupName}}" + - "*.{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}" + - "*.{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: {{ .ClusterName }}-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-{{ .TiDBGroupName}}-client-secret + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-{{ .TiDBGroupName}}-client-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + 
organizations: + - PingCAP + commonName: "TiDB Client" + usages: + - client auth + issuerRef: + name: {{ .ClusterName }}-tidb-issuer + kind: Issuer + group: cert-manager.io +` + +var tidbComponentsCertificatesTmpl = ` +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-{{ .PDGroupName }}-cluster-secret + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-{{ .PDGroupName }}-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "{{ .ClusterName }}-{{ .PDGroupName }}" + - "{{ .ClusterName }}-{{ .PDGroupName }}.{{ .Namespace }}" + - "{{ .ClusterName }}-{{ .PDGroupName }}.{{ .Namespace }}.svc" + - "{{ .ClusterName }}-{{ .PDGroupName }}-peer" + - "{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}" + - "{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}.svc" + - "*.{{ .ClusterName }}-{{ .PDGroupName }}-peer" + - "*.{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}" + - "*.{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: {{ .ClusterName }}-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-{{ .TiKVGroupName }}-cluster-secret + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-{{ .TiKVGroupName }}-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "{{ .ClusterName }}-{{ .TiKVGroupName }}" + - "{{ .ClusterName }}-{{ .TiKVGroupName }}.{{ .Namespace }}" + - "{{ .ClusterName }}-{{ .TiKVGroupName }}.{{ .Namespace }}.svc" + - "{{ .ClusterName }}-{{ .TiKVGroupName }}-peer" + - "{{ .ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}" + - "{{ 
.ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}.svc" + - "*.{{ .ClusterName }}-{{ .TiKVGroupName }}-peer" + - "*.{{ .ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}" + - "*.{{ .ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: {{ .ClusterName }}-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-{{ .TiDBGroupName }}-cluster-secret + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-{{ .TiDBGroupName }}-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "{{ .ClusterName }}-{{ .TiDBGroupName }}" + - "{{ .ClusterName }}-{{ .TiDBGroupName }}.{{ .Namespace }}" + - "{{ .ClusterName }}-{{ .TiDBGroupName }}.{{ .Namespace }}.svc" + - "{{ .ClusterName }}-{{ .TiDBGroupName }}-peer" + - "{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}" + - "{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}.svc" + - "*.{{ .ClusterName }}-{{ .TiDBGroupName }}-peer" + - "*.{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}" + - "*.{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: {{ .ClusterName }}-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-{{ .TiFlashGroupName }}-cluster-secret + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-{{ .TiFlashGroupName }}-cluster-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - server auth + - client auth + dnsNames: + - "{{ .ClusterName }}-{{ .TiFlashGroupName }}" + - "{{ .ClusterName }}-{{ .TiFlashGroupName }}.{{ 
.Namespace }}" + - "{{ .ClusterName }}-{{ .TiFlashGroupName }}.{{ .Namespace }}.svc" + - "{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer" + - "{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}" + - "{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}.svc" + - "*.{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer" + - "*.{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}" + - "*.{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}.svc" + ipAddresses: + - 127.0.0.1 + - ::1 + issuerRef: + name: {{ .ClusterName }}-tidb-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .ClusterName }}-cluster-client-secret + namespace: {{ .Namespace }} +spec: + secretName: {{ .ClusterName }}-cluster-client-secret + duration: 8760h # 365d + renewBefore: 360h # 15d + subject: + organizations: + - PingCAP + commonName: "TiDB" + usages: + - client auth + issuerRef: + name: {{ .ClusterName }}-tidb-issuer + kind: Issuer + group: cert-manager.io +` + +type tcTmplMeta struct { + Namespace string + ClusterName string + PDGroupName string + TiKVGroupName string + TiDBGroupName string + TiFlashGroupName string +} + +func installTiDBIssuer(ctx context.Context, yamlApplier *k8s.YAMLApplier, ns, clusterName string) error { + return installCert(ctx, yamlApplier, tidbIssuerTmpl, tcTmplMeta{Namespace: ns, ClusterName: clusterName}) +} + +func installTiDBCertificates(ctx context.Context, yamlApplier *k8s.YAMLApplier, ns, clusterName, tidbGroupName string) error { + return installCert(ctx, yamlApplier, tidbCertificatesTmpl, tcTmplMeta{ + Namespace: ns, ClusterName: clusterName, TiDBGroupName: tidbGroupName}) +} + +func installTiDBComponentsCertificates(ctx context.Context, yamlApplier *k8s.YAMLApplier, ns, clusterName string, + pdGroupName, tikvGroupName, tidbGroupName, tiFlashGroupName string) error { + return installCert(ctx, yamlApplier, tidbComponentsCertificatesTmpl, 
tcTmplMeta{ + Namespace: ns, ClusterName: clusterName, + PDGroupName: pdGroupName, TiKVGroupName: tikvGroupName, TiDBGroupName: tidbGroupName, TiFlashGroupName: tiFlashGroupName}) +} + +func installCert(ctx context.Context, yamlApplier *k8s.YAMLApplier, tmplStr string, tp any) error { + var buf bytes.Buffer + tmpl, err := template.New("template").Parse(tmplStr) + if err != nil { + return fmt.Errorf("error when parsing template: %w", err) + } + err = tmpl.Execute(&buf, tp) + if err != nil { + return fmt.Errorf("error when executing template: %w", err) + } + + return yamlApplier.Apply(ctx, &buf) +} diff --git a/tests/e2e/config/restclientgetter.go b/tests/e2e/config/restclientgetter.go new file mode 100644 index 00000000000..dca27ed5477 --- /dev/null +++ b/tests/e2e/config/restclientgetter.go @@ -0,0 +1,99 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "path/filepath" + "regexp" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/discovery" + diskcached "k8s.io/client-go/discovery/cached/disk" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/util/homedir" +) + +const ( + discoveryCacheTTL = 10 * time.Minute +) + +var defaultCacheDir = filepath.Join(homedir.HomeDir(), ".kube", "http-cache") + +// simpleRestClientGetter implements genericclioptions.RESTClientGetter +type simpleRestClientGetter struct { + clientcmdapi.Config +} + +// ToRESTConfig returns restconfig +func (getter *simpleRestClientGetter) ToRESTConfig() (*rest.Config, error) { + return getter.ToRawKubeConfigLoader().ClientConfig() +} + +// ToDiscoveryClient returns discovery client +func (getter *simpleRestClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + config, err := getter.ToRESTConfig() + if err != nil { + return nil, err + } + + config.Burst = 100 + + httpCacheDir := defaultCacheDir + discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube", "cache", "discovery"), config.Host) + + return diskcached.NewCachedDiscoveryClientForConfig(config, discoveryCacheDir, httpCacheDir, discoveryCacheTTL) +} + +// ToRESTMapper returns a restmapper +func (getter *simpleRestClientGetter) ToRESTMapper() (meta.RESTMapper, error) { + discoveryClient, err := getter.ToDiscoveryClient() + if err != nil { + return nil, err + } + + mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, discoveryClient, nil) + return expander, nil +} + +// ToRawKubeConfigLoader return kubeconfig loader as-is +func (getter *simpleRestClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { + return 
clientcmd.NewDefaultClientConfig(getter.Config, &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}) +} + +// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. +// Windows is really restrictive, so this is really restrictive +var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/.)]`) + +// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name. +func computeDiscoverCacheDir(parentDir, host string) string { + // strip the optional scheme from host if its there: + schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1) + // now do a simple collapse of non-AZ09 characters. + // Collisions are possible but unlikely. Even if we do collide the problem is short lived + safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_") + return filepath.Join(parentDir, safeHost) +} + +// NewSimpleRESTClientGetter initializes a new genericclioptions.RESTClientGetter from clientcmdapi.Config. +func NewSimpleRESTClientGetter(config *clientcmdapi.Config) genericclioptions.RESTClientGetter { + return &simpleRestClientGetter{*config} +} diff --git a/tests/e2e/data/cluster.go b/tests/e2e/data/cluster.go new file mode 100644 index 00000000000..1f6e2555fb0 --- /dev/null +++ b/tests/e2e/data/cluster.go @@ -0,0 +1,33 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package data + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +func NewCluster(namespace string) *v1alpha1.Cluster { + return &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: defaultClusterName, + }, + Spec: v1alpha1.ClusterSpec{ + UpgradePolicy: v1alpha1.UpgradePolicyDefault, + }, + } +} diff --git a/tests/e2e/data/data.go b/tests/e2e/data/data.go new file mode 100644 index 00000000000..5f418c60254 --- /dev/null +++ b/tests/e2e/data/data.go @@ -0,0 +1,41 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/runtime" +) + +type ( + ClusterPatch func(obj *v1alpha1.Cluster) + GroupPatch[T runtime.Group] func(obj T) +) + +const ( + defaultClusterName = "tc" + defaultPDGroupName = "pdg" + defaultTiDBGroupName = "dbg" + defaultTiKVGroupName = "kvg" + defaultTiFlashGroupName = "fg" + + defaultVersion = "v8.1.0" +) + +func WithReplicas[T runtime.Group](replicas *int32) GroupPatch[T] { + return func(obj T) { + obj.SetReplicas(replicas) + } +} diff --git a/tests/e2e/data/ns.go b/tests/e2e/data/ns.go new file mode 100644 index 00000000000..75937f0baa2 --- /dev/null +++ b/tests/e2e/data/ns.go @@ -0,0 +1,45 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "math/rand" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + nsBaseName = "e2e-tidb-operator" +) + +// NewNamespace returns a random namespace object for testing. +func NewNamespace() *v1.Namespace { + return &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsBaseName + "-" + randString(5), //nolint:mnd // refactor to a constant if needed + }, + } +} + +func randString(n int) string { + const letterBytes = "abcdefghijklmnopqrstuvwxyz" + b := make([]byte, n) + for i := range b { + //nolint:gosec // no need to use cryptographically secure random number generator + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + return string(b) +} diff --git a/tests/e2e/data/pd.go b/tests/e2e/data/pd.go new file mode 100644 index 00000000000..af335e250a4 --- /dev/null +++ b/tests/e2e/data/pd.go @@ -0,0 +1,53 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/runtime" +) + +func NewPDGroup(ns string, patches ...GroupPatch[*runtime.PDGroup]) *v1alpha1.PDGroup { + pdg := &runtime.PDGroup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: defaultPDGroupName, + }, + Spec: v1alpha1.PDGroupSpec{ + Cluster: v1alpha1.ClusterReference{Name: defaultClusterName}, + Version: defaultVersion, + Template: v1alpha1.PDTemplate{ + Spec: v1alpha1.PDTemplateSpec{ + Volumes: []v1alpha1.Volume{ + { + Name: "data", + Path: "/var/lib/pd", + For: []v1alpha1.VolumeUsage{{Type: "data"}}, + Storage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + } + for _, p := range patches { + p(pdg) + } + + return runtime.ToPDGroup(pdg) +} diff --git a/tests/e2e/e2e.go b/tests/e2e/e2e.go new file mode 100644 index 00000000000..d97b312594e --- /dev/null +++ b/tests/e2e/e2e.go @@ -0,0 +1,94 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "context" + "fmt" + "time" + + //nolint: stylecheck // too many changes, refactor later + . "github.com/onsi/ginkgo/v2" + //nolint: stylecheck // too many changes, refactor later + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/tests/e2e/utils/k8s" +) + +const ( + operatorNamespace = "tidb-admin" + operatorDeploymentName = "tidb-operator" +) + +var _ = SynchronizedBeforeSuite(func() []byte { + // TODO(csuzhangxc): options to install operator for upgrade test + + // load kubeconfig + restConfig, err := k8s.LoadConfig() + Expect(err).NotTo(HaveOccurred()) + + // check if CRD exists + discoveryClient, err := discovery.NewDiscoveryClientForConfig(restConfig) + Expect(err).NotTo(HaveOccurred()) + crdList, err := discoveryClient.ServerPreferredResources() + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + for _, resourceList := range crdList { + if resourceList.GroupVersion == v1alpha1.GroupVersion.String() { + for _, resource := range resourceList.APIResources { + if resource.Group == v1alpha1.GroupName && resource.Kind == "Cluster" { + return nil + } + } + } + } + return fmt.Errorf("CRD not found") + }).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) //nolint:mnd // refactor to use constant + + // check if operator deployment exists and is running + clientset, err := kubernetes.NewForConfig(restConfig) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + deployment, err2 := clientset.AppsV1().Deployments(operatorNamespace).Get( + context.Background(), operatorDeploymentName, metav1.GetOptions{}) + if err2 != nil { + return err2 + } + if deployment.Status.ReadyReplicas > 0 { + return nil + } + return fmt.Errorf("operator deployment not ready") + }).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) //nolint:mnd // refactor to use constant + + // set zone labels for nodes if not exists + nodeList, err := clientset.CoreV1().Nodes().List(context.Background(), 
metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + for _, node := range nodeList.Items { + if _, ok := node.Labels[corev1.LabelTopologyZone]; !ok { + node.Labels[corev1.LabelTopologyZone] = "az-1" // just for test + _, err = clientset.CoreV1().Nodes().Update(context.Background(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + } + + return nil +}, func([]byte) { + // This will run on all nodes +}) diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go new file mode 100644 index 00000000000..706949a7bb2 --- /dev/null +++ b/tests/e2e/e2e_test.go @@ -0,0 +1,35 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "io" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + _ "github.com/pingcap/tidb-operator/tests/e2e/cluster" + _ "github.com/pingcap/tidb-operator/tests/e2e/pd" +) + +func TestE2E(t *testing.T) { + ctrl.SetLogger(zap.New(zap.WriteTo(io.Discard))) + + RegisterFailHandler(Fail) + RunSpecs(t, "E2E Suite") +} diff --git a/tests/e2e/framework/framework.go b/tests/e2e/framework/framework.go new file mode 100644 index 00000000000..ddc012b3ff7 --- /dev/null +++ b/tests/e2e/framework/framework.go @@ -0,0 +1,83 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/tests/e2e/data" + "github.com/pingcap/tidb-operator/tests/e2e/utils/waiter" +) + +type Framework struct { + Namespace *corev1.Namespace + Cluster *v1alpha1.Cluster + + Client client.Client +} + +func New() *Framework { + return &Framework{} +} + +func (f *Framework) Setup() { + // TODO: get context and config path from options + c, err := newClient("", "") + gomega.Expect(err).To(gomega.Succeed()) + f.Client = c + + ginkgo.BeforeEach(func(ctx context.Context) { + ns := data.NewNamespace() + + f.Namespace = ns + f.Cluster = data.NewCluster(ns.Name) + + gomega.Expect( + f.Client.Create(ctx, f.Namespace), + ).To(gomega.Succeed()) + + gomega.Expect( + f.Client.Create(ctx, f.Cluster), + ).To(gomega.Succeed()) + + ginkgo.DeferCleanup(func(ctx context.Context) { + ginkgo.By("Delete the namespace") + + gomega.Expect( + f.Client.Delete(ctx, f.Namespace), + ).To(gomega.Succeed()) + + ginkgo.By("Ensure the namespace can be deleted") + + gomega.Expect( + waiter.WaitForObjectDeleted(ctx, f.Client, f.Namespace, waiter.ShortTaskTimeout), + ).To(gomega.Succeed()) + }) + }) +} + +func (*Framework) Must(err error) { + gomega.ExpectWithOffset(1, 
err).To(gomega.Succeed()) +} + +func (*Framework) True(b bool) { + gomega.ExpectWithOffset(1, b).To(gomega.BeTrue()) +} diff --git a/tests/e2e/framework/pd.go b/tests/e2e/framework/pd.go new file mode 100644 index 00000000000..d98f30e5439 --- /dev/null +++ b/tests/e2e/framework/pd.go @@ -0,0 +1,57 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/tests/e2e/utils/waiter" +) + +func (f *Framework) WaitForPDGroupReady(ctx context.Context, pdg *v1alpha1.PDGroup) { + // TODO: maybe wait for cluster ready + f.Must(waiter.WaitForPDsHealthy(ctx, f.Client, pdg, waiter.LongTaskTimeout)) + f.Must(waiter.WaitForPodsReady(ctx, f.Client, runtime.FromPDGroup(pdg), waiter.LongTaskTimeout)) +} + +func (f *Framework) WaitForPDGroupSuspended(ctx context.Context, pdg *v1alpha1.PDGroup) { + f.Must(waiter.WaitForListDeleted(ctx, f.Client, &corev1.PodList{}, waiter.LongTaskTimeout, client.InNamespace(f.Cluster.Namespace))) + f.Must(waiter.WaitForObjectCondition( + ctx, + f.Client, + runtime.FromPDGroup(pdg), + v1alpha1.PDGroupCondSuspended, + metav1.ConditionTrue, + waiter.ShortTaskTimeout, + )) +} + +func (f *Framework) 
WaitForPDGroupReadyAndNotSuspended(ctx context.Context, pdg *v1alpha1.PDGroup) { + f.Must(waiter.WaitForObjectCondition( + ctx, + f.Client, + runtime.FromPDGroup(pdg), + v1alpha1.PDGroupCondSuspended, + metav1.ConditionFalse, + waiter.ShortTaskTimeout, + )) + f.WaitForPDGroupReady(ctx, pdg) +} diff --git a/tests/e2e/framework/util.go b/tests/e2e/framework/util.go new file mode 100644 index 00000000000..8d63bc565f3 --- /dev/null +++ b/tests/e2e/framework/util.go @@ -0,0 +1,43 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package framework + +import ( + "fmt" + + "k8s.io/client-go/tools/clientcmd" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/scheme" +) + +func newClient(configPath, ctxName string) (client.Client, error) { + rule := clientcmd.NewDefaultClientConfigLoadingRules() + rule.ExplicitPath = configPath + + kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + rule, + &clientcmd.ConfigOverrides{CurrentContext: ctxName}).ClientConfig() + if err != nil { + return nil, fmt.Errorf("can't parse kubeconfig: %w", err) + } + c, err := client.New(kubeconfig, client.Options{ + Scheme: scheme.Scheme, + }) + if err != nil { + return nil, fmt.Errorf("can't new client: %w", err) + } + return c, nil +} diff --git a/tests/e2e/label/well_known.go b/tests/e2e/label/well_known.go new file mode 100644 index 00000000000..8d63aa12a5a --- /dev/null +++ b/tests/e2e/label/well_known.go @@ -0,0 +1,36 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package label + +import "github.com/onsi/ginkgo/v2" + +var ( + // Components + Cluster = ginkgo.Label("component:Cluster") + PD = ginkgo.Label("component:PD") + TiDB = ginkgo.Label("component:TiDB") + TiKV = ginkgo.Label("component:TiKV") + TiFlash = ginkgo.Label("component:TiFlash") + + // Priority + P0 = ginkgo.Label("P0") + P1 = ginkgo.Label("P1") + P2 = ginkgo.Label("P2") + + // Operations + Update = ginkgo.Label("op:Update") + Scale = ginkgo.Label("op:Scale") + Suspend = ginkgo.Label("op:Suspend") +) diff --git a/tests/e2e/pd/pd.go b/tests/e2e/pd/pd.go new file mode 100644 index 00000000000..f4b562776bf --- /dev/null +++ b/tests/e2e/pd/pd.go @@ -0,0 +1,160 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pd + +import ( + "context" + "time" + + "github.com/onsi/ginkgo/v2" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/tests/e2e/data" + "github.com/pingcap/tidb-operator/tests/e2e/framework" + "github.com/pingcap/tidb-operator/tests/e2e/label" + "github.com/pingcap/tidb-operator/tests/e2e/utils/waiter" +) + +var _ = ginkgo.Describe("PD", label.PD, func() { + f := framework.New() + f.Setup() + + ginkgo.Context("Basic", label.P0, func() { + ginkgo.It("support create PD with 1 replica", func(ctx context.Context) { + pdg := data.NewPDGroup( + f.Namespace.Name, + data.WithReplicas[*runtime.PDGroup](ptr.To[int32](1)), + ) + + ginkgo.By("Create PDGroup") + f.Must(f.Client.Create(ctx, pdg)) + f.WaitForPDGroupReady(ctx, pdg) + }) + + ginkgo.It("support create PD with 3 replica", func(ctx context.Context) { + pdg := data.NewPDGroup( + f.Namespace.Name, + data.WithReplicas[*runtime.PDGroup](ptr.To[int32](3)), + ) + + ginkgo.By("Create PDGroup") + f.Must(f.Client.Create(ctx, pdg)) + f.WaitForPDGroupReady(ctx, pdg) + }) + }) + + ginkgo.Context("Scale", label.P0, label.Scale, func() { + ginkgo.It("support scale PD form 3 to 5", func(ctx context.Context) { + pdg := data.NewPDGroup( + f.Namespace.Name, + data.WithReplicas[*runtime.PDGroup](ptr.To[int32](3)), + ) + + ginkgo.By("Create PDGroup") + f.Must(f.Client.Create(ctx, pdg)) + f.WaitForPDGroupReady(ctx, pdg) + + patch := client.MergeFrom(pdg.DeepCopy()) + pdg.Spec.Replicas = ptr.To[int32](5) //nolint:mnd // easy for test + + ginkgo.By("Change replica of the PDGroup") + f.Must(f.Client.Patch(ctx, pdg, patch)) + f.WaitForPDGroupReady(ctx, pdg) + }) + + ginkgo.It("support scale PD form 5 to 3", func(ctx context.Context) { + pdg := data.NewPDGroup( + f.Namespace.Name, + //nolint:mnd // easy for test + data.WithReplicas[*runtime.PDGroup](ptr.To[int32](5)), + ) + + 
ginkgo.By("Create PDGroup") + f.Must(f.Client.Create(ctx, pdg)) + f.WaitForPDGroupReady(ctx, pdg) + + patch := client.MergeFrom(pdg.DeepCopy()) + pdg.Spec.Replicas = ptr.To[int32](3) + + ginkgo.By("Change replica of the PDGroup") + f.Must(f.Client.Patch(ctx, pdg, patch)) + f.WaitForPDGroupReady(ctx, pdg) + }) + }) + + ginkgo.Context("Update", label.P0, label.Update, func() { + ginkgo.It("support rolling update PD by change config file", func(ctx context.Context) { + pdg := data.NewPDGroup( + f.Namespace.Name, + data.WithReplicas[*runtime.PDGroup](ptr.To[int32](3)), + ) + + ginkgo.By("Create PDGroup") + f.Must(f.Client.Create(ctx, pdg)) + f.WaitForPDGroupReady(ctx, pdg) + + patch := client.MergeFrom(pdg.DeepCopy()) + pdg.Spec.Template.Spec.Config = `log.level = 'warn'` + + nctx, cancel := context.WithCancel(ctx) + ch := make(chan struct{}) + go func() { + defer close(ch) + defer ginkgo.GinkgoRecover() + f.Must(waiter.WaitPodsRollingUpdateOnce(nctx, f.Client, runtime.FromPDGroup(pdg), waiter.LongTaskTimeout)) + }() + + changeTime := time.Now() + ginkgo.By("Change replica of the PDGroup") + f.Must(f.Client.Patch(ctx, pdg, patch)) + f.Must(waiter.WaitForPodsRecreated(ctx, f.Client, runtime.FromPDGroup(pdg), changeTime, waiter.LongTaskTimeout)) + f.WaitForPDGroupReady(ctx, pdg) + cancel() + <-ch + }) + }) + + ginkgo.Context("Suspend", label.P0, label.Suspend, func() { + ginkgo.It("support suspend and resume PD", func(ctx context.Context) { + pdg := data.NewPDGroup( + f.Namespace.Name, + data.WithReplicas[*runtime.PDGroup](ptr.To[int32](3)), + ) + + ginkgo.By("Create PDGroup") + f.Must(f.Client.Create(ctx, pdg)) + f.WaitForPDGroupReadyAndNotSuspended(ctx, pdg) + + patch := client.MergeFrom(f.Cluster.DeepCopy()) + f.Cluster.Spec.SuspendAction = &v1alpha1.SuspendAction{ + SuspendCompute: true, + } + + ginkgo.By("Suspend cluster") + f.Must(f.Client.Patch(ctx, f.Cluster, patch)) + f.WaitForPDGroupSuspended(ctx, pdg) + + patch = client.MergeFrom(f.Cluster.DeepCopy()) + 
f.Cluster.Spec.SuspendAction = nil + + ginkgo.By("Resume cluster") + f.Must(f.Client.Patch(ctx, f.Cluster, patch)) + f.WaitForPDGroupReadyAndNotSuspended(ctx, pdg) + }) + }) +}) diff --git a/tests/e2e/utils/data/cluster.go b/tests/e2e/utils/data/cluster.go new file mode 100644 index 00000000000..1621832161b --- /dev/null +++ b/tests/e2e/utils/data/cluster.go @@ -0,0 +1,191 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO: migrate to tests/e2e/data pkg +package data + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +const ( + imageRegistry = "gcr.io/pingcap-public/dbaas/" + helperImage = "gcr.io/pingcap-public/dbaas/busybox:1.36.0" + + version = "v8.1.0" + GiB = 1024 * 1024 * 1024 +) + +func StorageSizeGi2quantity(sizeGi uint32) resource.Quantity { + return *resource.NewQuantity(int64(sizeGi)*GiB, resource.BinarySI) +} + +func NewCluster(namespace, name string) *v1alpha1.Cluster { + return &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: v1alpha1.ClusterSpec{ + UpgradePolicy: v1alpha1.UpgradePolicyDefault, + }, + } +} + +func NewPDGroup(namespace, name, clusterName string, replicas *int32, apply func(*v1alpha1.PDGroup)) *v1alpha1.PDGroup { + pdg := &v1alpha1.PDGroup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + 
Name: name, + Labels: map[string]string{ + v1alpha1.LabelKeyGroup: name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyCluster: clusterName, + }, + }, + Spec: v1alpha1.PDGroupSpec{ + Cluster: v1alpha1.ClusterReference{Name: clusterName}, + Replicas: replicas, + Version: version, + Template: v1alpha1.PDTemplate{ + Spec: v1alpha1.PDTemplateSpec{ + Image: ptr.To(imageRegistry + "pd"), + Volumes: []v1alpha1.Volume{ + { + Name: "data", + Path: "/var/lib/pd", + For: []v1alpha1.VolumeUsage{{Type: "data"}}, + Storage: StorageSizeGi2quantity(1), + }, + }, + }, + }, + }, + } + if apply != nil { + apply(pdg) + } + return pdg +} + +func NewTiKVGroup(namespace, name, clusterName string, replicas *int32, apply func(*v1alpha1.TiKVGroup)) *v1alpha1.TiKVGroup { + kvg := &v1alpha1.TiKVGroup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: map[string]string{ + v1alpha1.LabelKeyGroup: name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + v1alpha1.LabelKeyCluster: clusterName, + }, + }, + Spec: v1alpha1.TiKVGroupSpec{ + Cluster: v1alpha1.ClusterReference{Name: clusterName}, + Version: version, + Replicas: replicas, + Template: v1alpha1.TiKVTemplate{ + Spec: v1alpha1.TiKVTemplateSpec{ + Image: ptr.To(imageRegistry + "tikv"), + Volumes: []v1alpha1.Volume{ + { + Name: "data", + Path: "/var/lib/tikv", + For: []v1alpha1.VolumeUsage{{Type: "data"}}, + Storage: StorageSizeGi2quantity(1), + }, + }, + }, + }, + }, + } + if apply != nil { + apply(kvg) + } + return kvg +} + +func NewTiDBGroup(namespace, name, clusterName string, replicas *int32, apply func(group *v1alpha1.TiDBGroup)) *v1alpha1.TiDBGroup { + tg := &v1alpha1.TiDBGroup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: map[string]string{ + v1alpha1.LabelKeyGroup: name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyCluster: clusterName, + }, + }, + Spec: v1alpha1.TiDBGroupSpec{ + Cluster: 
v1alpha1.ClusterReference{Name: clusterName}, + Version: version, + Replicas: replicas, + Template: v1alpha1.TiDBTemplate{ + Spec: v1alpha1.TiDBTemplateSpec{ + Image: ptr.To(imageRegistry + "tidb"), + SlowLog: &v1alpha1.TiDBSlowLog{ + Image: ptr.To(helperImage), + }, + }, + }, + }, + } + if apply != nil { + apply(tg) + } + return tg +} + +func NewTiFlashGroup(namespace, name, clusterName string, replicas *int32, + apply func(group *v1alpha1.TiFlashGroup)) *v1alpha1.TiFlashGroup { + flashg := &v1alpha1.TiFlashGroup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: map[string]string{ + v1alpha1.LabelKeyGroup: name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, + v1alpha1.LabelKeyCluster: clusterName, + }, + }, + Spec: v1alpha1.TiFlashGroupSpec{ + Cluster: v1alpha1.ClusterReference{Name: clusterName}, + Version: version, + Replicas: replicas, + Template: v1alpha1.TiFlashTemplate{ + Spec: v1alpha1.TiFlashTemplateSpec{ + Image: ptr.To(imageRegistry + "tiflash"), + Volumes: []v1alpha1.Volume{ + { + Name: "data", + Path: "/data0", // different path format + For: []v1alpha1.VolumeUsage{{Type: "data"}}, + Storage: StorageSizeGi2quantity(1), + }, + }, + LogTailer: &v1alpha1.TiFlashLogTailer{ + Image: ptr.To(helperImage), + }, + }, + }, + }, + } + if apply != nil { + apply(flashg) + } + return flashg +} diff --git a/tests/e2e/utils/data/namespace.go b/tests/e2e/utils/data/namespace.go new file mode 100644 index 00000000000..75937f0baa2 --- /dev/null +++ b/tests/e2e/utils/data/namespace.go @@ -0,0 +1,45 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "math/rand" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + nsBaseName = "e2e-tidb-operator" +) + +// NewNamespace returns a random namespace object for testing. +func NewNamespace() *v1.Namespace { + return &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsBaseName + "-" + randString(5), //nolint:mnd // refactor to a constant if needed + }, + } +} + +func randString(n int) string { + const letterBytes = "abcdefghijklmnopqrstuvwxyz" + b := make([]byte, n) + for i := range b { + //nolint:gosec // no need to use cryptographically secure random number generator + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + return string(b) +} diff --git a/tests/e2e/utils/jwt/jwt.go b/tests/e2e/utils/jwt/jwt.go new file mode 100644 index 00000000000..79c374f055d --- /dev/null +++ b/tests/e2e/utils/jwt/jwt.go @@ -0,0 +1,95 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jwt + +import ( + "fmt" + "os/exec" + "path/filepath" + "runtime" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" +) + +// ref: https://raw.githubusercontent.com/CbcWestwolf/generate_jwt/master/JWKS.json +// +//nolint:lll // this is a long string +const jwksJSON = ` +{ + "keys": [ + { + "alg": "RS256", + "e": "AQAB", + "kid": "the-key-id-0", + "kty": "RSA", + "n": "q8G5n9XBidxmBMVJKLOBsmdOHrCqGf17y9-VUXingwDUZxRp2XbuLZLbJtLgcln1lC0L9BsogrWf7-pDhAzWovO6Ai4Aybu00tJ2u0g4j1aLiDdsy0gyvSb5FBoL08jFIH7t_JzMt4JpF487AjzvITwZZcnsrB9a9sdn2E5B_aZmpDGi2-Isf5osnlw0zvveTwiMo9ba416VIzjntAVEvqMFHK7vyHqXbfqUPAyhjLO-iee99Tg5AlGfjo1s6FjeML4xX7sAMGEy8FVBWNfpRU7ryTWoSn2adzyA_FVmtBvJNQBCMrrAhXDTMJ5FNi8zHhvzyBKHU0kBTS1UNUbP9w", + "use": "sig" + } + ] +} +` + +func GenerateJWKSSecret(ns, name string) v1.Secret { + return v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Data: map[string][]byte{ + v1alpha1.TiDBAuthTokenJWKS: []byte(jwksJSON), + }, + } +} + +func GenerateJWT(kid, sub, email, iss string) (string, error) { + _, filename, _, ok := runtime.Caller(0) + if !ok { + return "", fmt.Errorf("cannot get the runtime info") + } + curDir := filepath.Dir(filename) + toolPath := filepath.Join(curDir, "..", "..", "..", "..", "_output", "bin", "generate_jwt") + absPath, err := filepath.Abs(toolPath) + if err != nil { + return "", err + } + + cmd := exec.Command(absPath, "--kid", kid, "--sub", sub, "--email", email, "--iss", iss) + out, err := cmd.Output() + if err != nil { + return "", err + } + + // the output contains both the jwt and the public key, like: + // -----BEGIN PUBLIC KEY----- + // MIIBCgKCAQEAq8G5n9XBidxmBMVJKLOBsmdOHrCqGf17y9+VUXingwDUZxRp2Xbu + // LZLbJtLgcln1lC0L9BsogrWf7+pDhAzWovO6Ai4Aybu00tJ2u0g4j1aLiDdsy0gy + // vSb5FBoL08jFIH7t/JzMt4JpF487AjzvITwZZcnsrB9a9sdn2E5B/aZmpDGi2+Is + // 
f5osnlw0zvveTwiMo9ba416VIzjntAVEvqMFHK7vyHqXbfqUPAyhjLO+iee99Tg5 + // AlGfjo1s6FjeML4xX7sAMGEy8FVBWNfpRU7ryTWoSn2adzyA/FVmtBvJNQBCMrrA + // hXDTMJ5FNi8zHhvzyBKHU0kBTS1UNUbP9wIDAQAB + // -----END PUBLIC KEY----- + + //nolint:lll // this is a long string + // eyJhbGciOiJSUzI1NiIsImtpZCI6InRoZS1rZXktaWQtMCIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6InVzZXJAcGluZ2NhcC5jb20iLCJleHAiOjE3MjczNDE3NzEsImlhdCI6MTcyNzM0MDg3MSwiaXNzIjoiaXNzdWVyLWFiYyIsInN1YiI6InVzZXJAcGluZ2NhcC5jb20ifQ.jYdvNNvVJ4NmbMuG4ilU0_ajBwnB7P17zaJYwEQDtLrG6N-uWp75-dCX48CFX0OSldahI9-bpM_tzaCYeuzjMF5Ee2sMq1Mbz7FHg15oW8yYCfOeTnFG8f0YH15Ql3p62TJ-G7IVk9rDKIJnHFpeNSvbue_V0tiH5ZCaJztKDidJ6HkA6zoW90mltojVtaVV05d-amLbYIWB5nRX80mJpAOB_WBUK8e_kyPzL11-j9-34TFvVx78Bd4O43Jx-eC7VZUKNtFCewM47sLZbgabFNY-_f3bv-Nu_LaWXlaWq6BYgNErr7vIEJjjyeTe6vuDE6ziyB_QM-cwiNK8gQ7sAA + + // we only need the jwt part + slices := strings.Split(string(out), "\n") + jwt := slices[len(slices)-2] + return jwt, nil +} diff --git a/tests/e2e/utils/k8s/k8s.go b/tests/e2e/utils/k8s/k8s.go new file mode 100644 index 00000000000..51bda2921e8 --- /dev/null +++ b/tests/e2e/utils/k8s/k8s.go @@ -0,0 +1,125 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "os" + "slices" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/ptr" +) + +// LoadConfig loads the k8s config from the default location. 
// LoadConfig loads the k8s config from the default location: $KUBECONFIG
// when set, otherwise $HOME/.kube/config.
func LoadConfig() (*rest.Config, error) {
	// init k8s client with the current default config
	kubeconfigPath := os.Getenv("KUBECONFIG")
	if kubeconfigPath == "" {
		kubeconfigPath = os.ExpandEnv("$HOME/.kube/config")
	}
	return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
}

// CheckRollingRestartLogic checks if the rolling restart logic is correctly
// followed based on the provided list of Kubernetes watch events.
// The function first sorts events by time, then checks the initial pod
// creation events, and finally verifies the restart events to ensure that
// pods were replaced one at a time (each Deleted is followed by an Added of
// the same pod before any other pod is deleted).
// An example of a valid rolling restart event sequence:
//  1. pod1 added
//  2. pod2 added
//  3. pod2 deleted
//  4. pod2 added
//  5. pod1 deleted
//  6. pod1 added
//
// NOTE(review): events of types other than Added/Deleted are silently
// ignored by both phases below.
//
//nolint:gocyclo // refactor if possible
func CheckRollingRestartLogic(events []watch.Event) bool {
	if len(events) == 0 {
		return false
	}

	// Sort events by time, since the watch events are not guaranteed to be
	// delivered in order. Ties keep relative order (stable-ish via SortFunc
	// returning 0).
	slices.SortFunc(events, func(e1, e2 watch.Event) int {
		t1, t2 := getTimeFromEvent(e1), getTimeFromEvent(e2)
		if t1.Before(t2) {
			return -1
		} else if t1.After(t2) {
			return 1
		}
		return 0
	})

	// podCreated tracks which pods currently exist; restartEvents collects
	// everything from the first Deleted onward.
	podCreated := make(map[string]bool)
	restartEvents := make([]watch.Event, 0, len(events))

	// Phase 1 (creation): every Added before the first Deleted registers a
	// pod. The first Deleted ends the creation phase and starts the restart
	// phase; a Deleted before any Added is invalid.
	creationPhase := true
	for _, e := range events {
		if creationPhase {
			podName := e.Object.(*corev1.Pod).Name
			switch e.Type {
			case watch.Added:
				podCreated[podName] = true
			case watch.Deleted:
				if len(podCreated) == 0 {
					return false
				}
				creationPhase = false
				restartEvents = append(restartEvents, e)
			}
		} else {
			restartEvents = append(restartEvents, e)
		}
	}

	// Phase 2 (restart): events must strictly alternate Deleted -> Added for
	// one pod at a time. restartPodName holds the pod currently down; while
	// it is non-nil no other pod may be deleted, and only that pod may be
	// re-added.
	if len(restartEvents) == 0 {
		return false
	}
	var restartPodName *string
	for _, e := range restartEvents {
		podName := e.Object.(*corev1.Pod).Name

		switch e.Type {
		case watch.Added:
			// Invalid if no pod is pending re-creation, a different pod
			// comes back, or the pod was never deleted.
			if restartPodName == nil || *restartPodName != podName || podCreated[podName] {
				return false
			}
			podCreated[podName] = true
			restartPodName = nil
		case watch.Deleted:
			// Invalid if another pod is still down, or this pod doesn't exist.
			if restartPodName != nil || !podCreated[podName] {
				return false
			}
			restartPodName = ptr.To(podName)
			podCreated[podName] = false
		}
	}
	return true
}

// getTimeFromEvent returns the timestamp relevant to the event type:
// creation time for Added, deletion time for Deleted, zero otherwise.
func getTimeFromEvent(e watch.Event) time.Time {
	switch e.Type {
	case watch.Added:
		return e.Object.(*corev1.Pod).CreationTimestamp.Time
	case watch.Deleted:
		return e.Object.(*corev1.Pod).DeletionTimestamp.Time
	}
	return time.Time{}
}
package k8s

import (
	"math/rand"
	"testing"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// makeTestEvent builds a pod watch event whose creation AND deletion
// timestamps are both set to tim, so getTimeFromEvent resolves to tim for
// either event type.
func makeTestEvent(podName string, typ watch.EventType, tim time.Time) watch.Event {
	return watch.Event{
		Type: typ,
		Object: &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:              podName,
				CreationTimestamp: metav1.Time{Time: tim},
				DeletionTimestamp: &metav1.Time{Time: tim},
			},
		},
	}
}

// Arbitrary fixed base instant so test timestamps are deterministic.
const fixedUnixTime = 1257894000

// getTestTime returns the base instant shifted by s seconds.
func getTestTime(s int) time.Time {
	return time.Unix(fixedUnixTime+int64(s), 0)
}

// shuffleEvents randomly permutes events in place; CheckRollingRestartLogic
// must re-sort by timestamp, so shuffling exercises that path.
func shuffleEvents(events []watch.Event) []watch.Event {
	for i := range events {
		//nolint:gosec // no need to use cryptographically secure random number generator
		j := rand.Intn(i + 1)
		events[i], events[j] = events[j], events[i]
	}
	return events
}

func TestCheckRollingRestartLogic(t *testing.T) {
	tests := []struct {
		name                string
		eventsBeforeShuffle []watch.Event
		want                bool
	}{
		{
			// Three pods created, then restarted one at a time.
			name: "happy path",
			eventsBeforeShuffle: []watch.Event{
				makeTestEvent("pod1", watch.Added, getTestTime(1)),
				makeTestEvent("pod2", watch.Added, getTestTime(2)),
				makeTestEvent("pod3", watch.Added, getTestTime(3)),

				makeTestEvent("pod2", watch.Deleted, getTestTime(4)),
				makeTestEvent("pod2", watch.Added, getTestTime(5)),
				makeTestEvent("pod3", watch.Deleted, getTestTime(6)),
				makeTestEvent("pod3", watch.Added, getTestTime(7)),
				makeTestEvent("pod1", watch.Deleted, getTestTime(8)),
				makeTestEvent("pod1", watch.Added, getTestTime(9)),
			},
			want: true,
		},
		{
			name:                "Empty events",
			eventsBeforeShuffle: []watch.Event{},
			want:                false,
		},
		{
			// No restart phase at all — must be rejected.
			name: "only add events",
			eventsBeforeShuffle: []watch.Event{
				makeTestEvent("pod1", watch.Added, getTestTime(1)),
				makeTestEvent("pod2", watch.Added, getTestTime(2)),
				makeTestEvent("pod3", watch.Added, getTestTime(3)),
			},
			want: false,
		},
		{
			// pod1 is deleted but pod2 is added next instead of pod1.
			name: "Alternating delete and add events",
			eventsBeforeShuffle: []watch.Event{
				makeTestEvent("pod1", watch.Added, getTestTime(1)),
				makeTestEvent("pod1", watch.Deleted, getTestTime(2)),
				makeTestEvent("pod2", watch.Added, getTestTime(3)),
				makeTestEvent("pod2", watch.Deleted, getTestTime(4)),
			},
			want: false,
		},
		{
			// Two pods down concurrently — violates one-at-a-time restarts.
			name: "two pods are deleted at the same time",
			eventsBeforeShuffle: []watch.Event{
				makeTestEvent("pod2", watch.Added, getTestTime(1)),
				makeTestEvent("pod1", watch.Added, getTestTime(2)),
				makeTestEvent("pod3", watch.Added, getTestTime(3)),
				makeTestEvent("pod1", watch.Deleted, getTestTime(4)),
				makeTestEvent("pod3", watch.Deleted, getTestTime(4)),
				makeTestEvent("pod1", watch.Added, getTestTime(5)),
				makeTestEvent("pod3", watch.Added, getTestTime(5)),
				makeTestEvent("pod2", watch.Deleted, getTestTime(6)),
				makeTestEvent("pod2", watch.Added, getTestTime(7)),
			},
			want: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := CheckRollingRestartLogic(shuffleEvents(tt.eventsBeforeShuffle)); got != tt.want {
				t.Errorf("CheckRollingRestartLogic() = %v, want %v", got, tt.want)
			}
		})
	}
}
package k8s

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/cli-runtime/pkg/resource"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/portforward"
	"k8s.io/client-go/transport/spdy"
	"k8s.io/kubectl/pkg/polymorphichelpers"
)

const (
	// getPodTimeout bounds the attachable-pod lookup in Forward.
	getPodTimeout = time.Minute
	localHost     = "127.0.0.1"
)

// PortForwarder represents an interface which can forward local ports to a pod.
type PortForwarder interface {
	// Forward resolves resourceName (pod, svc, deployment, ...) in namespace
	// to an attachable pod and forwards the given ports on the given local
	// addresses. The returned CancelFunc stops the forwarding.
	Forward(namespace, resourceName string, addresses []string, ports []string) ([]portforward.ForwardedPort, context.CancelFunc, error)
	// ForwardPod forwards ports directly to an already-resolved running pod.
	ForwardPod(pod *corev1.Pod, addresses []string, ports []string) ([]portforward.ForwardedPort, context.CancelFunc, error)
}

// portForwarder implements PortForwarder interface
type portForwarder struct {
	genericclioptions.RESTClientGetter
	// ctx is the parent context of every forwarding session; cancelling it
	// stops all of them. NOTE(review): storing a context in a struct is
	// unconventional but intentional here — sessions outlive single calls.
	ctx    context.Context
	config *rest.Config
	client kubernetes.Interface
}

var _ PortForwarder = &portForwarder{}

// forwardPorts dials the pod's portforward subresource over SPDY, starts the
// forwarding goroutine, and returns once the listener is ready (or errors).
// The first parameter (a pod key, unused) is kept for interface stability.
func (f *portForwarder) forwardPorts(_, method string, urlObj *url.URL, addresses, ports []string) (
	forwardedPorts []portforward.ForwardedPort, cancel context.CancelFunc, err error) {
	transport, upgrader, err := spdy.RoundTripperFor(f.config)
	if err != nil {
		return nil, nil, err
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, method, urlObj)
	// The pipe carries the forwarder's stdout/stderr; it is drained below so
	// the forwarder never blocks on writes.
	r, w := io.Pipe()
	ctx, cancel := context.WithCancel(f.ctx)
	readyChan := make(chan struct{})
	fw, err := portforward.NewOnAddresses(dialer, addresses, ports, ctx.Done(), readyChan, w, w)
	if err != nil {
		cancel()
		return nil, nil, err
	}

	// logging stdout/stderr of port forwarding
	go func() {
		// close pipe if the context is done
		<-ctx.Done()
		_ = w.Close()
	}()

	go func() {
		lineScanner := bufio.NewScanner(r)
		for lineScanner.Scan() {
			//nolint:gocritic // keep it for debug
			// log.Logf("log from port forwarding %q: %s", podKey, lineScanner.Text())
		}
	}()

	// run port forwarding
	errChan := make(chan error)
	go func() {
		errChan <- fw.ForwardPorts()
	}()

	// wait for ready or error
	select {
	case <-readyChan:
		break
	case err = <-errChan:
		cancel()
		return nil, nil, err
	}

	forwardedPorts, err = fw.GetPorts()
	if err != nil {
		cancel()
		return nil, nil, err
	}

	return forwardedPorts, cancel, nil
}

// Forward resolves a kubectl-style resource name ("pods/x", "svc/y", ...) to
// an attachable running pod and forwards to it.
func (f *portForwarder) Forward(namespace, resourceName string, addresses, ports []string) (
	forwardedPorts []portforward.ForwardedPort, cancel context.CancelFunc, err error) {
	builder := resource.NewBuilder(f).
		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
		ContinueOnError().
		NamespaceParam(namespace).DefaultNamespace()

	builder.ResourceNames("pods", resourceName)

	obj, err := builder.Do().Object()
	if err != nil {
		return nil, nil, err
	}

	// For svc/deployment/etc., pick a pod we can actually attach to.
	forwardablePod, err := polymorphichelpers.AttachablePodForObjectFn(f, obj, getPodTimeout)
	if err != nil {
		return nil, nil, err
	}

	pod, err := f.client.CoreV1().Pods(namespace).Get(context.TODO(), forwardablePod.Name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, err
	}
	return f.ForwardPod(pod, addresses, ports)
}

// ForwardPod forwards ports to pod, which must be in the Running phase.
func (f *portForwarder) ForwardPod(pod *corev1.Pod, addresses, ports []string) (
	forwardedPorts []portforward.ForwardedPort, cancel context.CancelFunc, err error) {
	if pod.Status.Phase != corev1.PodRunning {
		return nil, nil, fmt.Errorf("unable to forward port because pod is not running. Current status=%v", pod.Status.Phase)
	}

	req := f.client.CoreV1().RESTClient().Post().
		Resource("pods").
		Namespace(pod.Namespace).
		Name(pod.Name).
		SubResource("portforward")

	return f.forwardPorts(fmt.Sprintf("%s/%s", pod.Namespace, pod.Name), "POST", req.URL(), addresses, ports)
}

// NewPortForwarder builds a PortForwarder from a REST client getter; ctx
// bounds the lifetime of all forwarding sessions it creates.
func NewPortForwarder(ctx context.Context, restClientGetter genericclioptions.RESTClientGetter) (PortForwarder, error) {
	config, err := restClientGetter.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	f := &portForwarder{
		RESTClientGetter: restClientGetter,
		ctx:              ctx,
		config:           config,
		client:           client,
	}
	return f, nil
}

// ForwardOnePort is a helper utility to forward one port of Kubernetes
// resource. It asks for an ephemeral local port ("0:<port>") and returns the
// local host/port actually bound; the caller must invoke cancel when done.
func ForwardOnePort(fw PortForwarder, ns, resourceName string, port uint16) (
	fwdHost string, fwdPort uint16, cancel context.CancelFunc, err error) {
	ports := []string{fmt.Sprintf("0:%d", port)}
	forwardedPorts, cancel, err := fw.Forward(ns, resourceName, []string{localHost}, ports)
	if err != nil {
		return "", 0, nil, err
	}
	var localPort uint16
	var found bool
	for _, p := range forwardedPorts {
		if p.Remote == port {
			localPort = p.Local
			found = true
		}
	}
	if !found {
		cancel()
		return "", 0, nil, errors.New("unexpected error")
	}
	return localHost, localPort, cancel, nil
}
+// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "context" + "errors" + "io" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/restmapper" +) + +// YAMLApplier applies a list of unstructured objects to the K8s cluster. +type YAMLApplier struct { + DynamicClient dynamic.Interface + Mapper *restmapper.DeferredDiscoveryRESTMapper +} + +// NewYAMLApplier creates a new YAMLApplier. +func NewYAMLApplier(dynamicClient dynamic.Interface, mapper *restmapper.DeferredDiscoveryRESTMapper) *YAMLApplier { + return &YAMLApplier{ + DynamicClient: dynamicClient, + Mapper: mapper, + } +} + +// Apply applies a list of unstructured objects to the K8s cluster. +func (a *YAMLApplier) Apply(ctx context.Context, r io.Reader) error { + objs, err := DecodeYAML(r) + if err != nil { + return err + } + return ApplyUnstructured(ctx, objs, a.DynamicClient, a.Mapper) +} + +// DecodeYAML decodes a YAML file into a list of unstructured objects. +func DecodeYAML(r io.Reader) ([]*unstructured.Unstructured, error) { + var objects []*unstructured.Unstructured + //nolint:mnd // refactor to a constant if needed + decoder := yaml.NewYAMLOrJSONDecoder(r, 4096) + for { + obj := &unstructured.Unstructured{} + if err := decoder.Decode(obj); err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, err + } + objects = append(objects, obj) + } + return objects, nil +} + +// ApplyUnstructured applies a list of unstructured objects to the K8s cluster. 
+func ApplyUnstructured(ctx context.Context, objects []*unstructured.Unstructured, + dynamicClient dynamic.Interface, mapper *restmapper.DeferredDiscoveryRESTMapper) error { + for _, obj := range objects { + gvk := obj.GroupVersionKind() + mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + gvr := mapping.Resource + + _, err = dynamicClient.Resource(gvr).Namespace(obj.GetNamespace()).Apply( + ctx, obj.GetName(), obj, metav1.ApplyOptions{FieldManager: "operator-e2e"}) + if err != nil { + return err + } + } + return nil +} diff --git a/tests/e2e/utils/tidb/tidb.go b/tests/e2e/utils/tidb/tidb.go new file mode 100644 index 00000000000..23f71d4bfdd --- /dev/null +++ b/tests/e2e/utils/tidb/tidb.go @@ -0,0 +1,392 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tidb + +import ( + "context" + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/go-sql-driver/mysql" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/tests/e2e/utils/k8s" +) + +var dummyCancel = func() {} + +// PortForwardAndGetTiDBDSN create a port forward for TiDB and return its DSN. 
+func PortForwardAndGetTiDBDSN(fw k8s.PortForwarder, ns, svcName, + user, password, database, params string) (string, context.CancelFunc, error) { + localHost, localPort, cancel, err := k8s.ForwardOnePort(fw, ns, fmt.Sprintf("svc/%s", svcName), uint16(v1alpha1.DefaultTiDBPortClient)) + if err != nil { + return "", dummyCancel, err + } + return fmt.Sprintf("%s:%s@(%s:%d)/%s?%s", user, password, localHost, localPort, database, params), cancel, nil +} + +// IsTiDBConnectable checks whether the tidb cluster is connectable. +func IsTiDBConnectable(ctx context.Context, cli client.Client, fw k8s.PortForwarder, + ns, tcName, dbgName, user, password, tlsSecretName string, +) error { + var svcList corev1.ServiceList + if err := cli.List(ctx, &svcList, client.InNamespace(ns), client.MatchingLabels{ + v1alpha1.LabelKeyCluster: tcName, v1alpha1.LabelKeyGroup: dbgName, + }); err != nil { + return fmt.Errorf("failed to list tidb service %s for tidb cluster %s/%s: %w", dbgName, ns, tcName, err) + } + var svc *corev1.Service + for i := range svcList.Items { + item := &svcList.Items[i] + if item.Spec.ClusterIP != corev1.ClusterIPNone { + svc = item + break + } + } + if svc == nil { + return fmt.Errorf("tidb service %s for tidb cluster %s/%s not found", dbgName, ns, tcName) + } + + // enable "cleartext client side plugin" for `tidb_auth_token`. 
+ // ref: https://github.com/go-sql-driver/mysql?tab=readme-ov-file#allowcleartextpasswords + parms := []string{"charset=utf8", "allowCleartextPasswords=true"} + if tlsSecretName != "" { + var secret corev1.Secret + if err := cli.Get(ctx, client.ObjectKey{Namespace: ns, Name: tlsSecretName}, &secret); err != nil { + return fmt.Errorf("failed to get TLS secret %s/%s: %w", ns, tlsSecretName, err) + } + + rootCAs := x509.NewCertPool() + rootCAs.AppendCertsFromPEM(secret.Data[corev1.ServiceAccountRootCAKey]) + clientCert, certExists := secret.Data[corev1.TLSCertKey] + clientKey, keyExists := secret.Data[corev1.TLSPrivateKeyKey] + if !certExists || !keyExists { + return fmt.Errorf("cert or key does not exist in secret %s/%s", ns, tlsSecretName) + } + + tlsCert, err := tls.X509KeyPair(clientCert, clientKey) + if err != nil { + return fmt.Errorf("unable to load certificates from secret %s/%s: %w", ns, tlsSecretName, err) + } + tlsKey := fmt.Sprintf("%s-%s", ns, tlsSecretName) + err = mysql.RegisterTLSConfig(tlsKey, &tls.Config{ + RootCAs: rootCAs, + Certificates: []tls.Certificate{tlsCert}, + InsecureSkipVerify: true, //nolint:gosec // skip verify because we may use self-signed certificate + }) + if err != nil { + return fmt.Errorf("unable to register TLS config %s: %w", tlsKey, err) + } + + parms = append(parms, fmt.Sprintf("tls=%s", tlsKey)) + } + + var db *sql.DB + dsn, cancel, err := PortForwardAndGetTiDBDSN(fw, ns, svc.Name, user, password, "test", strings.Join(parms, "&")) + if err != nil { + return fmt.Errorf("failed to port forward for tidb service %s/%s: %w", ns, svc.Name, err) + } + defer cancel() + if db, err = sql.Open("mysql", dsn); err != nil { + return fmt.Errorf("failed to open MySQL connection to tidb service %s/%s: %w", ns, svc.Name, err) + } + defer db.Close() + if err := db.Ping(); err != nil { + return fmt.Errorf("failed to ping TiDB server with tidb service %s/%s: %w", ns, svc.Name, err) + } + return nil +} + +// IsTiDBInserted checks whether the 
// IsTiDBInserted returns a wait.ConditionFunc that checks whether the
// tidb cluster has inserted some data: it port-forwards to the service
// named tc, connects as user/password to dbName, and reports true once
// `SELECT count(*)` on tableName is non-zero.
func IsTiDBInserted(fw k8s.PortForwarder, ns, tc, user, password, dbName, tableName string) wait.ConditionFunc {
	return func() (bool, error) {
		parms := []string{"charset=utf8"}
		var db *sql.DB
		dsn, cancel, err := PortForwardAndGetTiDBDSN(fw, ns, tc, user, password, dbName, strings.Join(parms, "&"))
		if err != nil {
			return false, err
		}

		defer cancel()
		if db, err = sql.Open("mysql", dsn); err != nil {
			return false, err
		}

		defer db.Close()
		//nolint:gocritic // use := will shadow err
		if err = db.Ping(); err != nil {
			return false, err
		}

		// Count rows in tableName; any query error is returned to the poller.
		getCntFn := func(db *sql.DB, tableName string) (int, error) {
			var cnt int
			row := db.QueryRow(fmt.Sprintf("SELECT count(*) FROM %s", tableName))

			err = row.Scan(&cnt)
			if err != nil {
				return cnt, fmt.Errorf("failed to scan count from %s, %w", tableName, err)
			}
			return cnt, nil
		}

		cnt, err := getCntFn(db, tableName)
		if err != nil {
			return false, err
		}
		if cnt == 0 {
			return false, nil
		}

		return true, nil
	}
}

// IsClusterReady fetches the Cluster ns/name and reports whether its
// Available condition is true; on success it also returns the fetched object.
// Any Get error is treated as "not ready".
func IsClusterReady(cli client.Client, name, ns string) (*v1alpha1.Cluster, bool) {
	var tcGet v1alpha1.Cluster
	if err := cli.Get(context.TODO(), client.ObjectKey{Namespace: ns, Name: name}, &tcGet); err == nil {
		availCond := meta.FindStatusCondition(tcGet.Status.Conditions, v1alpha1.ClusterCondAvailable)
		if availCond != nil && availCond.Status == metav1.ConditionTrue {
			return &tcGet, true
		}
	}

	return nil, false
}

// AreAllInstancesReady checks that every instance of the PD group and of all
// the given TiKV/TiDB/TiFlash groups is healthy, returning the first failure.
func AreAllInstancesReady(cli client.Client, pdg *v1alpha1.PDGroup, kvg []*v1alpha1.TiKVGroup,
	dbg []*v1alpha1.TiDBGroup, flashg []*v1alpha1.TiFlashGroup,
) error {
	if err := AreAllPDHealthy(cli, pdg); err != nil {
		return err
	}
	for _, kv := range kvg {
		if err := AreAllTiKVHealthy(cli, kv); err != nil {
			return err
		}
	}
	for _, db := range dbg {
		if err := AreAllTiDBHealthy(cli, db); err != nil {
			return err
		}
	}
	for _, flash := range flashg {
		if err := AreAllTiFlashHealthy(cli, flash); err != nil {
			return err
		}
	}
	return nil
}

// AreAllPDHealthy verifies that the PD group has exactly the desired number
// of PD custom resources, each initialized and healthy, and that every PD
// pod of the group is in the Running phase.
func AreAllPDHealthy(cli client.Client, pdg *v1alpha1.PDGroup) error {
	var pdList v1alpha1.PDList
	if err := cli.List(context.TODO(), &pdList, client.InNamespace(pdg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyGroup:     pdg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD,
	}); err != nil {
		return fmt.Errorf("failed to list pd %s/%s: %w", pdg.Namespace, pdg.Name, err)
	}
	if len(pdList.Items) != int(*pdg.Spec.Replicas) {
		return fmt.Errorf("pd %s/%s replicas %d not equal to %d", pdg.Namespace, pdg.Name, len(pdList.Items), *pdg.Spec.Replicas)
	}
	for i := range pdList.Items {
		pd := &pdList.Items[i]
		if !meta.IsStatusConditionPresentAndEqual(pd.Status.Conditions, v1alpha1.PDCondInitialized, metav1.ConditionTrue) {
			return fmt.Errorf("pd %s/%s is not initialized", pd.Namespace, pd.Name)
		}
		if !meta.IsStatusConditionPresentAndEqual(pd.Status.Conditions, v1alpha1.PDCondHealth, metav1.ConditionTrue) {
			return fmt.Errorf("pd %s/%s is not healthy", pd.Namespace, pd.Name)
		}
	}

	// Cross-check the actual pods backing the PD group.
	var podList corev1.PodList
	if err := cli.List(context.TODO(), &podList, client.InNamespace(pdg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyCluster:   pdg.Spec.Cluster.Name,
		v1alpha1.LabelKeyGroup:     pdg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD,
	}); err != nil {
		return fmt.Errorf("failed to list pd pod %s/%s: %w", pdg.Namespace, pdg.Name, err)
	}
	if len(podList.Items) != int(*pdg.Spec.Replicas) {
		return fmt.Errorf("pd %s/%s pod replicas %d not equal to %d", pdg.Namespace, pdg.Name, len(podList.Items), *pdg.Spec.Replicas)
	}
	for i := range podList.Items {
		pod := &podList.Items[i]
		if pod.Status.Phase != corev1.PodRunning {
			return fmt.Errorf("pd %s/%s pod %s is not running, current phase: %s", pdg.Namespace, pdg.Name, pod.Name, pod.Status.Phase)
		}
	}

	return nil
}
// AreAllTiKVHealthy verifies that the TiKV group has exactly the desired
// number of TiKV custom resources, each in the Serving store state, and that
// every TiKV pod of the group is in the Running phase.
func AreAllTiKVHealthy(cli client.Client, kvg *v1alpha1.TiKVGroup) error {
	var tikvList v1alpha1.TiKVList
	if err := cli.List(context.TODO(), &tikvList, client.InNamespace(kvg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyGroup:     kvg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV,
	}); err != nil {
		return fmt.Errorf("failed to list tikv %s/%s: %w", kvg.Namespace, kvg.Name, err)
	}
	if len(tikvList.Items) != int(*kvg.Spec.Replicas) {
		return fmt.Errorf("tikv %s/%s replicas %d not equal to %d", kvg.Namespace, kvg.Name, len(tikvList.Items), *kvg.Spec.Replicas)
	}
	for i := range tikvList.Items {
		tikv := &tikvList.Items[i]
		if tikv.Status.State != v1alpha1.StoreStateServing {
			return fmt.Errorf("tikv %s/%s is not Serving, current state: %s", tikv.Namespace, tikv.Name, tikv.Status.State)
		}
	}

	// Cross-check the actual pods backing the TiKV group.
	var podList corev1.PodList
	if err := cli.List(context.TODO(), &podList, client.InNamespace(kvg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyCluster:   kvg.Spec.Cluster.Name,
		v1alpha1.LabelKeyGroup:     kvg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV,
	}); err != nil {
		return fmt.Errorf("failed to list tikv pod %s/%s: %w", kvg.Namespace, kvg.Name, err)
	}
	if len(podList.Items) != int(*kvg.Spec.Replicas) {
		return fmt.Errorf("tikv %s/%s pod replicas %d not equal to %d", kvg.Namespace, kvg.Name, len(podList.Items), *kvg.Spec.Replicas)
	}
	for i := range podList.Items {
		pod := &podList.Items[i]
		if pod.Status.Phase != corev1.PodRunning {
			return fmt.Errorf("tikv %s/%s pod %s is not running, current phase: %s", kvg.Namespace, kvg.Name, pod.Name, pod.Status.Phase)
		}
	}

	return nil
}

// AreAllTiDBHealthy verifies that the TiDB group has exactly the desired
// number of TiDB custom resources, each with the Health condition true, and
// that every TiDB pod of the group is in the Running phase.
func AreAllTiDBHealthy(cli client.Client, dbg *v1alpha1.TiDBGroup) error {
	var tidbList v1alpha1.TiDBList
	if err := cli.List(context.TODO(), &tidbList, client.InNamespace(dbg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyGroup:     dbg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB,
	}); err != nil {
		return fmt.Errorf("failed to list tidb %s/%s: %w", dbg.Namespace, dbg.Name, err)
	}
	if len(tidbList.Items) != int(*dbg.Spec.Replicas) {
		return fmt.Errorf("tidb %s/%s replicas %d not equal to %d", dbg.Namespace, dbg.Name, len(tidbList.Items), *dbg.Spec.Replicas)
	}
	for i := range tidbList.Items {
		tidb := &tidbList.Items[i]
		if !meta.IsStatusConditionPresentAndEqual(tidb.Status.Conditions, v1alpha1.TiDBCondHealth, metav1.ConditionTrue) {
			return fmt.Errorf("tidb %s/%s is not healthy", tidb.Namespace, tidb.Name)
		}
	}

	// Cross-check the actual pods backing the TiDB group.
	var podList corev1.PodList
	if err := cli.List(context.TODO(), &podList, client.InNamespace(dbg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyCluster:   dbg.Spec.Cluster.Name,
		v1alpha1.LabelKeyGroup:     dbg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB,
	}); err != nil {
		return fmt.Errorf("failed to list tidb pod %s/%s: %w", dbg.Namespace, dbg.Name, err)
	}
	if len(podList.Items) != int(*dbg.Spec.Replicas) {
		return fmt.Errorf("tidb %s/%s pod replicas %d not equal to %d", dbg.Namespace, dbg.Name, len(podList.Items), *dbg.Spec.Replicas)
	}
	for i := range podList.Items {
		pod := &podList.Items[i]
		if pod.Status.Phase != corev1.PodRunning {
			return fmt.Errorf("tidb %s/%s pod %s is not running, current phase: %s", dbg.Namespace, dbg.Name, pod.Name, pod.Status.Phase)
		}
	}

	return nil
}

// AreAllTiFlashHealthy verifies that the TiFlash group has exactly the
// desired number of TiFlash custom resources, each in the Serving store
// state, and that every TiFlash pod of the group is in the Running phase.
func AreAllTiFlashHealthy(cli client.Client, flashg *v1alpha1.TiFlashGroup) error {
	var tiflashList v1alpha1.TiFlashList
	if err := cli.List(context.TODO(), &tiflashList, client.InNamespace(flashg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyGroup:     flashg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash,
	}); err != nil {
		return fmt.Errorf("failed to list tiflash %s/%s: %w", flashg.Namespace, flashg.Name, err)
	}
	if len(tiflashList.Items) != int(*flashg.Spec.Replicas) {
		return fmt.Errorf("tiflash %s/%s replicas %d not equal to %d",
			flashg.Namespace, flashg.Name, len(tiflashList.Items), *flashg.Spec.Replicas)
	}
	for i := range tiflashList.Items {
		tiflash := &tiflashList.Items[i]
		if tiflash.Status.State != v1alpha1.StoreStateServing {
			return fmt.Errorf("tiflash %s/%s is not Serving, current state: %s", tiflash.Namespace, tiflash.Name, tiflash.Status.State)
		}
	}

	// Cross-check the actual pods backing the TiFlash group.
	var podList corev1.PodList
	if err := cli.List(context.TODO(), &podList, client.InNamespace(flashg.Namespace), client.MatchingLabels{
		v1alpha1.LabelKeyCluster:   flashg.Spec.Cluster.Name,
		v1alpha1.LabelKeyGroup:     flashg.Name,
		v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash,
	}); err != nil {
		return fmt.Errorf("failed to list tiflash pod %s/%s: %w", flashg.Namespace, flashg.Name, err)
	}
	if len(podList.Items) != int(*flashg.Spec.Replicas) {
		return fmt.Errorf("tiflash %s/%s pod replicas %d not equal to %d",
			flashg.Namespace, flashg.Name, len(podList.Items), *flashg.Spec.Replicas)
	}
	for i := range podList.Items {
		pod := &podList.Items[i]
		if pod.Status.Phase != corev1.PodRunning {
			return fmt.Errorf("tiflash %s/%s pod %s is not running, current phase: %s", flashg.Namespace, flashg.Name, pod.Name, pod.Status.Phase)
		}
	}

	return nil
}

// ExecuteSimpleTransaction performs a transaction to insert or update the given id in the specified table.
func ExecuteSimpleTransaction(db *sql.DB, id int, table string) error {
	tx, err := db.Begin()
	if err != nil {
		return fmt.Errorf("failed to begin txn: %w", err)
	}

	// Prepare SQL statement to replace or insert a record
	//nolint:gosec // only replace table name in test
	str := fmt.Sprintf("replace into %s(id, v) values(?, ?);", table)
	if _, err = tx.Exec(str, id, id); err != nil {
		return fmt.Errorf("failed to exec statement: %w", err)
	}

	// Simulate a different operation by updating the value
	if _, err = tx.Exec(fmt.Sprintf("update %s set v = ? where id = ?;", table), id*2, id); err != nil {
		return fmt.Errorf("failed to exec update statement: %w", err)
	}

	// Simulate a long transaction: for ids divisible by 3, hold the
	// transaction open for 10 seconds before committing.
	if id%3 == 0 {
		//nolint:mnd // just for testing
		time.Sleep(10 * time.Second)
	}

	// Commit the transaction
	if err = tx.Commit(); err != nil {
		return fmt.Errorf("failed to commit txn: %w", err)
	}
	return nil
}
+ +package waiter + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" +) + +func WaitForClusterReady(ctx context.Context, c client.Client, ns, name string, timeout time.Duration) error { + tc := v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + } + + return WaitForObject(ctx, c, &tc, func() error { + cond := meta.FindStatusCondition(tc.Status.Conditions, v1alpha1.ClusterCondAvailable) + if cond == nil { + return fmt.Errorf("available cond is unset") + } + if cond.Status != metav1.ConditionTrue { + return fmt.Errorf("available cond is not true, status: %s, reason: %s, message: %s", cond.Status, cond.Reason, cond.Message) + } + + return nil + }, timeout) +} diff --git a/tests/e2e/utils/waiter/common.go b/tests/e2e/utils/waiter/common.go new file mode 100644 index 00000000000..dc6cd635b25 --- /dev/null +++ b/tests/e2e/utils/waiter/common.go @@ -0,0 +1,175 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package waiter + +import ( + "context" + "fmt" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/tidb-operator/pkg/runtime" +) + +var ( + Poll = time.Second * 2 + + ShortTaskTimeout = time.Minute * 3 + LongTaskTimeout = time.Minute * 10 +) + +func WaitForObject( + ctx context.Context, + c client.Client, + obj client.Object, + cond func() error, + timeout time.Duration, +) error { + var lastErr error + if err := wait.PollUntilContextTimeout(ctx, Poll, timeout, true, func(ctx context.Context) (bool, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + + return false, fmt.Errorf("can't get obj %s: %w", key, err) + } + + if err := cond(); err != nil { + lastErr = err + return false, nil + } + + return true, nil + }); err != nil { + if wait.Interrupted(err) { + return fmt.Errorf("wait for object %T(%v) condition timeout: %w", obj, client.ObjectKeyFromObject(obj), lastErr) + } + + return fmt.Errorf("can't wait for object %T(%v) condition, error : %w", obj, client.ObjectKeyFromObject(obj), err) + } + + return nil +} + +func WaitForObjectDeleted( + ctx context.Context, + c client.Client, + obj client.Object, + timeout time.Duration, +) error { + if err := wait.PollUntilContextTimeout(ctx, Poll, timeout, true, func(ctx context.Context) (bool, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + + return false, fmt.Errorf("can't get object %s: %w", key, err) + } + + return false, nil + }); err != nil { + if wait.Interrupted(err) { + return fmt.Errorf("wait for object %v deleted timeout: %w", obj, err) + } + + return fmt.Errorf("can't wait for object %v deleted, error : 
%w", obj, err) + } + + return nil +} + +func WaitForList( + ctx context.Context, + c client.Client, + list client.ObjectList, + cond func() error, + timeout time.Duration, + opts ...client.ListOption, +) error { + var lastErr error + if err := wait.PollUntilContextTimeout(ctx, Poll, timeout, true, func(ctx context.Context) (bool, error) { + if err := c.List(ctx, list, opts...); err != nil { + return false, fmt.Errorf("can't list object %T: %w", list, err) + } + + if err := cond(); err != nil { + lastErr = err + return false, nil + } + + return true, nil + }); err != nil { + if wait.Interrupted(err) { + return fmt.Errorf("wait for list %T condition timeout: %w", list, lastErr) + } + + return fmt.Errorf("can't wait for list %T condition, error : %w", list, err) + } + + return nil +} + +func WaitForListDeleted( + ctx context.Context, + c client.Client, + list client.ObjectList, + timeout time.Duration, + opts ...client.ListOption, +) error { + return WaitForList(ctx, c, list, func() error { + l := meta.LenList(list) + if l == 0 { + return nil + } + + return fmt.Errorf("there are still %v items", l) + }, timeout, opts...) 
+} + +func WaitForObjectCondition[T runtime.Object]( + ctx context.Context, + c client.Client, + obj T, + condType string, + status metav1.ConditionStatus, + timeout time.Duration, +) error { + return WaitForObject(ctx, c, obj.To(), func() error { + cond := meta.FindStatusCondition(obj.Conditions(), condType) + if cond == nil { + return fmt.Errorf("obj %s/%s's condition %s is not set", obj.GetNamespace(), obj.GetName(), condType) + } + if cond.Status == status { + return nil + } + return fmt.Errorf("obj %s/%s's condition %s has unexpected status, expected is %v, current is %v, reason: %v, message: %v", + obj.GetNamespace(), + obj.GetName(), + cond.Type, + status, + cond.Status, + cond.Reason, + cond.Message, + ) + }, timeout) +} diff --git a/tests/e2e/utils/waiter/pd.go b/tests/e2e/utils/waiter/pd.go new file mode 100644 index 00000000000..9a4dbb53d06 --- /dev/null +++ b/tests/e2e/utils/waiter/pd.go @@ -0,0 +1,54 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package waiter + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" +) + +func WaitForPDsHealthy(ctx context.Context, c client.Client, pdg *v1alpha1.PDGroup, timeout time.Duration) error { + list := v1alpha1.PDList{} + return WaitForList(ctx, c, &list, func() error { + if len(list.Items) != int(*pdg.Spec.Replicas) { + return fmt.Errorf("pd %s/%s replicas %d not equal to %d", pdg.Namespace, pdg.Name, len(list.Items), *pdg.Spec.Replicas) + } + for i := range list.Items { + pd := &list.Items[i] + if pd.Generation != pd.Status.ObservedGeneration { + return fmt.Errorf("pd %s/%s is not synced", pd.Namespace, pd.Name) + } + if !meta.IsStatusConditionPresentAndEqual(pd.Status.Conditions, v1alpha1.PDCondInitialized, metav1.ConditionTrue) { + return fmt.Errorf("pd %s/%s is not initialized", pd.Namespace, pd.Name) + } + if !meta.IsStatusConditionPresentAndEqual(pd.Status.Conditions, v1alpha1.PDCondHealth, metav1.ConditionTrue) { + return fmt.Errorf("pd %s/%s is not healthy", pd.Namespace, pd.Name) + } + } + + return nil + }, timeout, client.InNamespace(pdg.Namespace), client.MatchingLabels{ + v1alpha1.LabelKeyCluster: pdg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: pdg.Name, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + }) +} diff --git a/tests/e2e/utils/waiter/pod.go b/tests/e2e/utils/waiter/pod.go new file mode 100644 index 00000000000..79ade4273f3 --- /dev/null +++ b/tests/e2e/utils/waiter/pod.go @@ -0,0 +1,200 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
package waiter

import (
	"context"
	"fmt"
	"slices"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	kuberuntime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"

	"github.com/pingcap/tidb-operator/apis/core/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client"
	"github.com/pingcap/tidb-operator/pkg/runtime"
)

// podInfo records the lifecycle timestamps observed for a single pod UID
// during a watch.
type podInfo struct {
	name         string
	uid          string
	creationTime metav1.Time
	// deletionTime stays zero until the pod reaches final deletion
	// (grace period zero); a zero value therefore means "still alive".
	deletionTime metav1.Time
}

// WaitPodsRollingUpdateOnce watches the pods of group g for the full timeout,
// recording each pod's creation time and final-deletion time keyed by UID,
// then verifies that exactly one serial rolling update happened: it expects
// 2*replicas pod records (one old and one new pod per replica) and that,
// ordered by event time (deletion time if set, else creation time),
// consecutive records pair up by pod name — i.e. each old pod is fully
// deleted and its same-named replacement created before the next pod starts.
//
//nolint:gocyclo // refactor if possible
func WaitPodsRollingUpdateOnce[T runtime.Group](ctx context.Context, c client.Client, g T, timeout time.Duration) error {
	podMap := map[string]podInfo{}
	ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, timeout)
	defer cancel()
	// List/watch pods belonging to the group via its cluster/group/component labels.
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (kuberuntime.Object, error) {
			list := &corev1.PodList{}
			if err := c.List(ctx, list, &client.ListOptions{
				Namespace: g.GetNamespace(),
				LabelSelector: labels.SelectorFromSet(labels.Set{
					v1alpha1.LabelKeyCluster:   g.Cluster(),
					v1alpha1.LabelKeyGroup:     g.GetName(),
					v1alpha1.LabelKeyComponent: g.Component(),
				}),
				Raw: &options,
			}); err != nil {
				return nil, err
			}
			return list, nil
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			list := &corev1.PodList{}
			return c.Watch(ctx, list, &client.ListOptions{
				Namespace: g.GetNamespace(),
				LabelSelector: labels.SelectorFromSet(labels.Set{
					v1alpha1.LabelKeyCluster:   g.Cluster(),
					v1alpha1.LabelKeyGroup:     g.GetName(),
					v1alpha1.LabelKeyComponent: g.Component(),
				}),
				Raw: &options,
			})
		},
	}
	// The condition below never returns true, so the watch runs until the
	// context times out; the events are only used to populate podMap.
	_, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, func(event watch.Event) (bool, error) {
		pod, ok := event.Object.(*corev1.Pod)
		if !ok {
			// ignore events without pod
			return false, nil
		}

		info, ok := podMap[string(pod.UID)]
		if !ok {
			info = podInfo{
				name:         pod.Name,
				uid:          string(pod.UID),
				creationTime: pod.CreationTimestamp,
			}
		}

		// Record the deletion time only at final deletion (grace period 0),
		// not when the pod merely enters Terminating.
		if !pod.DeletionTimestamp.IsZero() && pod.DeletionGracePeriodSeconds != nil && *pod.DeletionGracePeriodSeconds == 0 {
			info.deletionTime = *pod.DeletionTimestamp
		}
		podMap[string(pod.UID)] = info

		return false, nil
	})

	// Timing out is the expected way for the watch to end; any other
	// termination is a failure.
	if !wait.Interrupted(err) {
		return fmt.Errorf("watch stopped unexpected: %w", err)
	}

	infos := []podInfo{}
	for _, v := range podMap {
		infos = append(infos, v)
	}
	// Order records by "event time": deletion time when the pod was deleted,
	// otherwise creation time.
	slices.SortFunc(infos, func(a podInfo, b podInfo) int {
		if a.deletionTime.IsZero() && b.deletionTime.IsZero() {
			return a.creationTime.Compare(b.creationTime.Time)
		}
		if a.deletionTime.IsZero() {
			return a.creationTime.Compare(b.deletionTime.Time)
		}
		if b.deletionTime.IsZero() {
			return a.deletionTime.Compare(b.creationTime.Time)
		}
		return a.deletionTime.Compare(b.deletionTime.Time)
	})
	// Build a human-readable trace for error messages.
	detail := strings.Builder{}
	for _, info := range infos {
		if info.deletionTime.IsZero() {
			detail.WriteString(fmt.Sprintf("%v(%v) created at %s\n", info.name, info.uid, info.creationTime))
		} else {
			detail.WriteString(fmt.Sprintf("%v(%v) created at %s, deleted at %s\n", info.name, info.uid, info.creationTime, info.deletionTime))
		}
	}

	// One rolling update means each replica contributes exactly two pods:
	// the deleted old one and its newly created replacement.
	if len(infos) != 2*int(*g.Replicas()) {
		return fmt.Errorf("expect %v pods info, now only %v, detail:\n%v", 2**g.Replicas(), len(infos), detail.String())
	}
	// Adjacent records must refer to the same pod name: the old pod's
	// deletion immediately followed by the replacement's creation.
	for i := range *g.Replicas() {
		if infos[2*i].name != infos[2*i+1].name {
			return fmt.Errorf("pod may be restarted at same time, detail:\n%v", detail.String())
		}
	}

	return nil
}

// WaitForPodsReady blocks until the group has exactly the desired number of
// pods, all Running with the Ready condition true, or the timeout elapses.
func WaitForPodsReady[T runtime.Group](ctx context.Context, c client.Client, g T, timeout time.Duration) error {
	list := corev1.PodList{}
	return WaitForList(ctx, c, &list, func() error {
		if len(list.Items) != int(*g.Replicas()) {
			return fmt.Errorf("%s/%s pod replicas %d not equal to %d", g.GetNamespace(), g.GetName(), len(list.Items), *g.Replicas())
		}
		for i := range list.Items {
			pod := &list.Items[i]
			if pod.Status.Phase != corev1.PodRunning {
				return fmt.Errorf("%s/%s pod %s is not running, current phase: %s", g.GetNamespace(), g.GetName(), pod.Name, pod.Status.Phase)
			}
			for i := range pod.Status.Conditions {
				cond := &pod.Status.Conditions[i]
				if cond.Type != corev1.PodReady {
					continue
				}
				if cond.Status != corev1.ConditionTrue {
					return fmt.Errorf("%s/%s pod %s is not ready, current status: %s, reason: %v, message: %v",
						g.GetNamespace(),
						g.GetName(),
						pod.Name,
						cond.Status,
						cond.Reason,
						cond.Message,
					)
				}
			}
		}

		return nil
	}, timeout, client.InNamespace(g.GetNamespace()), client.MatchingLabels{
		v1alpha1.LabelKeyCluster:   g.Cluster(),
		v1alpha1.LabelKeyGroup:     g.GetName(),
		v1alpha1.LabelKeyComponent: g.Component(),
	})
}

// WaitForPodsRecreated blocks until the group has exactly the desired number
// of pods and every pod was created at or after changeTime (i.e. all pods
// were recreated since the change), or the timeout elapses.
func WaitForPodsRecreated[T runtime.Group](ctx context.Context, c client.Client, g T, changeTime time.Time, timeout time.Duration) error {
	list := corev1.PodList{}
	return WaitForList(ctx, c, &list, func() error {
		if len(list.Items) != int(*g.Replicas()) {
			return fmt.Errorf("%s/%s replicas %d not equal to %d", g.GetNamespace(), g.GetName(), len(list.Items), *g.Replicas())
		}
		for i := range list.Items {
			pod := &list.Items[i]
			if pod.CreationTimestamp.Time.Before(changeTime) {
				return fmt.Errorf("pod %s/%s is created at %v before change time %v", pod.Namespace, pod.Name, pod.CreationTimestamp, changeTime)
			}
		}

		return nil
	}, timeout, client.InNamespace(g.GetNamespace()), client.MatchingLabels{
		v1alpha1.LabelKeyCluster:   g.Cluster(),
		v1alpha1.LabelKeyGroup:     g.GetName(),
		v1alpha1.LabelKeyComponent: g.Component(),
	})
}
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/kubernetes/pkg/controller/history/controller_history.go b/third_party/kubernetes/pkg/controller/history/controller_history.go new file mode 100644 index 00000000000..de74a958703 --- /dev/null +++ b/third_party/kubernetes/pkg/controller/history/controller_history.go @@ -0,0 +1,279 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copied from https://github.com/kubernetes/kubernetes/blob/00236ae0d73d2455a2470469ed1005674f8ed61f/pkg/controller/history/controller_history.go#L49 +// and made some modifications. 
+ +package history + +import ( + "bytes" + "context" + "fmt" + "hash/fnv" + "sort" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/util/retry" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/util/hash" +) + +// HashLabel is the label used to indicate the hash value of a ControllerRevision's Data. +const HashLabel = "controller.kubernetes.io/hash" + +// ControllerRevisionName returns the ControllerRevisionName for a ControllerRevision in the form prefix-hash. If the length +// of prefix is greater than 223 bytes, it is truncated to allow for a name that is no larger than 253 bytes. +func ControllerRevisionName(prefix string, hash string) string { + if len(prefix) > 223 { + prefix = prefix[:223] + } + + return fmt.Sprintf("%s-%s", prefix, hash) +} + +// NewControllerRevision returns a ControllerRevision with a ControllerRef pointing to parent and indicating that +// parent is of parentKind. The ControllerRevision has labels matching template labels, contains Data equal to data, and +// has a Revision equal to revision. The collisionCount is used when creating the name of the ControllerRevision +// so the name is likely unique. If the returned error is nil, the returned ControllerRevision is valid. If the +// returned error is not nil, the returned ControllerRevision is invalid for use. 
+func NewControllerRevision(parent metav1.Object, + templateLabels map[string]string, + data runtime.RawExtension, + revision int64, + collisionCount *int32) (*appsv1.ControllerRevision, error) { + labelMap := make(map[string]string) + for k, v := range templateLabels { + labelMap[k] = v + } + cr := &appsv1.ControllerRevision{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labelMap, + Namespace: parent.GetNamespace(), + }, + Data: data, + Revision: revision, + } + hash := HashControllerRevision(cr, collisionCount) + cr.Name = ControllerRevisionName(parent.GetName(), hash) + cr.Labels[HashLabel] = hash + return cr, nil +} + +// HashControllerRevision hashes the contents of revision's Data using FNV hashing. If probe is not nil, the byte value +// of probe is added written to the hash as well. The returned hash will be a safe encoded string to avoid bad words. +func HashControllerRevision(revision *appsv1.ControllerRevision, probe *int32) string { + hf := fnv.New32() + if len(revision.Data.Raw) > 0 { + hf.Write(revision.Data.Raw) + } + if revision.Data.Object != nil { + hash.DeepHashObject(hf, revision.Data.Object) + } + if probe != nil { + hf.Write([]byte(strconv.FormatInt(int64(*probe), 10))) + } + return rand.SafeEncodeString(fmt.Sprint(hf.Sum32())) +} + +// SortControllerRevisions sorts revisions by their Revision. +func SortControllerRevisions(revisions []*appsv1.ControllerRevision) { + sort.Stable(byRevision(revisions)) +} + +// EqualRevision returns true if lhs and rhs are either both nil, or both point to non-nil ControllerRevisions that +// contain semantically equivalent data. Otherwise, this method returns false. 
+func EqualRevision(lhs *appsv1.ControllerRevision, rhs *appsv1.ControllerRevision) bool { + var lhsHash, rhsHash *uint32 + if lhs == nil || rhs == nil { + return lhs == rhs + } + if hs, found := lhs.Labels[HashLabel]; found { + hash, err := strconv.ParseInt(hs, 10, 32) + if err == nil { + lhsHash = new(uint32) + *lhsHash = uint32(hash) + } + } + if hs, found := rhs.Labels[HashLabel]; found { + hash, err := strconv.ParseInt(hs, 10, 32) + if err == nil { + rhsHash = new(uint32) + *rhsHash = uint32(hash) + } + } + if lhsHash != nil && rhsHash != nil && *lhsHash != *rhsHash { + return false + } + return bytes.Equal(lhs.Data.Raw, rhs.Data.Raw) && apiequality.Semantic.DeepEqual(lhs.Data.Object, rhs.Data.Object) +} + +// FindEqualRevisions returns all ControllerRevisions in revisions that are equal to needle using EqualRevision as the +// equality test. The returned slice preserves the order of revisions. +func FindEqualRevisions(revisions []*appsv1.ControllerRevision, needle *appsv1.ControllerRevision) []*appsv1.ControllerRevision { + var eq []*appsv1.ControllerRevision + for i := range revisions { + if EqualRevision(revisions[i], needle) { + eq = append(eq, revisions[i]) + } + } + return eq +} + +// byRevision implements sort.Interface to allow ControllerRevisions to be sorted by Revision. +type byRevision []*appsv1.ControllerRevision + +func (br byRevision) Len() int { + return len(br) +} + +// Less breaks ties first by creation timestamp, then by name +func (br byRevision) Less(i, j int) bool { + if br[i].Revision == br[j].Revision { + if br[j].CreationTimestamp.Equal(&br[i].CreationTimestamp) { + return br[i].Name < br[j].Name + } + return br[j].CreationTimestamp.After(br[i].CreationTimestamp.Time) + } + return br[i].Revision < br[j].Revision +} + +func (br byRevision) Swap(i, j int) { + br[i], br[j] = br[j], br[i] +} + +// Interface provides an interface allowing for management of a Controller's history as realized by recorded +// ControllerRevisions. 
An instance of Interface can be retrieved from NewHistory. Implementations must treat all +// pointer parameters as "in" parameter, and they must not be mutated. +type Interface interface { + // ListControllerRevisions lists all ControllerRevisions matching selector and owned by parent or no other + // controller. If the returned error is nil the returned slice of ControllerRevisions is valid. If the + // returned error is not nil, the returned slice is not valid. + ListControllerRevisions(parent client.Object, selector labels.Selector) ([]*appsv1.ControllerRevision, error) + // CreateControllerRevision attempts to create the revision as owned by parent via a ControllerRef. If name + // collision occurs, collisionCount (incremented each time collision occurs except for the first time) is + // added to the hash of the revision, and it is renamed using ControllerRevisionName. Implementations may + // cease to attempt to retry creation after some number of attempts and return an error. If the returned + // error is not nil, creation failed. If the returned error is nil, the returned ControllerRevision has been + // created. + // Callers must make sure that collisionCount is not nil. An error is returned if it is. + CreateControllerRevision(parent client.Object, revision *appsv1.ControllerRevision, collisionCount *int32) (*appsv1.ControllerRevision, error) + // DeleteControllerRevision attempts to delete revision. If the returned error is not nil, deletion has failed. + DeleteControllerRevision(revision *appsv1.ControllerRevision) error + // UpdateControllerRevision updates revision such that its Revision is equal to newRevision. Implementations + // may retry on conflict. If the returned error is nil, the update was successful and returned ControllerRevision + // is valid. If the returned error is not nil, the update failed and the returned ControllerRevision is invalid. 
+ UpdateControllerRevision(revision *appsv1.ControllerRevision, newRevision int64) (*appsv1.ControllerRevision, error) +} + +// NewClient returns an instance of Interface that uses client to communicate with the API Server and lister to list +// ControllerRevisions. This method should be used to create an Interface for all scenarios other than testing. +func NewClient(cli client.Client) Interface { + return &realHistory{cli: cli} +} + +type realHistory struct { + cli client.Client +} + +func (rh *realHistory) ListControllerRevisions(parent client.Object, selector labels.Selector) ([]*appsv1.ControllerRevision, error) { + // List all revisions in the namespace that match the selector + var list appsv1.ControllerRevisionList + if err := rh.cli.List(context.TODO(), &list, &client.ListOptions{ + Namespace: parent.GetNamespace(), + LabelSelector: selector, + }); err != nil { + return nil, err + } + var owned []*appsv1.ControllerRevision + for i := range list.Items { + ref := metav1.GetControllerOfNoCopy(&list.Items[i]) + if ref == nil || ref.UID == parent.GetUID() { + owned = append(owned, &list.Items[i]) + } + + } + return owned, nil +} + +func (rh *realHistory) CreateControllerRevision(parent client.Object, revision *appsv1.ControllerRevision, collisionCount *int32) (*appsv1.ControllerRevision, error) { + if collisionCount == nil { + return nil, fmt.Errorf("collisionCount should not be nil") + } + + // Clone the input + clone := revision.DeepCopy() + + // Continue to attempt to create the revision updating the name with a new hash on each iteration + for { + hash := HashControllerRevision(revision, collisionCount) + // Update the revisions name + clone.Name = ControllerRevisionName(parent.GetName(), hash) + clone.Namespace = parent.GetNamespace() + err := rh.cli.Create(context.TODO(), clone) + if errors.IsAlreadyExists(err) { + var exists appsv1.ControllerRevision + if err := rh.cli.Get(context.TODO(), client.ObjectKey{ + Namespace: clone.Namespace, + Name: 
clone.Name, + }, &exists); err != nil { + return nil, err + } + if bytes.Equal(exists.Data.Raw, clone.Data.Raw) { + return &exists, nil + } + *collisionCount++ + continue + } + return clone, err + } +} + +func (rh *realHistory) UpdateControllerRevision(revision *appsv1.ControllerRevision, newRevision int64) (*appsv1.ControllerRevision, error) { + clone := revision.DeepCopy() + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if clone.Revision == newRevision { + return nil + } + clone.Revision = newRevision + updateErr := rh.cli.Update(context.TODO(), clone) + if updateErr == nil { + return nil + } + var updated appsv1.ControllerRevision + if err := rh.cli.Get(context.TODO(), client.ObjectKey{ + Namespace: clone.Namespace, + Name: clone.Name, + }, &updated); err == nil { + // make a copy so we don't mutate the shared cache + clone = updated.DeepCopy() + } + return updateErr + }) + return clone, err +} + +func (rh *realHistory) DeleteControllerRevision(revision *appsv1.ControllerRevision) error { + return rh.cli.Delete(context.TODO(), revision) +} diff --git a/third_party/kubernetes/pkg/controller/statefulset/stateful_set_utils.go b/third_party/kubernetes/pkg/controller/statefulset/stateful_set_utils.go new file mode 100644 index 00000000000..459c5e8fb9b --- /dev/null +++ b/third_party/kubernetes/pkg/controller/statefulset/stateful_set_utils.go @@ -0,0 +1,132 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Copied from https://github.com/kubernetes/kubernetes/blob/00236ae0d73d2455a2470469ed1005674f8ed61f/pkg/controller/statefulset/stateful_set_utils.go#L17 +// Exported some functions. + +package statefulset + +import ( + "time" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NextRevision finds the next valid revision number based on revisions. If the length of revisions +// is 0 this is 1. Otherwise, it is 1 greater than the largest revision's Revision. This method +// assumes that revisions has been sorted by Revision. +func NextRevision(revisions []*appsv1.ControllerRevision) int64 { + count := len(revisions) + if count <= 0 { + return 1 + } + return revisions[count-1].Revision + 1 +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time)) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *v1.Pod) bool { + condition := GetPodReadyCondition(pod.Status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. 
+func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if status == nil { + return -1, nil + } + return GetPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. +func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// IsPodRunningAndReady returns true if pod is in the PodRunning Phase, if it has a condition of PodReady. 
+func IsPodRunningAndReady(pod *v1.Pod) bool { + return pod.Status.Phase == v1.PodRunning && IsPodReady(pod) +} + +func IsPodRunningAndAvailable(pod *v1.Pod, minReadySeconds int32) bool { + return IsPodAvailable(pod, minReadySeconds, metav1.Now()) +} + +// IsPodCreated returns true if pod has been created and is maintained by the API server +func IsPodCreated(pod *v1.Pod) bool { + return pod.Status.Phase != "" +} + +// IsPodPending returns true if pod has a Phase of PodPending +func IsPodPending(pod *v1.Pod) bool { + return pod.Status.Phase == v1.PodPending +} + +// IsPodFailed returns true if pod has a Phase of PodFailed +func IsPodFailed(pod *v1.Pod) bool { + return pod.Status.Phase == v1.PodFailed +} + +// IsPodSucceeded returns true if pod has a Phase of PodSucceeded +func IsPodSucceeded(pod *v1.Pod) bool { + return pod.Status.Phase == v1.PodSucceeded +} + +// IsPodTerminating returns true if pod's DeletionTimestamp has been set +func IsPodTerminating(pod *v1.Pod) bool { + return pod.DeletionTimestamp != nil +} + +// IsPodHealthy returns true if pod is running and ready and has not been terminated +func IsPodHealthy(pod *v1.Pod) bool { + return IsPodRunningAndReady(pod) && !IsPodTerminating(pod) +} diff --git a/third_party/kubernetes/pkg/util/hash/hash.go b/third_party/kubernetes/pkg/util/hash/hash.go new file mode 100644 index 00000000000..49251f081c7 --- /dev/null +++ b/third_party/kubernetes/pkg/util/hash/hash.go @@ -0,0 +1,34 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copied from https://github.com/kubernetes/kubernetes/blob/00236ae0d73d2455a2470469ed1005674f8ed61f/pkg/util/hash/hash.go#L29 + +package hash + +import ( + "fmt" + "hash" + + "k8s.io/apimachinery/pkg/util/dump" +) + +// DeepHashObject writes specified object to hash using the spew library +// which follows pointers and prints actual values of the nested objects +// ensuring the hash does not change when a pointer changes. +func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { + hasher.Reset() + fmt.Fprintf(hasher, "%v", dump.ForHash(objectToWrite)) +} diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 00000000000..f4f922e4ac9 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,22 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + // Intend to import "k8s.io/code-generator/cmd/register-gen", but typecheck will fail + _ "k8s.io/code-generator" + // Intend to import "sigs.k8s.io/controller-tools/cmd/controller-gen", but typecheck will fail + _ "sigs.k8s.io/controller-tools/pkg/version" +)