diff --git a/deploy/gcp/charts/tidb-cluster b/deploy/gcp/charts/tidb-cluster deleted file mode 120000 index 326d3821047..00000000000 --- a/deploy/gcp/charts/tidb-cluster +++ /dev/null @@ -1 +0,0 @@ -../../../charts/tidb-cluster \ No newline at end of file diff --git a/go.mod b/go.mod index 32c0d273c89..91a0c24b131 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/onsi/ginkgo v1.14.1 github.com/onsi/gomega v1.10.2 github.com/openshift/generic-admission-server v1.14.1-0.20210422140326-da96454c926d - github.com/pingcap/advanced-statefulset/client v1.17.1-0.20230830071059-cfaedeea6cb3 + github.com/pingcap/advanced-statefulset/client v1.17.1-0.20231124094705-00595b4ef4ac github.com/pingcap/errors v0.11.4 github.com/pingcap/kvproto v0.0.0-20231122054644-fb0f5c2a0a10 github.com/pingcap/tidb-operator/pkg/apis v1.6.0-alpha.8 @@ -66,7 +66,7 @@ require ( k8s.io/kube-aggregator v0.23.17 k8s.io/kube-scheduler v0.23.17 k8s.io/kubectl v0.23.17 - k8s.io/kubernetes v1.23.17 + k8s.io/kubelet v0.23.17 k8s.io/utils v0.0.0-20211116205334-6203023598ed mvdan.cc/sh/v3 v3.4.3 sigs.k8s.io/controller-runtime v0.7.2 @@ -77,7 +77,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.2 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/cyphar/filepath-securejoin v0.2.2 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/go-errors/errors v1.0.1 // indirect @@ -87,7 +86,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/opencontainers/runc v1.0.2 // indirect github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.opentelemetry.io/contrib v0.20.0 // indirect @@ -104,8 +102,6 @@ require ( go.starlark.net 
v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect - k8s.io/component-helpers v0.23.17 // indirect - k8s.io/kubelet v0.0.0 // indirect sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect sigs.k8s.io/kustomize/api v0.10.1 // indirect sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect @@ -123,7 +119,6 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 // indirect - github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3 // indirect github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect @@ -212,18 +207,13 @@ require ( google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a // indirect google.golang.org/protobuf v1.31.0 // indirect - gopkg.in/gcfg.v1 v1.2.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/warnings.v0 v0.1.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cloud-provider v0.23.17 // indirect - k8s.io/csi-translation-lib v0.23.17 // indirect k8s.io/klog v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - k8s.io/legacy-cloud-providers v0.0.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.2.0 // indirect @@ -235,62 +225,10 @@ replace github.com/pingcap/tidb-operator/pkg/client => ./pkg/client replace github.com/renstrom/dedent => 
github.com/lithammer/dedent v1.1.0 -replace k8s.io/api => k8s.io/api v0.23.17 - -replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.17 - -replace k8s.io/apimachinery => k8s.io/apimachinery v0.23.17 - -replace k8s.io/apiserver => k8s.io/apiserver v0.23.17 - -replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.17 - -replace k8s.io/client-go => k8s.io/client-go v0.23.17 - -replace k8s.io/code-generator => k8s.io/code-generator v0.23.17 - -replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.17 - -replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.17 - -replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.17 - -replace k8s.io/kubelet => k8s.io/kubelet v0.23.17 - -replace k8s.io/metrics => k8s.io/metrics v0.23.17 - -replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.17 - -replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.17 - -replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.17 - -replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.17 - -replace k8s.io/component-base => k8s.io/component-base v0.23.17 - -replace k8s.io/cri-api => k8s.io/cri-api v0.23.17 - -replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.17 - -replace k8s.io/kubectl => k8s.io/kubectl v0.23.17 - -replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.17 - -replace github.com/uber-go/atomic => go.uber.org/atomic v1.5.0 - replace github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible replace github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.11.1 -replace k8s.io/controller-manager => k8s.io/controller-manager v0.23.17 - -replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.17 - -replace k8s.io/component-helpers => k8s.io/component-helpers v0.23.17 - -replace k8s.io/mount-utils => k8s.io/mount-utils v0.23.17 - // workaround for github.com/advisories/GHSA-25xm-hr59-7c27 
// TODO: remove it after upgrading github.com/mholt/archiver greater than v3.5.0 replace github.com/ulikunitz/xz => github.com/ulikunitz/xz v0.5.8 diff --git a/go.sum b/go.sum index ed649a7b942..db0568aabb2 100644 --- a/go.sum +++ b/go.sum @@ -7,6 +7,7 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -14,11 +15,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -33,7 +29,6 @@ cloud.google.com/go/compute/metadata 
v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -61,14 +56,19 @@ github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.6/go.mod 
h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs= github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= @@ -76,21 +76,26 @@ github.com/Azure/go-autorest/autorest/azure/auth v0.5.2 h1:R1pgoZkhXuv4+0ky9r3e5 github.com/Azure/go-autorest/autorest/azure/auth v0.5.2/go.mod h1:q98IH4qgc3eWM4/WOeR5+YPmBuy8Lq0jNRDwSM0CuFk= github.com/Azure/go-autorest/autorest/azure/cli v0.4.1 h1:jwcD1wURu0+hKceV04MubZmKLzwEYOCz6q4aOtVZ+Ng= github.com/Azure/go-autorest/autorest/azure/cli v0.4.1/go.mod h1:JfDgiIO1/RPu6z42AdQTyjOoCM2MFhLqSBDvMEkDgcg= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1 
h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3 h1:FCalqNmQYSMCCHoCtAxZN/ZgLc8ufgeo5Z3wrIoJZvs= -github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e h1:eb0Pzkt15Bm7f2FFYv7sjY7NPFi3cPkS3tv1CcrFBWA= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= @@ -98,24 +103,23 @@ github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0 github.com/NYTimes/gziphandler 
v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agiledragon/gomonkey/v2 v2.7.0 h1:CFT/xdr6xbsIN04Yll4OhKq/vPm0MVD8ykV99jDBesM= github.com/agiledragon/gomonkey/v2 v2.7.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff 
v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -149,68 +153,53 @@ github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1 h1:HD4PLRzjuCVW79mQ0/pdsalOLHJ+FaEoqJLxfltpb2U= github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod 
h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.15 h1:cKRCLMj3Ddm54bKSpemfQ8AtYFBhAI2MPmdys22fBdc= github.com/creack/pty v1.1.15/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -220,12 +209,15 @@ github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TR github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/dustin/go-humanize 
v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -238,10 +230,8 @@ github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQm github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= @@ -263,27 +253,26 @@ github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNy github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0/go.mod 
h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.1 h1:xVm/f9seEhZFL9+n5kv5XLrGwy6elc4V9v/XFY2vmd8= github.com/frankban/quicktest v1.13.1/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod 
h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -294,27 +283,66 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod 
h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod 
h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 
h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -328,7 +356,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -344,7 +371,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -354,8 +380,6 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -382,7 +406,6 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -392,10 +415,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= @@ -403,6 +422,7 @@ github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkj github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= @@ -417,12 +437,14 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -430,6 +452,7 @@ github.com/gregjones/httpcache 
v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -437,32 +460,15 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10 
h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -482,7 +488,9 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -501,14 +509,13 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 
h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -516,8 +523,9 @@ github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -527,33 +535,26 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.2/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU= github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/minio-go/v6 v6.0.55 h1:Hqm41952DdRNKXM+6hCnPXCsHCYSgLf03iuYoxJG2Wk= github.com/minio/minio-go/v6 v6.0.55/go.mod h1:KQMM+/44DSlSGSQWSfRrAZ12FVMmpWNuX37i2AX0jfI= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -567,7 +568,6 @@ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQih github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod 
h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -581,35 +581,31 @@ github.com/nwaples/rardecode v1.0.0 h1:r7vGuS5akxOnR4JQSkko62RJ1ReCMXxQRPtxsiFMB github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2 
h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/openshift/generic-admission-server v1.14.1-0.20210422140326-da96454c926d h1:Z5xcujYaukvRqLWxBiIRn6KdM5Pd1De01fmfD1Rr4dk= github.com/openshift/generic-admission-server v1.14.1-0.20210422140326-da96454c926d/go.mod h1:m+wYlVQdnPe8JGqoKVpCYnFRIVraqC1SrUowQXh6XlA= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pingcap/advanced-statefulset/client v1.17.1-0.20230830071059-cfaedeea6cb3 
h1:/GofeTlMzVNnrtkU0XbTeiAwubdro18H5ROMgx43KGE= -github.com/pingcap/advanced-statefulset/client v1.17.1-0.20230830071059-cfaedeea6cb3/go.mod h1:iJ3ur9e2ZW1/AjiBiPACpvj0rC/T28mU4sBGO7gjORM= +github.com/pingcap/advanced-statefulset/client v1.17.1-0.20231124094705-00595b4ef4ac h1:PHznG4cEPpDAltq8pCw+a5qly8c00yhTWqVb4OYDEKg= +github.com/pingcap/advanced-statefulset/client v1.17.1-0.20231124094705-00595b4ef4ac/go.mod h1:pHQk/hK89qM+LTAkVowB9PgVBBhN/9F4mgBkkStSDPQ= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -623,26 +619,22 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= @@ -664,19 +656,15 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -687,29 +675,23 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -727,10 +709,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tikv/pd v2.1.17+incompatible h1:48YYz8r16tItl3fxHmSGxGC2UemO6/Xp3Yq0/G38SnE= github.com/tikv/pd v2.1.17+incompatible/go.mod h1:v6C/D7ONC49SgjI4jbGnooSizvijaO/bdIm62DVR4tI= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -738,9 +720,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ugorji/go/codec 
v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= @@ -757,8 +738,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5 h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd/api/v3 v3.5.0 
h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -774,14 +759,15 @@ go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= @@ -829,24 +815,23 @@ go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= 
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= gocloud.dev v0.18.0 h1:HX6uFZYZs9tUP87jzoWgB8dl4ihsRpiAsBDKTthiApY= gocloud.dev v0.18.0/go.mod h1:lhLOb91+9tKB8RnNlsx+weJGEd0AHM94huK1bmrhPwM= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= @@ -872,7 +857,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -882,20 +866,17 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -906,6 +887,7 @@ golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -923,22 +905,13 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -947,16 +920,7 @@ golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -973,36 +937,33 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1016,43 +977,30 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210925032602-92d5a993a665/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210916214954-140adaaadfaf/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1060,25 +1008,25 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1089,6 +1037,8 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1098,7 +1048,6 @@ golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1121,18 +1070,13 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod 
h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -1162,13 +1106,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= google.golang.org/api 
v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1203,7 +1140,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1213,21 +1149,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8= google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= @@ -1247,16 +1170,11 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc 
v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= @@ -1282,14 +1200,12 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0= -gopkg.in/gcfg.v1 v1.2.0/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= @@ -1298,8 +1214,6 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1314,6 +1228,8 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod 
h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= @@ -1324,28 +1240,38 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.19.5/go.mod h1:yGZReuNa0vj56op6eT+NLrXJne0R0u9ktexZ8jdJzpc= k8s.io/api v0.23.17 h1:gC11V5AIsNXUUa/xd5RQo7djukvl5O1ZDQKwEYu0H7g= k8s.io/api v0.23.17/go.mod h1:upM9VIzXUjEyLTmGGi0KnH8kdlPnvgv+fEJ3tggDHfE= +k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= k8s.io/apiextensions-apiserver v0.23.17 h1:UNb9hKyIy55wCsdOPs2/z9n88gUocoKLi8JYksUm2jU= k8s.io/apiextensions-apiserver v0.23.17/go.mod h1:UxwuPKJJuColqbexlNWu09nVh+7JLIQIFtBkUeWgVag= +k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.5/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= k8s.io/apimachinery v0.23.17 h1:ipJ0SrpI6EzH8zVw0WhCBldgJhzIamiYIumSGTdFExY= k8s.io/apimachinery v0.23.17/go.mod h1:87v5Wl9qpHbnapX1PSNgln4oO3dlyjAU3NSIwNhT4Lo= +k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= +k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/apiserver v0.23.17 h1:0br6oJhknp1mT0epMS84ibj+XcpmthPd60B5bPdbko8= k8s.io/apiserver v0.23.17/go.mod h1:Z5Wx5AY9iCZDblpI37Rzs099Rwi192FoS4iWDVODU9M= k8s.io/cli-runtime v0.23.17 
h1:JI6O9Txnm1RZB3+BI2Ij+hsSOGNOv3BnyHu9oLS7Rxc= k8s.io/cli-runtime v0.23.17/go.mod h1:1E3XqaGCQNsUkdrT5DHZ9DK8lTKnshciOL484xjtj3o= +k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= +k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/client-go v0.19.5/go.mod h1:BSG3iuxI40Bs0nNDLS1JRa/7ReBQDHzf0x8nZZrK0fo= k8s.io/client-go v0.23.17 h1:MbW05RO5sy+TFw2ds36SDdNSkJbr8DFVaaVrClSA8Vs= k8s.io/client-go v0.23.17/go.mod h1:X5yz7nbJHS7q8977AKn8BWKgxeAXjl1sFsgstczUsCM= -k8s.io/cloud-provider v0.23.17 h1:Kw0MqtoKSkTNXAOxPUN8iQWOxx5UcEGZhVOWDkIkQ+A= -k8s.io/cloud-provider v0.23.17/go.mod h1:dZL4KeG2HT3jY5d9ntlg5jBO1UGgqHGdp0FMQ3oNfJA= -k8s.io/code-generator v0.23.17/go.mod h1:F/QjOqu2asaurFxLPpZY350/ts3UWy31VHdEwXR4JK4= +k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= +k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.19.5/go.mod h1:5N/uv5A7fyr0d+t/b1HynXKkUVPEhc8ljkMaBJv4Tp8= k8s.io/component-base v0.23.17 h1:yWK39HTP+rUPjr8HGvNzLECZWibcZcYsGiiQhrNH6zM= k8s.io/component-base v0.23.17/go.mod h1:m/Em46sTbBgGa4O1K8jRXCWlJEkzBwKt18ipv3ckSCc= -k8s.io/component-helpers v0.23.17 h1:zDqL5GfaJVy7cPVS1gCAA0NwEvPheQYPAxwAMPvcw/U= -k8s.io/component-helpers v0.23.17/go.mod h1:CDkNKvJCUqICwwd8tc7QtdCCokaXSOGlLkemMZM4AJ0= -k8s.io/csi-translation-lib v0.23.17 h1:OahcZ4vH7fBUnI6jKJq0SnKQ0+gNn9TW0sll8Dw/w00= -k8s.io/csi-translation-lib v0.23.17/go.mod h1:CLBeZaaTQ9BmnJGkoavtoE7U+rjfuxKJI15Wz5LbexM= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 
h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -1356,6 +1282,7 @@ k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.23.17 h1:r8nmXLZHKNJz8GFldoNILp99xL0u6xuBQ7IPTzW1nxQ= k8s.io/kube-aggregator v0.23.17/go.mod h1:2PyVOSRBOSFesdOfPMnnjMDAUk3PwNKZvSaCSc8vles= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= @@ -1365,10 +1292,7 @@ k8s.io/kubectl v0.23.17 h1:WhUAOGwGx+WLjlyGSROPI7YgVUqsIcbDfWupGICkAC8= k8s.io/kubectl v0.23.17/go.mod h1:ELEmnj6hyVI8fI81g7fzfKJxpt0YCpMBlYn28usa4XI= k8s.io/kubelet v0.23.17 h1:fOjEZjAT4oavH7zj9i6dQ5wgNuL9kXE8NU10oRKQrew= k8s.io/kubelet v0.23.17/go.mod h1:71DMJwiCuIQshkQk8GEN34eZOigMZXICCquMmbcwDDs= -k8s.io/kubernetes v1.23.17 h1:nzPjHGp+Qbh44wHxPv4DJp4Znot26yuENYLMu4Av6ZU= -k8s.io/kubernetes v1.23.17/go.mod h1:WFbvJ1vNtgtY2BRfjLTH4iS3eXRevz36S6EO2qXCCng= -k8s.io/legacy-cloud-providers v0.23.17 h1:vJsUHHPSSGglHdNWdMouM48gsV5o5Mv8tfXSL+I1njY= -k8s.io/legacy-cloud-providers v0.23.17/go.mod h1:pLSPPr2qZOFYtiGgK1VKujTcAcccBHINmt2etakAcds= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= @@ -1380,6 +1304,7 @@ 
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= sigs.k8s.io/controller-runtime v0.7.2 h1:gD2JZp0bBLLuvSRYVNvox+bRCz1UUUxKDjPUCb56Ukk= @@ -1390,8 +1315,10 @@ sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0 sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= sigs.k8s.io/kustomize/kyaml v0.13.0 h1:9c+ETyNfSrVhxvphs+K2dzT3dh5oVPPEqPOE/cUpScY= sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/pin-deps.sh b/hack/pin-deps.sh deleted file mode 100755 index 592b2da2d7a..00000000000 --- a/hack/pin-deps.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash - -# Copyright 2020 PingCAP, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# See the License for the specific language governing permissions and -# limitations under the License. - -# Pin all k8s.io dependencies to a specified version. -# -# Kubernetes staging repos are hosted in code repo (k8s.io/kubernetes). If we -# use pkgs from k8s.io/kubernetes, `replace` directive must be used to override -# version constraints derived from k8s.io/kubernetes. - -VERSION=${VERSION:-${1:-}} - -if [ -z "$VERSION" ]; then - echo "VERSION is required, e.g. VERSION=x.y.z $0 or $0 x.y.z" - exit 1 -fi - -echo "VERSION: $VERSION" - -# Explicitly opt into go modules, even though we're inside a GOPATH directory -export GO111MODULE=on - -go mod edit -require k8s.io/kubernetes@v$VERSION - -# -# Return true if "$v2" is greater or equal to "$v1". -# -# Usage: version_ge "$v1" "$v2" -# -function version_ge() { - local a="$1" - local b="$2" - [[ "${a}" == $(echo -e "${a}\n${b}" | sort -s -t. 
-k 1,1n -k 2,2n -k3,3n | head -n1) ]] -} - -if version_ge "1.15.0" $VERSION; then - STAGING_REPOS=($(curl -sS https://raw.githubusercontent.com/kubernetes/kubernetes/v${VERSION}/go.mod | sed -n 's|.*k8s.io/\(.*\) => ./staging/src/k8s.io/.*|k8s.io/\1|p')) -else - STAGING_REPOS=($(curl -sS https://raw.githubusercontent.com/kubernetes/kubernetes/v${VERSION}/staging/README.md | sed -n 's|.*\[`\(k8s.io/[^`]*\)`\].*|\1|p')) -fi - -edit_args=( - -fmt - # workaround for https://github.com/uber-go/atomic - -replace github.com/uber-go/atomic=go.uber.org/atomic@v1.5.0 -) - -for repo in ${STAGING_REPOS[@]}; do - if version_ge "1.17.0" $VERSION; then - echo ">=1.17.0" - staging_v=${VERSION/#1/0} - edit_args+=(-replace $repo=$repo@v$staging_v) - else - edit_args+=(-replace $repo=$repo@kubernetes-$VERSION) - fi -done -echo "edit_args=$edit_args" - -go mod edit ${edit_args[@]} -# workaround for https://github.com/golang/go/issues/33008 -# go mod tidy does not always remove unncessary lines from go.sum. For now we -# can remove it first and populate again. -rm go.sum -go mod tidy diff --git a/hack/verify-boilerplate.sh b/hack/verify-boilerplate.sh index 8026bd0464a..3ec9036edc2 100755 --- a/hack/verify-boilerplate.sh +++ b/hack/verify-boilerplate.sh @@ -41,8 +41,10 @@ files=($(find . -type f -not \( \ -o -path './.git/*' \ -o -path './.*/*' \ -o -path './pkg/client/*' \ + -o -path './pkg/third_party/*' \ -o -path './*/.terraform/*' \ -o -path './tests/images/*/*' \ + -o -path './tests/third_party/*' \ -o -path './deploy/*' \ -o -path '*/Makefile' \ -o -path '*/Dockerfile' \ diff --git a/pkg/apis/federation/pingcap/v1alpha1/register.go b/pkg/apis/federation/pingcap/v1alpha1/register.go index ff96e7c7725..b290cbb25a9 100644 --- a/pkg/apis/federation/pingcap/v1alpha1/register.go +++ b/pkg/apis/federation/pingcap/v1alpha1/register.go @@ -20,7 +20,6 @@ import ( ) var ( - // SchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. 
SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder // AddToScheme applies all the stored functions to the scheme. diff --git a/pkg/apis/pingcap/v1alpha1/register.go b/pkg/apis/pingcap/v1alpha1/register.go index 74489d3703c..2b98a786e6b 100644 --- a/pkg/apis/pingcap/v1alpha1/register.go +++ b/pkg/apis/pingcap/v1alpha1/register.go @@ -20,7 +20,6 @@ import ( ) var ( - // SchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder // AddToScheme applies all the stored functions to the scheme. diff --git a/pkg/controller/backup/backup_control.go b/pkg/controller/backup/backup_control.go index a316f1df030..14ad76a976a 100644 --- a/pkg/controller/backup/backup_control.go +++ b/pkg/controller/backup/backup_control.go @@ -23,10 +23,10 @@ import ( "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/util/slice" ) // ControlInterface implements the control logic for updating Backup @@ -98,7 +98,7 @@ func (c *defaultBackupControl) removeProtectionFinalizer(backup *v1alpha1.Backup name := backup.GetName() if needToRemoveFinalizer(backup) { - backup.Finalizers = slice.RemoveString(backup.Finalizers, label.BackupProtectionFinalizer, nil) + backup.Finalizers = k8s.RemoveString(backup.Finalizers, label.BackupProtectionFinalizer, nil) _, err := c.cli.PingcapV1alpha1().Backups(ns).Update(context.TODO(), backup, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("remove backup %s/%s protection finalizers failed, err: %v", ns, name, err) @@ -110,7 +110,7 @@ func (c *defaultBackupControl) removeProtectionFinalizer(backup *v1alpha1.Backup } func 
needToAddFinalizer(backup *v1alpha1.Backup) bool { - return backup.DeletionTimestamp == nil && v1alpha1.IsCleanCandidate(backup) && !slice.ContainsString(backup.Finalizers, label.BackupProtectionFinalizer, nil) + return backup.DeletionTimestamp == nil && v1alpha1.IsCleanCandidate(backup) && !k8s.ContainsString(backup.Finalizers, label.BackupProtectionFinalizer, nil) } func needToRemoveFinalizer(backup *v1alpha1.Backup) bool { @@ -119,7 +119,7 @@ func needToRemoveFinalizer(backup *v1alpha1.Backup) bool { } func isDeletionCandidate(backup *v1alpha1.Backup) bool { - return backup.DeletionTimestamp != nil && slice.ContainsString(backup.Finalizers, label.BackupProtectionFinalizer, nil) + return backup.DeletionTimestamp != nil && k8s.ContainsString(backup.Finalizers, label.BackupProtectionFinalizer, nil) } var _ ControlInterface = &defaultBackupControl{} diff --git a/pkg/controller/fedvolumebackup/fed_volume_backup_control.go b/pkg/controller/fedvolumebackup/fed_volume_backup_control.go index 639b7fdec9a..77b4d8d0407 100644 --- a/pkg/controller/fedvolumebackup/fed_volume_backup_control.go +++ b/pkg/controller/fedvolumebackup/fed_volume_backup_control.go @@ -21,7 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/util/slice" "github.com/pingcap/tidb-operator/pkg/apis/federation/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/apis/label" @@ -29,6 +28,7 @@ import ( informers "github.com/pingcap/tidb-operator/pkg/client/federation/informers/externalversions/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/fedvolumebackup" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" ) // ControlInterface implements the control logic for updating VolumeBackup @@ -119,7 +119,7 @@ func (c *defaultBackupControl) removeProtectionFinalizer(volumeBackup *v1alpha1. 
name := volumeBackup.GetName() if needToRemoveFinalizer(volumeBackup) { - volumeBackup.Finalizers = slice.RemoveString(volumeBackup.Finalizers, label.BackupProtectionFinalizer, nil) + volumeBackup.Finalizers = k8s.RemoveString(volumeBackup.Finalizers, label.BackupProtectionFinalizer, nil) _, err := c.cli.FederationV1alpha1().VolumeBackups(ns).Update(context.TODO(), volumeBackup, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("remove VolumeBackup %s/%s protection finalizers failed, err: %v", ns, name, err) @@ -132,7 +132,7 @@ func (c *defaultBackupControl) removeProtectionFinalizer(volumeBackup *v1alpha1. func needToAddFinalizer(volumeBackup *v1alpha1.VolumeBackup) bool { return volumeBackup.DeletionTimestamp == nil && - !slice.ContainsString(volumeBackup.Finalizers, label.BackupProtectionFinalizer, nil) + !k8s.ContainsString(volumeBackup.Finalizers, label.BackupProtectionFinalizer, nil) } func needToRemoveFinalizer(volumeBackup *v1alpha1.VolumeBackup) bool { @@ -140,7 +140,7 @@ func needToRemoveFinalizer(volumeBackup *v1alpha1.VolumeBackup) bool { } func isDeletionCandidate(volumeBackup *v1alpha1.VolumeBackup) bool { - return volumeBackup.DeletionTimestamp != nil && slice.ContainsString(volumeBackup.Finalizers, label.BackupProtectionFinalizer, nil) + return volumeBackup.DeletionTimestamp != nil && k8s.ContainsString(volumeBackup.Finalizers, label.BackupProtectionFinalizer, nil) } var _ ControlInterface = &defaultBackupControl{} diff --git a/pkg/controller/fedvolumerestore/fed_volume_restore_control.go b/pkg/controller/fedvolumerestore/fed_volume_restore_control.go index 0c529883451..bae097f59e1 100644 --- a/pkg/controller/fedvolumerestore/fed_volume_restore_control.go +++ b/pkg/controller/fedvolumerestore/fed_volume_restore_control.go @@ -21,7 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/util/slice" 
"github.com/pingcap/tidb-operator/pkg/apis/federation/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/apis/label" @@ -29,6 +28,7 @@ import ( informers "github.com/pingcap/tidb-operator/pkg/client/federation/informers/externalversions/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/fedvolumebackup" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" ) // ControlInterface implements the control logic for updating VolumeRestore @@ -118,7 +118,7 @@ func (c *defaultRestoreControl) removeProtectionFinalizer(volumeRestore *v1alpha name := volumeRestore.GetName() if needToRemoveFinalizer(volumeRestore) { - volumeRestore.Finalizers = slice.RemoveString(volumeRestore.Finalizers, label.VolumeRestoreFederationFinalizer, nil) + volumeRestore.Finalizers = k8s.RemoveString(volumeRestore.Finalizers, label.VolumeRestoreFederationFinalizer, nil) _, err := c.cli.FederationV1alpha1().VolumeRestores(ns).Update(context.TODO(), volumeRestore, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("remove VolumeRestore %s/%s protection finalizers failed, err: %v", ns, name, err) @@ -131,7 +131,7 @@ func (c *defaultRestoreControl) removeProtectionFinalizer(volumeRestore *v1alpha func needToAddFinalizer(volumeRestore *v1alpha1.VolumeRestore) bool { return volumeRestore.DeletionTimestamp == nil && - !slice.ContainsString(volumeRestore.Finalizers, label.VolumeRestoreFederationFinalizer, nil) + !k8s.ContainsString(volumeRestore.Finalizers, label.VolumeRestoreFederationFinalizer, nil) } func needToRemoveFinalizer(volumeRestore *v1alpha1.VolumeRestore) bool { @@ -139,7 +139,7 @@ func needToRemoveFinalizer(volumeRestore *v1alpha1.VolumeRestore) bool { } func isDeletionCandidate(volumeRestore *v1alpha1.VolumeRestore) bool { - return volumeRestore.DeletionTimestamp != nil && slice.ContainsString(volumeRestore.Finalizers, label.VolumeRestoreFederationFinalizer, nil) + return volumeRestore.DeletionTimestamp != nil && 
k8s.ContainsString(volumeRestore.Finalizers, label.VolumeRestoreFederationFinalizer, nil) } var _ ControlInterface = &defaultRestoreControl{} diff --git a/pkg/controller/tidbcluster/pod_control.go b/pkg/controller/tidbcluster/pod_control.go index a125a872174..36a44c09a69 100644 --- a/pkg/controller/tidbcluster/pod_control.go +++ b/pkg/controller/tidbcluster/pod_control.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/member" "github.com/pingcap/tidb-operator/pkg/metrics" "github.com/pingcap/tidb-operator/pkg/pdapi" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,7 +37,6 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -496,7 +496,7 @@ func (c *PodController) syncTiKVPodForEviction(ctx context.Context, pod *corev1. 
evictStatus := tc.Status.TiKV.EvictLeader[pod.Name] if evictStatus != nil { if evictStatus.Value == v1alpha1.EvictLeaderValueDeletePod { - if podutil.IsPodReady(pod) { + if k8s.IsPodReady(pod) { err := endEvict() if err != nil { return reconcile.Result{}, err diff --git a/pkg/manager/member/dm_master_member_manager.go b/pkg/manager/member/dm_master_member_manager.go index f24003652cb..d881deb45af 100644 --- a/pkg/manager/member/dm_master_member_manager.go +++ b/pkg/manager/member/dm_master_member_manager.go @@ -27,6 +27,7 @@ import ( v1 "github.com/pingcap/tidb-operator/pkg/manager/member/startscript/v1" "github.com/pingcap/tidb-operator/pkg/manager/suspender" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/util" apps "k8s.io/api/apps/v1" @@ -37,7 +38,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/utils/pointer" ) @@ -265,7 +265,7 @@ func (m *masterMemberManager) shouldRecover(dc *v1alpha1.DMCluster) bool { klog.Errorf("pod %s/%s does not exist: %v", dc.Namespace, name, err) return false } - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return false } status, ok := dc.Status.Master.Members[pod.Name] diff --git a/pkg/manager/member/dm_master_upgrader.go b/pkg/manager/member/dm_master_upgrader.go index e843822b728..6be5544aa41 100644 --- a/pkg/manager/member/dm_master_upgrader.go +++ b/pkg/manager/member/dm_master_upgrader.go @@ -20,10 +20,10 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" apps "k8s.io/api/apps/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) type masterUpgrader struct { @@ -88,7 +88,7 @@ func (u *masterUpgrader) 
gracefulUpgrade(dc *v1alpha1.DMCluster, oldSet *apps.St } if revision == dc.Status.Master.StatefulSet.UpdateRevision { - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return controller.RequeueErrorf("dmcluster: [%s/%s]'s upgraded dm pod: [%s] is not ready", ns, dcName, podName) } if member, exist := dc.Status.Master.Members[podName]; !exist || !member.Health { diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index b14abdcaa6d..cb86c01025d 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/suspender" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" "github.com/pingcap/tidb-operator/pkg/manager/volumes" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/util" "github.com/Masterminds/semver" @@ -40,7 +41,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/utils/pointer" ) @@ -288,7 +288,7 @@ func (m *pdMemberManager) shouldRecover(tc *v1alpha1.TidbCluster) bool { klog.Errorf("pod %s/%s does not exist: %v", tc.Namespace, name, err) return false } - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return false } ok := false diff --git a/pkg/manager/member/pd_upgrader.go b/pkg/manager/member/pd_upgrader.go index 0c0a5410536..c9ff94d765f 100644 --- a/pkg/manager/member/pd_upgrader.go +++ b/pkg/manager/member/pd_upgrader.go @@ -20,11 +20,11 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" "github.com/pingcap/tidb-operator/pkg/pdapi" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" apps "k8s.io/api/apps/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) const 
( @@ -95,7 +95,7 @@ func (u *pdUpgrader) gracefulUpgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Stat } if revision == tc.Status.PD.StatefulSet.UpdateRevision { - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return controller.RequeueErrorf("tidbcluster: [%s/%s]'s upgraded pd pod: [%s] is not ready", ns, tcName, podName) } if member, exist := tc.Status.PD.Members[PdName(tc.Name, i, tc.Namespace, tc.Spec.ClusterDomain, tc.Spec.AcrossK8s)]; !exist || !member.Health { diff --git a/pkg/manager/member/pump_scaler.go b/pkg/manager/member/pump_scaler.go index 8d4ea32a5c3..62670521a77 100644 --- a/pkg/manager/member/pump_scaler.go +++ b/pkg/manager/member/pump_scaler.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/util" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -29,7 +30,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) type pumpScaler struct { @@ -213,7 +213,7 @@ func (s *pumpScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, newSe // In this situation return error to wait for another round for safety. // 2. Pump pod is not ready, such as in pending state. // In this situation we should delete this Pump pod immediately to avoid blocking the subsequent operations. 
- if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { safeTimeDeadline := pod.CreationTimestamp.Add(5 * s.deps.CLIConfig.ResyncDuration) if time.Now().Before(safeTimeDeadline) { // Wait for 5 resync periods to ensure that the following situation does not occur: diff --git a/pkg/manager/member/ticdc_upgrader.go b/pkg/manager/member/ticdc_upgrader.go index 27e78bd0d25..3515792568c 100644 --- a/pkg/manager/member/ticdc_upgrader.go +++ b/pkg/manager/member/ticdc_upgrader.go @@ -19,11 +19,11 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" apps "k8s.io/api/apps/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) type ticdcUpgrader struct { @@ -96,7 +96,7 @@ func (u *ticdcUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulS } if revision == tc.Status.TiCDC.StatefulSet.UpdateRevision { - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return controller.RequeueErrorf("tidbcluster: [%s/%s]'s upgraded ticdc pod: [%s] is not ready", ns, tcName, podName) } if _, exist := tc.Status.TiCDC.Captures[podName]; !exist { diff --git a/pkg/manager/member/tidb_failover.go b/pkg/manager/member/tidb_failover.go index 482007b5a46..f357daa347e 100644 --- a/pkg/manager/member/tidb_failover.go +++ b/pkg/manager/member/tidb_failover.go @@ -19,10 +19,10 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) type tidbFailover struct { @@ -77,7 +77,7 @@ func (f *tidbFailover) Failover(tc *v1alpha1.TidbCluster) error { return 
fmt.Errorf("tidbFailover.Failover: failed to get pods %s for cluster %s/%s, error: %s", tidbMember.Name, tc.GetNamespace(), tc.GetName(), err) } - _, condition := podutil.GetPodCondition(&pod.Status, corev1.PodScheduled) + _, condition := k8s.GetPodCondition(&pod.Status, corev1.PodScheduled) if condition == nil || condition.Status != corev1.ConditionTrue { // if a member is unheathy because it's not scheduled yet, we // should not create failover pod for it diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index 891bedd3821..565a68d44bd 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -34,6 +34,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/suspender" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" "github.com/pingcap/tidb-operator/pkg/manager/volumes" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/util" "github.com/pingcap/tidb-operator/pkg/util/cmpver" @@ -46,7 +47,6 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/utils/pointer" // for sql/driver @@ -450,7 +450,7 @@ func (m *tidbMemberManager) shouldRecover(tc *v1alpha1.TidbCluster) bool { klog.Errorf("pod %s/%s does not exist: %v", tc.Namespace, name, err) return false } - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return false } status, ok := tc.Status.TiDB.Members[pod.Name] diff --git a/pkg/manager/member/tidb_upgrader.go b/pkg/manager/member/tidb_upgrader.go index 429ca24b558..685b6f3cb4a 100644 --- a/pkg/manager/member/tidb_upgrader.go +++ b/pkg/manager/member/tidb_upgrader.go @@ -20,13 +20,13 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" + 
"github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) const ( @@ -116,8 +116,8 @@ func (u *tidbUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe } if revision == tc.Status.TiDB.StatefulSet.UpdateRevision { - if !podutil.IsPodAvailable(pod, int32(minReadySeconds), metav1.Now()) { - readyCond := podutil.GetPodReadyCondition(pod.Status) + if !k8s.IsPodAvailable(pod, int32(minReadySeconds), metav1.Now()) { + readyCond := k8s.GetPodReadyCondition(pod.Status) if readyCond == nil || readyCond.Status != corev1.ConditionTrue { return controller.RequeueErrorf("tidbcluster: [%s/%s]'s upgraded tidb pod: [%s] is not ready", ns, tcName, podName) diff --git a/pkg/manager/member/tiflash_scaler.go b/pkg/manager/member/tiflash_scaler.go index 5ad06fc1d9e..470f0896cef 100644 --- a/pkg/manager/member/tiflash_scaler.go +++ b/pkg/manager/member/tiflash_scaler.go @@ -21,12 +21,12 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" errorutils "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) type tiflashScaler struct { @@ -178,7 +178,7 @@ func (s *tiflashScaler) scaleInOne(tc *v1alpha1.TidbCluster, ordinal int32) erro // // 2. This can happen when TiFlash pod has not been successfully registered in the cluster, such as always pending. // In this situation we should delete this TiFlash pod immediately to avoid blocking the subsequent operations. 
- if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { safeTimeDeadline := pod.CreationTimestamp.Add(5 * s.deps.CLIConfig.ResyncDuration) if time.Now().Before(safeTimeDeadline) { // Wait for 5 resync periods to ensure that the following situation does not occur: diff --git a/pkg/manager/member/tiflash_upgrader.go b/pkg/manager/member/tiflash_upgrader.go index df708552b7c..2799be82629 100644 --- a/pkg/manager/member/tiflash_upgrader.go +++ b/pkg/manager/member/tiflash_upgrader.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/tiflashapi" "github.com/pingcap/tidb-operator/pkg/util/cmpver" @@ -28,7 +29,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) const ( @@ -121,8 +121,8 @@ func (u *tiflashUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Statefu } if revision == tc.Status.TiFlash.StatefulSet.UpdateRevision { - if !podutil.IsPodAvailable(pod, int32(minReadySeconds), metav1.Now()) { - readyCond := podutil.GetPodReadyCondition(pod.Status) + if !k8s.IsPodAvailable(pod, int32(minReadySeconds), metav1.Now()) { + readyCond := k8s.GetPodReadyCondition(pod.Status) if readyCond == nil || readyCond.Status != corev1.ConditionTrue { return controller.RequeueErrorf("tidbcluster: [%s/%s]'s upgraded tiflash pod: [%s] is not ready", ns, tcName, podName) diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go index 875d38ecff3..9c35a14e0ee 100644 --- a/pkg/manager/member/tikv_scaler.go +++ b/pkg/manager/member/tikv_scaler.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" 
"github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/util" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -29,7 +30,6 @@ import ( errorutils "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) type tikvScaler struct { @@ -265,7 +265,7 @@ func (s *tikvScaler) scaleInOne(tc *v1alpha1.TidbCluster, skipPreCheck bool, upT // 3. TiKV pod has a valid store in label, but no active stores from PD (via status). // In this situation we assume that store has been Tombstone'd but pd has gc'ed it, so not available in // TombstoneStores. We delete the pod in this case. - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { if tc.TiKVBootStrapped() { safeTimeDeadline := pod.CreationTimestamp.Add(5 * s.deps.CLIConfig.ResyncDuration) if time.Now().Before(safeTimeDeadline) { diff --git a/pkg/manager/member/tikv_upgrader.go b/pkg/manager/member/tikv_upgrader.go index 6662cbc7be1..e2ed4366596 100644 --- a/pkg/manager/member/tikv_upgrader.go +++ b/pkg/manager/member/tikv_upgrader.go @@ -25,13 +25,13 @@ import ( mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" errorutils "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/utils/pointer" ) @@ -147,8 +147,8 @@ func (u *tikvUpgrader) Upgrade(meta metav1.Object, oldSet *apps.StatefulSet, new if revision == status.StatefulSet.UpdateRevision { - if !podutil.IsPodAvailable(pod, int32(minReadySeconds), metav1.Now()) { - readyCond := podutil.GetPodReadyCondition(pod.Status) + if !k8s.IsPodAvailable(pod, int32(minReadySeconds), 
metav1.Now()) { + readyCond := k8s.GetPodReadyCondition(pod.Status) if readyCond == nil || readyCond.Status != corev1.ConditionTrue { return controller.RequeueErrorf("tidbcluster: [%s/%s]'s upgraded tikv pod: [%s] is not ready", ns, tcName, podName) diff --git a/pkg/manager/member/tiproxy_upgrader.go b/pkg/manager/member/tiproxy_upgrader.go index 838ae3d1e7a..d53a8f36380 100644 --- a/pkg/manager/member/tiproxy_upgrader.go +++ b/pkg/manager/member/tiproxy_upgrader.go @@ -21,11 +21,11 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) const ( @@ -92,7 +92,7 @@ func (u *tiproxyUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Statefu } if revision == tc.Status.TiProxy.StatefulSet.UpdateRevision { - if !podutil.IsPodAvailable(pod, int32(minReadySeconds), metav1.Now()) { + if !k8s.IsPodAvailable(pod, int32(minReadySeconds), metav1.Now()) { return controller.RequeueErrorf("tidbcluster: [%s/%s]'s upgraded tiproxy pod: [%s] is not ready", ns, tcName, podName) } continue diff --git a/pkg/manager/member/utils.go b/pkg/manager/member/utils.go index 4dbfe7a9f5c..99332ae7bc8 100644 --- a/pkg/manager/member/utils.go +++ b/pkg/manager/member/utils.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/util/toml" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/manager/member/startscript" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/util" "github.com/Masterminds/semver" @@ -41,7 +42,6 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - podutil 
"k8s.io/kubernetes/pkg/api/v1/pod" ) const ( @@ -303,7 +303,7 @@ func shouldRecover(tc *v1alpha1.TidbCluster, component string, podLister corelis klog.Errorf("pod %s/%s does not exist: %v", tc.Namespace, name, err) return false } - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return false } var exist bool @@ -353,7 +353,7 @@ func shouldRecoverDM(dc *v1alpha1.DMCluster, component string, podLister corelis klog.Errorf("pod %s/%s does not exist: %v", dc.Namespace, name, err) return false } - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { return false } var exist bool diff --git a/pkg/third_party/k8s/pod_util.go b/pkg/third_party/k8s/pod_util.go new file mode 100644 index 00000000000..428f40fef93 --- /dev/null +++ b/pkg/third_party/k8s/pod_util.go @@ -0,0 +1,82 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// this file is copied from k8s.io/kubernetes/pkg/api/v1/pod/util.go @v1.23.17 + +package k8s + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. 
LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time)) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *v1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status v1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if status == nil { + return -1, nil + } + return GetPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. 
+func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} diff --git a/pkg/third_party/k8s/slice.go b/pkg/third_party/k8s/slice.go new file mode 100644 index 00000000000..ea88c611838 --- /dev/null +++ b/pkg/third_party/k8s/slice.go @@ -0,0 +1,51 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// this file is copied from k8s.io/kubernetes/pkg/util/slice/slice.go @v1.23.17 + +package k8s + +// ContainsString checks if a given slice of strings contains the provided string. +// If a modifier func is provided, it is called with the slice item before the comparation. +func ContainsString(slice []string, s string, modifier func(s string) string) bool { + for _, item := range slice { + if item == s { + return true + } + if modifier != nil && modifier(item) == s { + return true + } + } + return false +} + +// RemoveString returns a newly created []string that contains all items from slice that +// are not equal to s and modifier(s) in case modifier func is provided. 
+func RemoveString(slice []string, s string, modifier func(s string) string) []string { + newSlice := make([]string, 0) + for _, item := range slice { + if item == s { + continue + } + if modifier != nil && modifier(item) == s { + continue + } + newSlice = append(newSlice, item) + } + if len(newSlice) == 0 { + // Sanitize for unit tests so we don't need to distinguish empty array + // and nil. + newSlice = nil + } + return newSlice +} diff --git a/tests/actions.go b/tests/actions.go index 387bb9ab901..8e03518ed8e 100644 --- a/tests/actions.go +++ b/tests/actions.go @@ -50,6 +50,8 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/fixture" "github.com/pingcap/tidb-operator/tests/pkg/metrics" "github.com/pingcap/tidb-operator/tests/slack" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" "github.com/ghodss/yaml" _ "github.com/go-sql-driver/mysql" @@ -65,8 +67,6 @@ import ( corelisterv1 "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" ) const ( @@ -277,10 +277,10 @@ func (oa *OperatorActions) CleanCRDOrDie() { framework.ExpectNoError(err, "failed to list CRD") for _, crd := range crdList.Items { if !strings.HasSuffix(crd.Name, ".pingcap.com") { - framework.Logf("CRD %q ignored", crd.Name) + log.Logf("CRD %q ignored", crd.Name) continue } - framework.Logf("Deleting CRD %q", crd.Name) + log.Logf("Deleting CRD %q", crd.Name) err = oa.apiExtCli.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete CRD %q", crd.Name) // Even if DELETE API request succeeds, the CRD object may still exists @@ -345,11 +345,11 @@ func (oa *OperatorActions) crdFiles(info *OperatorConfig) ([]string, error) { } func (oa *OperatorActions) 
waitForCRDsReady() { - framework.Logf("Wait for all CRDs are established") + log.Logf("Wait for all CRDs are established") e2eutil.WaitForCRDsEstablished(oa.apiExtCli, labels.Everything()) // workaround for https://github.com/kubernetes/kubernetes/issues/65517 - framework.Logf("force sync kubectl cache") + log.Logf("force sync kubectl cache") cmdArgs := []string{"sh", "-c", "rm -rf ~/.kube/cache ~/.kube/http-cache"} if _, err := exec.Command(cmdArgs[0], cmdArgs[1:]...).CombinedOutput(); err != nil { log.Failf("Failed to run '%s': %v", strings.Join(cmdArgs, " "), err) diff --git a/tests/cmd/blockwriter/main.go b/tests/cmd/blockwriter/main.go index 98d19342031..948093299af 100644 --- a/tests/cmd/blockwriter/main.go +++ b/tests/cmd/blockwriter/main.go @@ -27,9 +27,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/tests/pkg/blockwriter" "github.com/pingcap/tidb-operator/tests/pkg/util" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" flag "github.com/spf13/pflag" "k8s.io/component-base/logs" - "k8s.io/kubernetes/test/e2e/framework/log" ) var ( diff --git a/tests/cmd/fault-trigger/main.go b/tests/cmd/fault-trigger/main.go index 5c5dcc005a6..cfb92135ec8 100644 --- a/tests/cmd/fault-trigger/main.go +++ b/tests/cmd/fault-trigger/main.go @@ -22,9 +22,9 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/api" "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/component-base/logs" - "k8s.io/kubernetes/test/e2e/framework/log" ) var ( diff --git a/tests/cmd/mock-monitor/main.go b/tests/cmd/mock-monitor/main.go index 3e71f222b78..5dc9bf09aca 100644 --- a/tests/cmd/mock-monitor/main.go +++ b/tests/cmd/mock-monitor/main.go @@ -17,8 +17,8 @@ import ( "net/http" "github.com/pingcap/tidb-operator/tests/pkg/mock" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" 
"k8s.io/apiserver/pkg/server/healthz" - "k8s.io/kubernetes/test/e2e/framework/log" ) func main() { diff --git a/tests/cmd/webhook/main.go b/tests/cmd/webhook/main.go index dee57cc7bd3..6bd2031942d 100644 --- a/tests/cmd/webhook/main.go +++ b/tests/cmd/webhook/main.go @@ -21,12 +21,12 @@ import ( "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" "github.com/pingcap/tidb-operator/tests/pkg/webhook" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" flag "github.com/spf13/pflag" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/component-base/logs" - "k8s.io/kubernetes/test/e2e/framework/log" ) var ( diff --git a/tests/config.go b/tests/config.go index dacd39aafe8..aef020e43cd 100644 --- a/tests/config.go +++ b/tests/config.go @@ -24,9 +24,8 @@ import ( utiloperator "github.com/pingcap/tidb-operator/tests/e2e/util/operator" "github.com/pingcap/tidb-operator/tests/pkg/blockwriter" "github.com/pingcap/tidb-operator/tests/slack" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" "gopkg.in/yaml.v2" - - "k8s.io/kubernetes/test/e2e/framework/log" ) const ( diff --git a/tests/crd_test_util.go b/tests/crd_test_util.go index 2b0f2ccb137..97e170e553c 100644 --- a/tests/crd_test_util.go +++ b/tests/crd_test_util.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" utilstatefulset "github.com/pingcap/tidb-operator/tests/e2e/util/statefulset" "github.com/pingcap/tidb-operator/tests/slack" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,7 +39,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/kubernetes/test/e2e/framework/log" ) type pumpStatus struct { diff --git a/tests/dm.go b/tests/dm.go index 61f4ffa4ace..30a4395e648 100644 --- 
a/tests/dm.go +++ b/tests/dm.go @@ -40,7 +40,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/utils/pointer" "github.com/pingcap/tidb-operator/pkg/apis/label" @@ -50,6 +49,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/dmapi" httputil "github.com/pingcap/tidb-operator/pkg/util/http" "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/drainer.go b/tests/drainer.go index 151ca795c1a..a5c84ae031d 100644 --- a/tests/drainer.go +++ b/tests/drainer.go @@ -25,9 +25,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/tests/slack" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework/log" ) type DbType string diff --git a/tests/e2e/br/br.go b/tests/e2e/br/br.go index dae326fbae8..e82729574eb 100644 --- a/tests/e2e/br/br.go +++ b/tests/e2e/br/br.go @@ -35,12 +35,12 @@ import ( nsutil "github.com/pingcap/tidb-operator/tests/e2e/util/ns" utiltidbcluster "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" ) var ( @@ -469,14 +469,14 @@ var _ = ginkgo.Describe("Backup and Restore", func() { // ginkgo.By("Create log-backup.enable TiDB cluster with tls") // masterClusterName := "tls-master" // err := createLogBackupEnableTidbCluster(f, masterClusterName, backupVersion, enableTLS, skipCA) - // framework.ExpectNoError(err) + // k8se2e.ExpectNoError(err) // ginkgo.By("Wait for 
tls-master TiDB cluster ready") // err = utiltidbcluster.WaitForTCConditionReady(f.ExtClient, ns, masterClusterName, tidbReadyTimeout, 0) - // framework.ExpectNoError(err) + // k8se2e.ExpectNoError(err) // ginkgo.By("Create RBAC for backup") // err = createRBAC(f) - // framework.ExpectNoError(err) + // k8se2e.ExpectNoError(err) // logBackupName := "log-backup" // typ := strings.ToLower(typeBR) @@ -485,22 +485,22 @@ var _ = ginkgo.Describe("Backup and Restore", func() { // backup.Spec.CleanPolicy = v1alpha1.CleanPolicyTypeDelete // backup.Spec.Mode = v1alpha1.BackupModeLog // }) - // framework.ExpectNoError(err) - // framework.ExpectNotEqual(logBackup.Status.CommitTs, "") + // k8se2e.ExpectNoError(err) + // k8se2e.ExpectNotEqual(logBackup.Status.CommitTs, "") // ginkgo.By("wait log backup progress reach current ts") // currentTS := strconv.FormatUint(config.GoTimeToTS(time.Now()), 10) // err = brutil.WaitForLogBackupProgressReachTS(f.ExtClient, ns, logBackupName, currentTS, logbackupCatchUpTimeout) - // framework.ExpectNoError(err) + // k8se2e.ExpectNoError(err) // ginkgo.By("Delete log backup") // err = deleteBackup(f, logBackupName) - // framework.ExpectNoError(err) + // k8se2e.ExpectNoError(err) // ginkgo.By("Check if all log backup files in storage is deleted") // cleaned, err := f.Storage.IsDataCleaned(ctx, ns, logBackup.Spec.S3.Prefix) // now we only use s3 - // framework.ExpectNoError(err) - // framework.ExpectEqual(cleaned, true, "storage should be cleaned") + // k8se2e.ExpectNoError(err) + // k8se2e.ExpectEqual(cleaned, true, "storage should be cleaned") // }) }) diff --git a/tests/e2e/br/framework/framework.go b/tests/e2e/br/framework/framework.go index 57762c2417a..be246ca14c2 100644 --- a/tests/e2e/br/framework/framework.go +++ b/tests/e2e/br/framework/framework.go @@ -19,15 +19,6 @@ import ( "strings" "github.com/onsi/ginkgo" - asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" - 
"github.com/pingcap/tidb-operator/pkg/apis/label" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" - "github.com/pingcap/tidb-operator/pkg/scheme" - onceutil "github.com/pingcap/tidb-operator/tests/e2e/br/utils/once" - "github.com/pingcap/tidb-operator/tests/e2e/br/utils/portforward" - "github.com/pingcap/tidb-operator/tests/e2e/br/utils/s3" - tlsutil "github.com/pingcap/tidb-operator/tests/e2e/br/utils/tls" - yamlutil "github.com/pingcap/tidb-operator/tests/e2e/br/utils/yaml" v1 "k8s.io/api/core/v1" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -37,8 +28,19 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" apiregistration "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" + + asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" + "github.com/pingcap/tidb-operator/pkg/apis/label" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" + "github.com/pingcap/tidb-operator/pkg/scheme" + onceutil "github.com/pingcap/tidb-operator/tests/e2e/br/utils/once" + "github.com/pingcap/tidb-operator/tests/e2e/br/utils/portforward" + "github.com/pingcap/tidb-operator/tests/e2e/br/utils/s3" + tlsutil "github.com/pingcap/tidb-operator/tests/e2e/br/utils/tls" + yamlutil "github.com/pingcap/tidb-operator/tests/e2e/br/utils/yaml" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) type Framework struct { @@ -151,26 +153,26 @@ func (f *Framework) AfterEach() { ginkgo.By("Try to clean up all backups") f.ForceCleanBackups(f.Namespace.Name) } else { - framework.Logf("Skip cleaning up backup") + log.Logf("Skip cleaning up backup") } } func (f *Framework) ForceCleanBackups(ns string) { bl, err := 
f.ExtClient.PingcapV1alpha1().Backups(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { - framework.Logf("failed to list backups in namespace %s: %v", ns, err) + log.Logf("failed to list backups in namespace %s: %v", ns, err) return } for i := range bl.Items { name := bl.Items[i].Name if err := f.ExtClient.PingcapV1alpha1().Backups(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { - framework.Logf("failed to delete backup(%s) in namespace %s: %v", name, ns, err) + log.Logf("failed to delete backup(%s) in namespace %s: %v", name, ns, err) return } // use patch to avoid update conflicts patch := []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`) if _, err := f.ExtClient.PingcapV1alpha1().Backups(ns).Patch(context.TODO(), name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil { - framework.Logf("failed to clean backup(%s) finalizers in namespace %s: %v", name, ns, err) + log.Logf("failed to clean backup(%s) finalizers in namespace %s: %v", name, ns, err) return } } @@ -183,7 +185,7 @@ func (f *Framework) RecycleReleasedPV() { c := f.ClientSet pvList, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) if err != nil { - framework.Logf("failed to list pvs: %v", err) + log.Logf("failed to list pvs: %v", err) return } var ( @@ -194,7 +196,7 @@ func (f *Framework) RecycleReleasedPV() { succeeded int ) defer func() { - framework.Logf("recycling orphan PVs (total: %d, retainReleased: %d, skipped: %d, failed: %d, succeeded: %d)", total, retainReleased, skipped, failed, succeeded) + log.Logf("recycling orphan PVs (total: %d, retainReleased: %d, skipped: %d, failed: %d, succeeded: %d)", total, retainReleased, skipped, failed, succeeded) }() for _, pv := range pvList.Items { if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain || pv.Status.Phase != v1.VolumeReleased { @@ -203,13 +205,13 @@ func (f *Framework) RecycleReleasedPV() { retainReleased++ pvcNamespaceName, ok := 
pv.Labels[label.NamespaceLabelKey] if !ok { - framework.Logf("label %q does not exist in PV %q", label.NamespaceLabelKey, pv.Name) + log.Logf("label %q does not exist in PV %q", label.NamespaceLabelKey, pv.Name) failed++ continue } _, err := c.CoreV1().Namespaces().Get(context.TODO(), pvcNamespaceName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { - framework.Logf("failed to get namespace %q: %v", pvcNamespaceName, err) + log.Logf("failed to get namespace %q: %v", pvcNamespaceName, err) failed++ continue } @@ -222,10 +224,10 @@ func (f *Framework) RecycleReleasedPV() { _, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), &pv, metav1.UpdateOptions{}) if err != nil { failed++ - framework.Logf("failed to set PersistentVolumeReclaimPolicy of PV %q to Delete: %v", pv.Name, err) + log.Logf("failed to set PersistentVolumeReclaimPolicy of PV %q to Delete: %v", pv.Name, err) } else { succeeded++ - framework.Logf("successfully set PersistentVolumeReclaimPolicy of PV %q to Delete", pv.Name) + log.Logf("successfully set PersistentVolumeReclaimPolicy of PV %q to Delete", pv.Name) } } } diff --git a/tests/e2e/br/utils/portforward/portforward.go b/tests/e2e/br/utils/portforward/portforward.go index 3c318b73b52..a969a83daaf 100644 --- a/tests/e2e/br/utils/portforward/portforward.go +++ b/tests/e2e/br/utils/portforward/portforward.go @@ -24,6 +24,7 @@ import ( "strconv" "time" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -39,7 +40,6 @@ import ( "k8s.io/client-go/transport/spdy" "k8s.io/kubectl/pkg/polymorphichelpers" "k8s.io/kubectl/pkg/util/podutils" - "k8s.io/kubernetes/test/e2e/framework/log" ) const ( diff --git a/tests/e2e/br/utils/s3/minio.go b/tests/e2e/br/utils/s3/minio.go index 659389f2850..5bccf192c8b 100644 --- a/tests/e2e/br/utils/s3/minio.go +++ b/tests/e2e/br/utils/s3/minio.go @@ -21,13 +21,14 @@ import ( 
"github.com/minio/minio-go/v6" "github.com/onsi/ginkgo" - "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/tests/e2e/br/utils/portforward" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" - podutil "k8s.io/kubernetes/test/e2e/framework/pod" + + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/tests/e2e/br/utils/portforward" + podutil "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod" ) const ( diff --git a/tests/e2e/config/config.go b/tests/e2e/config/config.go index 9dccb2ae12f..7eac1669ea3 100644 --- a/tests/e2e/config/config.go +++ b/tests/e2e/config/config.go @@ -25,7 +25,8 @@ import ( "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" cliflag "k8s.io/component-base/cli/flag" - "k8s.io/kubernetes/test/e2e/framework" + + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" ) // Global Test configuration. 
diff --git a/tests/e2e/dmcluster/dmcluster.go b/tests/e2e/dmcluster/dmcluster.go index 051eec49403..cadba3daa8a 100644 --- a/tests/e2e/dmcluster/dmcluster.go +++ b/tests/e2e/dmcluster/dmcluster.go @@ -34,8 +34,6 @@ import ( typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" restclient "k8s.io/client-go/rest" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" "github.com/pingcap/tidb-operator/pkg/apis/label" @@ -54,6 +52,8 @@ import ( "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) var _ = ginkgo.Describe("DMCluster", func() { diff --git a/tests/e2e/e2e.go b/tests/e2e/e2e.go index 5781004a478..5907585f1f1 100644 --- a/tests/e2e/e2e.go +++ b/tests/e2e/e2e.go @@ -30,18 +30,7 @@ import ( "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/reporters" "github.com/onsi/gomega" - asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" - "github.com/pingcap/tidb-operator/pkg/version" - "github.com/pingcap/tidb-operator/tests" - e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config" - "github.com/pingcap/tidb-operator/tests/e2e/tidbcluster" - utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image" - utilnode "github.com/pingcap/tidb-operator/tests/e2e/util/node" - utiloperator "github.com/pingcap/tidb-operator/tests/e2e/util/operator" - "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" apiextensionsclientset 
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,20 +42,27 @@ import ( "k8s.io/component-base/logs" "k8s.io/klog/v2" aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" - "k8s.io/kubernetes/test/e2e/framework" - e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - "k8s.io/kubernetes/test/e2e/framework/log" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - "k8s.io/kubernetes/test/e2e/framework/pod" utilnet "k8s.io/utils/net" // ensure auth plugins are loaded _ "k8s.io/client-go/plugin/pkg/client/auth" - // ensure that cloud providers are loaded - _ "k8s.io/kubernetes/test/e2e/framework/providers/aws" - _ "k8s.io/kubernetes/test/e2e/framework/providers/gce" + // no cloud provider specific for now in real e2e + + asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" + "github.com/pingcap/tidb-operator/pkg/version" + "github.com/pingcap/tidb-operator/tests" + e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config" + "github.com/pingcap/tidb-operator/tests/e2e/tidbcluster" + utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image" + utiloperator "github.com/pingcap/tidb-operator/tests/e2e/util/operator" + "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + e2ekubectl "github.com/pingcap/tidb-operator/tests/third_party/k8s/kubectl" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + e2enode "github.com/pingcap/tidb-operator/tests/third_party/k8s/node" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod" ) var ( @@ -145,52 +141,6 @@ func setupSuite(c kubernetes.Interface, extClient versioned.Interface, apiExtCli log.Logf("WARNING: Waiting for all 
daemonsets to be ready failed: %v", err) } - ginkgo.By("Initializing all nodes") - nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - framework.ExpectNoError(err, "failed to list nodes") - for _, node := range nodeList.Items { - framework.Logf("Initializing node %q", node.Name) - framework.ExpectNoError(utilnode.InitNode(&node), fmt.Sprintf("initializing node %s failed", node.Name)) - } - - // By using default storage class in GKE/EKS (aws), network attached storage - // which be used and we must clean them later. - // We set local-storage class as default for simplicity. - // The default storage class of kind is local-path-provisioner which - // consumes local storage like local-volume-provisioner. However, it's not - // stable in our e2e testing. - if framework.TestContext.Provider == "gke" || framework.TestContext.Provider == "aws" { - defaultSCName := "local-storage" - list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) - framework.ExpectNoError(err, "list storage class failed") - // only one storage class can be marked default - // https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/#changing-the-default-storageclass - var localStorageSC *storagev1.StorageClass - for i, sc := range list.Items { - if sc.Name == defaultSCName { - localStorageSC = &list.Items[i] - } else if storageutil.IsDefaultAnnotation(sc.ObjectMeta) { - delete(sc.ObjectMeta.Annotations, storageutil.IsDefaultStorageClassAnnotation) - _, err = c.StorageV1().StorageClasses().Update(context.TODO(), &sc, metav1.UpdateOptions{}) - framework.ExpectNoError(err, "update storage class failed, %v", sc) - } - } - // nolint: staticcheck - // reason: SA5011(related information): this check suggests that the pointer can be nil - if localStorageSC == nil { - log.Failf("local-storage storage class not found") - } - // nolint: staticcheck - // reason: SA5011: possible nil pointer dereference - if 
localStorageSC.Annotations == nil { - localStorageSC.Annotations = map[string]string{} - } - localStorageSC.Annotations[storageutil.IsDefaultStorageClassAnnotation] = "true" - log.Logf("Setting %q as the default storage class", localStorageSC.Name) - _, err = c.StorageV1().StorageClasses().Update(context.TODO(), localStorageSC, metav1.UpdateOptions{}) - framework.ExpectNoError(err, "update storage class failed, %v", localStorageSC) - } - // Log the version of the server and this client. log.Logf("e2e test version: %s", version.Get().GitVersion) @@ -232,7 +182,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { cmd := exec.Command("sh", "-c", p.cmd) output, err := cmd.CombinedOutput() if err != nil { - framework.Failf("failed to %s (cmd: %q, error: %v, output: %s", p.text, p.cmd, err, string(output)) + log.Failf("failed to %s (cmd: %q, error: %v, output: %s", p.text, p.cmd, err, string(output)) } } @@ -259,13 +209,13 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { setupSuite(kubeCli, cli, apiExtCli) // override with hard-coded value e2econfig.TestConfig.ManifestDir = "/manifests" - framework.Logf("====== e2e configuration ======") - framework.Logf("%s", e2econfig.TestConfig.MustPrettyPrintJSON()) + log.Logf("====== e2e configuration ======") + log.Logf("%s", e2econfig.TestConfig.MustPrettyPrintJSON()) // preload images if e2econfig.TestConfig.PreloadImages { ginkgo.By("Preloading images") if err := utilimage.PreloadImages(); err != nil { - framework.Failf("failed to pre-load images: %v", err) + log.Failf("failed to pre-load images: %v", err) } } @@ -498,7 +448,7 @@ func getDefaultClusterIPFamily(c kubernetes.Interface) string { // Get the ClusterIP of the kubernetes service created in the default namespace svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get kubernetes service ClusterIP: %v", err) + log.Failf("Failed to get kubernetes service 
ClusterIP: %v", err) } if utilnet.IsIPv6String(svc.Spec.ClusterIP) { @@ -518,25 +468,25 @@ func waitForDaemonSets(c kubernetes.Interface, ns string, allowedNotReadyNodes i } start := time.Now() - framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", + log.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns) return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { dsList, err := c.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { - framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) + log.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) return false, err } var notReadyDaemonSets []string for _, ds := range dsList.Items { - framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds())) + log.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds())) if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes { notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name) } } if len(notReadyDaemonSets) > 0 { - framework.Logf("there are not ready daemonsets: %v", notReadyDaemonSets) + log.Logf("there are not ready daemonsets: %v", notReadyDaemonSets) return false, nil } @@ -561,5 +511,5 @@ func setupSuitePerGinkgoNode() { klog.Fatal("Error loading client: ", err) } framework.TestContext.IPFamily = getDefaultClusterIPFamily(c) - framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily) + log.Logf("Cluster IP family: %s", framework.TestContext.IPFamily) } diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 69bd908a8eb..b4b680b7163 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go 
@@ -22,25 +22,25 @@ import ( "testing" "time" - e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - "k8s.io/kubernetes/test/e2e/framework/log" - "k8s.io/kubernetes/test/e2e/framework/testfiles" // test sources _ "github.com/pingcap/tidb-operator/tests/e2e/br" + e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config" _ "github.com/pingcap/tidb-operator/tests/e2e/dmcluster" _ "github.com/pingcap/tidb-operator/tests/e2e/tidbcluster" _ "github.com/pingcap/tidb-operator/tests/e2e/tidbdashboard" _ "github.com/pingcap/tidb-operator/tests/e2e/tidbngmonitoring" _ "github.com/pingcap/tidb-operator/tests/e2e/tikv" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/config" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/testfiles" ) // handleFlags sets up all flags and parses the command line. 
@@ -90,10 +90,10 @@ func createTestingNS(baseName string, c clientset.Interface, labels map[string]s if err != nil { if apierrors.IsAlreadyExists(err) { // regenerate on conflict - framework.Logf("Namespace name %q was already taken, generate a new name and retry", namespaceObj.Name) + log.Logf("Namespace name %q was already taken, generate a new name and retry", namespaceObj.Name) namespaceObj.Name = fmt.Sprintf("%v-%v", baseName, framework.RandomSuffix()) } else { - framework.Logf("Unexpected error while creating namespace: %v", err) + log.Logf("Unexpected error while creating namespace: %v", err) } return false, nil } diff --git a/tests/e2e/framework/framework.go b/tests/e2e/framework/framework.go index 2e19e00deb6..f1c258dd9cb 100644 --- a/tests/e2e/framework/framework.go +++ b/tests/e2e/framework/framework.go @@ -22,7 +22,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" + + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) func NewDefaultFramework(baseName string) *framework.Framework { @@ -37,7 +39,7 @@ func NewDefaultFramework(baseName string) *framework.Framework { // if the PVC namespace does not exist anymore. 
pvList, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) if err != nil { - framework.Logf("failed to list pvs: %v", err) + log.Logf("failed to list pvs: %v", err) return } var ( @@ -48,7 +50,7 @@ func NewDefaultFramework(baseName string) *framework.Framework { succeeded int ) defer func() { - framework.Logf("recycling orphan PVs (total: %d, retainReleased: %d, skipped: %d, failed: %d, succeeded: %d)", total, retainReleased, skipped, failed, succeeded) + log.Logf("recycling orphan PVs (total: %d, retainReleased: %d, skipped: %d, failed: %d, succeeded: %d)", total, retainReleased, skipped, failed, succeeded) }() for _, pv := range pvList.Items { if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain || pv.Status.Phase != v1.VolumeReleased { @@ -57,13 +59,13 @@ func NewDefaultFramework(baseName string) *framework.Framework { retainReleased++ pvcNamespaceName, ok := pv.Labels[label.NamespaceLabelKey] if !ok { - framework.Logf("label %q does not exist in PV %q", label.NamespaceLabelKey, pv.Name) + log.Logf("label %q does not exist in PV %q", label.NamespaceLabelKey, pv.Name) failed++ continue } _, err := c.CoreV1().Namespaces().Get(context.TODO(), pvcNamespaceName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { - framework.Logf("failed to get namespace %q: %v", pvcNamespaceName, err) + log.Logf("failed to get namespace %q: %v", pvcNamespaceName, err) failed++ continue } @@ -76,10 +78,10 @@ func NewDefaultFramework(baseName string) *framework.Framework { _, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), &pv, metav1.UpdateOptions{}) if err != nil { failed++ - framework.Logf("failed to set PersistentVolumeReclaimPolicy of PV %q to Delete: %v", pv.Name, err) + log.Logf("failed to set PersistentVolumeReclaimPolicy of PV %q to Delete: %v", pv.Name, err) } else { succeeded++ - framework.Logf("successfully set PersistentVolumeReclaimPolicy of PV %q to Delete", pv.Name) + 
log.Logf("successfully set PersistentVolumeReclaimPolicy of PV %q to Delete", pv.Name) } } }) diff --git a/tests/e2e/suite.go b/tests/e2e/suite.go index 888c563335f..37dfaebe313 100644 --- a/tests/e2e/suite.go +++ b/tests/e2e/suite.go @@ -14,13 +14,8 @@ package e2e import ( - "fmt" - "io/ioutil" - "path" - "time" - - "k8s.io/kubernetes/test/e2e/framework" - e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step. @@ -29,59 +24,18 @@ import ( // and then the function that only runs on the first Ginkgo node. func CleanupSuite() { // Run on all Ginkgo nodes - framework.Logf("Running AfterSuite actions on all nodes") + log.Logf("Running AfterSuite actions on all nodes") framework.RunCleanupActions() } // AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite func AfterSuiteActions() { // Run only Ginkgo on node 1 - framework.Logf("Running AfterSuite actions on node 1") + log.Logf("Running AfterSuite actions on node 1") if framework.TestContext.ReportDir != "" { framework.CoreDump(framework.TestContext.ReportDir) } - if framework.TestContext.GatherSuiteMetricsAfterTest { - if err := gatherTestSuiteMetrics(); err != nil { - framework.Logf("Error gathering metrics: %v", err) - } - } if framework.TestContext.NodeKiller.Enabled { close(framework.TestContext.NodeKiller.NodeKillerStopCh) } } - -func gatherTestSuiteMetrics() error { - framework.Logf("Gathering metrics") - cfg, err := framework.LoadConfig() - if err != nil { - return fmt.Errorf("error loading config: %v", err) - } - c, err := framework.LoadClientset() - if err != nil { - return fmt.Errorf("error loading client: %v", err) - } - - // Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster 
autoscaler (optionally). - grabber, err := e2emetrics.NewMetricsGrabber(c, nil, cfg, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, true) - if err != nil { - return fmt.Errorf("failed to create MetricsGrabber: %v", err) - } - - received, err := grabber.Grab() - if err != nil { - return fmt.Errorf("failed to grab metrics: %v", err) - } - - metricsForE2E := (*e2emetrics.ComponentCollection)(&received) - metricsJSON := metricsForE2E.PrintJSON() - if framework.TestContext.ReportDir != "" { - filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json") - if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil { - return fmt.Errorf("error writing to %q: %v", filePath, err) - } - } else { - framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON) - } - - return nil -} diff --git a/tests/e2e/tidbcluster/across-kubernetes.go b/tests/e2e/tidbcluster/across-kubernetes.go index ea86789fa32..75b6993006e 100644 --- a/tests/e2e/tidbcluster/across-kubernetes.go +++ b/tests/e2e/tidbcluster/across-kubernetes.go @@ -20,6 +20,19 @@ import ( "time" "github.com/onsi/ginkgo" + v1 "k8s.io/api/core/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" + aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" + asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" @@ 
-35,21 +48,8 @@ import ( utiltidb "github.com/pingcap/tidb-operator/tests/e2e/util/tidb" utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - - v1 "k8s.io/api/core/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/util/retry" - aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" - ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/e2e/tidbcluster/ownership.go b/tests/e2e/tidbcluster/ownership.go index 6020f5d1084..3a6831ab5c7 100644 --- a/tests/e2e/tidbcluster/ownership.go +++ b/tests/e2e/tidbcluster/ownership.go @@ -17,14 +17,15 @@ import ( "context" "time" - "github.com/pingcap/errors" - "github.com/pingcap/tidb-operator/pkg/controller" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb-operator/pkg/controller" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // WaitObjectToBeControlledByOrDie wait desired owner become the controller of the object diff --git a/tests/e2e/tidbcluster/serial.go b/tests/e2e/tidbcluster/serial.go index 
476f62a664e..b268b27773b 100644 --- a/tests/e2e/tidbcluster/serial.go +++ b/tests/e2e/tidbcluster/serial.go @@ -33,8 +33,6 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -56,6 +54,8 @@ import ( utiltidb "github.com/pingcap/tidb-operator/tests/e2e/util/tidb" utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/e2e/tidbcluster/stability-asts.go b/tests/e2e/tidbcluster/stability-asts.go index 641c2290c83..670e6d30484 100644 --- a/tests/e2e/tidbcluster/stability-asts.go +++ b/tests/e2e/tidbcluster/stability-asts.go @@ -31,9 +31,6 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/pingcap/tidb-operator/pkg/apis/label" @@ -49,6 +46,9 @@ import ( utilstatefulset "github.com/pingcap/tidb-operator/tests/e2e/util/statefulset" utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + e2esset "github.com/pingcap/tidb-operator/tests/third_party/k8s/statefulset" ) var _ = ginkgo.Describe("[Stability]", func() { @@ 
-324,7 +324,7 @@ var _ = ginkgo.Describe("[Stability]", func() { for _, oldPod := range oldPodList.Items { // if the pod is not new or deleted in scaling, it should not be affected if oldPod.Name == newPod.Name && oldPod.UID != newPod.UID { - framework.Failf("pod %s/%s should not be affected (UID: %s, OLD UID: %s)", newPod.Namespace, newPod.Name, newPod.UID, oldPod.UID) + log.Failf("pod %s/%s should not be affected (UID: %s, OLD UID: %s)", newPod.Namespace, newPod.Name, newPod.UID, oldPod.UID) } } } diff --git a/tests/e2e/tidbcluster/stability.go b/tests/e2e/tidbcluster/stability.go index 4af9b58d7a8..0798090f0dc 100644 --- a/tests/e2e/tidbcluster/stability.go +++ b/tests/e2e/tidbcluster/stability.go @@ -13,6 +13,7 @@ package tidbcluster +/* import ( "context" "fmt" @@ -25,11 +26,29 @@ import ( "github.com/onsi/gomega" asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" "github.com/pingcap/errors" + v1 "k8s.io/api/core/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + corelisterv1 "k8s.io/client-go/listers/core/v1" + restclient "k8s.io/client-go/rest" + aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/scheme" + 
"github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/pkg/util" "github.com/pingcap/tidb-operator/tests" e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config" @@ -37,7 +56,6 @@ import ( testutils "github.com/pingcap/tidb-operator/tests/e2e/util" utilcloud "github.com/pingcap/tidb-operator/tests/e2e/util/cloud" utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image" - utilnode "github.com/pingcap/tidb-operator/tests/e2e/util/node" utilpod "github.com/pingcap/tidb-operator/tests/e2e/util/pod" "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" "github.com/pingcap/tidb-operator/tests/e2e/util/proxiedpdclient" @@ -47,29 +65,11 @@ import ( utiltikv "github.com/pingcap/tidb-operator/tests/e2e/util/tikv" "github.com/pingcap/tidb-operator/tests/pkg/fixture" "github.com/pingcap/tidb-operator/tests/pkg/mock" - v1 "k8s.io/api/core/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - corelisterv1 "k8s.io/client-go/listers/core/v1" - restclient "k8s.io/client-go/rest" - aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" - "k8s.io/kubernetes/test/e2e/framework/node" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - "k8s.io/kubernetes/test/e2e/framework/pod" - e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" + framework 
"github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + e2enode "github.com/pingcap/tidb-operator/tests/third_party/k8s/node" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod" + e2eskipper "github.com/pingcap/tidb-operator/tests/third_party/k8s/skipper" ) // Stability specs describe tests which involve disruptive operations, e.g. @@ -182,16 +182,16 @@ var _ = ginkgo.Describe("[Stability]", func() { err = wait.PollImmediate(time.Second*30, time.Minute*5, func() (bool, error) { var ok bool var err error - framework.Logf("check whether pods of cluster %q are changed", clusterName) + log.Logf("check whether pods of cluster %q are changed", clusterName) ok, err = utilpod.PodsAreChanged(c, podList.Items)() if err != nil { - framework.Logf("ERROR: meet error during check pods of cluster %q are changed, err:%v", clusterName, err) + log.Logf("ERROR: meet error during check pods of cluster %q are changed, err:%v", clusterName, err) return false, err } if ok { return true, nil } - framework.Logf("check whether pods of cluster %q are running", clusterName) + log.Logf("check whether pods of cluster %q are running", clusterName) newPodList, err := c.CoreV1().Pods(ns).List(context.TODO(), listOptions) if err != nil { return false, err @@ -201,7 +201,7 @@ var _ = ginkgo.Describe("[Stability]", func() { return false, fmt.Errorf("pod %s/%s is not running", pod.Namespace, pod.Name) } } - framework.Logf("check whehter tidb cluster %q is connectable", clusterName) + log.Logf("check whehter tidb cluster %q is connectable", clusterName) ok, err = utiltidb.TiDBIsConnectable(fw, ns, clusterName, "root", "")() if !ok || err != nil { // not connectable or some error happened @@ -318,7 +318,7 @@ var _ = ginkgo.Describe("[Stability]", func() { framework.ExpectNoError(err, "failed to list pods in ns %s", ns) for _, pod := range podList.Items { if v, ok := pod.Labels[label.ComponentLabelKey]; !ok { - 
framework.Failf("pod %s/%s does not have component label key %q", pod.Namespace, pod.Name, label.ComponentLabelKey) + log.Failf("pod %s/%s does not have component label key %q", pod.Namespace, pod.Name, label.ComponentLabelKey) } else if v == label.PDLabelVal { allPDNodes[pod.Name] = allNodes[pod.Spec.NodeName] } else if v == label.TiKVLabelVal { @@ -361,68 +361,7 @@ var _ = ginkgo.Describe("[Stability]", func() { gomega.Expect(len(tikvPodsOnDeletedNode)).To(gomega.BeNumerically(">=", 1), "the number of affected tikvs must be equal or greater than 1") err = framework.TestContext.CloudConfig.Provider.DeleteNode(nodeToDelete) framework.ExpectNoError(err, fmt.Sprintf("failed to delete node %q", nodeToDelete.Name)) - framework.Logf("Node %q deleted", nodeToDelete.Name) - - if framework.TestContext.Provider == "aws" { - // The node object will be gone with physical machine. - ginkgo.By(fmt.Sprintf("[AWS/EKS] Wait for the node object %q to be deleted", nodeToDelete.Name)) - err = wait.PollImmediate(time.Second*5, time.Minute*5, func() (bool, error) { - _, err = c.CoreV1().Nodes().Get(context.TODO(), nodeToDelete.Name, metav1.GetOptions{}) - if err == nil || !apierrors.IsNotFound(err) { - return false, nil - } - return true, nil - }) - framework.ExpectNoError(err, "failed to get node %s", nodeToDelete.Name) - - ginkgo.By("[AWS/EKS] New instance will be created and join the cluster") - _, err := node.CheckReady(c, len(nodeList.Items), 5*time.Minute) - framework.ExpectNoError(err, "failed to check node ready state") - - ginkgo.By("[AWS/EKS] Initialize newly created node") - nodeList, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - framework.ExpectNoError(err, "failed to list nodes") - initialized := 0 - for _, node := range nodeList.Items { - if _, ok := allNodes[node.Name]; !ok { - framework.ExpectNoError(utilnode.InitNode(&node)) - initialized++ - } - } - gomega.Expect(initialized).To(gomega.BeNumerically("==", 1), "must have a node initialized") - } 
else if framework.TestContext.Provider == "gke" { - instanceIDAnn := "container.googleapis.com/instance_id" - oldInstanceID, ok := nodeToDelete.Annotations[instanceIDAnn] - if !ok { - framework.Failf("instance label %q not found on node object %q", instanceIDAnn, nodeToDelete.Name) - } - - ginkgo.By("[GCP/GKE] Wait for instance ID to be updated") - err = wait.PollImmediate(time.Second*5, time.Minute*10, func() (bool, error) { - node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeToDelete.Name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - instanceID, ok := node.Annotations[instanceIDAnn] - if !ok { - return false, nil - } - if instanceID == oldInstanceID { - return false, nil - } - framework.Logf("instance ID of node %q changed from %q to %q", nodeToDelete.Name, oldInstanceID, instanceID) - return true, nil - }) - framework.ExpectNoError(err, "wait for instance ID timeout") - - ginkgo.By("[GCP/GKE] Wait for the node to be ready") - node.WaitForNodeToBeReady(c, nodeToDelete.Name, time.Minute*5) - - ginkgo.By(fmt.Sprintf("[GCP/GKE] Initialize underlying machine of node %s", nodeToDelete.Name)) - node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeToDelete.Name, metav1.GetOptions{}) - framework.ExpectNoError(err, "failed to get node %s", nodeToDelete.Name) - framework.ExpectNoError(utilnode.InitNode(node)) - } + log.Logf("Node %q deleted", nodeToDelete.Name) ginkgo.By("Mark stores of failed tikv pods as tombstone") pdClient, cancel, err := proxiedpdclient.NewProxiedPDClient(secretLister, fw, ns, clusterName, false) @@ -433,7 +372,7 @@ var _ = ginkgo.Describe("[Stability]", func() { } }() for _, pod := range tikvPodsOnDeletedNode { - framework.Logf("Mark tikv store of pod %s/%s as Tombstone", ns, pod.Name) + log.Logf("Mark tikv store of pod %s/%s as Tombstone", ns, pod.Name) err = wait.PollImmediate(time.Second*3, time.Minute, func() (bool, error) { storeID, err := utiltikv.GetStoreIDByPodName(cli, ns, clusterName, pod.Name) if err != nil { 
@@ -449,7 +388,7 @@ var _ = ginkgo.Describe("[Stability]", func() { } ginkgo.By("Delete pd members") for _, pod := range pdPodsOnDeletedNode { - framework.Logf("Delete pd member of pod %s/%s", ns, pod.Name) + log.Logf("Delete pd member of pod %s/%s", ns, pod.Name) err = wait.PollImmediate(time.Second*3, time.Minute, func() (bool, error) { err = pdClient.DeleteMember(pod.Name) if err != nil { @@ -474,7 +413,7 @@ var _ = ginkgo.Describe("[Stability]", func() { for _, pvcName := range pvcNamesOnDeletedNode { pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { - framework.Failf("apiserver error: %v", err) + log.Failf("apiserver error: %v", err) } if apierrors.IsNotFound(err) { continue @@ -488,7 +427,7 @@ var _ = ginkgo.Describe("[Stability]", func() { } } } else if framework.TestContext.Provider == "gke" { - framework.Logf("We are using fixed paths in local PVs in our e2e. PVs of the deleted node are usable though the underlying storage is empty now") + log.Logf("We are using fixed paths in local PVs in our e2e. PVs of the deleted node are usable though the underlying storage is empty now") // Because of pod exponential crash loop back off, we can // delete the failed pods to make it start soon. // Note that this is optional. 
@@ -770,7 +709,7 @@ var _ = ginkgo.Describe("[Stability]", func() { } return false, err } - _, condition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) + _, condition := k8s.GetPodCondition(&pod.Status, v1.PodScheduled) if condition == nil || condition.Status != v1.ConditionTrue { return false, nil } @@ -829,7 +768,7 @@ var _ = ginkgo.Describe("[Stability]", func() { } return false, err } - return !podutil.IsPodReady(pod), nil + return !k8s.IsPodReady(pod), nil }) framework.ExpectNoError(err, "wait for patched pod ready timeout") @@ -1590,3 +1529,4 @@ var _ = ginkgo.Describe("[Stability]", func() { }) }) }) +*/ diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go index 329e604e9d4..9030c731619 100644 --- a/tests/e2e/tidbcluster/tidbcluster.go +++ b/tests/e2e/tidbcluster/tidbcluster.go @@ -37,11 +37,9 @@ import ( clientset "k8s.io/client-go/kubernetes" k8sScheme "k8s.io/client-go/kubernetes/scheme" typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + corelisterv1 "k8s.io/client-go/listers/core/v1" restclient "k8s.io/client-go/rest" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" - e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/utils/pointer" ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" @@ -67,7 +65,9 @@ import ( "github.com/pingcap/tidb-operator/tests/e2e/util/proxiedpdclient" utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" - corelisterv1 "k8s.io/client-go/listers/core/v1" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + e2eskipper "github.com/pingcap/tidb-operator/tests/third_party/k8s/skipper" ) var _ = ginkgo.Describe("TiDBCluster", func() { diff --git a/tests/e2e/tidbcluster/tls.go b/tests/e2e/tidbcluster/tls.go 
index 436b9b58ebf..ea2a3d16319 100644 --- a/tests/e2e/tidbcluster/tls.go +++ b/tests/e2e/tidbcluster/tls.go @@ -26,17 +26,18 @@ import ( "text/template" "time" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "github.com/go-sql-driver/mysql" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/util" "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/pod" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod" ) var tidbIssuerTmpl = ` @@ -570,7 +571,7 @@ func DeleteCertManager(cli clientset.Interface) error { for _, _pod := range podList.Items { err := pod.WaitForPodNotFoundInNamespace(cli, _pod.Name, "cert-manager", 5*time.Minute) if err != nil { - framework.Logf("failed to wait for pod cert-manager/%s disappear", _pod.Name) + log.Logf("failed to wait for pod cert-manager/%s disappear", _pod.Name) return false, nil } } @@ -661,7 +662,7 @@ func installCert(tmplStr string, tp interface{}) error { return err } if data, err := exec.Command("sh", "-c", fmt.Sprintf("kubectl apply -f %s", tmpFile.Name())).CombinedOutput(); err != nil { - framework.Logf("failed to create certificate: %s, %v", string(data), err) + log.Logf("failed to create certificate: %s, %v", string(data), err) return err } @@ -693,7 +694,7 @@ func tidbIsTLSEnabled(fw portforward.PortForward, c clientset.Interface, ns, tcN return true, fmt.Errorf("the connection to tidb server is not ssl %s/%s", ns, tcName) } - framework.Logf("The connection to TiDB Server is TLS enabled.") + log.Logf("The 
connection to TiDB Server is TLS enabled.") return true, nil } } @@ -706,7 +707,7 @@ func insertIntoDataToSourceDB(fw portforward.PortForward, c clientset.Interface, return func() (bool, error) { db, cancel, err := connectToTiDBWithTLSSupport(fw, c, ns, tcName, passwd, tlsEnabled) if err != nil { - framework.Logf("failed to connect to source db: %v", err) + log.Logf("failed to connect to source db: %v", err) return false, nil } defer db.Close() @@ -714,13 +715,13 @@ func insertIntoDataToSourceDB(fw portforward.PortForward, c clientset.Interface, res, err := db.Exec("CREATE TABLE test.city (name VARCHAR(64) PRIMARY KEY)") if err != nil { - framework.Logf("can't create table in source db: %v, %v", res, err) + log.Logf("can't create table in source db: %v, %v", res, err) return false, nil } res, err = db.Exec("INSERT INTO test.city (name) VALUES (\"beijing\")") if err != nil { - framework.Logf("can't insert into table tls in source db: %v, %v", res, err) + log.Logf("can't insert into table tls in source db: %v, %v", res, err) return false, nil } @@ -732,7 +733,7 @@ func dataInClusterIsCorrect(fw portforward.PortForward, c clientset.Interface, n return func() (bool, error) { db, cancel, err := connectToTiDBWithTLSSupport(fw, c, ns, tcName, passwd, tlsEnabled) if err != nil { - framework.Logf("can't connect to %s/%s, %v", ns, tcName, err) + log.Logf("can't connect to %s/%s, %v", ns, tcName, err) return false, nil } defer db.Close() @@ -743,11 +744,11 @@ func dataInClusterIsCorrect(fw portforward.PortForward, c clientset.Interface, n err = row.Scan(&name) if err != nil { - framework.Logf("can't scan from %s/%s, %v", ns, tcName, err) + log.Logf("can't scan from %s/%s, %v", ns, tcName, err) return false, nil } - framework.Logf("TABLE test.city name = %s", name) + log.Logf("TABLE test.city name = %s", name) if name == "beijing" { return true, nil } diff --git a/tests/e2e/tidbdashboard/dashboard.go b/tests/e2e/tidbdashboard/dashboard.go index 283de7743c8..e1e8da63d74 100644 
--- a/tests/e2e/tidbdashboard/dashboard.go +++ b/tests/e2e/tidbdashboard/dashboard.go @@ -32,13 +32,13 @@ import ( "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" "github.com/onsi/ginkgo" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/tests/e2e/tidbngmonitoring/ngm.go b/tests/e2e/tidbngmonitoring/ngm.go index 367913e41cf..77617633f3a 100644 --- a/tests/e2e/tidbngmonitoring/ngm.go +++ b/tests/e2e/tidbngmonitoring/ngm.go @@ -36,6 +36,7 @@ import ( utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" utiltngm "github.com/pingcap/tidb-operator/tests/e2e/util/tngm" "github.com/pingcap/tidb-operator/tests/pkg/fixture" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -44,7 +45,6 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/tests/e2e/tikv/tikv.go b/tests/e2e/tikv/tikv.go index c5315d5c991..84186413752 100644 --- a/tests/e2e/tikv/tikv.go +++ b/tests/e2e/tikv/tikv.go @@ -22,9 +22,6 @@ import ( "time" "github.com/onsi/ginkgo" - astsHelper "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" - asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" - utiltc 
"github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -34,11 +31,11 @@ import ( typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" restclient "k8s.io/client-go/rest" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/utils/pointer" ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" + astsHelper "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" + asclientset "github.com/pingcap/advanced-statefulset/client/client/clientset/versioned" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" @@ -50,7 +47,10 @@ import ( e2eframework "github.com/pingcap/tidb-operator/tests/e2e/framework" utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image" "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" + utiltc "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" "github.com/pingcap/tidb-operator/tests/pkg/fixture" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) type testcase struct { diff --git a/tests/e2e/util/cloud/cloud.go b/tests/e2e/util/cloud/cloud.go index 021e0169c53..41655b02c15 100644 --- a/tests/e2e/util/cloud/cloud.go +++ b/tests/e2e/util/cloud/cloud.go @@ -17,8 +17,8 @@ import ( "os/exec" "strings" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) func getClusterLocation() string { @@ -69,7 +69,7 @@ func DisableNodeAutoRepair() { 
framework.ExpectNoError(err, "failed to get gcloud command: %q", gcloudCommand) } else { // TODO support AWS (EKS) - framework.Failf("unsupported provider %q", framework.TestContext.Provider) + log.Failf("unsupported provider %q", framework.TestContext.Provider) } } @@ -86,6 +86,6 @@ func EnableNodeAutoRepair() { framework.ExpectNoError(err, "failed to get gcloud command: %q", gcloudCommand) } else { // TODO support AWS (EKS) - framework.Failf("unsupported provider %q", framework.TestContext.Provider) + log.Failf("unsupported provider %q", framework.TestContext.Provider) } } diff --git a/tests/e2e/util/db/db.go b/tests/e2e/util/db/db.go index 217877cf6d7..7971a3070fb 100644 --- a/tests/e2e/util/db/db.go +++ b/tests/e2e/util/db/db.go @@ -18,7 +18,7 @@ import ( "database/sql" "fmt" - "k8s.io/kubernetes/test/e2e/framework" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" ) type baseAction struct { diff --git a/tests/e2e/util/image/image.go b/tests/e2e/util/image/image.go index 9c346738459..aff1c5fd045 100644 --- a/tests/e2e/util/image/image.go +++ b/tests/e2e/util/image/image.go @@ -22,8 +22,9 @@ import ( "github.com/ghodss/yaml" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" + + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) var ( diff --git a/tests/e2e/util/node/node.go b/tests/e2e/util/node/node.go deleted file mode 100644 index b8a873706fb..00000000000 --- a/tests/e2e/util/node/node.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package node - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/ssh" -) - -var ( - awsNodeInitCmd = ` -sudo bash -c ' -test -d /mnt/disks || mkdir -p /mnt/disks -df -h /mnt/disks -if mountpoint /mnt/disks &>/dev/null; then - echo "info: /mnt/disks is a mountpoint" -else - echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs" -fi -cd /mnt/disks -for ((i = 1; i <= 32; i++)) { - if [ ! -d vol$i ]; then - mkdir vol$i - fi - if ! mountpoint vol$i &>/dev/null; then - mount --bind vol$i vol$i - fi -} -echo "info: increase max open files for containers" -if ! grep -qF "OPTIONS" /etc/sysconfig/docker; then - echo 'OPTIONS="--default-ulimit nofile=1024000:1024000"' >> /etc/sysconfig/docker -fi -systemctl restart docker -' -` - // disks are created under /mnt/stateful_partition directory - // https://cloud.google.com/container-optimized-os/docs/concepts/disks-and-filesystem - gkeNodeInitCmd = ` -sudo bash -c ' -test -d /mnt/stateful_partition/disks || mkdir -p /mnt/stateful_partition/disks -df -h /mnt/stateful_partition/disks -test -d /mnt/disks || mkdir -p /mnt/disks -cd /mnt/disks -for ((i = 1; i <= 32; i++)) { - if [ ! -d vol$i ]; then - mkdir vol$i - fi - if ! mountpoint vol$i &>/dev/null; then - if [ ! 
-d /mnt/stateful_partition/disks/vol$i ]; then - mkdir /mnt/stateful_partition/disks/vol$i - fi - mount --bind /mnt/stateful_partition/disks/vol$i vol$i - fi -} -' -` -) - -func InitNode(node *v1.Node) error { - var initNodeCmd string - if framework.TestContext.Provider == "aws" { - initNodeCmd = awsNodeInitCmd - } else if framework.TestContext.Provider == "gke" { - initNodeCmd = gkeNodeInitCmd - } else { - framework.Logf("Unknown provider %q, skipped", framework.TestContext.Provider) - return nil - } - return ssh.IssueSSHCommand(initNodeCmd, framework.TestContext.Provider, node) -} diff --git a/tests/e2e/util/operator/operator.go b/tests/e2e/util/operator/operator.go index cc6f6ef8adb..378be1cba58 100644 --- a/tests/e2e/util/operator/operator.go +++ b/tests/e2e/util/operator/operator.go @@ -21,8 +21,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/test/e2e/framework" + + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // OperatorKillerConfig describes configuration for operator killer. @@ -68,23 +69,23 @@ func (k *OperatorKiller) Run(stopCh <-chan struct{}) { wait.JitterUntil(func() { pods, err := k.podLister() if err != nil { - framework.Logf("failed to list operator pods: %v", err) + log.Logf("failed to list operator pods: %v", err) return } - framework.Logf("Trying to kill tidb-operator pods (%d)", len(pods)) + log.Logf("Trying to kill tidb-operator pods (%d)", len(pods)) for _, pod := range pods { - if !podutil.IsPodReady(&pod) || hasBeenRestarted(&pod) { + if !k8s.IsPodReady(&pod) || hasBeenRestarted(&pod) { // deleting the pod will recreate it, we should skip if the pod // is not ready or has been restarted before, otherwise // potential errors (e.g. panic) in operator may be hidden. 
- framework.Logf("pod %s/%s is not ready or crashed before, skip deleting", pod.Namespace, pod.Name) + log.Logf("pod %s/%s is not ready or crashed before, skip deleting", pod.Namespace, pod.Name) continue } err = k.client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) if err != nil { - framework.Logf("failed to delete pod %s/%s: %v", pod.Namespace, pod.Name, err) + log.Logf("failed to delete pod %s/%s: %v", pod.Namespace, pod.Name, err) } else { - framework.Logf("successfully deleted pod %s/%s", pod.Namespace, pod.Name) + log.Logf("successfully deleted pod %s/%s", pod.Namespace, pod.Name) } } }, k.config.Interval, k.config.JitterFactor, true, stopCh) diff --git a/tests/e2e/util/pod/pod.go b/tests/e2e/util/pod/pod.go index 2a9d800ac98..399bdb87a5d 100644 --- a/tests/e2e/util/pod/pod.go +++ b/tests/e2e/util/pod/pod.go @@ -17,15 +17,16 @@ import ( "context" "time" - testutils "github.com/pingcap/tidb-operator/tests/e2e/util" v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/log" + + testutils "github.com/pingcap/tidb-operator/tests/e2e/util" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // PodsAreChanged checks the given pods are changed or not (recreate, update). 
diff --git a/tests/e2e/util/portforward/portforward.go b/tests/e2e/util/portforward/portforward.go index 3a68add74f8..ccf2d4935d6 100644 --- a/tests/e2e/util/portforward/portforward.go +++ b/tests/e2e/util/portforward/portforward.go @@ -32,7 +32,8 @@ import ( "k8s.io/client-go/tools/portforward" "k8s.io/client-go/transport/spdy" "k8s.io/kubectl/pkg/polymorphichelpers" - "k8s.io/kubernetes/test/e2e/framework/log" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/e2e/util/statefulset/statefulset.go b/tests/e2e/util/statefulset/statefulset.go index 2521b69ddba..b5f0cdf3da1 100644 --- a/tests/e2e/util/statefulset/statefulset.go +++ b/tests/e2e/util/statefulset/statefulset.go @@ -19,8 +19,6 @@ import ( "regexp" "strconv" - "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" - "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,8 +26,11 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/test/e2e/framework/log" + + "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$") @@ -64,8 +65,8 @@ func IsAllDesiredPodsRunningAndReady(c kubernetes.Interface, sts *appsv1.Statefu return false } for _, pod := range actualPodList.Items { - if !podutil.IsPodReady(&pod) { - log.Logf("pod %s of sts %s/%s is not ready, got: %v", pod.Name, sts.Namespace, sts.Name, podutil.GetPodReadyCondition(pod.Status)) + if !k8s.IsPodReady(&pod) { + log.Logf("pod %s of sts %s/%s is not ready, got: %v", pod.Name, sts.Namespace, 
sts.Name, k8s.GetPodReadyCondition(pod.Status)) return false } } diff --git a/tests/e2e/util/storage/minio.go b/tests/e2e/util/storage/minio.go index 15a75042e17..da157e1da6b 100644 --- a/tests/e2e/util/storage/minio.go +++ b/tests/e2e/util/storage/minio.go @@ -20,13 +20,14 @@ import ( "time" "github.com/minio/minio-go/v6" - "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" - "github.com/pingcap/tidb-operator/tests/pkg/fixture" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework/pod" + + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" + "github.com/pingcap/tidb-operator/tests/pkg/fixture" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod" ) const ( diff --git a/tests/e2e/util/tidbcluster/tidbcluster.go b/tests/e2e/util/tidbcluster/tidbcluster.go index 6d55f9e7892..e9af71eb420 100644 --- a/tests/e2e/util/tidbcluster/tidbcluster.go +++ b/tests/e2e/util/tidbcluster/tidbcluster.go @@ -21,9 +21,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/util/tidbcluster" "github.com/pingcap/tidb-operator/tests" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/test/e2e/framework" ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/tests/e2e/util/tidbcluster/wait_for_component.go b/tests/e2e/util/tidbcluster/wait_for_component.go index af3f4f42114..7fc06fc2795 100644 --- a/tests/e2e/util/tidbcluster/wait_for_component.go +++ b/tests/e2e/util/tidbcluster/wait_for_component.go @@ -18,10 +18,11 @@ import ( "fmt" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" 
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/test/e2e/framework" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" ) // MustWaitForComponentPhase wait a component to be in a specific phase diff --git a/tests/e2e/util/tngm/tngm.go b/tests/e2e/util/tngm/tngm.go index c50bf15aff5..d39cc25d75c 100644 --- a/tests/e2e/util/tngm/tngm.go +++ b/tests/e2e/util/tngm/tngm.go @@ -20,10 +20,10 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" ) func MustWaitForNGMPhase(c versioned.Interface, tngm *v1alpha1.TidbNGMonitoring, phase v1alpha1.MemberPhase, timeout, pollInterval time.Duration) { diff --git a/tests/e2e/util/util.go b/tests/e2e/util/util.go index 369ae9a210c..2be31141271 100644 --- a/tests/e2e/util/util.go +++ b/tests/e2e/util/util.go @@ -26,7 +26,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/test/e2e/framework" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // WaitForAPIServicesAvaiable waits for apiservices to be available @@ -51,12 +52,12 @@ func WaitForAPIServicesAvaiable(client aggregatorclientset.Interface, selector l } for _, apiService := range apiServiceList.Items { if !isAvaiable(apiService.Status) { - framework.Logf("APIService %q is not available yet", apiService.Name) + log.Logf("APIService %q is not available yet", apiService.Name) return false, nil } } for _, apiService := range apiServiceList.Items { - framework.Logf("APIService %q is available", apiService.Name) + log.Logf("APIService %q is available", apiService.Name) } return true, nil 
}) @@ -84,12 +85,12 @@ func WaitForCRDsEstablished(client apiextensionsclientset.Interface, selector la } for _, crd := range crdList.Items { if !isEstalbished(crd.Status) { - framework.Logf("CRD %q is not established yet", crd.Name) + log.Logf("CRD %q is not established yet", crd.Name) return false, nil } } for _, crd := range crdList.Items { - framework.Logf("CRD %q is established", crd.Name) + log.Logf("CRD %q is established", crd.Name) } return true, nil }) diff --git a/tests/fault.go b/tests/fault.go index 9e1d53b1a31..f0b9f4c53b5 100644 --- a/tests/fault.go +++ b/tests/fault.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/client" "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +35,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" corelisterv1 "k8s.io/client-go/listers/core/v1" - "k8s.io/kubernetes/test/e2e/framework/log" ) const ( diff --git a/tests/monitor.go b/tests/monitor.go index 423c1bfcd9f..5025395bc0a 100644 --- a/tests/monitor.go +++ b/tests/monitor.go @@ -26,13 +26,13 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" "github.com/pingcap/tidb-operator/pkg/monitor/monitor" + "github.com/pingcap/tidb-operator/pkg/third_party/k8s" "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" "github.com/pingcap/tidb-operator/tests/pkg/metrics" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/test/e2e/framework/log" ) func CheckTidbMonitor(monitor 
*v1alpha1.TidbMonitor, cli versioned.Interface, kubeCli kubernetes.Interface, fw portforward.PortForward) error { @@ -87,7 +87,7 @@ func checkTidbMonitorPod(tm *v1alpha1.TidbMonitor, kubeCli kubernetes.Interface) } pod := &pods.Items[0] - if !podutil.IsPodReady(pod) { + if !k8s.IsPodReady(pod) { log.Logf("ERROR: tm[%s/%s]'s pod[%s/%s] is not ready", tm.Namespace, tm.Name, pod.Namespace, pod.Name) return false, nil } diff --git a/tests/pkg/blockwriter/blockwriter.go b/tests/pkg/blockwriter/blockwriter.go index 397b010cb47..a885026bde3 100644 --- a/tests/pkg/blockwriter/blockwriter.go +++ b/tests/pkg/blockwriter/blockwriter.go @@ -23,9 +23,10 @@ import ( "sync/atomic" "time" - "github.com/pingcap/tidb-operator/tests/pkg/util" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework/log" + + "github.com/pingcap/tidb-operator/tests/pkg/util" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/pkg/fault-trigger/api/response.go b/tests/pkg/fault-trigger/api/response.go index de4a084f90d..e4b8bd0bd46 100644 --- a/tests/pkg/fault-trigger/api/response.go +++ b/tests/pkg/fault-trigger/api/response.go @@ -31,7 +31,8 @@ import ( "net/http" "github.com/pingcap/errors" - "k8s.io/kubernetes/test/e2e/framework/log" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // Response defines a new response struct for http diff --git a/tests/pkg/fault-trigger/api/server.go b/tests/pkg/fault-trigger/api/server.go index 79b8c544c00..0bc46c1d5cb 100644 --- a/tests/pkg/fault-trigger/api/server.go +++ b/tests/pkg/fault-trigger/api/server.go @@ -29,8 +29,9 @@ import ( "net/http" restful "github.com/emicklei/go-restful" + "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // Server is a web service to control fault trigger diff --git a/tests/pkg/fault-trigger/client/client.go 
b/tests/pkg/fault-trigger/client/client.go index c8953355c1f..11b6d9174c4 100644 --- a/tests/pkg/fault-trigger/client/client.go +++ b/tests/pkg/fault-trigger/client/client.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/api" "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" "github.com/pingcap/tidb-operator/tests/pkg/util" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // Client is a fault-trigger client diff --git a/tests/pkg/fault-trigger/manager/static_pod_service.go b/tests/pkg/fault-trigger/manager/static_pod_service.go index bfa8e0d07b6..a977c842838 100644 --- a/tests/pkg/fault-trigger/manager/static_pod_service.go +++ b/tests/pkg/fault-trigger/manager/static_pod_service.go @@ -18,7 +18,7 @@ import ( "os" "os/exec" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/pkg/fault-trigger/manager/systemctl_service.go b/tests/pkg/fault-trigger/manager/systemctl_service.go index 175cb5cfb05..ea9482eaf32 100644 --- a/tests/pkg/fault-trigger/manager/systemctl_service.go +++ b/tests/pkg/fault-trigger/manager/systemctl_service.go @@ -17,7 +17,7 @@ import ( "fmt" "os/exec" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/pkg/fault-trigger/manager/vm_qm.go b/tests/pkg/fault-trigger/manager/vm_qm.go index 59e3e18201d..661b67b510d 100644 --- a/tests/pkg/fault-trigger/manager/vm_qm.go +++ b/tests/pkg/fault-trigger/manager/vm_qm.go @@ -18,7 +18,7 @@ import ( "os/exec" "strings" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) type QMVMManager struct { diff --git a/tests/pkg/fault-trigger/manager/vm_virsh.go b/tests/pkg/fault-trigger/manager/vm_virsh.go index e002ae64440..1d6910c8bd8 100644 --- 
a/tests/pkg/fault-trigger/manager/vm_virsh.go +++ b/tests/pkg/fault-trigger/manager/vm_virsh.go @@ -18,7 +18,7 @@ import ( "os/exec" "strings" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) type VirshVMManager struct { diff --git a/tests/pkg/mock/monitor.go b/tests/pkg/mock/monitor.go index 0c4507b70ca..87f13257053 100644 --- a/tests/pkg/mock/monitor.go +++ b/tests/pkg/mock/monitor.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/pingcap/tidb-operator/pkg/autoscaler/autoscaler/calculate" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) type MonitorInterface interface { diff --git a/tests/pkg/mock/util.go b/tests/pkg/mock/util.go index f5832b826f6..64cd818866e 100644 --- a/tests/pkg/mock/util.go +++ b/tests/pkg/mock/util.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/autoscaler/autoscaler/calculate" "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) type MonitorParams struct { diff --git a/tests/pkg/util/db.go b/tests/pkg/util/db.go index e119cf98429..5ab40985389 100644 --- a/tests/pkg/util/db.go +++ b/tests/pkg/util/db.go @@ -18,7 +18,7 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // OpenDB opens db diff --git a/tests/pkg/util/utils.go b/tests/pkg/util/utils.go index 6bc7034d5c3..fefb7907ae3 100644 --- a/tests/pkg/util/utils.go +++ b/tests/pkg/util/utils.go @@ -19,7 +19,7 @@ import ( "os/exec" "strings" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) const ( diff --git a/tests/pkg/webhook/pods.go b/tests/pkg/webhook/pods.go index 79441d5e778..2acde7a050e 100644 --- a/tests/pkg/webhook/pods.go +++ b/tests/pkg/webhook/pods.go @@ -19,7 +19,7 @@ import 
( v1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // only allow pods to be delete when it is not ddlowner of tidb, not leader of pd and not diff --git a/tests/pkg/webhook/route.go b/tests/pkg/webhook/route.go index 267908813e0..ec7922375eb 100644 --- a/tests/pkg/webhook/route.go +++ b/tests/pkg/webhook/route.go @@ -21,7 +21,8 @@ import ( v1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/test/e2e/framework/log" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) // toAdmissionResponse is a helper function to create an AdmissionResponse diff --git a/tests/slack/slack.go b/tests/slack/slack.go index cf3c5f25b99..70db2f046e8 100644 --- a/tests/slack/slack.go +++ b/tests/slack/slack.go @@ -20,7 +20,7 @@ import ( "net/http" "time" - "k8s.io/kubernetes/test/e2e/framework/log" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" ) var ( diff --git a/tests/third_party/k8s/auth/helpers.go b/tests/third_party/k8s/auth/helpers.go new file mode 100644 index 00000000000..fdc81b9fd90 --- /dev/null +++ b/tests/third_party/k8s/auth/helpers.go @@ -0,0 +1,170 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/auth/helpers.go @v1.23.17 + +package auth + +import ( + "context" + "fmt" + "sync" + "time" + + authorizationv1 "k8s.io/api/authorization/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + v1authorization "k8s.io/client-go/kubernetes/typed/authorization/v1" + v1rbac "k8s.io/client-go/kubernetes/typed/rbac/v1" + + e2elog "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +const ( + policyCachePollInterval = 100 * time.Millisecond + policyCachePollTimeout = 5 * time.Second +) + +type bindingsGetter interface { + v1rbac.RoleBindingsGetter + v1rbac.ClusterRoleBindingsGetter + v1rbac.ClusterRolesGetter +} + +// WaitForAuthorizationUpdate checks if the given user can perform the named verb and action. +// If policyCachePollTimeout is reached without the expected condition matching, an error is returned +func WaitForAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error { + return WaitForNamedAuthorizationUpdate(c, user, namespace, verb, "", resource, allowed) +} + +// WaitForNamedAuthorizationUpdate checks if the given user can perform the named verb and action on the named resource. 
+// If policyCachePollTimeout is reached without the expected condition matching, an error is returned +func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error { + review := &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Group: resource.Group, + Verb: verb, + Resource: resource.Resource, + Namespace: namespace, + Name: resourceName, + }, + User: user, + }, + } + + err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) { + response, err := c.SubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{}) + if err != nil { + return false, err + } + if response.Status.Allowed != allowed { + return false, nil + } + return true, nil + }) + return err +} + +// BindClusterRole binds the cluster role at the cluster scope. If RBAC is not enabled, nil +// is returned with no action. +func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error { + if !IsRBACEnabled(c) { + return nil + } + + // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches + _, err := c.ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns + "--" + clusterRole, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: clusterRole, + }, + Subjects: subjects, + }, metav1.CreateOptions{}) + + if err != nil { + return fmt.Errorf("binding clusterrole/%s for %q for %v: %w", clusterRole, ns, subjects, err) + } + + return nil +} + +// BindClusterRoleInNamespace binds the cluster role at the namespace scope. If RBAC is not enabled, nil +// is returned with no action. 
+func BindClusterRoleInNamespace(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error { + return bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...) +} + +// BindRoleInNamespace binds the role at the namespace scope. If RBAC is not enabled, nil +// is returned with no action. +func BindRoleInNamespace(c bindingsGetter, role, ns string, subjects ...rbacv1.Subject) error { + return bindInNamespace(c, "Role", role, ns, subjects...) +} + +func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rbacv1.Subject) error { + if !IsRBACEnabled(c) { + return nil + } + + // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches + _, err := c.RoleBindings(ns).Create(context.TODO(), &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns + "--" + role, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: roleType, + Name: role, + }, + Subjects: subjects, + }, metav1.CreateOptions{}) + + if err != nil { + return fmt.Errorf("binding %s/%s into %q for %v: %w", roleType, role, ns, subjects, err) + } + + return nil +} + +var ( + isRBACEnabledOnce sync.Once + isRBACEnabled bool +) + +// IsRBACEnabled returns true if RBAC is enabled. Otherwise false. 
+func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool { + isRBACEnabledOnce.Do(func() { + crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + e2elog.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err) + isRBACEnabled = false + } else if crs == nil || len(crs.Items) == 0 { + e2elog.Logf("No ClusterRoles found; assuming RBAC is disabled.") + isRBACEnabled = false + } else { + e2elog.Logf("Found ClusterRoles; assuming RBAC is enabled.") + isRBACEnabled = true + } + }) + + return isRBACEnabled +} diff --git a/tests/third_party/k8s/cleanup.go b/tests/third_party/k8s/cleanup.go new file mode 100644 index 00000000000..6d0616f1e4e --- /dev/null +++ b/tests/third_party/k8s/cleanup.go @@ -0,0 +1,82 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is a copy of the file in k8s.io/kubernetes/test/e2e/framework/cleanup.go @v1.23.17 + +package k8s + +import ( + "reflect" + "runtime" + "sync" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +// CleanupActionHandle is an integer pointer type for handling cleanup action +type CleanupActionHandle *int +type cleanupFuncHandle struct { + actionHandle CleanupActionHandle + actionHook func() +} + +var cleanupActionsLock sync.Mutex +var cleanupHookList = []cleanupFuncHandle{} + +// AddCleanupAction installs a function that will be called in the event of the +// whole test being terminated. 
This allows arbitrary pieces of the overall +// test to hook into SynchronizedAfterSuite(). +// The hooks are called in last-in-first-out order. +func AddCleanupAction(fn func()) CleanupActionHandle { + p := CleanupActionHandle(new(int)) + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + c := cleanupFuncHandle{actionHandle: p, actionHook: fn} + cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...) + return p +} + +// RemoveCleanupAction removes a function that was installed by +// AddCleanupAction. +func RemoveCleanupAction(p CleanupActionHandle) { + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + for i, item := range cleanupHookList { + if item.actionHandle == p { + cleanupHookList = append(cleanupHookList[:i], cleanupHookList[i+1:]...) + break + } + } +} + +// RunCleanupActions runs all functions installed by AddCleanupAction. It does +// not remove them (see RemoveCleanupAction) but it does run unlocked, so they +// may remove themselves. +func RunCleanupActions() { + list := []func(){} + func() { + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + for _, p := range cleanupHookList { + list = append(list, p.actionHook) + } + }() + // Run unlocked. + for _, fn := range list { + log.Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()) + fn() + } +} diff --git a/tests/third_party/k8s/config/config.go b/tests/third_party/k8s/config/config.go new file mode 100644 index 00000000000..00183888d5c --- /dev/null +++ b/tests/third_party/k8s/config/config.go @@ -0,0 +1,111 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config simplifies the declaration of configuration options. +// Right now the implementation maps them directly to command line +// flags. When combined with test/e2e/framework/viperconfig in a test +// suite, those flags then can also be read from a config file. +// +// The command line flags all get stored in a private flag set. The +// developer of the E2E test suite decides how they are exposed. Options +// include: +// - exposing as normal flags in the actual command line: +// CopyFlags(Flags, flag.CommandLine) +// - populate via test/e2e/framework/viperconfig: +// viperconfig.ViperizeFlags("my-config.yaml", "", Flags) +// - a combination of both: +// CopyFlags(Flags, flag.CommandLine) +// viperconfig.ViperizeFlags("my-config.yaml", "", flag.CommandLine) +// +// Instead of defining flags one-by-one, test developers annotate a +// structure with tags and then call a single function. This is the +// same approach as in https://godoc.org/github.com/jessevdk/go-flags, +// but implemented so that a test suite can continue to use the normal +// "flag" package. 
+// +// For example, a file storage/csi.go might define: +// +// var scaling struct { +// NumNodes int `default:"1" description:"number of nodes to run on"` +// Master string +// } +// _ = config.AddOptions(&scaling, "storage.csi.scaling") +// +// This defines the following command line flags: +// +// -storage.csi.scaling.numNodes= - number of nodes to run on (default: 1) +// -storage.csi.scaling.master= +// +// All fields in the structure must be exported and have one of the following +// types (same as in the `flag` package): +// - bool +// - time.Duration +// - float64 +// - string +// - int +// - int64 +// - uint +// - uint64 +// - and/or nested or embedded structures containing those basic types. +// +// Each basic entry may have a tag with these optional keys: +// +// usage: additional explanation of the option +// default: the default value, in the same format as it would +// be given on the command line and true/false for +// a boolean +// +// The names of the final configuration options are a combination of an +// optional common prefix for all options in the structure and the +// name of the fields, concatenated with a dot. To get names that are +// consistent with the command line flags defined by `ginkgo`, the +// initial character of each field name is converted to lower case. +// +// There is currently no support for aliases, so renaming the fields +// or the common prefix will be visible to users of the test suite and +// may breaks scripts which use the old names. +// +// The variable will be filled with the actual values by the test +// suite before running tests. Beware that the code which registers +// Ginkgo tests cannot use those config options, because registering +// tests and options both run before the E2E test suite handles +// parameters. + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/config/config.go @v1.23.17 + +package config + +import ( + "flag" +) + +// Flags is the flag set that AddOptions adds to. 
Test authors should +// also use it instead of directly adding to the global command line. +var Flags = flag.NewFlagSet("", flag.ContinueOnError) + +// CopyFlags ensures that all flags that are defined in the source flag +// set appear in the target flag set as if they had been defined there +// directly. From the flag package it inherits the behavior that there +// is a panic if the target already contains a flag from the source. +func CopyFlags(source *flag.FlagSet, target *flag.FlagSet) { + source.VisitAll(func(flag *flag.Flag) { + // We don't need to copy flag.DefValue. The original + // default (from, say, flag.String) was stored in + // the value and gets extracted by Var for the help + // message. + target.Var(flag.Value, flag.Name, flag.Usage) + }) +} diff --git a/tests/third_party/k8s/except.go b/tests/third_party/k8s/except.go new file mode 100644 index 00000000000..50faec8d635 --- /dev/null +++ b/tests/third_party/k8s/except.go @@ -0,0 +1,64 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/expect.go @v1.23.17 + +package k8s + +import ( + "github.com/onsi/gomega" +) + +// ExpectEqual expects the specified two are the same, otherwise an exception raises +func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) 
+} + +// ExpectNotEqual expects the specified two are not the same, otherwise an exception raises +func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...) +} + +// ExpectError expects an error happens, otherwise an exception raises +func ExpectError(err error, explain ...interface{}) { + gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...) +} + +// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. +func ExpectNoError(err error, explain ...interface{}) { + ExpectNoErrorWithOffset(1, err, explain...) +} + +// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller +// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f"). +func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { + gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...) +} + +// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter. +func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...) +} + +// ExpectHaveKey expects the actual map has the key in the keyset +func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...) +} + +// ExpectEmpty expects actual is empty +func ExpectEmpty(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...) 
+} diff --git a/tests/third_party/k8s/exec_util.go b/tests/third_party/k8s/exec_util.go new file mode 100644 index 00000000000..2775c9b4200 --- /dev/null +++ b/tests/third_party/k8s/exec_util.go @@ -0,0 +1,122 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/exec_util.go @v1.23.17 + +package k8s + +import ( + "bytes" + "io" + "net/url" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +// ExecOptions passed to ExecWithOptions +type ExecOptions struct { + Command []string + Namespace string + PodName string + ContainerName string + Stdin io.Reader + CaptureStdout bool + CaptureStderr bool + // If false, whitespace in std{err,out} will be removed. + PreserveWhitespace bool + Quiet bool +} + +// ExecWithOptions executes a command in the specified container, +// returning stdout, stderr and error. `options` allowed for +// additional parameters to be passed. 
+func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) { + if !options.Quiet { + log.Logf("ExecWithOptions %+v", options) + } + config, err := LoadConfig() + ExpectNoError(err, "failed to load restclient config") + + const tty = false + + log.Logf("ExecWithOptions: Clientset creation") + req := f.ClientSet.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(options.PodName). + Namespace(options.Namespace). + SubResource("exec"). + Param("container", options.ContainerName) + req.VersionedParams(&v1.PodExecOptions{ + Container: options.ContainerName, + Command: options.Command, + Stdin: options.Stdin != nil, + Stdout: options.CaptureStdout, + Stderr: options.CaptureStderr, + TTY: tty, + }, scheme.ParameterCodec) + + var stdout, stderr bytes.Buffer + log.Logf("ExecWithOptions: execute(POST %s)", req.URL()) + err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty) + if options.PreserveWhitespace { + return stdout.String(), stderr.String(), err + } + return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err +} + +// ExecCommandInContainerWithFullOutput executes a command in the +// specified container and return stdout, stderr and error +func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) { + return f.ExecWithOptions(ExecOptions{ + Command: cmd, + Namespace: f.Namespace.Name, + PodName: podName, + ContainerName: containerName, + Stdin: nil, + CaptureStdout: true, + CaptureStderr: true, + PreserveWhitespace: false, + }) +} + +// ExecCommandInContainer executes a command in the specified container. +func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string { + stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
+ log.Logf("Exec stderr: %q", stderr) + ExpectNoError(err, + "failed to execute command in pod %v, container %v: %v", + podName, containerName, err) + return stdout +} + +func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { + exec, err := remotecommand.NewSPDYExecutor(config, method, url) + if err != nil { + return err + } + return exec.Stream(remotecommand.StreamOptions{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + Tty: tty, + }) +} diff --git a/tests/third_party/k8s/flake_reporting_util.go b/tests/third_party/k8s/flake_reporting_util.go new file mode 100644 index 00000000000..90460841f0e --- /dev/null +++ b/tests/third_party/k8s/flake_reporting_util.go @@ -0,0 +1,98 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/fake_reporting_util.go @v1.23.17 + +package k8s + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +// FlakeReport is a struct for managing the flake report. +type FlakeReport struct { + lock sync.RWMutex + Flakes []string `json:"flakes"` + FlakeCount int `json:"flakeCount"` +} + +// NewFlakeReport returns a new flake report. 
+func NewFlakeReport() *FlakeReport { + return &FlakeReport{ + Flakes: []string{}, + } +} + +func buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + } +} + +// RecordFlakeIfError records the error (if non-nil) as a flake along with an optional description. +// This can be used as a replacement of framework.ExpectNoError() for non-critical errors that can +// be considered as 'flakes' to avoid causing failures in tests. +func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...interface{}) { + if err == nil { + return + } + msg := fmt.Sprintf("Unexpected error occurred: %v", err) + desc := buildDescription(optionalDescription...) + if desc != "" { + msg = fmt.Sprintf("%v (Description: %v)", msg, desc) + } + log.Logf(msg) + f.lock.Lock() + defer f.lock.Unlock() + f.Flakes = append(f.Flakes, msg) + f.FlakeCount++ +} + +// GetFlakeCount returns the flake count. +func (f *FlakeReport) GetFlakeCount() int { + f.lock.RLock() + defer f.lock.RUnlock() + return f.FlakeCount +} + +// PrintHumanReadable returns string of flake report. +func (f *FlakeReport) PrintHumanReadable() string { + f.lock.RLock() + defer f.lock.RUnlock() + buf := bytes.Buffer{} + buf.WriteString(fmt.Sprintf("FlakeCount: %v\n", f.FlakeCount)) + buf.WriteString("Flakes:\n") + for _, flake := range f.Flakes { + buf.WriteString(fmt.Sprintf("%v\n", flake)) + } + return buf.String() +} + +// PrintJSON returns the summary of flake report with JSON format. +func (f *FlakeReport) PrintJSON() string { + f.lock.RLock() + defer f.lock.RUnlock() + return PrettyPrintJSON(f) +} + +// SummaryKind returns the summary of flake report.
+func (f *FlakeReport) SummaryKind() string { + return "FlakeReport" +} diff --git a/tests/third_party/k8s/framework.go b/tests/third_party/k8s/framework.go new file mode 100644 index 00000000000..3f94e306da8 --- /dev/null +++ b/tests/third_party/k8s/framework.go @@ -0,0 +1,759 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package framework contains provider-independent helper code for +// building and running E2E tests with Ginkgo. The actual Ginkgo test +// suites gets assembled by combining this framework, the optional +// provider support code and specific tests via a separate .go file +// like Kubernetes' test/e2e.go. 
+ +// this file is a copy (and modified) of the file in k8s.io/kubernetes/test/e2e/framework/framework.go @v1.23.17 + +package k8s + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "path" + "strings" + "time" + + "k8s.io/apimachinery/pkg/runtime" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + cacheddiscovery "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/dynamic" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + scaleclient "k8s.io/client-go/scale" + + "github.com/onsi/ginkgo" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +const ( + // DefaultNamespaceDeletionTimeout is timeout duration for waiting for a namespace deletion. + DefaultNamespaceDeletionTimeout = 5 * time.Minute +) + +// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you. +// Eventual goal is to merge this with integration test framework. +type Framework struct { + BaseName string + + // Set together with creating the ClientSet and the namespace. + // Guaranteed to be unique in the cluster even when running the same + // test multiple times in parallel. + UniqueName string + + clientConfig *rest.Config + ClientSet clientset.Interface + KubemarkExternalClusterClientSet clientset.Interface + + DynamicClient dynamic.Interface + + ScalesGetter scaleclient.ScalesGetter + + SkipNamespaceCreation bool // Whether to skip creating a namespace + Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped + namespacesToDelete []*v1.Namespace // Some tests have more than one. 
+ NamespaceDeletionTimeout time.Duration + SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace + + gatherer *ContainerResourceGatherer + // Constraints that passed to a check which is executed after data is gathered to + // see if 99% of results are within acceptable bounds. It has to be injected in the test, + // as expectations vary greatly. Constraints are grouped by the container names. + AddonResourceConstraints map[string]ResourceConstraint + + // logsSizeWaitGroup sync.WaitGroup + // logsSizeCloseChannel chan bool + // logsSizeVerifier *LogsSizeVerifier + + // Flaky operation failures in an e2e test can be captured through this. + flakeReport *FlakeReport + + // To make sure that this framework cleans up after itself, no matter what, + // we install a Cleanup action before each test and clear it after. If we + // should abort, the AfterSuite hook should run all Cleanup actions. + cleanupHandle CleanupActionHandle + + // afterEaches is a map of name to function to be called after each test. These are not + // cleared. The call order is randomized so that no dependencies can grow between + // the various afterEaches + afterEaches map[string]AfterEachActionFunc + + // beforeEachStarted indicates that BeforeEach has started + beforeEachStarted bool + + // configuration for framework's client + Options Options + + // Place where various additional data is stored during test run to be printed to ReportDir, + // or stdout if ReportDir is not set once test ends. + TestSummaries []TestDataSummary + + // TODO(pingcap): add this if needed + // Place to keep ClusterAutoscaler metrics from before test in order to compute delta. + // clusterAutoscalerMetricsBeforeTest e2emetrics.Collection + + // Timeouts contains the custom timeouts used during the test execution. 
+ Timeouts *TimeoutContext +} + +// AfterEachActionFunc is a function that can be called after each test +type AfterEachActionFunc func(f *Framework, failed bool) + +// TestDataSummary is an interface for managing test data. +type TestDataSummary interface { + SummaryKind() string + PrintHumanReadable() string + PrintJSON() string +} + +// Options is a struct for managing test framework options. +type Options struct { + ClientQPS float32 + ClientBurst int + GroupVersion *schema.GroupVersion +} + +// NewFrameworkWithCustomTimeouts makes a framework with with custom timeouts. +func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework { + f := NewDefaultFramework(baseName) + f.Timeouts = timeouts + return f +} + +// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for +// you (you can write additional before/after each functions). +func NewDefaultFramework(baseName string) *Framework { + options := Options{ + ClientQPS: 20, + ClientBurst: 50, + } + return NewFramework(baseName, options, nil) +} + +// NewFramework creates a test framework. +func NewFramework(baseName string, options Options, client clientset.Interface) *Framework { + f := &Framework{ + BaseName: baseName, + AddonResourceConstraints: make(map[string]ResourceConstraint), + Options: options, + ClientSet: client, + Timeouts: NewTimeoutContextWithDefaults(), + } + + f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) { + if !failed { + return + } + if !TestContext.DumpLogsOnFailure { + return + } + if !f.SkipNamespaceCreation { + for _, ns := range f.namespacesToDelete { + DumpAllNamespaceInfo(f.ClientSet, ns.Name) + } + } + }) + + ginkgo.BeforeEach(f.BeforeEach) + ginkgo.AfterEach(f.AfterEach) + + return f +} + +// BeforeEach gets a client and makes a namespace. +func (f *Framework) BeforeEach() { + f.beforeEachStarted = true + + // The fact that we need this feels like a bug in ginkgo. 
+ // https://github.com/onsi/ginkgo/issues/222 + f.cleanupHandle = AddCleanupAction(f.AfterEach) + if f.ClientSet == nil { + ginkgo.By("Creating a kubernetes client") + config, err := LoadConfig() + ExpectNoError(err) + + config.QPS = f.Options.ClientQPS + config.Burst = f.Options.ClientBurst + if f.Options.GroupVersion != nil { + config.GroupVersion = f.Options.GroupVersion + } + if TestContext.KubeAPIContentType != "" { + config.ContentType = TestContext.KubeAPIContentType + } + f.clientConfig = rest.CopyConfig(config) + f.ClientSet, err = clientset.NewForConfig(config) + ExpectNoError(err) + f.DynamicClient, err = dynamic.NewForConfig(config) + ExpectNoError(err) + + // create scales getter, set GroupVersion and NegotiatedSerializer to default values + // as they are required when creating a REST client. + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = scheme.Codecs + } + restClient, err := rest.RESTClientFor(config) + ExpectNoError(err) + discoClient, err := discovery.NewDiscoveryClientForConfig(config) + ExpectNoError(err) + cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient) + restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient) + restMapper.Reset() + resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient) + f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) + + TestContext.CloudConfig.Provider.FrameworkBeforeEach(f) + } + + if !f.SkipNamespaceCreation { + ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName)) + namespace, err := f.CreateNamespace(f.BaseName, map[string]string{ + "e2e-framework": f.BaseName, + }) + ExpectNoError(err) + + f.Namespace = namespace + + if TestContext.VerifyServiceAccount { + ginkgo.By("Waiting for a default service account to be provisioned in namespace") + err = 
WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) + ExpectNoError(err) + ginkgo.By("Waiting for kube-root-ca.crt to be provisioned in namespace") + err = WaitForKubeRootCAInNamespace(f.ClientSet, namespace.Name) + ExpectNoError(err) + } else { + log.Logf("Skipping waiting for service account") + } + f.UniqueName = f.Namespace.GetName() + } else { + // not guaranteed to be unique, but very likely + f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31()) + } + + if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" { + var err error + var nodeMode NodesSet + switch TestContext.GatherKubeSystemResourceUsageData { + case "master": + nodeMode = MasterNodes + case "masteranddns": + nodeMode = MasterAndDNSNodes + default: + nodeMode = AllNodes + } + + f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{ + InKubemark: ProviderIs("kubemark"), + Nodes: nodeMode, + ResourceDataGatheringPeriod: 60 * time.Second, + ProbeDuration: 15 * time.Second, + PrintVerboseLogs: false, + }, nil) + if err != nil { + log.Logf("Error while creating NewResourceUsageGatherer: %v", err) + } else { + go f.gatherer.StartGatheringData() + } + } + + // if TestContext.GatherLogsSizes { + // // TODO(pingcap): removed logsSizeVerifier + // } + + // TODO(pingcap): e2emetrics removed, add it back if needed + + f.flakeReport = NewFlakeReport() +} + +// printSummaries prints summaries of tests. +func printSummaries(summaries []TestDataSummary, testBaseName string) { + now := time.Now() + for i := range summaries { + log.Logf("Printing summary: %v", summaries[i].SummaryKind()) + switch TestContext.OutputPrintType { + case "hr": + if TestContext.ReportDir == "" { + log.Logf(summaries[i].PrintHumanReadable()) + } else { + // TODO: learn to extract test name and append it to the kind instead of timestamp. 
+ filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt") + if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil { + log.Logf("Failed to write file %v with test performance data: %v", filePath, err) + } + } + case "json": + fallthrough + default: + if TestContext.OutputPrintType != "json" { + log.Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType) + } + if TestContext.ReportDir == "" { + log.Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON()) + log.Logf("Finished") + } else { + // TODO: learn to extract test name and append it to the kind instead of timestamp. + filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json") + log.Logf("Writing to %s", filePath) + if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil { + log.Logf("Failed to write file %v with test performance data: %v", filePath, err) + } + } + } + } +} + +// AddAfterEach is a way to add a function to be called after every test. The execution order is intentionally random +// to avoid growing dependencies. If you register the same name twice, it is a coding error and will panic. +func (f *Framework) AddAfterEach(name string, fn AfterEachActionFunc) { + if _, ok := f.afterEaches[name]; ok { + panic(fmt.Sprintf("%q is already registered", name)) + } + + if f.afterEaches == nil { + f.afterEaches = map[string]AfterEachActionFunc{} + } + f.afterEaches[name] = fn +} + +// AfterEach deletes the namespace, after reading its events. +func (f *Framework) AfterEach() { + // If BeforeEach never started AfterEach should be skipped. + // Currently some tests under e2e/storage have this condition. + if !f.beforeEachStarted { + return + } + + RemoveCleanupAction(f.cleanupHandle) + + // This should not happen. 
Given ClientSet is a public field a test must have updated it! + // Error out early before any API calls during cleanup. + if f.ClientSet == nil { + log.Failf("The framework ClientSet must not be nil at this point") + } + + // DeleteNamespace at the very end in defer, to avoid any + // expectation failures preventing deleting the namespace. + defer func() { + nsDeletionErrors := map[string]error{} + // Whether to delete namespace is determined by 3 factors: delete-namespace flag, delete-namespace-on-failure flag and the test result + // if delete-namespace set to false, namespace will always be preserved. + // if delete-namespace is true and delete-namespace-on-failure is false, namespace will be preserved if test failed. + if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) { + for _, ns := range f.namespacesToDelete { + ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) + if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + nsDeletionErrors[ns.Name] = err + + // Dump namespace if we are unable to delete the namespace and the dump was not already performed. + if !ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure { + DumpAllNamespaceInfo(f.ClientSet, ns.Name) + } + } else { + log.Logf("Namespace %v was already deleted", ns.Name) + } + } + } + } else { + if !TestContext.DeleteNamespace { + log.Logf("Found DeleteNamespace=false, skipping namespace deletion!") + } else { + log.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!") + } + } + + // Paranoia-- prevent reuse! + f.Namespace = nil + f.clientConfig = nil + f.ClientSet = nil + f.namespacesToDelete = nil + + // if we had errors deleting, report them now. 
+ if len(nsDeletionErrors) != 0 { + messages := []string{} + for namespaceKey, namespaceErr := range nsDeletionErrors { + messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr)) + } + log.Failf(strings.Join(messages, ",")) + } + }() + + // run all aftereach functions in random order to ensure no dependencies grow + for _, afterEachFn := range f.afterEaches { + afterEachFn(f, ginkgo.CurrentGinkgoTestDescription().Failed) + } + + if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil { + ginkgo.By("Collecting resource usage data") + summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints) + defer ExpectNoError(resourceViolationError) + f.TestSummaries = append(f.TestSummaries, summary) + } + + // if TestContext.GatherLogsSizes { + // // TODO(pingcap): removed logsSizeVerifier + // } + + // if TestContext.GatherMetricsAfterTest != "false" { + // // TODO(pingcap): removed metrics grabber + // } + + TestContext.CloudConfig.Provider.FrameworkAfterEach(f) + + // Report any flakes that were observed in the e2e test and reset. + if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 { + f.TestSummaries = append(f.TestSummaries, f.flakeReport) + f.flakeReport = nil + } + + printSummaries(f.TestSummaries, f.BaseName) + + // Check whether all nodes are ready after the test. + // This is explicitly done at the very end of the test, to avoid + // e.g. not removing namespace in case of this failure. + if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil { + log.Failf("All nodes should be ready after test, %v", err) + } +} + +// DeleteNamespace can be used to delete a namespace. Additionally it can be used to +// dump namespace information so as it can be used as an alternative of framework +// deleting the namespace towards the end. 
+func (f *Framework) DeleteNamespace(name string) {
+	defer func() {
+		err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
+		if err != nil && !apierrors.IsNotFound(err) {
+			log.Logf("error deleting namespace %s: %v", name, err)
+			return
+		}
+		err = WaitForNamespacesDeleted(f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout)
+		if err != nil {
+			log.Logf("error deleting namespace %s: %v", name, err)
+			return
+		}
+		// remove deleted namespace from namespacesToDelete list
+		for i, ns := range f.namespacesToDelete {
+			if ns == nil {
+				continue
+			}
+			if ns.Name == name {
+				f.namespacesToDelete = append(f.namespacesToDelete[:i], f.namespacesToDelete[i+1:]...)
+			}
+		}
+	}()
+	// if current test failed then we should dump namespace information
+	if !f.SkipNamespaceCreation && ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
+		DumpAllNamespaceInfo(f.ClientSet, name)
+	}
+
+}
+
+// CreateNamespace creates a namespace for e2e testing.
+func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
+	createTestingNS := TestContext.CreateTestingNS
+	if createTestingNS == nil {
+		createTestingNS = CreateTestingNS
+	}
+	ns, err := createTestingNS(baseName, f.ClientSet, labels)
+	// check ns instead of err to see if it's nil as we may
+	// fail to create serviceAccount in it.
+	f.AddNamespacesToDelete(ns)
+
+	if err == nil && !f.SkipPrivilegedPSPBinding {
+		CreatePrivilegedPSPBinding(f.ClientSet, ns.Name)
+	}
+
+	return ns, err
+}
+
+// RecordFlakeIfError records flakiness info if error happens.
+// NOTE: This function is not used at any places yet, but we are in progress for https://github.com/kubernetes/kubernetes/issues/66239 which requires this. Please don't remove this.
+func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
+	f.flakeReport.RecordFlakeIfError(err, optionalDescription)
+}
+
+// AddNamespacesToDelete adds one or more namespaces to be deleted when the test
+// completes.
+func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
+	for _, ns := range namespaces {
+		if ns == nil {
+			continue
+		}
+		f.namespacesToDelete = append(f.namespacesToDelete, ns)
+
+	}
+}
+
+// ClientConfig is an externally accessible method for reading the kube client config.
+func (f *Framework) ClientConfig() *rest.Config {
+	ret := rest.CopyConfig(f.clientConfig)
+	// json is least common denominator
+	ret.ContentType = runtime.ContentTypeJSON
+	ret.AcceptContentTypes = runtime.ContentTypeJSON
+	return ret
+}
+
+// KubeUser is a struct for managing kubernetes user info.
+type KubeUser struct {
+	Name string `yaml:"name"`
+	User struct {
+		Username string `yaml:"username"`
+		Password string `yaml:"password" datapolicy:"password"`
+		Token    string `yaml:"token" datapolicy:"token"`
+	} `yaml:"user"`
+}
+
+// KubeCluster is a struct for managing kubernetes cluster info.
+type KubeCluster struct {
+	Name    string `yaml:"name"`
+	Cluster struct {
+		CertificateAuthorityData string `yaml:"certificate-authority-data"`
+		Server                   string `yaml:"server"`
+	} `yaml:"cluster"`
+}
+
+// KubeConfig is a struct for managing kubernetes config.
+type KubeConfig struct {
+	Contexts []struct {
+		Name    string `yaml:"name"`
+		Context struct {
+			Cluster string `yaml:"cluster"`
+			User    string
+		} `yaml:"context"`
+	} `yaml:"contexts"`
+
+	Clusters []KubeCluster `yaml:"clusters"`
+
+	Users []KubeUser `yaml:"users"`
+}
+
+// FindUser returns user info for the specified user name.
+func (kc *KubeConfig) FindUser(name string) *KubeUser {
+	for _, user := range kc.Users {
+		if user.Name == name {
+			return &user
+		}
+	}
+	return nil
+}
+
+// FindCluster returns cluster info for the specified cluster name.
+func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
+	for _, cluster := range kc.Clusters {
+		if cluster.Name == name {
+			return &cluster
+		}
+	}
+	return nil
+}
+
+// ConformanceIt is a wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
+func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
+	return ginkgo.It(text+" [Conformance]", body, timeout...)
+}
+
+// PodStateVerification represents a verification of pod state.
+// Any time you have a set of pods that you want to operate against or query,
+// this struct can be used to declaratively identify those pods.
+type PodStateVerification struct {
+	// Optional: only pods that have k=v labels will pass this filter.
+	Selectors map[string]string
+
+	// Required: The phases which are valid for your pod.
+	ValidPhases []v1.PodPhase
+
+	// Optional: only pods passing this function will pass the filter
+	// Verify a pod.
+	// As an optimization, in addition to specifying filter (boolean),
+	// this function allows specifying an error as well.
+	// The error indicates that the polling of the pod spectrum should stop.
+	Verify func(v1.Pod) (bool, error)
+
+	// Optional: only pods with this name will pass the filter.
+	PodName string
+}
+
+// ClusterVerification is a struct for a verification of cluster state.
+type ClusterVerification struct {
+	client    clientset.Interface
+	namespace *v1.Namespace // pointer rather than string, since ns isn't created until before each.
+	podState  PodStateVerification
+}
+
+// NewClusterVerification creates a new cluster verification.
+func (f *Framework) NewClusterVerification(namespace *v1.Namespace, filter PodStateVerification) *ClusterVerification {
+	return &ClusterVerification{
+		f.ClientSet,
+		namespace,
+		filter,
+	}
+}
+
+func passesPodNameFilter(pod v1.Pod, name string) bool {
+	return name == "" || strings.Contains(pod.Name, name)
+}
+
+func passesVerifyFilter(pod v1.Pod, verify func(p v1.Pod) (bool, error)) (bool, error) {
+	if verify == nil {
+		return true, nil
+	}
+
+	verified, err := verify(pod)
+	// If an error is returned, by definition, pod verification fails
+	if err != nil {
+		return false, err
+	}
+	return verified, nil
+}
+
+func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
+	passesPhaseFilter := false
+	for _, phase := range validPhases {
+		if pod.Status.Phase == phase {
+			passesPhaseFilter = true
+		}
+	}
+	return passesPhaseFilter
+}
+
+// filterLabels returns a list of pods which have labels.
+func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
+	var err error
+	var selector labels.Selector
+	var pl *v1.PodList
+	// List pods based on selectors. This might be a tiny optimization rather than filtering
+	// everything manually.
+	if len(selectors) > 0 {
+		selector = labels.SelectorFromSet(labels.Set(selectors))
+		options := metav1.ListOptions{LabelSelector: selector.String()}
+		pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), options)
+	} else {
+		pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+	}
+	return pl, err
+}
+
+// filter filters pods which pass a filter. It can be used to compose
+// the more useful abstractions like ForEach, WaitFor, and so on, which
+// can be used directly by tests.
+func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
+	if len(p.ValidPhases) == 0 || namespace == nil {
+		panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
+	}
+
+	ns := namespace.Name
+	pl, err := filterLabels(p.Selectors, c, ns) // Build a v1.PodList to operate against.
+	log.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
+	if len(pl.Items) == 0 || err != nil {
+		return pl.Items, err
+	}
+
+	unfilteredPods := pl.Items
+	filteredPods := []v1.Pod{}
+ReturnPodsSoFar:
+	// Next: Pod must match at least one of the states that the user specified
+	for _, pod := range unfilteredPods {
+		if !(passesPhasesFilter(pod, p.ValidPhases) && passesPodNameFilter(pod, p.PodName)) {
+			continue
+		}
+		passesVerify, err := passesVerifyFilter(pod, p.Verify)
+		if err != nil {
+			log.Logf("Error detected on %v : %v !", pod.Name, err)
+			break ReturnPodsSoFar
+		}
+		if passesVerify {
+			filteredPods = append(filteredPods, pod)
+		}
+	}
+	return filteredPods, err
+}
+
+// WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
+// definition.
+func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
+	pods := []v1.Pod{}
+	var returnedErr error
+
+	err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
+		pods, returnedErr = cl.podState.filter(cl.client, cl.namespace)
+
+		// Failure
+		if returnedErr != nil {
+			log.Logf("Cutting polling short: We got an error from the pod filtering layer.")
+			// stop polling if the pod filtering returns an error. that should never happen.
+			// it indicates, for example, that the client is broken or something non-pod related.
+			return false, returnedErr
+		}
+		log.Logf("Found %v / %v", len(pods), atLeast)
+
+		// Success
+		if len(pods) >= atLeast {
+			return true, nil
+		}
+		// Keep trying...
+		return false, nil
+	})
+	log.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
+	return pods, err
+}
+
+// WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong.
+func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) { + pods, err := cl.WaitFor(atLeast, timeout) + if err != nil || len(pods) < atLeast { + log.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err) + } +} + +// ForEach runs a function against every verifiable pod. Be warned that this doesn't wait for "n" pods to verify, +// so it may return very quickly if you have strict pod state requirements. +// +// For example, if you require at least 5 pods to be running before your test will pass, +// its smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach. +func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error { + pods, err := cl.podState.filter(cl.client, cl.namespace) + if err == nil { + if len(pods) == 0 { + log.Failf("No pods matched the filter.") + } + log.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods)) + for _, p := range pods { + podFunc(p) + } + } else { + log.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err) + } + + return err +} diff --git a/tests/third_party/k8s/ginkgowrapper/wrapper.go b/tests/third_party/k8s/ginkgowrapper/wrapper.go new file mode 100644 index 00000000000..a22def44930 --- /dev/null +++ b/tests/third_party/k8s/ginkgowrapper/wrapper.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go @v1.23.17 + +// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic +// with structured data instead of a constant string. +package ginkgowrapper + +import ( + "bufio" + "bytes" + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo" +) + +// FailurePanic is the value that will be panicked from Fail. +type FailurePanic struct { + Message string // The failure message passed to Fail + Filename string // The filename that is the source of the failure + Line int // The line number of the filename that is the source of the failure + FullStackTrace string // A full stack trace starting at the source of the failure +} + +// String makes FailurePanic look like the old Ginkgo panic when printed. +func (FailurePanic) String() string { return ginkgo.GINKGO_PANIC } + +// Fail wraps ginkgo.Fail so that it panics with more useful +// information about the failure. This function will panic with a +// FailurePanic. 
+func Fail(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + + _, file, line, _ := runtime.Caller(skip) + fp := FailurePanic{ + Message: message, + Filename: file, + Line: line, + FullStackTrace: pruneStack(skip), + } + + defer func() { + e := recover() + if e != nil { + panic(fp) + } + }() + + ginkgo.Fail(message, skip) +} + +// ginkgo adds a lot of test running infrastructure to the stack, so +// we filter those out +var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`) + +func pruneStack(skip int) string { + skip += 2 // one for pruneStack and one for debug.Stack + stack := debug.Stack() + scanner := bufio.NewScanner(bytes.NewBuffer(stack)) + var prunedStack []string + + // skip the top of the stack + for i := 0; i < 2*skip+1; i++ { + scanner.Scan() + } + + for scanner.Scan() { + if stackSkipPattern.Match(scanner.Bytes()) { + scanner.Scan() // these come in pairs + } else { + prunedStack = append(prunedStack, scanner.Text()) + scanner.Scan() // these come in pairs + prunedStack = append(prunedStack, scanner.Text()) + } + } + + return strings.Join(prunedStack, "\n") +} diff --git a/tests/third_party/k8s/kubectl/kubectl.utils.go b/tests/third_party/k8s/kubectl/kubectl.utils.go new file mode 100644 index 00000000000..6441b1c56a4 --- /dev/null +++ b/tests/third_party/k8s/kubectl/kubectl.utils.go @@ -0,0 +1,144 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go @v1.23.17 + +package kubectl + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + podutil "github.com/pingcap/tidb-operator/pkg/third_party/k8s" + e2elog "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + e2epod "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod" +) + +// TestKubeconfig is a struct containing the needed attributes from TestContext and Framework(Namespace). +type TestKubeconfig struct { + CertDir string + Host string + KubeConfig string + KubeContext string + KubectlPath string + Namespace string // Every test has at least one namespace unless creation is skipped +} + +// NewTestKubeconfig returns a new Kubeconfig struct instance. +func NewTestKubeconfig(certdir, host, kubeconfig, kubecontext, kubectlpath, namespace string) *TestKubeconfig { + return &TestKubeconfig{ + CertDir: certdir, + Host: host, + KubeConfig: kubeconfig, + KubeContext: kubecontext, + KubectlPath: kubectlpath, + Namespace: namespace, + } +} + +// KubectlCmd runs the kubectl executable through the wrapper script. +func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd { + defaultArgs := []string{} + + // Reference a --server option so tests can run anywhere. 
+	if tk.Host != "" {
+		defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+tk.Host)
+	}
+	if tk.KubeConfig != "" {
+		defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+tk.KubeConfig)
+
+		// Reference the KubeContext
+		if tk.KubeContext != "" {
+			defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+tk.KubeContext)
+		}
+
+	} else {
+		if tk.CertDir != "" {
+			defaultArgs = append(defaultArgs,
+				fmt.Sprintf("--certificate-authority=%s", filepath.Join(tk.CertDir, "ca.crt")),
+				fmt.Sprintf("--client-certificate=%s", filepath.Join(tk.CertDir, "kubecfg.crt")),
+				fmt.Sprintf("--client-key=%s", filepath.Join(tk.CertDir, "kubecfg.key")))
+		}
+	}
+	if tk.Namespace != "" {
+		defaultArgs = append(defaultArgs, fmt.Sprintf("--namespace=%s", tk.Namespace))
+	}
+	kubectlArgs := append(defaultArgs, args...)
+
+	// We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
+	// and so on.
+	cmd := exec.Command(tk.KubectlPath, kubectlArgs...)
+
+	// caller will invoke this and wait on it.
+	return cmd
+}
+
+// LogFailedContainers runs `kubectl logs` on failed containers.
+func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logFunc("Error getting pods in namespace '%s': %v", ns, err) + return + } + logFunc("Running kubectl logs on non-ready containers in %v", ns) + for _, pod := range podList.Items { + if res, err := PodRunningReady(&pod); !res || err != nil { + kubectlLogPod(c, pod, "", e2elog.Logf) + } + } +} + +func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { + for _, container := range pod.Spec.Containers { + if strings.Contains(container.Name, containerNameSubstr) { + // Contains() matches all strings if substr is empty + logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name) + if err != nil { + logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name) + if err != nil { + logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err) + } + } + logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName) + logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name) + } + } +} + +// PodRunningReady checks whether pod p's phase is running and it has a ready +// condition of status true. +// This function is copied from k8s.io/kubernetes/test/utils/conditions.go @v1.23.17 +func PodRunningReady(p *v1.Pod) (bool, error) { + // Check the phase is running. + if p.Status.Phase != v1.PodRunning { + return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'", + p.ObjectMeta.Name, p.Spec.NodeName, v1.PodRunning, p.Status.Phase) + } + // Check the ready condition is true. 
+ if !podutil.IsPodReady(p) { + return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v", + p.ObjectMeta.Name, p.Spec.NodeName, v1.PodReady, v1.ConditionTrue, p.Status.Conditions) + } + return true, nil +} diff --git a/tests/third_party/k8s/log/logger.go b/tests/third_party/k8s/log/logger.go new file mode 100644 index 00000000000..57178fa3ba6 --- /dev/null +++ b/tests/third_party/k8s/log/logger.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/log/logger.go @v1.23.17 + +package log + +import ( + "fmt" + "time" + + "github.com/onsi/ginkgo" + + e2eginkgowrapper "github.com/pingcap/tidb-operator/tests/third_party/k8s/ginkgowrapper" +) + +func nowStamp() string { + return time.Now().Format(time.StampMilli) +} + +func log(level string, format string, args ...interface{}) { + fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) +} + +// Logf logs the info. +func Logf(format string, args ...interface{}) { + log("INFO", format, args...) +} + +// Failf logs the fail info. +func Failf(format string, args ...interface{}) { + FailfWithOffset(1, format, args...) +} + +// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller +// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f"). 
+func FailfWithOffset(offset int, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log("FAIL", msg) + e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset) +} diff --git a/tests/third_party/k8s/node/resource.go b/tests/third_party/k8s/node/resource.go new file mode 100644 index 00000000000..ff6688328f6 --- /dev/null +++ b/tests/third_party/k8s/node/resource.go @@ -0,0 +1,289 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/node/resource.go @v1.23.17 + +package node + +import ( + "fmt" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + + e2elog "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +const ( + // poll is how often to Poll pods, nodes and claims. + poll = 2 * time.Second + + // singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent + // transient failures from failing tests. + singleCallTimeout = 5 * time.Minute +) + +var ( + // unreachableTaintTemplate is the taint for when a node becomes unreachable. + // Copied from pkg/controller/nodelifecycle to avoid pulling extra dependencies + unreachableTaintTemplate = &v1.Taint{ + Key: v1.TaintNodeUnreachable, + Effect: v1.TaintEffectNoExecute, + } + + // notReadyTaintTemplate is the taint for when a node is not ready for executing pods. 
+ // Copied from pkg/controller/nodelifecycle to avoid pulling extra dependencies + notReadyTaintTemplate = &v1.Taint{ + Key: v1.TaintNodeNotReady, + Effect: v1.TaintEffectNoExecute, + } +) + +func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool { + // Check the node readiness condition (logging all). + for _, cond := range node.Status.Conditions { + // Ensure that the condition type and the status matches as desired. + if cond.Type == conditionType { + // For NodeReady condition we need to check Taints as well + if cond.Type == v1.NodeReady { + hasNodeControllerTaints := false + // For NodeReady we need to check if Taints are gone as well + taints := node.Spec.Taints + for _, taint := range taints { + if taint.MatchTaint(unreachableTaintTemplate) || taint.MatchTaint(notReadyTaintTemplate) { + hasNodeControllerTaints = true + break + } + } + if wantTrue { + if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints { + return true + } + msg := "" + if !hasNodeControllerTaints { + msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v", + conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message) + } else { + msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure", + conditionType, node.Name, cond.Status == v1.ConditionTrue, taints) + } + if !silent { + e2elog.Logf(msg) + } + return false + } + // TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default + if cond.Status != v1.ConditionTrue { + return true + } + if !silent { + e2elog.Logf("Condition %s of node %s is %v instead of %t. 
Reason: %v, message: %v", + conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message) + } + return false + } + if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) { + return true + } + if !silent { + e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v", + conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message) + } + return false + } + + } + if !silent { + e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name) + } + return false +} + +// IsConditionSetAsExpected returns a wantTrue value if the node has a match to the conditionType, otherwise returns an opposite value of the wantTrue with detailed logging. +func IsConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool { + return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false) +} + +// IsConditionSetAsExpectedSilent returns a wantTrue value if the node has a match to the conditionType, otherwise returns an opposite value of the wantTrue. +func IsConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool { + return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true) +} + +// isConditionUnset returns true if conditions of the given node do not have a match to the given conditionType, otherwise false. 
+func isConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool { + for _, cond := range node.Status.Conditions { + if cond.Type == conditionType { + return false + } + } + return true +} + +// Filter filters nodes in NodeList in place, removing nodes that do not +// satisfy the given condition +func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) { + var l []v1.Node + + for _, node := range nodeList.Items { + if fn(node) { + l = append(l, node) + } + } + nodeList.Items = l +} + +// GetReadySchedulableNodes addresses the common use case of getting nodes you can do work on. +// 1) Needs to be schedulable. +// 2) Needs to be ready. +// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely. +// If there are no nodes that are both ready and schedulable, this will return an error. +func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) { + nodes, err = checkWaitListSchedulableNodes(c) + if err != nil { + return nil, fmt.Errorf("listing schedulable nodes error: %s", err) + } + Filter(nodes, func(node v1.Node) bool { + return IsNodeSchedulable(&node) && isNodeUntainted(&node) + }) + if len(nodes.Items) == 0 { + return nil, fmt.Errorf("there are currently no ready, schedulable nodes in the cluster") + } + return nodes, nil +} + +// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints. +// TODO: need to discuss whether to return bool and error type +func isNodeUntainted(node *v1.Node) bool { + return isNodeUntaintedWithNonblocking(node, "") +} + +// isNodeUntaintedWithNonblocking tests whether a fake pod can be scheduled on "node" +// but allows for taints in the list of non-blocking taints.
+func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) bool { + fakePod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-not-scheduled", + Namespace: "fake-not-scheduled", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "fake-not-scheduled", + Image: "fake-not-scheduled", + }, + }, + }, + } + + // Simple lookup for nonblocking taints based on comma-delimited list. + nonblockingTaintsMap := map[string]struct{}{} + for _, t := range strings.Split(nonblockingTaints, ",") { + if strings.TrimSpace(t) != "" { + nonblockingTaintsMap[strings.TrimSpace(t)] = struct{}{} + } + } + + n := node + if len(nonblockingTaintsMap) > 0 { + nodeCopy := node.DeepCopy() + nodeCopy.Spec.Taints = []v1.Taint{} + for _, v := range node.Spec.Taints { + if _, isNonblockingTaint := nonblockingTaintsMap[v.Key]; !isNonblockingTaint { + nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v) + } + } + n = nodeCopy + } + return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, fakePod.Spec.Tolerations) +} + +func toleratesTaintsWithNoScheduleNoExecuteEffects(taints []v1.Taint, tolerations []v1.Toleration) bool { + filteredTaints := []v1.Taint{} + for _, taint := range taints { + if taint.Effect == v1.TaintEffectNoExecute || taint.Effect == v1.TaintEffectNoSchedule { + filteredTaints = append(filteredTaints, taint) + } + } + + toleratesTaint := func(taint v1.Taint) bool { + for _, toleration := range tolerations { + if toleration.ToleratesTaint(&taint) { + return true + } + } + + return false + } + + for _, taint := range filteredTaints { + if !toleratesTaint(taint) { + return false + } + } + + return true +} + +// IsNodeSchedulable returns true if: +// 1) doesn't have "unschedulable" field set +// 2) it also returns true from IsNodeReady +func IsNodeSchedulable(node *v1.Node) bool { + if node == nil { + return false + } + return !node.Spec.Unschedulable && 
IsNodeReady(node) +} + +// IsNodeReady returns true if: +// 1) it's Ready condition is set to true +// 2) doesn't have NetworkUnavailable condition set to true +func IsNodeReady(node *v1.Node) bool { + nodeReady := IsConditionSetAsExpected(node, v1.NodeReady, true) + networkReady := isConditionUnset(node, v1.NodeNetworkUnavailable) || + IsConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false) + return nodeReady && networkReady +} + +// hasNonblockingTaint returns true if the node contains at least +// one taint with a key matching the regexp. +func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool { + if node == nil { + return false + } + + // Simple lookup for nonblocking taints based on comma-delimited list. + nonblockingTaintsMap := map[string]struct{}{} + for _, t := range strings.Split(nonblockingTaints, ",") { + if strings.TrimSpace(t) != "" { + nonblockingTaintsMap[strings.TrimSpace(t)] = struct{}{} + } + } + + for _, taint := range node.Spec.Taints { + if _, hasNonblockingTaint := nonblockingTaintsMap[taint.Key]; hasNonblockingTaint { + return true + } + } + + return false +} diff --git a/tests/third_party/k8s/node/wait.go b/tests/third_party/k8s/node/wait.go new file mode 100644 index 00000000000..63a8d625b21 --- /dev/null +++ b/tests/third_party/k8s/node/wait.go @@ -0,0 +1,195 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/node/wait.go @v1.23.17 + +package node + +import ( + "context" + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + + e2elog "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +const sleepTime = 20 * time.Second + +// WaitConditionToBe returns whether node "name's" condition state matches wantTrue +// within timeout. If wantTrue is true, it will ensure the node condition status +// is ConditionTrue; if it's false, it ensures the node condition is in any state +// other than ConditionTrue (e.g. not true or unknown). +func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool { + e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + e2elog.Logf("Couldn't get node %s", name) + continue + } + + if IsConditionSetAsExpected(node, conditionType, wantTrue) { + return true + } + } + e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) + return false +} + +// WaitForNodeToBeReady returns whether node name is ready within timeout. +func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool { + return WaitConditionToBe(c, name, v1.NodeReady, true, timeout) +} + +// CheckReady waits up to timeout for cluster to has desired size and +// there is no not-ready nodes in it. By cluster size we mean number of schedulable Nodes. 
+func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) { + nodes, err := waitListSchedulableNodes(c) + if err != nil { + e2elog.Logf("Failed to list nodes: %v", err) + continue + } + numNodes := len(nodes.Items) + + // Filter out not-ready nodes. + Filter(nodes, func(node v1.Node) bool { + nodeReady := IsConditionSetAsExpected(&node, v1.NodeReady, true) + networkReady := isConditionUnset(&node, v1.NodeNetworkUnavailable) || IsConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false) + return nodeReady && networkReady + }) + numReady := len(nodes.Items) + + if numNodes == size && numReady == size { + e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size) + return nodes.Items, nil + } + e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady) + } + return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size) +} + +// waitListSchedulableNodes is a wrapper around listing nodes supporting retries. +func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { + var nodes *v1.NodeList + var err error + if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) { + nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ + "spec.unschedulable": "false", + }.AsSelector().String()}) + if err != nil { + return false, err + } + return true, nil + }) != nil { + return nodes, err + } + return nodes, nil +} + +// checkWaitListSchedulableNodes is a wrapper around listing nodes supporting retries. +func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { + nodes, err := waitListSchedulableNodes(c) + if err != nil { + return nil, fmt.Errorf("error: %s. 
Non-retryable failure or timed out while listing nodes for e2e cluster", err) + } + return nodes, nil +} + +// CheckReadyForTests returns a function which will return 'true' once the number of ready nodes is above the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests). +func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) { + attempt := 0 + return func() (bool, error) { + if allowedNotReadyNodes == -1 { + return true, nil + } + attempt++ + var nodesNotReadyYet []v1.Node + opts := metav1.ListOptions{ + ResourceVersion: "0", + // remove uncordoned nodes from our calculation, TODO refactor if node v2 API removes that semantic. + FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(), + } + allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts) + if err != nil { + var terminalListNodesErr error + e2elog.Logf("Unexpected error listing nodes: %v", err) + if attempt >= 3 { + terminalListNodesErr = err + } + return false, terminalListNodesErr + } + for _, node := range allNodes.Items { + if !readyForTests(&node, nonblockingTaints) { + nodesNotReadyYet = append(nodesNotReadyYet, node) + } + } + // Framework allows for nodes to be non-ready, + // to make it possible e.g. for incorrect deployment of some small percentage + // of nodes (which we allow in cluster validation). Some nodes that are not + // provisioned correctly at startup will never become ready (e.g. when something + // won't install correctly), so we can't expect them to be ready at any point. + // + // We log the *reason* why nodes are not schedulable, specifically, its usually the network not being available. + if len(nodesNotReadyYet) > 0 { + // In large clusters, log them only every 10th pass. 
+ if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 { + e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes) + for _, node := range nodesNotReadyYet { + e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]", + node.Name, + IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true), + IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false), + node.Spec.Taints, + nonblockingTaints, + ) + + } + if len(nodesNotReadyYet) > allowedNotReadyNodes { + ready := len(allNodes.Items) - len(nodesNotReadyYet) + remaining := len(nodesNotReadyYet) - allowedNotReadyNodes + e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining) + } + } + } + return len(nodesNotReadyYet) <= allowedNotReadyNodes, nil + } +} + +// readyForTests determines whether or not we should continue waiting for the nodes +// to enter a testable state. By default this means it is schedulable, NodeReady, and untainted. +// Nodes with taints nonblocking taints are permitted to have that taint and +// also have their node.Spec.Unschedulable field ignored for the purposes of this function. +func readyForTests(node *v1.Node, nonblockingTaints string) bool { + if hasNonblockingTaint(node, nonblockingTaints) { + // If the node has one of the nonblockingTaints taints; just check that it is ready + // and don't require node.Spec.Unschedulable to be set either way. 
+ if !IsNodeReady(node) || !isNodeUntaintedWithNonblocking(node, nonblockingTaints) { + return false + } + } else { + if !IsNodeSchedulable(node) || !isNodeUntainted(node) { + return false + } + } + return true +} diff --git a/tests/third_party/k8s/pod/resource.go b/tests/third_party/k8s/pod/resource.go new file mode 100644 index 00000000000..b96d2e9d8b0 --- /dev/null +++ b/tests/third_party/k8s/pod/resource.go @@ -0,0 +1,233 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/pod/resource.go @v1.23.17 + +package pod + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubectl/pkg/util/podutils" + + e2elog "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that +// the pod has already reached completed state. +var errPodCompleted = fmt.Errorf("pod ran to completion") + +// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of +// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will +// have their logs fetched.
+const LabelLogOnPodFailure = "log-on-pod-failure" + +func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + switch pod.Status.Phase { + case v1.PodRunning: + return true, nil + case v1.PodFailed, v1.PodSucceeded: + return false, errPodCompleted + } + return false, nil + } +} + +func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + switch pod.Status.Phase { + case v1.PodFailed, v1.PodSucceeded: + e2elog.Logf("The status of Pod %s is %s which is unexpected", podName, pod.Status.Phase) + return false, errPodCompleted + case v1.PodRunning: + e2elog.Logf("The status of Pod %s is %s (Ready = %v)", podName, pod.Status.Phase, podutils.IsPodReady(pod)) + return podutils.IsPodReady(pod), nil + } + e2elog.Logf("The status of Pod %s is %s, waiting for it to be Running (with Ready = true)", podName, pod.Status.Phase) + return false, nil + } +} + +// LogPodStates logs basic info of provided pods for debugging. +func LogPodStates(pods []v1.Pod) { + // Find maximum widths for pod, node, and phase strings for column printing. + maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE") + for i := range pods { + pod := &pods[i] + if len(pod.ObjectMeta.Name) > maxPodW { + maxPodW = len(pod.ObjectMeta.Name) + } + if len(pod.Spec.NodeName) > maxNodeW { + maxNodeW = len(pod.Spec.NodeName) + } + if len(pod.Status.Phase) > maxPhaseW { + maxPhaseW = len(pod.Status.Phase) + } + } + // Increase widths by one to separate by a single space. + maxPodW++ + maxNodeW++ + maxPhaseW++ + maxGraceW++ + + // Log pod info. 
* does space padding, - makes them left-aligned. + e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", + maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS") + for _, pod := range pods { + grace := "" + if pod.DeletionGracePeriodSeconds != nil { + grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds) + } + e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", + maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions) + } + e2elog.Logf("") // Final empty line helps for readability. +} + +// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows +// why pods crashed and since it is in the API, it's fast to retrieve. +func logPodTerminationMessages(pods []v1.Pod) { + for _, pod := range pods { + for _, status := range pod.Status.InitContainerStatuses { + if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { + e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) + } + } + for _, status := range pod.Status.ContainerStatuses { + if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { + e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) + } + } + } +} + +// logPodLogs logs the container logs from pods in the given namespace. This can be helpful for debugging +// issues that do not cause the container to fail (e.g.: network connectivity issues) +// We will log the Pods that have the LabelLogOnPodFailure label. If there aren't any, we default to +// logging only the first 5 Pods. 
This requires the reportDir to be set, and the pods are logged into: +// {report_dir}/pods/{namespace}/{pod}/{container_name}/logs.txt +func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) { + if reportDir == "" { + return + } + + var logPods []v1.Pod + for _, pod := range pods { + if _, ok := pod.Labels[LabelLogOnPodFailure]; ok { + logPods = append(logPods, pod) + } + } + maxPods := len(logPods) + + // There are no pods with the LabelLogOnPodFailure label, we default to the first 5 Pods. + if maxPods == 0 { + logPods = pods + maxPods = len(pods) + if maxPods > 5 { + maxPods = 5 + } + } + + tailLen := 42 + for i := 0; i < maxPods; i++ { + pod := logPods[i] + for _, container := range pod.Spec.Containers { + logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen) + if err != nil { + e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err) + continue + } + + logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name) + err = os.MkdirAll(logDir, 0755) + if err != nil { + e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err) + continue + } + + logPath := filepath.Join(logDir, "logs.txt") + err = os.WriteFile(logPath, []byte(logs), 0644) + if err != nil { + e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err) + } + } + } +} + +// DumpAllPodInfoForNamespace logs all pod information for a given namespace. +func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) { + pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + e2elog.Logf("unable to fetch pod debug info: %v", err) + } + LogPodStates(pods.Items) + logPodTerminationMessages(pods.Items) + logPodLogs(c, namespace, pods.Items, reportDir) +} + +// GetPodLogs returns the logs of the specified container (namespace/pod/container). 
+func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(c, namespace, podName, containerName, false, nil, nil) +} + +// GetPreviousPodLogs returns the logs of the previous instance of the +// specified container (namespace/pod/container). +func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(c, namespace, podName, containerName, true, nil, nil) +} + +// utility function for gomega Eventually +func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) { + request := c.CoreV1().RESTClient().Get(). + Resource("pods"). + Namespace(namespace). + Name(podName).SubResource("log"). + Param("container", containerName). + Param("previous", strconv.FormatBool(previous)) + if sinceTime != nil { + request.Param("sinceTime", sinceTime.Format(time.RFC3339)) + } + if tailLines != nil { + request.Param("tailLines", strconv.Itoa(*tailLines)) + } + logs, err := request.Do(context.TODO()).Raw() + if err != nil { + return "", err + } + if strings.Contains(string(logs), "Internal Error") { + return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q", string(logs)) + } + return string(logs), err +} diff --git a/tests/third_party/k8s/pod/wait.go b/tests/third_party/k8s/pod/wait.go new file mode 100644 index 00000000000..6ac1bacbda6 --- /dev/null +++ b/tests/third_party/k8s/pod/wait.go @@ -0,0 +1,329 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/pod/wait.go @v1.23.17 + +package pod + +import ( + "bytes" + "context" + "errors" + "fmt" + "text/tabwriter" + "time" + + "github.com/onsi/ginkgo" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubectl/pkg/util/podutils" + + podutil "github.com/pingcap/tidb-operator/pkg/third_party/k8s" + e2elog "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +const ( + // podStartTimeout is how long to wait for the pod to be started. + podStartTimeout = 5 * time.Minute + + // poll is how often to poll pods, nodes and claims. + poll = 2 * time.Second +) + +type podCondition func(pod *v1.Pod) (bool, error) + +// errorBadPodsStates create error message of basic info of bad pods for debugging. +func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration, err error) string { + errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout) + if err != nil { + errStr += fmt.Sprintf("Last error: %s\n", err) + } + // Print bad pods info only if there are fewer than 10 bad pods + if len(badPods) > 10 { + return errStr + "There are too many bad pods. Please check log for details." 
+ } + + buf := bytes.NewBuffer(nil) + w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS") + for _, badPod := range badPods { + grace := "" + if badPod.DeletionGracePeriodSeconds != nil { + grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds) + } + podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v", + badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions) + fmt.Fprintln(w, podInfo) + } + w.Flush() + return errStr + buf.String() +} + +// WaitForPodsRunningReady waits up to timeout to ensure that all pods in +// namespace ns are either running and ready, or failed but controlled by a +// controller. Also, it ensures that at least minPods are running and +// ready. It has separate behavior from other 'wait for' pods functions in +// that it requests the list of pods on every iteration. This is useful, for +// example, in cluster startup, because the number of pods increases while +// waiting. All pods that are in SUCCESS state are not counted. +// +// If ignoreLabels is not empty, pods matching this selector are ignored. +// +// If minPods or allowedNotReadyPods are -1, this method returns immediately +// without waiting. 
+func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error { + if minPods == -1 || allowedNotReadyPods == -1 { + return nil + } + + ignoreSelector := labels.SelectorFromSet(map[string]string{}) + start := time.Now() + e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", + timeout, minPods, ns) + var ignoreNotReady bool + badPods := []v1.Pod{} + desiredPods := 0 + notReady := int32(0) + var lastAPIError error + + if wait.PollImmediate(poll, timeout, func() (bool, error) { + // We get the new list of pods, replication controllers, and + // replica sets in every iteration because more pods come + // online during startup and we want to ensure they are also + // checked. + replicas, replicaOk := int32(0), int32(0) + // Clear API error from the last attempt in case the following calls succeed. + lastAPIError = nil + + rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err) + lastAPIError = err + return false, err + } + for _, rc := range rcList.Items { + replicas += *rc.Spec.Replicas + replicaOk += rc.Status.ReadyReplicas + } + + rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + lastAPIError = err + e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err) + return false, err + } + for _, rs := range rsList.Items { + replicas += *rs.Spec.Replicas + replicaOk += rs.Status.ReadyReplicas + } + + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + lastAPIError = err + e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err) + return false, err + } + nOk := int32(0) + notReady = int32(0) + badPods = []v1.Pod{} + desiredPods = len(podList.Items) + for _, pod 
:= range podList.Items { + if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) { + continue + } + res, err := PodRunningReady(&pod) + switch { + case res && err == nil: + nOk++ + case pod.Status.Phase == v1.PodSucceeded: + e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name) + // it doesn't make sense to wait for this pod + continue + case pod.Status.Phase != v1.PodFailed: + e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) + notReady++ + badPods = append(badPods, pod) + default: + if metav1.GetControllerOf(&pod) == nil { + e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name) + badPods = append(badPods, pod) + } + //ignore failed pods that are controlled by some controller + } + } + + e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)", + nOk, len(podList.Items), ns, int(time.Since(start).Seconds())) + e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk) + + if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 { + return true, nil + } + ignoreNotReady = (notReady <= allowedNotReadyPods) + LogPodStates(badPods) + return false, nil + }) != nil { + if !ignoreNotReady { + return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)) + } + e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods) + } + return nil +} + +// WaitForPodCondition waits a pods to be matched to the given condition. 
+func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { + e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc) + var lastPodError error + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) + lastPodError = err + if err != nil { + if apierrors.IsNotFound(err) { + e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err) + } else { + e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err) + } + continue + } + // log now so that current pod info is reported before calling `condition()` + e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v", + podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start)) + if done, err := condition(pod); done { + if err == nil { + e2elog.Logf("Pod %q satisfied condition %q", podName, desc) + } + return err + } + } + if apierrors.IsNotFound(lastPodError) { + // return for compatbility with other functions testing for IsNotFound + return lastPodError + } + return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc) +} + +// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate, +// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not +// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully +// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to +// the supplied reason. 
func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
	return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *v1.Pod) (bool, error) {
		// Only consider Failed pods. Successful pods will be deleted and detected in
		// waitForPodCondition's Get call returning `IsNotFound`
		if pod.Status.Phase == v1.PodFailed {
			if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
				return true, nil
			}
			// Terminated, but not for the expected reason: stop waiting and report it.
			return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
		}
		return false, nil
	})
}

// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
		if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
			// An Always-restarted pod can never stay in PodSucceeded; fail fast
			// instead of polling until the timeout.
			return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
		}
		switch pod.Status.Phase {
		case v1.PodSucceeded:
			ginkgo.By("Saw pod success")
			return true, nil
		case v1.PodFailed:
			return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
		default:
			// Still pending/running: keep polling.
			return false, nil
		}
	})
}

// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running.
func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podRunning(c, podName, namespace))
}

// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the
// specified pod to be ready and running.
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace))
}

// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
// than "not found" then that error is returned and the wait stops.
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // done
		}
		if err != nil {
			return true, err // stop wait with error
		}
		// Pod still exists: keep polling until it is gone or the timeout expires.
		return false, nil
	})
}

// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
+func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { + return wait.PollImmediate(interval, timeout, func() (bool, error) { + e2elog.Logf("Waiting for pod %s to disappear", podName) + options := metav1.ListOptions{LabelSelector: label.String()} + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + if err != nil { + return false, err + } + found := false + for _, pod := range pods.Items { + if pod.Name == podName { + e2elog.Logf("Pod %s still exists", podName) + found = true + break + } + } + if !found { + e2elog.Logf("Pod %s no longer exists", podName) + return true, nil + } + return false, nil + }) +} + +// PodRunningReady checks whether pod p's phase is running and it has a ready +// condition of status true. +// This function is copied from k8s.io/kubernetes/test/utils/conditions.go @v1.23.17 +func PodRunningReady(p *v1.Pod) (bool, error) { + // Check the phase is running. + if p.Status.Phase != v1.PodRunning { + return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'", + p.ObjectMeta.Name, p.Spec.NodeName, v1.PodRunning, p.Status.Phase) + } + // Check the ready condition is true. + if !podutil.IsPodReady(p) { + return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v", + p.ObjectMeta.Name, p.Spec.NodeName, v1.PodReady, v1.ConditionTrue, p.Status.Conditions) + } + return true, nil +} diff --git a/tests/third_party/k8s/pods.go b/tests/third_party/k8s/pods.go new file mode 100644 index 00000000000..6c779c73e93 --- /dev/null +++ b/tests/third_party/k8s/pods.go @@ -0,0 +1,128 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/pods.go @v1.23.17 + +package k8s + +import ( + "context" + "time" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/onsi/gomega" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" + e2epod "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod" +) + +const ( + // DefaultPodDeletionTimeout is the default timeout for deleting pod + DefaultPodDeletionTimeout = 3 * time.Minute +) + +// ImagePrePullList is the images used in the current test suite. It should be initialized in test suite and +// the images in the list should be pre-pulled in the test suite. Currently, this is only used by +// node e2e test. +var ImagePrePullList sets.String + +// PodClient is a convenience method for getting a pod client interface in the framework's namespace, +// possibly applying test-suite specific transformations to the pod spec, e.g. for +// node e2e pod scheduling. +func (f *Framework) PodClient() *PodClient { + return &PodClient{ + f: f, + PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name), + } +} + +// PodClientNS is a convenience method for getting a pod client interface in an alternative namespace, +// possibly applying test-suite specific transformations to the pod spec, e.g. for +// node e2e pod scheduling. 
+func (f *Framework) PodClientNS(namespace string) *PodClient { + return &PodClient{ + f: f, + PodInterface: f.ClientSet.CoreV1().Pods(namespace), + } +} + +// PodClient is a struct for pod client. +type PodClient struct { + f *Framework + v1core.PodInterface +} + +// Create creates a new pod according to the framework specifications (don't wait for it to start). +func (c *PodClient) Create(pod *v1.Pod) *v1.Pod { + c.mungeSpec(pod) + p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{}) + ExpectNoError(err, "Error creating Pod") + return p +} + +// DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't +// disappear before the timeout, it will fail the test. +func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) { + namespace := c.f.Namespace.Name + err := c.Delete(context.TODO(), name, options) + if err != nil && !apierrors.IsNotFound(err) { + log.Failf("Failed to delete pod %q: %v", name, err) + } + gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(), + 2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name) +} + +// mungeSpec apply test-suite specific transformations to the pod spec. +func (c *PodClient) mungeSpec(pod *v1.Pod) { + if !TestContext.NodeE2E { + return + } + + gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(TestContext.NodeName)), "Test misconfigured") + pod.Spec.NodeName = TestContext.NodeName + // Node e2e does not support the default DNSClusterFirst policy. Set + // the policy to DNSDefault, which is configured per node. + pod.Spec.DNSPolicy = v1.DNSDefault + + // PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced, + // we should not munge ImagePullPolicy for cluster e2e pods. 
+ if !TestContext.PrepullImages { + return + } + // If prepull is enabled, munge the container spec to make sure the images are not pulled + // during the test. + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + if c.ImagePullPolicy == v1.PullAlways { + // If the image pull policy is PullAlways, the image doesn't need to be in + // the allow list or pre-pulled, because the image is expected to be pulled + // in the test anyway. + continue + } + // If the image policy is not PullAlways, the image must be in the pre-pull list and + // pre-pulled. + gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image) + // Do not pull images during the tests because the images in pre-pull list should have + // been prepulled. + c.ImagePullPolicy = v1.PullNever + } +} diff --git a/tests/third_party/k8s/ports.go b/tests/third_party/k8s/ports.go new file mode 100644 index 00000000000..37a4b94ffb1 --- /dev/null +++ b/tests/third_party/k8s/ports.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/ports.go @v1.23.17 + +package k8s + +// NOTE: constants in this file are copied from pkg/cluster/ports/ports.go + +const ( + // KubeletPort is the default port for the kubelet server on each host machine. + // May be overridden by a flag at startup. + KubeletPort = 10250 + // KubeControllerManagerPort is the default port for the controller manager status server. + // May be overridden by a flag at startup. + KubeControllerManagerPort = 10257 +) diff --git a/tests/third_party/k8s/provider.go b/tests/third_party/k8s/provider.go new file mode 100644 index 00000000000..07fe24c8013 --- /dev/null +++ b/tests/third_party/k8s/provider.go @@ -0,0 +1,182 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this is a copy of the file in k8s.io/kubernetes/test/e2e/framework/provider.go @v1.23.17 + +package k8s + +import ( + "fmt" + "os" + "sync" + + v1 "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" +) + +// Factory is a func which operates provider specific behavior. +type Factory func() (ProviderInterface, error) + +var ( + providers = make(map[string]Factory) + mutex sync.Mutex +) + +// RegisterProvider is expected to be called during application init, +// typically by an init function in a provider package. 
func RegisterProvider(name string, factory Factory) {
	mutex.Lock()
	defer mutex.Unlock()
	// Double registration is a programming error in test-suite setup, hence
	// the panic instead of an error return.
	if _, ok := providers[name]; ok {
		panic(fmt.Sprintf("provider %s already registered", name))
	}
	providers[name] = factory
}

// GetProviders returns the names of all currently registered providers.
// The order of the returned names is unspecified (map iteration order).
func GetProviders() []string {
	mutex.Lock()
	defer mutex.Unlock()
	var providerNames []string
	for name := range providers {
		providerNames = append(providerNames, name)
	}
	return providerNames
}

func init() {
	// "local" or "skeleton" can always be used.
	RegisterProvider("local", func() (ProviderInterface, error) {
		return NullProvider{}, nil
	})
	RegisterProvider("skeleton", func() (ProviderInterface, error) {
		return NullProvider{}, nil
	})
	// The empty string used to be accepted in the past, but is not
	// a valid value anymore.
}

// SetupProviderConfig validates the chosen provider and creates
// an interface instance for it.
func SetupProviderConfig(providerName string) (ProviderInterface, error) {
	var err error

	mutex.Lock()
	defer mutex.Unlock()
	factory, ok := providers[providerName]
	if !ok {
		// Wrapping os.ErrNotExist lets callers detect an unknown provider via
		// errors.Is(err, os.ErrNotExist).
		return nil, fmt.Errorf("The provider %s is unknown: %w", providerName, os.ErrNotExist)
	}
	provider, err := factory()

	return provider, err
}

// ProviderInterface contains the implementation for certain
// provider-specific functionality.
type ProviderInterface interface {
	// Framework lifecycle hooks, invoked around each test.
	FrameworkBeforeEach(f *Framework)
	FrameworkAfterEach(f *Framework)

	// Instance-group (node pool) management.
	ResizeGroup(group string, size int32) error
	GetGroupNodes(group string) ([]string, error)
	GroupSize(group string) (int, error)

	DeleteNode(node *v1.Node) error

	// Persistent-disk / persistent-volume-source management.
	CreatePD(zone string) (string, error)
	DeletePD(pdName string) error
	CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error)
	DeletePVSource(pvSource *v1.PersistentVolumeSource) error

	// Load-balancer related cleanup and configuration.
	CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string)

	EnsureLoadBalancerResourcesDeleted(ip, portRange string) error
	LoadBalancerSrcRanges() []string
	EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service))
}

// NullProvider is the default implementation of the ProviderInterface
// which doesn't do anything.
type NullProvider struct{}

// FrameworkBeforeEach is a base implementation which does BeforeEach.
func (n NullProvider) FrameworkBeforeEach(f *Framework) {}

// FrameworkAfterEach is a base implementation which does AfterEach.
func (n NullProvider) FrameworkAfterEach(f *Framework) {}

// ResizeGroup is a base implementation which resizes group.
func (n NullProvider) ResizeGroup(string, int32) error {
	return fmt.Errorf("Provider does not support InstanceGroups")
}

// GetGroupNodes is a base implementation which returns group nodes.
func (n NullProvider) GetGroupNodes(group string) ([]string, error) {
	return nil, fmt.Errorf("provider does not support InstanceGroups")
}

// GroupSize returns the size of an instance group
func (n NullProvider) GroupSize(group string) (int, error) {
	return -1, fmt.Errorf("provider does not support InstanceGroups")
}

// DeleteNode is a base implementation which deletes a node.
func (n NullProvider) DeleteNode(node *v1.Node) error {
	return fmt.Errorf("provider does not support DeleteNode")
}

// CreatePD is a base implementation which creates PD.
+func (n NullProvider) CreatePD(zone string) (string, error) { + return "", fmt.Errorf("provider does not support volume creation") +} + +// DeletePD is a base implementation which deletes PD. +func (n NullProvider) DeletePD(pdName string) error { + return fmt.Errorf("provider does not support volume deletion") +} + +// CreatePVSource is a base implementation which creates PV source. +func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) { + return nil, fmt.Errorf("Provider not supported") +} + +// DeletePVSource is a base implementation which deletes PV source. +func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { + return fmt.Errorf("Provider not supported") +} + +// CleanupServiceResources is a base implementation which cleans up service resources. +func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { +} + +// EnsureLoadBalancerResourcesDeleted is a base implementation which ensures load balancer is deleted. +func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { + return nil +} + +// LoadBalancerSrcRanges is a base implementation which returns the ranges of ips used by load balancers. +func (n NullProvider) LoadBalancerSrcRanges() []string { + return nil +} + +// EnableAndDisableInternalLB is a base implementation which returns functions for enabling/disabling an internal LB. +func (n NullProvider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) { + nop := func(svc *v1.Service) {} + return nop, nop +} + +var _ ProviderInterface = NullProvider{} diff --git a/tests/third_party/k8s/psp.go b/tests/third_party/k8s/psp.go new file mode 100644 index 00000000000..392d0036d4d --- /dev/null +++ b/tests/third_party/k8s/psp.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/psp.go @v1.23.17 + +package k8s + +import ( + "context" + "fmt" + "strings" + "sync" + + v1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + clientset "k8s.io/client-go/kubernetes" + + "github.com/onsi/ginkgo" + + e2eauth "github.com/pingcap/tidb-operator/tests/third_party/k8s/auth" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +const ( + podSecurityPolicyPrivileged = "e2e-test-privileged-psp" + + // allowAny is the wildcard used to allow any profile. + allowAny = "*" + + // allowedProfilesAnnotationKey specifies the allowed seccomp profiles. + allowedProfilesAnnotationKey = "seccomp.security.alpha.kubernetes.io/allowedProfileNames" + + // NOTE(pingcap): hardcode instead of using "k8s.io/kubernetes/test/utils/image" + pauseImage = "registry.k8s.io/pause:3.9" +) + +var ( + isPSPEnabledOnce sync.Once + isPSPEnabled bool +) + +// privilegedPSP creates a PodSecurityPolicy that allows everything. 
func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
	// AllowPrivilegeEscalation is a *bool in the API type, so it needs an
	// addressable local.
	allowPrivilegeEscalation := true
	return &policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			// Permit any seccomp profile (allowAny == "*").
			Annotations: map[string]string{allowedProfilesAnnotationKey: allowAny},
		},
		Spec: policyv1beta1.PodSecurityPolicySpec{
			Privileged:               true,
			AllowPrivilegeEscalation: &allowPrivilegeEscalation,
			AllowedCapabilities:      []v1.Capability{"*"},
			Volumes:                  []policyv1beta1.FSType{policyv1beta1.All},
			HostNetwork:              true,
			HostPorts:                []policyv1beta1.HostPortRange{{Min: 0, Max: 65535}},
			HostIPC:                  true,
			HostPID:                  true,
			RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
				Rule: policyv1beta1.RunAsUserStrategyRunAsAny,
			},
			SELinux: policyv1beta1.SELinuxStrategyOptions{
				Rule: policyv1beta1.SELinuxStrategyRunAsAny,
			},
			SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
				Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup: policyv1beta1.FSGroupStrategyOptions{
				Rule: policyv1beta1.FSGroupStrategyRunAsAny,
			},
			ReadOnlyRootFilesystem: false,
			AllowedUnsafeSysctls:   []string{"*"},
		},
	}
}

// IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false.
+func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool { + isPSPEnabledOnce.Do(func() { + psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + log.Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err) + return + } + if psps == nil || len(psps.Items) == 0 { + log.Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.") + return + } + log.Logf("Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled") + testPod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{GenerateName: "psp-test-pod-"}, + Spec: v1.PodSpec{Containers: []v1.Container{{Name: "test", Image: pauseImage}}}, + } + dryRunPod, err := kubeClient.CoreV1().Pods("kube-system").Create(context.TODO(), testPod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}) + if err != nil { + if strings.Contains(err.Error(), "PodSecurityPolicy") { + log.Logf("PodSecurityPolicy error creating dryrun pod; assuming PodSecurityPolicy is enabled: %v", err) + isPSPEnabled = true + } else { + log.Logf("Error creating dryrun pod; assuming PodSecurityPolicy is disabled: %v", err) + } + return + } + pspAnnotation, pspAnnotationExists := dryRunPod.Annotations["kubernetes.io/psp"] + if !pspAnnotationExists { + log.Logf("No PSP annotation exists on dry run pod; assuming PodSecurityPolicy is disabled") + return + } + log.Logf("PSP annotation exists on dry run pod: %q; assuming PodSecurityPolicy is enabled", pspAnnotation) + isPSPEnabled = true + }) + return isPSPEnabled +} + +var ( + privilegedPSPOnce sync.Once +) + +// CreatePrivilegedPSPBinding creates the privileged PSP & role +func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string) { + if !IsPodSecurityPolicyEnabled(kubeClient) { + return + } + // Create the privileged PSP & role + privilegedPSPOnce.Do(func() { + _, err := 
kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), podSecurityPolicyPrivileged, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + // Privileged PSP was already created. + ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged) + return + } + + psp := privilegedPSP(podSecurityPolicyPrivileged) + _, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{}) + if !apierrors.IsAlreadyExists(err) { + ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) + } + + if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) { + // Create the Role to bind it to the namespace. + _, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged}, + Rules: []rbacv1.PolicyRule{{ + APIGroups: []string{"extensions"}, + Resources: []string{"podsecuritypolicies"}, + ResourceNames: []string{podSecurityPolicyPrivileged}, + Verbs: []string{"use"}, + }}, + }, metav1.CreateOptions{}) + if !apierrors.IsAlreadyExists(err) { + ExpectNoError(err, "Failed to create PSP role") + } + } + }) + + if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) { + ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s", + podSecurityPolicyPrivileged, namespace)) + err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(), + podSecurityPolicyPrivileged, + namespace, + rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: namespace, + Name: "default", + }, + rbacv1.Subject{ + Kind: rbacv1.GroupKind, + APIGroup: rbacv1.GroupName, + Name: "system:serviceaccounts:" + namespace, + }, + ) + ExpectNoError(err) + ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(), + serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged, + schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true)) + 
} +} diff --git a/tests/third_party/k8s/resource_usage_gatherer.go b/tests/third_party/k8s/resource_usage_gatherer.go new file mode 100644 index 00000000000..f71fc0b386f --- /dev/null +++ b/tests/third_party/k8s/resource_usage_gatherer.go @@ -0,0 +1,657 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/resource_usage_gatherer.go @v1.23.17 + +package k8s + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "math" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/tabwriter" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientset "k8s.io/client-go/kubernetes" + kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +// ResourceConstraint is a struct to hold constraints. +type ResourceConstraint struct { + CPUConstraint float64 + MemoryConstraint uint64 +} + +// SingleContainerSummary is a struct to hold single container summary. +type SingleContainerSummary struct { + Name string + CPU float64 + Mem uint64 +} + +// ContainerResourceUsage is a structure for gathering container resource usage. 
+type ContainerResourceUsage struct { + Name string + Timestamp time.Time + CPUUsageInCores float64 + MemoryUsageInBytes uint64 + MemoryWorkingSetInBytes uint64 + MemoryRSSInBytes uint64 + // The interval used to calculate CPUUsageInCores. + CPUInterval time.Duration +} + +// ResourceUsagePerContainer is map of ContainerResourceUsage +type ResourceUsagePerContainer map[string]*ContainerResourceUsage + +// ResourceUsageSummary is a struct to hold resource usage summary. +// we can't have int here, as JSON does not accept integer keys. +type ResourceUsageSummary map[string][]SingleContainerSummary + +// PrintHumanReadable prints resource usage summary in human readable. +func (s *ResourceUsageSummary) PrintHumanReadable() string { + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0) + for perc, summaries := range *s { + buf.WriteString(fmt.Sprintf("%v percentile:\n", perc)) + fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n") + for _, summary := range summaries { + fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.CPU, float64(summary.Mem)/(1024*1024)) + } + w.Flush() + } + return buf.String() +} + +// PrintJSON prints resource usage summary in JSON. 
+func (s *ResourceUsageSummary) PrintJSON() string { + return PrettyPrintJSON(*s) +} + +// SummaryKind returns string of ResourceUsageSummary +func (s *ResourceUsageSummary) SummaryKind() string { + return "ResourceUsageSummary" +} + +type uint64arr []uint64 + +func (a uint64arr) Len() int { return len(a) } +func (a uint64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a uint64arr) Less(i, j int) bool { return a[i] < a[j] } + +type usageDataPerContainer struct { + cpuData []float64 + memUseData []uint64 + memWorkSetData []uint64 +} + +func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCompute []int) map[int]ResourceUsagePerContainer { + if len(timeSeries) == 0 { + return make(map[int]ResourceUsagePerContainer) + } + dataMap := make(map[string]*usageDataPerContainer) + for i := range timeSeries { + for name, data := range timeSeries[i] { + if dataMap[name] == nil { + dataMap[name] = &usageDataPerContainer{ + cpuData: make([]float64, 0, len(timeSeries)), + memUseData: make([]uint64, 0, len(timeSeries)), + memWorkSetData: make([]uint64, 0, len(timeSeries)), + } + } + dataMap[name].cpuData = append(dataMap[name].cpuData, data.CPUUsageInCores) + dataMap[name].memUseData = append(dataMap[name].memUseData, data.MemoryUsageInBytes) + dataMap[name].memWorkSetData = append(dataMap[name].memWorkSetData, data.MemoryWorkingSetInBytes) + } + } + for _, v := range dataMap { + sort.Float64s(v.cpuData) + sort.Sort(uint64arr(v.memUseData)) + sort.Sort(uint64arr(v.memWorkSetData)) + } + + result := make(map[int]ResourceUsagePerContainer) + for _, perc := range percentilesToCompute { + data := make(ResourceUsagePerContainer) + for k, v := range dataMap { + percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1 + data[k] = &ContainerResourceUsage{ + Name: k, + CPUUsageInCores: v.cpuData[percentileIndex], + MemoryUsageInBytes: v.memUseData[percentileIndex], + MemoryWorkingSetInBytes: v.memWorkSetData[percentileIndex], + } + } + 
result[perc] = data + } + return result +} + +func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]ResourceUsagePerContainer { + result := make(map[int]ResourceUsagePerContainer) + for percentile, data := range left { + result[percentile] = data + if _, ok := right[percentile]; !ok { + continue + } + for k, v := range right[percentile] { + result[percentile][k] = v + } + } + return result +} + +type resourceGatherWorker struct { + c clientset.Interface + nodeName string + wg *sync.WaitGroup + containerIDs []string + stopCh chan struct{} + dataSeries []ResourceUsagePerContainer + finished bool + inKubemark bool + resourceDataGatheringPeriod time.Duration + probeDuration time.Duration + printVerboseLogs bool +} + +func (w *resourceGatherWorker) singleProbe() { + data := make(ResourceUsagePerContainer) + if w.inKubemark { + kubemarkData := getKubemarkMasterComponentsResourceUsage() + if kubemarkData == nil { + return + } + for k, v := range kubemarkData { + data[k] = &ContainerResourceUsage{ + Name: v.Name, + MemoryWorkingSetInBytes: v.MemoryWorkingSetInBytes, + CPUUsageInCores: v.CPUUsageInCores, + } + } + } else { + nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs }) + if err != nil { + log.Logf("Error while reading data from %v: %v", w.nodeName, err) + return + } + for k, v := range nodeUsage { + data[k] = v + if w.printVerboseLogs { + log.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes) + } + } + } + w.dataSeries = append(w.dataSeries, data) +} + +// getOneTimeResourceUsageOnNode queries the node's /stats/summary endpoint +// and returns the resource usage of all containerNames for the past +// cpuInterval. +// The acceptable range of the interval is 2s~120s. 
Be warned that as the +// interval (and #containers) increases, the size of kubelet's response +// could be significant. E.g., the 60s interval stats for ~20 containers is +// ~1.5MB. Don't hammer the node with frequent, heavy requests. +// +// cadvisor records cumulative cpu usage in nanoseconds, so we need to have two +// stats points to compute the cpu usage over the interval. Assuming cadvisor +// polls every second, we'd need to get N stats points for N-second interval. +// Note that this is an approximation and may not be accurate, hence we also +// write the actual interval used for calculation (based on the timestamps of +// the stats points in ContainerResourceUsage.CPUInterval. +// +// containerNames is a function returning a collection of container names in which +// user is interested in. +func getOneTimeResourceUsageOnNode( + c clientset.Interface, + nodeName string, + cpuInterval time.Duration, + containerNames func() []string, +) (ResourceUsagePerContainer, error) { + const ( + // cadvisor records stats about every second. + cadvisorStatsPollingIntervalInSeconds float64 = 1.0 + // cadvisor caches up to 2 minutes of stats (configured by kubelet). + maxNumStatsToRequest int = 120 + ) + + numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds) + if numStats < 2 || numStats > maxNumStatsToRequest { + return nil, fmt.Errorf("numStats needs to be > 1 and < %d", maxNumStatsToRequest) + } + // Get information of all containers on the node. 
+ summary, err := getStatsSummary(c, nodeName) + if err != nil { + return nil, err + } + + f := func(name string, newStats *kubeletstatsv1alpha1.ContainerStats) *ContainerResourceUsage { + if newStats == nil || newStats.CPU == nil || newStats.Memory == nil { + return nil + } + return &ContainerResourceUsage{ + Name: name, + Timestamp: newStats.StartTime.Time, + CPUUsageInCores: float64(removeUint64Ptr(newStats.CPU.UsageNanoCores)) / 1000000000, + MemoryUsageInBytes: removeUint64Ptr(newStats.Memory.UsageBytes), + MemoryWorkingSetInBytes: removeUint64Ptr(newStats.Memory.WorkingSetBytes), + MemoryRSSInBytes: removeUint64Ptr(newStats.Memory.RSSBytes), + CPUInterval: 0, + } + } + // Process container infos that are relevant to us. + containers := containerNames() + usageMap := make(ResourceUsagePerContainer, len(containers)) + for _, pod := range summary.Pods { + for _, container := range pod.Containers { + isInteresting := false + for _, interestingContainerName := range containers { + if container.Name == interestingContainerName { + isInteresting = true + break + } + } + if !isInteresting { + continue + } + if usage := f(pod.PodRef.Name+"/"+container.Name, &container); usage != nil { + usageMap[pod.PodRef.Name+"/"+container.Name] = usage + } + } + } + return usageMap, nil +} + +// getStatsSummary contacts kubelet for the container information. +func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) { + ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout) + defer cancel() + + data, err := c.CoreV1().RESTClient().Get(). + Resource("nodes"). + SubResource("proxy"). + Name(fmt.Sprintf("%v:%v", nodeName, KubeletPort)). + Suffix("stats/summary"). 
+ Do(ctx).Raw() + + if err != nil { + return nil, err + } + + summary := kubeletstatsv1alpha1.Summary{} + err = json.Unmarshal(data, &summary) + if err != nil { + return nil, err + } + return &summary, nil +} + +func removeUint64Ptr(ptr *uint64) uint64 { + if ptr == nil { + return 0 + } + return *ptr +} + +func (w *resourceGatherWorker) gather(initialSleep time.Duration) { + defer utilruntime.HandleCrash() + defer w.wg.Done() + defer log.Logf("Closing worker for %v", w.nodeName) + defer func() { w.finished = true }() + select { + case <-time.After(initialSleep): + w.singleProbe() + for { + select { + case <-time.After(w.resourceDataGatheringPeriod): + w.singleProbe() + case <-w.stopCh: + return + } + } + case <-w.stopCh: + return + } +} + +// ContainerResourceGatherer is a struct for gathering container resource. +type ContainerResourceGatherer struct { + client clientset.Interface + stopCh chan struct{} + workers []resourceGatherWorker + workerWg sync.WaitGroup + containerIDs []string + options ResourceGathererOptions +} + +// ResourceGathererOptions is a struct to hold options for resource. +type ResourceGathererOptions struct { + InKubemark bool + Nodes NodesSet + ResourceDataGatheringPeriod time.Duration + ProbeDuration time.Duration + PrintVerboseLogs bool +} + +// NodesSet is a value of nodes set. +type NodesSet int + +const ( + // AllNodes means all containers on all nodes. + AllNodes NodesSet = 0 + // MasterNodes means all containers on Master nodes only. + MasterNodes NodesSet = 1 + // MasterAndDNSNodes means all containers on Master nodes and DNS containers on other nodes. + MasterAndDNSNodes NodesSet = 2 +) + +// nodeHasControlPlanePods returns true if specified node has control plane pods +// (kube-scheduler and/or kube-controller-manager). 
+func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, error) { + regKubeScheduler := regexp.MustCompile("kube-scheduler-.*") + regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*") + + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(), + }) + if err != nil { + return false, err + } + if len(podList.Items) < 1 { + log.Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem) + } + for _, pod := range podList.Items { + if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) { + return true, nil + } + } + return false, nil +} + +// NewResourceUsageGatherer returns a new ContainerResourceGatherer. +func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) { + g := ContainerResourceGatherer{ + client: c, + stopCh: make(chan struct{}), + containerIDs: make([]string, 0), + options: options, + } + + if options.InKubemark { + g.workerWg.Add(1) + g.workers = append(g.workers, resourceGatherWorker{ + inKubemark: true, + stopCh: g.stopCh, + wg: &g.workerWg, + finished: false, + resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod, + probeDuration: options.ProbeDuration, + printVerboseLogs: options.PrintVerboseLogs, + }) + return &g, nil + } + + // Tracks kube-system pods if no valid PodList is passed in. 
+ var err error + if pods == nil { + pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + log.Logf("Error while listing Pods: %v", err) + return nil, err + } + } + dnsNodes := make(map[string]bool) + for _, pod := range pods.Items { + if options.Nodes == MasterNodes { + isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName) + if err != nil { + return nil, err + } + if !isControlPlane { + continue + } + } + if options.Nodes == MasterAndDNSNodes { + isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName) + if err != nil { + return nil, err + } + if !isControlPlane && pod.Labels["k8s-app"] != "kube-dns" { + continue + } + } + for _, container := range pod.Status.InitContainerStatuses { + g.containerIDs = append(g.containerIDs, container.Name) + } + for _, container := range pod.Status.ContainerStatuses { + g.containerIDs = append(g.containerIDs, container.Name) + } + if options.Nodes == MasterAndDNSNodes { + dnsNodes[pod.Spec.NodeName] = true + } + } + nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + log.Logf("Error while listing Nodes: %v", err) + return nil, err + } + + for _, node := range nodeList.Items { + isControlPlane, err := nodeHasControlPlanePods(c, node.Name) + if err != nil { + return nil, err + } + if options.Nodes == AllNodes || isControlPlane || dnsNodes[node.Name] { + g.workerWg.Add(1) + g.workers = append(g.workers, resourceGatherWorker{ + c: c, + nodeName: node.Name, + wg: &g.workerWg, + containerIDs: g.containerIDs, + stopCh: g.stopCh, + finished: false, + inKubemark: false, + resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod, + probeDuration: options.ProbeDuration, + printVerboseLogs: options.PrintVerboseLogs, + }) + if options.Nodes == MasterNodes { + break + } + } + } + return &g, nil +} + +// StartGatheringData starts a stat gathering worker blocks for each node to track, +// and blocks until 
StopAndSummarize is called. +func (g *ContainerResourceGatherer) StartGatheringData() { + if len(g.workers) == 0 { + return + } + delayPeriod := g.options.ResourceDataGatheringPeriod / time.Duration(len(g.workers)) + delay := time.Duration(0) + for i := range g.workers { + go g.workers[i].gather(delay) + delay += delayPeriod + } + g.workerWg.Wait() +} + +// StopAndSummarize stops stat gathering workers, processes the collected stats, +// generates resource summary for the passed-in percentiles, and returns the summary. +// It returns an error if the resource usage at any percentile is beyond the +// specified resource constraints. +func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) { + close(g.stopCh) + log.Logf("Closed stop channel. Waiting for %v workers", len(g.workers)) + finished := make(chan struct{}, 1) + go func() { + g.workerWg.Wait() + finished <- struct{}{} + }() + select { + case <-finished: + log.Logf("Waitgroup finished.") + case <-time.After(2 * time.Minute): + unfinished := make([]string, 0) + for i := range g.workers { + if !g.workers[i].finished { + unfinished = append(unfinished, g.workers[i].nodeName) + } + } + log.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished) + } + + if len(percentiles) == 0 { + log.Logf("Warning! Empty percentile list for stopAndPrintData.") + return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data") + } + data := make(map[int]ResourceUsagePerContainer) + for i := range g.workers { + if g.workers[i].finished { + stats := computePercentiles(g.workers[i].dataSeries, percentiles) + data = leftMergeData(stats, data) + } + } + + // Workers has been stopped. We need to gather data stored in them. 
+ sortedKeys := []string{} + for name := range data[percentiles[0]] { + sortedKeys = append(sortedKeys, name) + } + sort.Strings(sortedKeys) + violatedConstraints := make([]string, 0) + summary := make(ResourceUsageSummary) + for _, perc := range percentiles { + for _, name := range sortedKeys { + usage := data[perc][name] + summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], SingleContainerSummary{ + Name: name, + CPU: usage.CPUUsageInCores, + Mem: usage.MemoryWorkingSetInBytes, + }) + + // Verifying 99th percentile of resource usage + if perc != 99 { + continue + } + // Name has a form: / + containerName := strings.Split(name, "/")[1] + constraint, ok := constraints[containerName] + if !ok { + continue + } + if usage.CPUUsageInCores > constraint.CPUConstraint { + violatedConstraints = append( + violatedConstraints, + fmt.Sprintf("Container %v is using %v/%v CPU", + name, + usage.CPUUsageInCores, + constraint.CPUConstraint, + ), + ) + } + if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint { + violatedConstraints = append( + violatedConstraints, + fmt.Sprintf("Container %v is using %v/%v MB of memory", + name, + float64(usage.MemoryWorkingSetInBytes)/(1024*1024), + float64(constraint.MemoryConstraint)/(1024*1024), + ), + ) + } + } + } + if len(violatedConstraints) > 0 { + return &summary, fmt.Errorf(strings.Join(violatedConstraints, "\n")) + } + return &summary, nil +} + +// kubemarkResourceUsage is a struct for tracking the resource usage of kubemark. +type kubemarkResourceUsage struct { + Name string + MemoryWorkingSetInBytes uint64 + CPUUsageInCores float64 +} + +func getMasterUsageByPrefix(prefix string) (string, error) { + // TODO(pingcap): removed getting the resource usage of master node via SSH + return "", nil +} + +// getKubemarkMasterComponentsResourceUsage returns the resource usage of kubemark which contains multiple combinations of cpu and memory usage for each pod name. 
+func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsage { + result := make(map[string]*kubemarkResourceUsage) + // Get kubernetes component resource usage + sshResult, err := getMasterUsageByPrefix("kube") + if err != nil { + log.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err) + return nil + } + scanner := bufio.NewScanner(strings.NewReader(sshResult)) + for scanner.Scan() { + var cpu float64 + var mem uint64 + var name string + fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name) + if name != "" { + // Gatherer expects pod_name/container_name format + fullName := name + "/" + name + result[fullName] = &kubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100} + } + } + // Get etcd resource usage + sshResult, err = getMasterUsageByPrefix("bin/etcd") + if err != nil { + log.Logf("Error when trying to SSH to master machine. Skipping probe") + return nil + } + scanner = bufio.NewScanner(strings.NewReader(sshResult)) + for scanner.Scan() { + var cpu float64 + var mem uint64 + var etcdKind string + fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /bin/sh -c /usr/local/bin/etcd", &cpu, &mem) + dataDirStart := strings.Index(scanner.Text(), "--data-dir") + if dataDirStart < 0 { + continue + } + fmt.Sscanf(scanner.Text()[dataDirStart:], "--data-dir=/var/%s", &etcdKind) + if etcdKind != "" { + // Gatherer expects pod_name/container_name format + fullName := "etcd/" + etcdKind + result[fullName] = &kubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100} + } + } + return result +} diff --git a/tests/third_party/k8s/skipper/skipper.go b/tests/third_party/k8s/skipper/skipper.go new file mode 100644 index 00000000000..196ea3e3533 --- /dev/null +++ b/tests/third_party/k8s/skipper/skipper.go @@ -0,0 +1,111 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go @v1.23.17 + +package skipper + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo" + + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +func skipInternalf(caller int, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + framework.Logf(msg) + skip(msg, caller+1) +} + +// SkipPanic is the value that will be panicked from Skip. +type SkipPanic struct { + Message string // The failure message passed to Fail + Filename string // The filename that is the source of the failure + Line int // The line number of the filename that is the source of the failure + FullStackTrace string // A full stack trace starting at the source of the failure +} + +// String makes SkipPanic look like the old Ginkgo panic when printed. +func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC } + +// Skip wraps ginkgo.Skip so that it panics with more useful +// information about why the test is being skipped. This function will +// panic with a SkipPanic. 
+func skip(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + + _, file, line, _ := runtime.Caller(skip) + sp := SkipPanic{ + Message: message, + Filename: file, + Line: line, + FullStackTrace: pruneStack(skip), + } + + defer func() { + e := recover() + if e != nil { + panic(sp) + } + }() + + ginkgo.Skip(message, skip) +} + +// ginkgo adds a lot of test running infrastructure to the stack, so +// we filter those out +var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`) + +func pruneStack(skip int) string { + skip += 2 // one for pruneStack and one for debug.Stack + stack := debug.Stack() + scanner := bufio.NewScanner(bytes.NewBuffer(stack)) + var prunedStack []string + + // skip the top of the stack + for i := 0; i < 2*skip+1; i++ { + scanner.Scan() + } + + for scanner.Scan() { + if stackSkipPattern.Match(scanner.Bytes()) { + scanner.Scan() // these come in pairs + } else { + prunedStack = append(prunedStack, scanner.Text()) + scanner.Scan() // these come in pairs + prunedStack = append(prunedStack, scanner.Text()) + } + } + + return strings.Join(prunedStack, "\n") +} + +// Skipf skips with information about why the test is being skipped. +func Skipf(format string, args ...interface{}) { + skipInternalf(1, format, args...) + panic("unreachable") +} diff --git a/tests/third_party/k8s/statefulset/rest.go b/tests/third_party/k8s/statefulset/rest.go new file mode 100644 index 00000000000..41711e069c4 --- /dev/null +++ b/tests/third_party/k8s/statefulset/rest.go @@ -0,0 +1,39 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/statefulset/rest.go @v1.23.17 + +package statefulset + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + + framework "github.com/pingcap/tidb-operator/tests/third_party/k8s" +) + +// GetPodList gets the current Pods in ss. +func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList { + selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector) + framework.ExpectNoError(err) + podList, err := c.CoreV1().Pods(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + framework.ExpectNoError(err) + return podList +} diff --git a/tests/third_party/k8s/storage/helpers.go b/tests/third_party/k8s/storage/helpers.go new file mode 100644 index 00000000000..9cd66b2e4f6 --- /dev/null +++ b/tests/third_party/k8s/storage/helpers.go @@ -0,0 +1,59 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file is copied from k8s.io/kubernetes/pkg/apis/storage/v1/util.go @v1.23.17 + +package util + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// IsDefaultStorageClassAnnotation represents a StorageClass annotation that +// marks a class as the default StorageClass +const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" + +// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation. +// TODO: remove Beta when no longer used +const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" + +// IsDefaultAnnotationText returns a pretty Yes/No String if +// the annotation is set +// TODO: remove Beta when no longer needed +func IsDefaultAnnotationText(obj metav1.ObjectMeta) string { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + + return "No" +} + +// IsDefaultAnnotation returns a boolean if +// the annotation is set +// TODO: remove Beta when no longer needed +func IsDefaultAnnotation(obj metav1.ObjectMeta) bool { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return true + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return true + } + + return false +} diff --git a/tests/third_party/k8s/test_context.go b/tests/third_party/k8s/test_context.go new file mode 100644 index 00000000000..3df9ce965f6 --- /dev/null +++ b/tests/third_party/k8s/test_context.go @@ -0,0 +1,513 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/test_context.go @v1.23.17 + +package k8s + +import ( + "crypto/rand" + "encoding/base64" + "errors" + "flag" + "fmt" + "io/ioutil" + "math" + "os" + "sort" + "strings" + "time" + + "github.com/onsi/ginkgo/config" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2" + + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" +) + +const ( + defaultHost = "https://127.0.0.1:6443" + + // DefaultNumNodes is the number of nodes. If not specified, then number of nodes is auto-detected + DefaultNumNodes = -1 +) + +// TestContextType contains test settings and global state. Due to +// historic reasons, it is a mixture of items managed by the test +// framework itself, cloud providers and individual tests. +// The goal is to move anything not required by the framework +// into the code which uses the settings. +// +// The recommendation for those settings is: +// - They are stored in their own context structure or local +// variables. +// - The standard `flag` package is used to register them. +// The flag name should follow the pattern ..... +// where the prefix is unlikely to conflict with other tests or +// standard packages and each part is in lower camel case. For +// example, test/e2e/storage/csi/context.go could define +// storage.csi.numIterations. 
+// - framework/config can be used to simplify the registration of +// multiple options with a single function call: +// var storageCSI { +// NumIterations `default:"1" usage:"number of iterations"` +// } +// _ config.AddOptions(&storageCSI, "storage.csi") +// - The direct use Viper in tests is possible, but discouraged because +// it only works in test suites which use Viper (which is not +// required) and the supported options cannot be +// discovered by a test suite user. +// +// Test suite authors can use framework/viper to make all command line +// parameters also configurable via a configuration file. +type TestContextType struct { + KubeConfig string + KubeContext string + KubeAPIContentType string + KubeVolumeDir string + CertDir string + Host string + BearerToken string `datapolicy:"token"` + // TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987. + RepoRoot string + DockershimCheckpointDir string + // ListImages will list off all images that are used then quit + ListImages bool + + // ListConformanceTests will list off all conformance tests that are available then quit + ListConformanceTests bool + + // Provider identifies the infrastructure provider (gce, gke, aws) + Provider string + + // Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling. + Tooling string + + CloudConfig CloudConfig + KubectlPath string + OutputDir string + ReportDir string + ReportPrefix string + Prefix string + MinStartupPods int + // Timeout for waiting for system pods to be running + SystemPodsStartupTimeout time.Duration + EtcdUpgradeStorage string + EtcdUpgradeVersion string + GCEUpgradeScript string + ContainerRuntime string + ContainerRuntimeEndpoint string + ContainerRuntimeProcessName string + ContainerRuntimePidFile string + // SystemdServices are comma separated list of systemd services the test framework + // will dump logs for. 
+ SystemdServices string + // DumpSystemdJournal controls whether to dump the full systemd journal. + DumpSystemdJournal bool + ImageServiceEndpoint string + MasterOSDistro string + NodeOSDistro string + NodeOSArch string + VerifyServiceAccount bool + DeleteNamespace bool + DeleteNamespaceOnFailure bool + AllowedNotReadyNodes int + CleanStart bool + // If set to 'true' or 'all' framework will start a goroutine monitoring resource usage of system add-ons. + // It will read the data every 30 seconds from all Nodes and print summary during afterEach. If set to 'master' + // only master Node will be monitored. + GatherKubeSystemResourceUsageData string + GatherLogsSizes bool + GatherMetricsAfterTest string + GatherSuiteMetricsAfterTest bool + MaxNodesToGather int + // If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components. + IncludeClusterAutoscalerMetrics bool + // Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list. + OutputPrintType string + // NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable. + NodeSchedulableTimeout time.Duration + // SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready. + SystemDaemonsetStartupTimeout time.Duration + // CreateTestingNS is responsible for creating namespace used for executing e2e tests. + // It accepts namespace base name, which will be prepended with e2e prefix, kube client + // and labels to be applied to a namespace. + CreateTestingNS CreateTestingNSFn + // If set to true test will dump data about the namespace in which test was running. + DumpLogsOnFailure bool + // Disables dumping cluster log from master and nodes after all tests. + DisableLogDump bool + // Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty. 
+ LogexporterGCSPath string + // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. + FeatureGates map[string]bool + // Node e2e specific test context + NodeTestContextType + + // The DNS Domain of the cluster. + ClusterDNSDomain string + + // The configuration of NodeKiller. + NodeKiller NodeKillerConfig + + // The Default IP Family of the cluster ("ipv4" or "ipv6") + IPFamily string + + // NonblockingTaints is the comma-delimeted string given by the user to specify taints which should not stop the test framework from running tests. + NonblockingTaints string + + // ProgressReportURL is the URL which progress updates will be posted to as tests complete. If empty, no updates are sent. + ProgressReportURL string + + // SriovdpConfigMapFile is the path to the ConfigMap to configure the SRIOV device plugin on this host. + SriovdpConfigMapFile string + + // SpecSummaryOutput is the file to write ginkgo.SpecSummary objects to as tests complete. Useful for debugging and test introspection. + SpecSummaryOutput string + + // DockerConfigFile is a file that contains credentials which can be used to pull images from certain private registries, needed for a test. + DockerConfigFile string + + // SnapshotControllerPodName is the name used for identifying the snapshot controller pod. + SnapshotControllerPodName string + + // SnapshotControllerHTTPPort the port used for communicating with the snapshot controller HTTP endpoint. + SnapshotControllerHTTPPort int + + // RequireDevices makes mandatory on the environment on which tests are run 1+ devices exposed through device plugins. + // With this enabled The e2e tests requiring devices for their operation can assume that if devices aren't reported, the test can fail + RequireDevices bool +} + +// NodeKillerConfig describes configuration of NodeKiller -- a utility to +// simulate node failures. 
+type NodeKillerConfig struct { + // Enabled determines whether NodeKill should do anything at all. + // All other options below are ignored if Enabled = false. + Enabled bool + // FailureRatio is a percentage of all nodes that could fail simultinously. + FailureRatio float64 + // Interval is time between node failures. + Interval time.Duration + // JitterFactor is factor used to jitter node failures. + // Node will be killed between [Interval, Interval + (1.0 + JitterFactor)]. + JitterFactor float64 + // SimulatedDowntime is a duration between node is killed and recreated. + SimulatedDowntime time.Duration + // NodeKillerStopCh is a channel that is used to notify NodeKiller to stop killing nodes. + NodeKillerStopCh chan struct{} +} + +// NodeTestContextType is part of TestContextType, it is shared by all node e2e test. +type NodeTestContextType struct { + // NodeE2E indicates whether it is running node e2e. + NodeE2E bool + // Name of the node to run tests on. + NodeName string + // NodeConformance indicates whether the test is running in node conformance mode. + NodeConformance bool + // PrepullImages indicates whether node e2e framework should prepull images. + PrepullImages bool + // TODO(pingcap): add this if needed + // KubeletConfig is the kubelet configuration the test is running against. + // KubeletConfig kubeletconfig.KubeletConfiguration + // ImageDescription is the description of the image on which the test is running. + ImageDescription string + // RuntimeConfig is a map of API server runtime configuration values. + RuntimeConfig map[string]string + // SystemSpecName is the name of the system spec (e.g., gke) that's used in + // the node e2e test. If empty, the default one (system.DefaultSpec) is + // used. The system specs are in test/e2e_node/system/specs/. + SystemSpecName string + // RestartKubelet restarts Kubelet unit when the process is killed. + RestartKubelet bool + // ExtraEnvs is a map of environment names to values. 
+ ExtraEnvs map[string]string +} + +// CloudConfig holds the cloud configuration for e2e test suites. +type CloudConfig struct { + APIEndpoint string + ProjectID string + Zone string // for multizone tests, arbitrarily chosen zone + Zones []string // for multizone tests, use this set of zones instead of querying the cloud provider. Must include Zone. + Region string + MultiZone bool + MultiMaster bool + Cluster string + MasterName string + NodeInstanceGroup string // comma-delimited list of groups' names + NumNodes int + ClusterIPRange string + ClusterTag string + Network string + ConfigFile string // for azure and openstack + NodeTag string + MasterTag string + + Provider ProviderInterface +} + +// TestContext should be used by all tests to access common context data. +var TestContext TestContextType + +// ClusterIsIPv6 returns true if the cluster is IPv6 +func (tc TestContextType) ClusterIsIPv6() bool { + return tc.IPFamily == "ipv6" +} + +// RegisterCommonFlags registers flags common to all e2e test suites. +// The flag set can be flag.CommandLine (if desired) or a custom +// flag set that then gets passed to viperconfig.ViperizeFlags. +// +// The other Register*Flags methods below can be used to add more +// test-specific flags. However, those settings then get added +// regardless whether the test is actually in the test suite. +// +// For tests that have been converted to registering their +// options themselves, copy flags from test/e2e/framework/config +// as shown in HandleFlags. 
func RegisterCommonFlags(flags *flag.FlagSet) {
	// Turn on verbose by default to get spec names
	config.DefaultReporterConfig.Verbose = true

	// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
	config.GinkgoConfig.EmitSpecProgress = true

	// Randomize specs as well as suites
	config.GinkgoConfig.RandomizeAllSpecs = true

	// Resource-usage, log and metrics gathering options.
	flags.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of system all add-ons in (some) e2e tests, if set to 'master' framework will be monitoring master node only, if set to 'none' of 'false' monitoring will be turned off.")
	flags.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
	flags.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.")
	flags.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
	// NOTE(review): "framwork" below is a typo in the user-visible help text;
	// kept byte-identical here (vendored from upstream k8s), fix upstream if desired.
	flags.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after the whole test suite completes.")
	flags.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
	flags.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
	flags.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
	flags.BoolVar(&TestContext.DisableLogDump, "disable-log-dump", false, "If set to true, logs from master and nodes won't be gathered after test run.")
	flags.StringVar(&TestContext.LogexporterGCSPath, "logexporter-gcs-path", "", "Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.")
	// Namespace lifecycle and node-readiness tolerances.
	flags.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. It is only designed to make debugging easier, DO NOT turn it off by default.")
	flags.BoolVar(&TestContext.DeleteNamespaceOnFailure, "delete-namespace-on-failure", true, "If true, framework will delete test namespace on failure. Used only during test debugging.")
	flags.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If greater than zero, framework will allow for that many non-ready nodes when checking for all ready nodes. If -1, no waiting will be performed for ready nodes or daemonset pods.")

	// Connection and reporting options.
	flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost))
	flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
	flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
	flags.Var(cliflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
	// Container-runtime options (defaults assume dockershim, per upstream v1.23).
	flags.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
	flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
	flags.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
	flags.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
	flags.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.")
	flags.BoolVar(&TestContext.DumpSystemdJournal, "dump-systemd-journal", false, "Whether to dump the full systemd journal.")
	flags.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
	flags.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
	// Expect the test suite to work with both the new and legacy non-blocking control plane taints by default
	flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests. The default taint 'node-role.kubernetes.io/master' is DEPRECATED and will be removed from the list in a future release.")

	flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for runnning tests.")
	flags.BoolVar(&TestContext.ListConformanceTests, "list-conformance-tests", false, "If true, will show list of conformance tests.")
	flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")

	flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
	flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
	flags.StringVar(&TestContext.DockerConfigFile, "docker-config-file", "", "A file that contains credentials which can be used to pull images from certain private registries, needed for a test.")

	flags.StringVar(&TestContext.SnapshotControllerPodName, "snapshot-controller-pod-name", "", "The pod name to use for identifying the snapshot controller in the kube-system namespace.")
	flags.IntVar(&TestContext.SnapshotControllerHTTPPort, "snapshot-controller-http-port", 0, "The port to use for snapshot controller HTTP communication.")
}

// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
func RegisterClusterFlags(flags *flag.FlagSet) {
	flags.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
	flags.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
	flags.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
	flags.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")

	flags.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
	flags.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
	flags.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
	// NOTE: Node E2E tests have this flag defined as well, but true by default.
	// If this becomes true as well, they should be refactored into RegisterCommonFlags.
	flags.BoolVar(&TestContext.PrepullImages, "prepull-images", false, "If true, prepull images so image pull failures do not cause test failures.")
	flags.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, skeleton (the fallback if not set), etc.)")
	flags.StringVar(&TestContext.Tooling, "tooling", "", "The tooling in use (kops, gke, etc.)")
	flags.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
	flags.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
	flags.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, ubuntu, gci, coreos, or custom).")
	flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, windows, or custom), which determines how specific tests are implemented.")
	flags.StringVar(&TestContext.NodeOSArch, "node-os-arch", "amd64", "The OS architecture of cluster VM instances (amd64, arm64, or custom).")
	flags.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")

	// TODO: Flags per provider? Rename gce-project/gce-zone?
	cloudConfig := &TestContext.CloudConfig
	flags.StringVar(&cloudConfig.MasterName, "kube-master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
	flags.StringVar(&cloudConfig.APIEndpoint, "gce-api-endpoint", "", "The GCE APIEndpoint being used, if applicable")
	flags.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
	flags.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
	flags.Var(cliflag.NewStringSlice(&cloudConfig.Zones), "gce-zones", "The set of zones to use in a multi-zone test instead of querying the cloud provider.")
	flags.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
	flags.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
	flags.BoolVar(&cloudConfig.MultiMaster, "gce-multimaster", false, "If true, the underlying GCE/GKE cluster is assumed to be multi-master.")
	flags.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
	flags.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
	flags.StringVar(&cloudConfig.Network, "network", "e2e", "The cloud provider network for this e2e cluster.")
	flags.IntVar(&cloudConfig.NumNodes, "num-nodes", DefaultNumNodes, fmt.Sprintf("Number of nodes in the cluster. If the default value of '%q' is used the number of schedulable nodes is auto-detected.", DefaultNumNodes))
	flags.StringVar(&cloudConfig.ClusterIPRange, "cluster-ip-range", "10.64.0.0/14", "A CIDR notation IP range from which to assign IPs in the cluster.")
	flags.StringVar(&cloudConfig.NodeTag, "node-tag", "", "Network tags used on node instances. Valid only for gce, gke")
	flags.StringVar(&cloudConfig.MasterTag, "master-tag", "", "Network tags used on master instances. Valid only for gce, gke")

	flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
	flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
	flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.")
	flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
	flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
	flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
	flags.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
	flags.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
	flags.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
	flags.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")

	// NodeKiller options for chaos-style node failure injection.
	nodeKiller := &TestContext.NodeKiller
	flags.BoolVar(&nodeKiller.Enabled, "node-killer", false, "Whether NodeKiller should kill any nodes.")
	flags.Float64Var(&nodeKiller.FailureRatio, "node-killer-failure-ratio", 0.01, "Percentage of nodes to be killed")
	flags.DurationVar(&nodeKiller.Interval, "node-killer-interval", 1*time.Minute, "Time between node failures.")
	flags.Float64Var(&nodeKiller.JitterFactor, "node-killer-jitter-factor", 60, "Factor used to jitter node failures.")
	flags.DurationVar(&nodeKiller.SimulatedDowntime, "node-killer-simulated-downtime", 10*time.Minute, "A delay between node death and recreation")
}

// createKubeConfig converts a rest client config into a clientcmdapi.Config
// containing a single cluster, user and context (named "cluster", "user" and
// "context"), with the context set as current. File-based credentials
// (cert/key/CA paths) take precedence over their in-memory counterparts.
func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
	clusterNick := "cluster"
	userNick := "user"
	contextNick := "context"

	configCmd := clientcmdapi.NewConfig()

	credentials := clientcmdapi.NewAuthInfo()
	credentials.Token = clientCfg.BearerToken
	credentials.TokenFile = clientCfg.BearerTokenFile
	credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
	if len(credentials.ClientCertificate) == 0 {
		// No cert file on disk: fall back to in-memory cert data.
		credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
	}
	credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
	if len(credentials.ClientKey) == 0 {
		credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
	}
	configCmd.AuthInfos[userNick] = credentials

	cluster := clientcmdapi.NewCluster()
	cluster.Server = clientCfg.Host
	cluster.CertificateAuthority = clientCfg.CAFile
	if len(cluster.CertificateAuthority) == 0 {
		cluster.CertificateAuthorityData = clientCfg.CAData
	}
	cluster.InsecureSkipTLSVerify = clientCfg.Insecure
	configCmd.Clusters[clusterNick] = cluster

	context := clientcmdapi.NewContext()
	context.Cluster = clusterNick
	context.AuthInfo = userNick
	configCmd.Contexts[contextNick] = context
	configCmd.CurrentContext = contextNick

	return configCmd
}

// GenerateSecureToken returns a string of length tokenLen, consisting
// of random bytes encoded as base64 for use as a Bearer Token during
// communication with an APIServer
func GenerateSecureToken(tokenLen int) (string, error) {
	// Number of bytes to be tokenLen when base64 encoded.
	tokenSize := math.Ceil(float64(tokenLen) * 6 / 8)
	rawToken := make([]byte, int(tokenSize))
	if _, err := rand.Read(rawToken); err != nil {
		return "", err
	}
	encoded := base64.RawURLEncoding.EncodeToString(rawToken)
	token := encoded[:tokenLen]
	return token, nil
}

// AfterReadingAllFlags makes changes to the context after all flags
// have been read.
func AfterReadingAllFlags(t *TestContextType) {
	// Only set a default host if one won't be supplied via kubeconfig
	if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
		// Check if we can use the in-cluster config
		if clusterConfig, err := restclient.InClusterConfig(); err == nil {
			if tempFile, err := ioutil.TempFile(os.TempDir(), "kubeconfig-"); err == nil {
				kubeConfig := createKubeConfig(clusterConfig)
				// NOTE(review): the WriteToFile error is ignored here; a write
				// failure would surface later as an unusable kubeconfig path.
				clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
				t.KubeConfig = tempFile.Name()
				klog.V(4).Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
			}
		}
		if len(t.KubeConfig) == 0 {
			klog.Warningf("Unable to find in-cluster config, using default host : %s", defaultHost)
			t.Host = defaultHost
		}
	}
	if len(t.BearerToken) == 0 {
		var err error
		t.BearerToken, err = GenerateSecureToken(16)
		if err != nil {
			klog.Fatalf("Failed to generate bearer token: %v", err)
		}
	}

	// Allow 1% of nodes to be unready (statistically) - relevant for large clusters.
	if t.AllowedNotReadyNodes == 0 {
		t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
	}

	klog.V(4).Infof("Tolerating taints %q when considering if nodes are ready", TestContext.NonblockingTaints)

	// Make sure that all test runs have a valid TestContext.CloudConfig.Provider.
	// TODO: whether and how long this code is needed is getting discussed
	// in https://github.com/kubernetes/kubernetes/issues/70194.
	if TestContext.Provider == "" {
		// Some users of the e2e.test binary pass --provider=.
		// We need to support that, changing it would break those usages.
		log.Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
		TestContext.Provider = "skeleton"
	}

	var err error
	TestContext.CloudConfig.Provider, err = SetupProviderConfig(TestContext.Provider)
	if err != nil {
		if os.IsNotExist(errors.Unwrap(err)) {
			// Provide a more helpful error message when the provider is unknown.
			var providers []string
			for _, name := range GetProviders() {
				// The empty string is accepted, but looks odd in the output below unless we quote it.
				if name == "" {
					name = `""`
				}
				providers = append(providers, name)
			}
			sort.Strings(providers)
			klog.Errorf("Unknown provider %q. The following providers are known: %v", TestContext.Provider, strings.Join(providers, " "))
		} else {
			klog.Errorf("Failed to setup provider config for %q: %v", TestContext.Provider, err)
		}
		os.Exit(1)
	}
}
diff --git a/tests/third_party/k8s/testfiles/testfiles.go b/tests/third_party/k8s/testfiles/testfiles.go
new file mode 100644
index 00000000000..88bad3d81a4
--- /dev/null
+++ b/tests/third_party/k8s/testfiles/testfiles.go
@@ -0,0 +1,197 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testfiles provides a wrapper around various optional ways +// of retrieving additional files needed during a test run: +// - builtin bindata +// - filesystem access +// +// Because it is a is self-contained package, it can be used by +// test/e2e/framework and test/e2e/manifest without creating +// a circular dependency. + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/testfiles/testfiles.go @v + +package testfiles + +import ( + "embed" + "errors" + "fmt" + "io/fs" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" +) + +var filesources []FileSource + +// AddFileSource registers another provider for files that may be +// needed at runtime. Should be called during initialization of a test +// binary. +func AddFileSource(filesource FileSource) { + filesources = append(filesources, filesource) +} + +// FileSource implements one way of retrieving test file content. For +// example, one file source could read from the original source code +// file tree, another from bindata compiled into a test executable. +type FileSource interface { + // ReadTestFile retrieves the content of a file that gets maintained + // alongside a test's source code. Files are identified by the + // relative path inside the repository containing the tests, for + // example "cluster/gce/upgrade.sh" inside kubernetes/kubernetes. + // + // When the file is not found, a nil slice is returned. An error is + // returned for all fatal errors. 
+ ReadTestFile(filePath string) ([]byte, error) + + // DescribeFiles returns a multi-line description of which + // files are available via this source. It is meant to be + // used as part of the error message when a file cannot be + // found. + DescribeFiles() string +} + +// Read tries to retrieve the desired file content from +// one of the registered file sources. +func Read(filePath string) ([]byte, error) { + if len(filesources) == 0 { + return nil, fmt.Errorf("no file sources registered (yet?), cannot retrieve test file %s", filePath) + } + for _, filesource := range filesources { + data, err := filesource.ReadTestFile(filePath) + if err != nil { + return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err) + } + if data != nil { + return data, nil + } + } + // Here we try to generate an error that points test authors + // or users in the right direction for resolving the problem. + error := fmt.Sprintf("Test file %q was not found.\n", filePath) + for _, filesource := range filesources { + error += filesource.DescribeFiles() + error += "\n" + } + return nil, errors.New(error) +} + +// Exists checks whether a file could be read. Unexpected errors +// are handled by calling the fail function, which then should +// abort the current test. +func Exists(filePath string) (bool, error) { + for _, filesource := range filesources { + data, err := filesource.ReadTestFile(filePath) + if err != nil { + return false, err + } + if data != nil { + return true, nil + } + } + return false, nil +} + +// RootFileSource looks for files relative to a root directory. +type RootFileSource struct { + Root string +} + +// ReadTestFile looks for the file relative to the configured +// root directory. If the path is already absolute, for example +// in a test that has its own method of determining where +// files are, then the path will be used directly. 
+func (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) { + var fullPath string + if path.IsAbs(filePath) { + fullPath = filePath + } else { + fullPath = filepath.Join(r.Root, filePath) + } + data, err := ioutil.ReadFile(fullPath) + if os.IsNotExist(err) { + // Not an error (yet), some other provider may have the file. + return nil, nil + } + return data, err +} + +// DescribeFiles explains that it looks for files inside a certain +// root directory. +func (r RootFileSource) DescribeFiles() string { + description := fmt.Sprintf("Test files are expected in %q", r.Root) + if !path.IsAbs(r.Root) { + // The default in test_context.go is the relative path + // ../../, which doesn't really help locating the + // actual location. Therefore we add also the absolute + // path if necessary. + abs, err := filepath.Abs(r.Root) + if err == nil { + description += fmt.Sprintf(" = %q", abs) + } + } + description += "." + return description +} + +// EmbeddedFileSource handles files stored in a package generated with bindata. +type EmbeddedFileSource struct { + EmbeddedFS embed.FS + Root string + fileList []string +} + +// ReadTestFile looks for an embedded file with the given path. +func (e EmbeddedFileSource) ReadTestFile(filepath string) ([]byte, error) { + relativePath := strings.TrimPrefix(filepath, fmt.Sprintf("%s/", e.Root)) + + b, err := e.EmbeddedFS.ReadFile(relativePath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + return nil, err + } + + return b, nil +} + +// DescribeFiles explains that it is looking inside an embedded filesystem +func (e EmbeddedFileSource) DescribeFiles() string { + var lines []string + lines = append(lines, "The following files are embedded into the test executable:") + + if len(e.fileList) == 0 { + e.populateFileList() + } + lines = append(lines, e.fileList...) 
+ + return strings.Join(lines, "\n\t") +} + +func (e *EmbeddedFileSource) populateFileList() { + fs.WalkDir(e.EmbeddedFS, ".", func(path string, d fs.DirEntry, err error) error { + if !d.IsDir() { + e.fileList = append(e.fileList, filepath.Join(e.Root, path)) + } + + return nil + }) +} diff --git a/tests/third_party/k8s/timeouts.go b/tests/third_party/k8s/timeouts.go new file mode 100644 index 00000000000..b9ac8378530 --- /dev/null +++ b/tests/third_party/k8s/timeouts.go @@ -0,0 +1,108 @@ +/* +Copyright 2020 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this file is copied from k8s.io/kubernetes/test/e2e/framework/timeouts.go @v1.23.17 + +package k8s + +import "time" + +const ( + // Default timeouts to be used in TimeoutContext + podStartTimeout = 5 * time.Minute + podStartShortTimeout = 2 * time.Minute + podStartSlowTimeout = 15 * time.Minute + podDeleteTimeout = 5 * time.Minute + claimProvisionTimeout = 5 * time.Minute + claimProvisionShortTimeout = 1 * time.Minute + claimBoundTimeout = 3 * time.Minute + pvReclaimTimeout = 3 * time.Minute + pvBoundTimeout = 3 * time.Minute + pvCreateTimeout = 3 * time.Minute + pvDeleteTimeout = 3 * time.Minute + pvDeleteSlowTimeout = 20 * time.Minute + snapshotCreateTimeout = 5 * time.Minute + snapshotDeleteTimeout = 5 * time.Minute + snapshotControllerMetricsTimeout = 5 * time.Minute +) + +// TimeoutContext contains timeout settings for several actions. 
type TimeoutContext struct {
	// PodStart is how long to wait for the pod to be started.
	PodStart time.Duration

	// PodStartShort is same as `PodStart`, but shorter.
	// Use it in a case-by-case basis, mostly when you are sure pod start will not be delayed.
	PodStartShort time.Duration

	// PodStartSlow is same as `PodStart`, but longer.
	// Use it in a case-by-case basis, mostly when you are sure pod start will take longer than usual.
	PodStartSlow time.Duration

	// PodDelete is how long to wait for the pod to be deleted.
	PodDelete time.Duration

	// ClaimProvision is how long claims have to become dynamically provisioned.
	ClaimProvision time.Duration

	// ClaimProvisionShort is the same as `ClaimProvision`, but shorter.
	ClaimProvisionShort time.Duration

	// ClaimBound is how long claims have to become bound.
	ClaimBound time.Duration

	// PVReclaim is how long PVs have to become reclaimed.
	PVReclaim time.Duration

	// PVBound is how long PVs have to become bound.
	PVBound time.Duration

	// PVCreate is how long PVs have to be created.
	PVCreate time.Duration

	// PVDelete is how long PVs have to become deleted.
	PVDelete time.Duration

	// PVDeleteSlow is the same as PVDelete, but slower.
	PVDeleteSlow time.Duration

	// SnapshotCreate is how long for snapshot to create snapshotContent.
	SnapshotCreate time.Duration

	// SnapshotDelete is how long for snapshot to delete snapshotContent.
	SnapshotDelete time.Duration

	// SnapshotControllerMetrics is how long to wait for snapshot controller metrics.
	SnapshotControllerMetrics time.Duration
}

// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
// Each field mirrors the corresponding package-level default constant above.
func NewTimeoutContextWithDefaults() *TimeoutContext {
	return &TimeoutContext{
		PodStart:                  podStartTimeout,
		PodStartShort:             podStartShortTimeout,
		PodStartSlow:              podStartSlowTimeout,
		PodDelete:                 podDeleteTimeout,
		ClaimProvision:            claimProvisionTimeout,
		ClaimProvisionShort:       claimProvisionShortTimeout,
		ClaimBound:                claimBoundTimeout,
		PVReclaim:                 pvReclaimTimeout,
		PVBound:                   pvBoundTimeout,
		PVCreate:                  pvCreateTimeout,
		PVDelete:                  pvDeleteTimeout,
		PVDeleteSlow:              pvDeleteSlowTimeout,
		SnapshotCreate:            snapshotCreateTimeout,
		SnapshotDelete:            snapshotDeleteTimeout,
		SnapshotControllerMetrics: snapshotControllerMetricsTimeout,
	}
}
diff --git a/tests/third_party/k8s/util.go b/tests/third_party/k8s/util.go
new file mode 100644
index 00000000000..7e8115bd152
--- /dev/null
+++ b/tests/third_party/k8s/util.go
@@ -0,0 +1,981 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// this file is copied (and modified) from k8s.io/kubernetes/test/e2e/framework/util.go @v1.23.17

package k8s

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"net/url"
	"os"
	"os/exec"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	gomegatypes "github.com/onsi/gomega/types"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/component-base/featuregate"
	uexec "k8s.io/utils/exec"

	e2ekubectl "github.com/pingcap/tidb-operator/tests/third_party/k8s/kubectl"
	"github.com/pingcap/tidb-operator/tests/third_party/k8s/log"
	e2enode "github.com/pingcap/tidb-operator/tests/third_party/k8s/node"
	e2epod "github.com/pingcap/tidb-operator/tests/third_party/k8s/pod"
)

const (
	// Minimal number of nodes for the cluster to be considered large.
	largeClusterThreshold = 100

	// AllContainers specifies that all containers be visited.
	// It is the bitwise union of the three ContainerType flags declared below.
	// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
	AllContainers = InitContainers | Containers | EphemeralContainers
)

// DEPRECATED constants. Use the timeouts in framework.Framework instead.
const (
	// PodListTimeout is how long to wait for the pod to be listable.
	PodListTimeout = time.Minute

	// PodStartTimeout is how long to wait for the pod to be started.
	PodStartTimeout = 5 * time.Minute

	// PodStartShortTimeout is same as `PodStartTimeout` to wait for the pod to be started, but shorter.
	// Use it case by case when we are sure pod start will not be delayed.
	// minutes by slow docker pulls or something else.
	PodStartShortTimeout = 2 * time.Minute

	// PodDeleteTimeout is how long to wait for a pod to be deleted.
	PodDeleteTimeout = 5 * time.Minute

	// PodGetTimeout is how long to wait for a pod to be got.
	PodGetTimeout = 2 * time.Minute

	// PodEventTimeout is how much we wait for a pod event to occur.
	PodEventTimeout = 2 * time.Minute

	// ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
	ServiceStartTimeout = 3 * time.Minute

	// Poll is how often to Poll pods, nodes and claims.
	Poll = 2 * time.Second

	// PollShortTimeout is the short timeout value in polling.
	PollShortTimeout = 1 * time.Minute

	// ServiceAccountProvisionTimeout is how long to wait for a service account to be provisioned.
	// service accounts are provisioned after namespace creation
	// a service account is required to support pod creation in a namespace as part of admission control
	ServiceAccountProvisionTimeout = 2 * time.Minute

	// SingleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	SingleCallTimeout = 5 * time.Minute

	// NodeReadyInitialTimeout is how long nodes have to be "ready" when a test begins. They should already
	// be "ready" before the test starts, so this is small.
	NodeReadyInitialTimeout = 20 * time.Second

	// PodReadyBeforeTimeout is how long pods have to be "ready" when a test begins.
	PodReadyBeforeTimeout = 5 * time.Minute

	// ClaimProvisionShortTimeout is same as `ClaimProvisionTimeout` to wait for claim to be dynamically provisioned, but shorter.
	// Use it case by case when we are sure this timeout is enough.
	ClaimProvisionShortTimeout = 1 * time.Minute

	// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
	ClaimProvisionTimeout = 5 * time.Minute

	// RestartNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is restarted before
	// the test is considered failed.
	RestartNodeReadyAgainTimeout = 5 * time.Minute

	// RestartPodReadyAgainTimeout is how long a pod is allowed to become "running" and "ready" after a node
	// restart before test is considered failed.
	RestartPodReadyAgainTimeout = 5 * time.Minute

	// SnapshotCreateTimeout is how long for snapshot to create snapshotContent.
	SnapshotCreateTimeout = 5 * time.Minute

	// SnapshotDeleteTimeout is how long for snapshot to delete snapshotContent.
	SnapshotDeleteTimeout = 5 * time.Minute
)

var (

	// ProvidersWithSSH are those providers where each node is accessible with SSH
	ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
)

// RunID is a unique identifier of the e2e run.
// Beware that this ID is not the same for all tests in the e2e run, because each Ginkgo node creates it separately.
var RunID = uuid.NewUUID()

// CreateTestingNSFn is a func that is responsible for creating namespace used for executing e2e tests.
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)

// APIAddress returns an address of an instance.
// It parses TestContext.Host as a URL and returns just the hostname part.
func APIAddress() string {
	instanceURL, err := url.Parse(TestContext.Host)
	ExpectNoError(err)
	return instanceURL.Hostname()
}

// ProviderIs returns true if the provider is included in the providers. Otherwise false.
// Comparison is case-insensitive.
func ProviderIs(providers ...string) bool {
	for _, provider := range providers {
		if strings.EqualFold(provider, TestContext.Provider) {
			return true
		}
	}
	return false
}

// MasterOSDistroIs returns true if the master OS distro is included in the supportedMasterOsDistros. Otherwise false.
+func MasterOSDistroIs(supportedMasterOsDistros ...string) bool { + for _, distro := range supportedMasterOsDistros { + if strings.EqualFold(distro, TestContext.MasterOSDistro) { + return true + } + } + return false +} + +// NodeOSDistroIs returns true if the node OS distro is included in the supportedNodeOsDistros. Otherwise false. +func NodeOSDistroIs(supportedNodeOsDistros ...string) bool { + for _, distro := range supportedNodeOsDistros { + if strings.EqualFold(distro, TestContext.NodeOSDistro) { + return true + } + } + return false +} + +// NodeOSArchIs returns true if the node OS arch is included in the supportedNodeOsArchs. Otherwise false. +func NodeOSArchIs(supportedNodeOsArchs ...string) bool { + for _, arch := range supportedNodeOsArchs { + if strings.EqualFold(arch, TestContext.NodeOSArch) { + return true + } + } + return false +} + +// DeleteNamespaces deletes all namespaces that match the given delete and skip filters. +// Filter is by simple strings.Contains; first skip filter, then delete filter. +// Returns the list of deleted namespaces or an error. 
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
	ginkgo.By("Deleting namespaces")
	nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
	ExpectNoError(err, "Failed to get namespace list")
	var deleted []string
	var wg sync.WaitGroup
OUTER:
	for _, item := range nsList.Items {
		// Skip filter wins over delete filter: a namespace matching both is kept.
		for _, pattern := range skipFilter {
			if strings.Contains(item.Name, pattern) {
				continue OUTER
			}
		}
		if deleteFilter != nil {
			var shouldDelete bool
			for _, pattern := range deleteFilter {
				if strings.Contains(item.Name, pattern) {
					shouldDelete = true
					break
				}
			}
			if !shouldDelete {
				continue OUTER
			}
		}
		wg.Add(1)
		deleted = append(deleted, item.Name)
		// Deletions are issued concurrently, one goroutine per namespace;
		// wg.Wait below only waits for the API calls, not for the namespaces
		// to actually vanish (see WaitForNamespacesDeleted for that).
		go func(nsName string) {
			defer wg.Done()
			defer ginkgo.GinkgoRecover()
			gomega.Expect(c.CoreV1().Namespaces().Delete(context.TODO(), nsName, metav1.DeleteOptions{})).To(gomega.Succeed())
			log.Logf("namespace : %v api call to delete is complete ", nsName)
		}(item.Name)
	}
	wg.Wait()
	return deleted, nil
}

// WaitForNamespacesDeleted waits for the namespaces to be deleted.
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
	ginkgo.By(fmt.Sprintf("Waiting for namespaces %+v to vanish", namespaces))
	nsMap := map[string]bool{}
	for _, ns := range namespaces {
		nsMap[ns] = true
	}
	// Now poll until all namespaces have been eradicated.
	return wait.Poll(2*time.Second, timeout,
		func() (bool, error) {
			nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
			if err != nil {
				return false, err
			}
			// If any of the target namespaces is still listed, keep waiting.
			for _, item := range nsList.Items {
				if _, ok := nsMap[item.Name]; ok {
					return false, nil
				}
			}
			return true, nil
		})
}

// waitForConfigMapInNamespace watches for the named ConfigMap to appear in ns,
// returning nil once it is Added/Modified, a NotFound error if it is Deleted,
// or the watch error (e.g. timeout) otherwise.
func waitForConfigMapInNamespace(c clientset.Interface, ns, name string, timeout time.Duration) error {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
			options.FieldSelector = fieldSelector
			return c.CoreV1().ConfigMaps(ns).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
			options.FieldSelector = fieldSelector
			return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), options)
		},
	}
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.UntilWithSync(ctx, lw, &v1.ConfigMap{}, nil, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Deleted:
			return false, apierrors.NewNotFound(schema.GroupResource{Resource: "configmaps"}, name)
		case watch.Added, watch.Modified:
			return true, nil
		}
		return false, nil
	})
	return err
}

// waitForServiceAccountInNamespace watches for the named ServiceAccount to
// appear in ns; same success/failure semantics as waitForConfigMapInNamespace.
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", serviceAccountName).String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
			options.FieldSelector = fieldSelector
			return c.CoreV1().ServiceAccounts(ns).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
			options.FieldSelector = fieldSelector
			return c.CoreV1().ServiceAccounts(ns).Watch(context.TODO(), options)
		},
	}
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.UntilWithSync(ctx, lw, &v1.ServiceAccount{}, nil, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Deleted:
			return false, apierrors.NewNotFound(schema.GroupResource{Resource: "serviceaccounts"}, serviceAccountName)
		case watch.Added, watch.Modified:
			return true, nil
		}
		return false, nil
	})
	return err
}

// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned
func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error {
	return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}

// WaitForKubeRootCAInNamespace waits for the configmap kube-root-ca.crt containing the service account
// CA trust bundle to be provisioned in the specified namespace so that pods do not have to retry mounting
// the config map (which creates noise that hides other issues in the Kubelet).
func WaitForKubeRootCAInNamespace(c clientset.Interface, namespace string) error {
	return waitForConfigMapInNamespace(c, namespace, "kube-root-ca.crt", ServiceAccountProvisionTimeout)
}

// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
+func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) { + if labels == nil { + labels = map[string]string{} + } + labels["e2e-run"] = string(RunID) + + // We don't use ObjectMeta.GenerateName feature, as in case of API call + // failure we don't know whether the namespace was created and what is its + // name. + name := fmt.Sprintf("%v-%v", baseName, RandomSuffix()) + + namespaceObj := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "", + Labels: labels, + }, + Status: v1.NamespaceStatus{}, + } + // Be robust about making the namespace creation call. + var got *v1.Namespace + if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { + var err error + got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{}) + if err != nil { + if apierrors.IsAlreadyExists(err) { + // regenerate on conflict + log.Logf("Namespace name %q was already taken, generate a new name and retry", namespaceObj.Name) + namespaceObj.Name = fmt.Sprintf("%v-%v", baseName, RandomSuffix()) + } else { + log.Logf("Unexpected error while creating namespace: %v", err) + } + return false, nil + } + return true, nil + }); err != nil { + return nil, err + } + + if TestContext.VerifyServiceAccount { + if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil { + // Even if we fail to create serviceAccount in the namespace, + // we have successfully create a namespace. + // So, return the created namespace. + return got, err + } + } + return got, nil +} + +// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state +// and waits until they are finally deleted. It ignores namespace skip. 
+func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { + // TODO: Since we don't have support for bulk resource deletion in the API, + // while deleting a namespace we are deleting all objects from that namespace + // one by one (one deletion == one API call). This basically exposes us to + // throttling - currently controller-manager has a limit of max 20 QPS. + // Once #10217 is implemented and used in namespace-controller, deleting all + // object from a given namespace should be much faster and we will be able + // to lower this timeout. + // However, now Density test is producing ~26000 events and Load capacity test + // is producing ~35000 events, thus assuming there are no other requests it will + // take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60 + // minutes to avoid any timeouts here. + timeout := 60 * time.Minute + + log.Logf("Waiting for terminating namespaces to be deleted...") + for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { + namespaces, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + log.Logf("Listing namespaces failed: %v", err) + continue + } + terminating := 0 + for _, ns := range namespaces.Items { + if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip { + if ns.Status.Phase == v1.NamespaceActive { + return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name) + } + terminating++ + } + } + if terminating == 0 { + return nil + } + } + return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out") +} + +// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. 
+func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { + return wait.Poll(interval, timeout, func() (bool, error) { + log.Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) + list, err := c.CoreV1().Endpoints(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return false, err + } + + for _, e := range list.Items { + if e.Name == serviceName && countEndpointsNum(&e) == expectNum { + return true, nil + } + } + return false, nil + }) +} + +func countEndpointsNum(e *v1.Endpoints) int { + num := 0 + for _, sub := range e.Subsets { + num += len(sub.Addresses) + } + return num +} + +// restclientConfig returns a config holds the information needed to build connection to kubernetes clusters. +func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) { + log.Logf(">>> kubeConfig: %s", TestContext.KubeConfig) + if TestContext.KubeConfig == "" { + return nil, fmt.Errorf("KubeConfig must be specified to load client config") + } + c, err := clientcmd.LoadFromFile(TestContext.KubeConfig) + if err != nil { + return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error()) + } + if kubeContext != "" { + log.Logf(">>> kubeContext: %s", kubeContext) + c.CurrentContext = kubeContext + } + return c, nil +} + +// ClientConfigGetter is a func that returns getter to return a config. +type ClientConfigGetter func() (*restclient.Config, error) + +// LoadConfig returns a config for a rest client with the UserAgent set to include the current test name. 
+func LoadConfig() (config *restclient.Config, err error) { + defer func() { + if err == nil && config != nil { + testDesc := ginkgo.CurrentGinkgoTestDescription() + if len(testDesc.ComponentTexts) > 0 { + componentTexts := strings.Join(testDesc.ComponentTexts, " ") + config.UserAgent = fmt.Sprintf("%s -- %s", rest.DefaultKubernetesUserAgent(), componentTexts) + } + } + }() + + if TestContext.NodeE2E { + // This is a node e2e test, apply the node e2e configuration + return &restclient.Config{ + Host: TestContext.Host, + BearerToken: TestContext.BearerToken, + TLSClientConfig: restclient.TLSClientConfig{ + Insecure: true, + }, + }, nil + } + c, err := restclientConfig(TestContext.KubeContext) + if err != nil { + if TestContext.KubeConfig == "" { + return restclient.InClusterConfig() + } + return nil, err + } + // In case Host is not set in TestContext, sets it as + // CurrentContext Server for k8s API client to connect to. + if TestContext.Host == "" && c.Clusters != nil { + currentContext, ok := c.Clusters[c.CurrentContext] + if ok { + TestContext.Host = currentContext.Server + } + } + + return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig() +} + +// LoadClientset returns clientset for connecting to kubernetes clusters. +func LoadClientset() (*clientset.Clientset, error) { + config, err := LoadConfig() + if err != nil { + return nil, fmt.Errorf("error creating client: %v", err.Error()) + } + return clientset.NewForConfig(config) +} + +// RandomSuffix provides a random sequence to append to pods,services,rcs. +func RandomSuffix() string { + return strconv.Itoa(rand.Intn(10000)) +} + +// KubectlBuilder is used to build, customize and execute a kubectl Command. +// Add more functions to customize the builder as needed. +type KubectlBuilder struct { + cmd *exec.Cmd + timeout <-chan time.Time +} + +// NewKubectlCommand returns a KubectlBuilder for running kubectl. 
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
	b := new(KubectlBuilder)
	tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath, namespace)
	b.cmd = tk.KubectlCmd(args...)
	return b
}

// Exec runs the kubectl executable.
func (b KubectlBuilder) Exec() (string, error) {
	stdout, _, err := b.ExecWithFullOutput()
	return stdout, err
}

// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr.
func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) {
	var stdout, stderr bytes.Buffer
	cmd := b.cmd
	cmd.Stdout, cmd.Stderr = &stdout, &stderr

	log.Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
	if err := cmd.Start(); err != nil {
		return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err)
	}
	// Wait in a goroutine so we can race completion against b.timeout below.
	errCh := make(chan error, 1)
	go func() {
		errCh <- cmd.Wait()
	}()
	select {
	case err := <-errCh:
		if err != nil {
			var rc = 127
			if ee, ok := err.(*exec.ExitError); ok {
				rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
				log.Logf("rc: %d", rc)
			}
			return stdout.String(), stderr.String(), uexec.CodeExitError{
				Err:  fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err),
				Code: rc,
			}
		}
	case <-b.timeout:
		// NOTE(review): a nil timeout channel (the default from
		// NewKubectlCommand) blocks forever, so this branch only fires when a
		// caller has set b.timeout explicitly.
		b.cmd.Process.Kill()
		return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr)
	}
	log.Logf("stderr: %q", stderr.String())
	log.Logf("stdout: %q", stdout.String())
	return stdout.String(), stderr.String(), nil
}

// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(namespace string, args ...string) (string, error) {
	return NewKubectlCommand(namespace, args...).Exec()
}

// ContainerType signifies container type
type ContainerType int

const (
	// FeatureEphemeralContainers allows running an ephemeral container in pod namespaces to troubleshoot a running pod
	FeatureEphemeralContainers featuregate.Feature = "EphemeralContainers"
	// Containers is for normal containers
	// Note: the featuregate constant above occupies iota 0, so this flag is
	// 1<<1 = 2 (not 1 as in upstream pkg/api/v1/pod); the flags remain
	// distinct bits, which is all the mask logic relies on.
	Containers ContainerType = 1 << iota
	// InitContainers is for init containers
	InitContainers
	// EphemeralContainers is for ephemeral containers
	EphemeralContainers
)

// allFeatureEnabledContainers returns a ContainerType mask which includes all container
// types except for the ones guarded by feature gate.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
func allFeatureEnabledContainers() ContainerType {
	containerType := AllContainers
	if !utilfeature.DefaultFeatureGate.Enabled(FeatureEphemeralContainers) {
		containerType &= ^EphemeralContainers
	}
	return containerType
}

// ContainerVisitor is called with each container spec, and returns true
// if visiting should continue.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool)

// visitContainers invokes the visitor function with a pointer to every container
// spec in the given pod spec with type set in mask. If visitor returns false,
// visiting is short-circuited. visitContainers returns true if visiting completes,
// false if visiting was short-circuited.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
func visitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
	if mask&InitContainers != 0 {
		for i := range podSpec.InitContainers {
			if !visitor(&podSpec.InitContainers[i], InitContainers) {
				return false
			}
		}
	}
	if mask&Containers != 0 {
		for i := range podSpec.Containers {
			if !visitor(&podSpec.Containers[i], Containers) {
				return false
			}
		}
	}
	if mask&EphemeralContainers != 0 {
		for i := range podSpec.EphemeralContainers {
			// EphemeralContainer embeds EphemeralContainerCommon, which has the
			// same memory layout as Container, so the pointer cast is valid.
			if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
				return false
			}
		}
	}
	return true
}

// MatchContainerOutput creates a pod and waits for all it's containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func (f *Framework) MatchContainerOutput(
	pod *v1.Pod,
	containerName string,
	expectedOutput []string,
	matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
	// Default to the framework's namespace when the pod does not carry one.
	ns := pod.ObjectMeta.Namespace
	if ns == "" {
		ns = f.Namespace.Name
	}
	podClient := f.PodClientNS(ns)

	createdPod := podClient.Create(pod)
	defer func() {
		ginkgo.By("delete the pod")
		podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
	}()

	// Wait for client pod to complete.
	podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)

	// Grab its logs. Get host first.
	podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get pod status: %v", err)
	}

	if podErr != nil {
		// Pod failed. Dump all logs from all containers to see what's wrong
		_ = visitContainers(&podStatus.Spec, allFeatureEnabledContainers(), func(c *v1.Container, containerType ContainerType) bool {
			logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
			if err != nil {
				log.Logf("Failed to get logs from node %q pod %q container %q: %v",
					podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
			} else {
				log.Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, c.Name, logs)
			}
			return true
		})
		return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
	}

	// NOTE(review): err is nil on this path (checked above), so the trailing
	// %v always prints <nil>; kept as-is to match upstream output.
	log.Logf("Trying to get logs from node %s pod %s container %s: %v",
		podStatus.Spec.NodeName, podStatus.Name, containerName, err)

	// Sometimes the actual containers take a second to get started, try to get logs for 60s
	logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
	if err != nil {
		log.Logf("Failed to get logs from node %q pod %q container %q. %v",
			podStatus.Spec.NodeName, podStatus.Name, containerName, err)
		return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
	}

	// Every expected string must match; the first miss fails the call.
	for _, expected := range expectedOutput {
		m := matcher(expected)
		matches, err := m.Match(logs)
		if err != nil {
			return fmt.Errorf("expected %q in container output: %v", expected, err)
		} else if !matches {
			return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
		}
	}

	return nil
}

// EventsLister is a func that lists events.
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)

// dumpEventsInNamespace dumps events in the given namespace.
func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
	ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
	events, err := eventsLister(metav1.ListOptions{}, namespace)
	ExpectNoError(err, "failed to list events in namespace %q", namespace)

	ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items)))
	// Sort events by their first timestamp
	sortedEvents := events.Items
	if len(sortedEvents) > 1 {
		sort.Sort(byFirstTimestamp(sortedEvents))
	}
	for _, e := range sortedEvents {
		log.Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
	}
	// Note that we don't wait for any Cleanup to propagate, which means
	// that if you delete a bunch of pods right before ending your test,
	// you may or may not see the killing/deletion/Cleanup events.
}

// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
	dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
		return c.CoreV1().Events(ns).List(context.TODO(), opts)
	}, namespace)

	e2epod.DumpAllPodInfoForNamespace(c, namespace, TestContext.ReportDir)

	// If cluster is large, then the following logs are basically useless, because:
	// 1. it takes tens of minutes or hours to grab all of them
	// 2. there are so many of them that working with them are mostly impossible
	// So we dump them only if the cluster is relatively small.
	maxNodesForDump := TestContext.MaxNodesToGather
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Logf("unable to fetch node list: %v", err)
		return
	}
	if len(nodes.Items) <= maxNodesForDump {
		dumpAllNodeInfo(c, nodes)
	} else {
		log.Logf("skipping dumping cluster info - cluster too large")
	}
}

// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event

func (o byFirstTimestamp) Len() int      { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o byFirstTimestamp) Less(i, j int) bool {
	// Equal timestamps fall back to the involved object's name for a stable order.
	if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
		return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
	}
	return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}

// dumpAllNodeInfo dumps debug information for every node in the given list.
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
	names := make([]string, len(nodes.Items))
	for ix := range nodes.Items {
		names[ix] = nodes.Items[ix].Name
	}
	DumpNodeDebugInfo(c, names, log.Logf)
}

// DumpNodeDebugInfo dumps debug information of the given nodes.
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
	for _, n := range nodeNames {
		logFunc("\nLogging node info for node %v", n)
		node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
		if err != nil {
			// Best-effort: log the error and continue; node will print as nil below.
			logFunc("Error getting node info %v", err)
		}
		logFunc("Node Info: %v", node)

		logFunc("\nLogging kubelet events for node %v", n)
		for _, e := range getNodeEvents(c, n) {
			logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
				e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
		}
		logFunc("\nLogging pods the kubelet thinks is on node %v", n)
		podList, err := getKubeletPods(c, n)
		if err != nil {
			logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
			continue
		}
		for _, p := range podList.Items {
			logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
			for _, c := range p.Status.InitContainerStatuses {
				logFunc("\tInit container %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
			for _, c := range p.Status.ContainerStatuses {
				logFunc("\tContainer %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
		}
		// TODO(pingcap): remove e2emetrics
		// TODO: Log node resource info
	}
}

// getKubeletPods retrieves the list of pods on the kubelet via the node proxy
// subresource, with an artificial PodGetTimeout around the call.
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
	var client restclient.Result
	finished := make(chan struct{}, 1)
	go func() {
		// call chain tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165
		client = c.CoreV1().RESTClient().Get().
			Resource("nodes").
			SubResource("proxy").
			Name(fmt.Sprintf("%v:%v", node, KubeletPort)).
			Suffix("pods").
			Do(context.TODO())

		finished <- struct{}{}
	}()
	select {
	case <-finished:
		result := &v1.PodList{}
		if err := client.Into(result); err != nil {
			return &v1.PodList{}, err
		}
		return result, nil
	case <-time.After(PodGetTimeout):
		return &v1.PodList{}, fmt.Errorf("Waiting up to %v for getting the list of pods", PodGetTimeout)
	}
}

// getNodeEvents returns kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
// Returns an empty slice on list errors (the error is only logged).
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
	selector := fields.Set{
		"involvedObject.kind":      "Node",
		"involvedObject.name":      nodeName,
		"involvedObject.namespace": metav1.NamespaceAll,
		"source":                   "kubelet",
	}.AsSelector().String()
	options := metav1.ListOptions{FieldSelector: selector}
	events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
	if err != nil {
		log.Logf("Unexpected error retrieving node events %v", err)
		return []v1.Event{}
	}
	return events.Items
}

// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
// An AllowedNotReadyNodes of -1 disables the check entirely.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
	if TestContext.AllowedNotReadyNodes == -1 {
		return nil
	}

	log.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
	return wait.PollImmediate(
		30*time.Second,
		timeout,
		e2enode.CheckReadyForTests(c, TestContext.NonblockingTaints, TestContext.AllowedNotReadyNodes, largeClusterThreshold),
	)
}

// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
+func RunHostCmd(ns, name, cmd string) (string, error) { + return RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd) +} + +// AllNodesReady checks whether all registered nodes are ready. Setting -1 on +// TestContext.AllowedNotReadyNodes will bypass the post test node readiness check. +// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy, +// and figure out how to do it in a configurable way, as we can't expect all setups to run +// default test add-ons. +func AllNodesReady(c clientset.Interface, timeout time.Duration) error { + if TestContext.AllowedNotReadyNodes == -1 { + return nil + } + + log.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes) + + var notReady []*v1.Node + err := wait.PollImmediate(Poll, timeout, func() (bool, error) { + notReady = nil + // It should be OK to list unschedulable Nodes here. + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return false, err + } + for i := range nodes.Items { + node := &nodes.Items[i] + if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) { + notReady = append(notReady, node) + } + } + // Framework allows for nodes to be non-ready, + // to make it possible e.g. for incorrect deployment of some small percentage + // of nodes (which we allow in cluster validation). Some nodes that are not + // provisioned correctly at startup will never become ready (e.g. when something + // won't install correctly), so we can't expect them to be ready at any point. 
+ return len(notReady) <= TestContext.AllowedNotReadyNodes, nil + }) + + if err != nil && err != wait.ErrWaitTimeout { + return err + } + + if len(notReady) > TestContext.AllowedNotReadyNodes { + msg := "" + for _, node := range notReady { + msg = fmt.Sprintf("%s, %s", msg, node.Name) + } + return fmt.Errorf("Not ready nodes: %#v", msg) + } + return nil +} + +// CoreDump SSHs to the master and all nodes and dumps their logs into dir. +// It shells out to cluster/log-dump/log-dump.sh to accomplish this. +func CoreDump(dir string) { + if TestContext.DisableLogDump { + log.Logf("Skipping dumping logs from cluster") + return + } + var cmd *exec.Cmd + if TestContext.LogexporterGCSPath != "" { + log.Logf("Dumping logs from nodes to GCS directly at path: %s", TestContext.LogexporterGCSPath) + cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir, TestContext.LogexporterGCSPath) + } else { + log.Logf("Dumping logs locally to: %s", dir) + cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir) + } + cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices))) + cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_JOURNAL=%v", TestContext.DumpSystemdJournal)) + + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + log.Logf("Error running cluster/log-dump/log-dump.sh: %v", err) + } +} + +// parseSystemdServices converts services separator from comma to space. +func parseSystemdServices(services string) string { + return strings.TrimSpace(strings.Replace(services, ",", " ", -1)) +} + +// PrettyPrintJSON converts metrics to JSON format. 
+func PrettyPrintJSON(metrics interface{}) string { + output := &bytes.Buffer{} + if err := json.NewEncoder(output).Encode(metrics); err != nil { + log.Logf("Error building encoder: %v", err) + return "" + } + formatted := &bytes.Buffer{} + if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil { + log.Logf("Error indenting: %v", err) + return "" + } + return formatted.String() +} diff --git a/tests/webhook.go b/tests/webhook.go index 98f9e7fdab4..0837c052c4e 100644 --- a/tests/webhook.go +++ b/tests/webhook.go @@ -20,12 +20,12 @@ import ( "time" podutil "github.com/pingcap/tidb-operator/tests/e2e/util/pod" + "github.com/pingcap/tidb-operator/tests/third_party/k8s/log" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework/log" ) func (oa *OperatorActions) setCabundleFromApiServer(info *OperatorConfig) error {