diff --git a/.github/actions/deploy/action.yaml b/.github/actions/deploy/action.yaml
index 39afc5bb98..8a4dd4a5e3 100644
--- a/.github/actions/deploy/action.yaml
+++ b/.github/actions/deploy/action.yaml
@@ -83,6 +83,11 @@ runs:
       shell: bash
       run: sh tests/smoke/deploy-sorbet-resources.sh end2end
       working-directory: ./.github/scripts/end2end/operator
+    - name: Deploy metadata
+      shell: bash
+      run: bash deploy-metadata.sh
+      working-directory: ./.github/scripts/end2end
+      if: ${{ env.ENABLE_RING_TESTS == 'true' }}
     - name: End-to-end configuration
       shell: bash
       run: bash configure-e2e.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "default"
diff --git a/.github/scripts/end2end/common.sh b/.github/scripts/end2end/common.sh
index a8fc9d4246..d35d3319a2 100644
--- a/.github/scripts/end2end/common.sh
+++ b/.github/scripts/end2end/common.sh
@@ -9,3 +9,55 @@ get_token() {
     jq -cr '.id_token'
 }
 
+wait_for_endpoint() {
+    local host=$1
+    local port=$2
+    local timeout_s=$3
+
+    kubectl run wait-for-port \
+        --image=busybox \
+        --attach=True \
+        --rm \
+        --restart=Never \
+        --pod-running-timeout=5m \
+        --image-pull-policy=IfNotPresent \
+        --env="HOST=${host}" \
+        --env="PORT=${port}" \
+        --env="TIMEOUT_S=${timeout_s}" \
+        -- sh -c '
+wait_for_endpoint() {
+    local count=0
+    echo "waiting for $HOST:$PORT to be available"
+    while ! nc -z -w 1 $HOST "$PORT"; do
+        count=$((count + 1))
+        [ "$count" -ge "$TIMEOUT_S" ] && echo "Error: timed out waiting for $HOST:$PORT after $TIMEOUT_S seconds" && return 1
+        sleep 1
+    done
+    echo "$HOST:$PORT is now available."
+}
+wait_for_endpoint
+'
+}
+
+wait_for_all_pods_behind_services() {
+    local service=$1
+    local namespace=$2
+    local port_regex=$3
+    local timeout_s=$4
+    kubectl get pods -n $namespace -l app=$service -o jsonpath='{range .items[*]}{.metadata.deletionTimestamp}:{.status.podIP}:{.spec.containers[*].ports[*].containerPort}{"\n"}{end}' | while read -r output; do
+        deletion_timestamp=$(echo $output | cut -d':' -f1)
+        ip=$(echo $output | cut -d':' -f2)
+        ports=$(echo $output | cut -d':' -f3)
+        # skip pods that are terminating or not yet assigned an IP/ports
+        if [ -n "$deletion_timestamp" ] || [ -z "$ip" ] || [ -z "$ports" ]; then
+            continue
+        fi
+        # wait for every port that matches the port pattern, in case
+        # multiple containers are running within the same pod
+        for port in $ports; do
+            if [[ $port == $port_regex ]]; then
+                wait_for_endpoint $ip $port $timeout_s
+            fi
+        done
+    done
+}
diff --git a/.github/scripts/end2end/deploy-metadata.sh b/.github/scripts/end2end/deploy-metadata.sh
new file mode 100644
index 0000000000..814af4f7de
--- /dev/null
+++ b/.github/scripts/end2end/deploy-metadata.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+set -exu
+
+. "$(dirname $0)/common.sh"
+
+# create a separate namespace for metadata
+kubectl create namespace metadata
+
+# shallow-clone the metadata repository
+git init metadata
+cd metadata
+git fetch --depth 1 --no-tags https://${GIT_ACCESS_TOKEN}@github.com/scality/metadata.git
+git checkout FETCH_HEAD
+
+# install the metadata chart in the separate namespace
+cd helm
+helm dependency update cloudserver/
+helm install -n metadata \
+    --set metadata.persistentVolume.storageClass='' \
+    --set metadata.sproxyd.persistentVolume.storageClass='' \
+    s3c cloudserver/
+
+# wait for the repd pods to be created
+kubectl -n metadata rollout status --watch --timeout=300s statefulset/s3c-metadata-repd
+# wait for all repd pods to start serving their admin API ports
+wait_for_all_pods_behind_services metadata-repd metadata "91*" 60
+
+# the current chart uses an old version of bucketd that has trouble reconnecting to repd
+# when bucketd starts first, so restart bucketd once repd is ready
+kubectl -n metadata rollout restart deployment/s3c-metadata-bucketd
+# wait for the bucketd pods to be created
+kubectl -n metadata rollout status --watch --timeout=300s deploy/s3c-metadata-bucketd
+# wait for all bucketd pods to start serving port 9000
+wait_for_all_pods_behind_services metadata-bucketd metadata 9000 60
+
+# manually add "s3c.local" to the rest endpoints list, as it is not configurable in the chart
+current_config=$(kubectl get configmap/s3c-cloudserver-config-json -n metadata -o jsonpath='{.data.config\.json}')
+updated_config=$(echo "$current_config" | jq '.restEndpoints["s3c.local"] = "us-east-1"')
+kubectl patch configmap/s3c-cloudserver-config-json -n metadata --type='merge' -p="$(jq -n --arg v "$updated_config" '{"data": {"config.json": $v}}')"
+
+# restart cloudserver to pick up the configmap changes
+kubectl -n metadata rollout restart deployment/s3c-cloudserver
+# wait for the cloudserver pods to be created
+kubectl -n metadata rollout status --watch --timeout=300s deployment/s3c-cloudserver
+# wait for the cloudserver pods to start serving port 8000
+wait_for_all_pods_behind_services cloudserver metadata 8000 60
diff --git a/.github/scripts/end2end/patch-coredns.sh b/.github/scripts/end2end/patch-coredns.sh
index f33e7c4d5b..b8b40fa376 100755
--- a/.github/scripts/end2end/patch-coredns.sh
+++ b/.github/scripts/end2end/patch-coredns.sh
@@ -36,6 +36,7 @@ corefile="
         rewrite name exact prom.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
         rewrite name exact shell-ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
         rewrite name exact website.mywebsite.com ingress-nginx-controller.ingress-nginx.svc.cluster.local
+        rewrite name exact s3c.local s3c-cloudserver.metadata.svc.cluster.local
         kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml
index e2b88a9768..eab0da5216 100644
--- a/.github/workflows/end2end.yaml
+++ b/.github/workflows/end2end.yaml
@@ -76,11 +76,11 @@ env:
   GCP_BACKEND_SERVICE_EMAIL: ${{ secrets.GCP_BACKEND_SERVICE_EMAIL }}
   # Enable this for Ring tests
   ENABLE_RING_TESTS: "false"
-  RING_S3C_ACCESS_KEY: ${{ secrets.RING_S3C_BACKEND_ACCESS_KEY }}
-  RING_S3C_SECRET_KEY: ${{ secrets.RING_S3C_BACKEND_SECRET_KEY }}
-  RING_S3C_ENDPOINT: ${{ secrets.RING_S3C_BACKEND_ENDPOINT }}
+  RING_S3C_ACCESS_KEY: accessKey1
+  RING_S3C_SECRET_KEY: verySecretKey1
+  RING_S3C_ENDPOINT: http://s3c.local:8000
   RING_S3C_BACKEND_SOURCE_LOCATION: rings3cbackendingestion
-  RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}-${{ github.run_attempt }}
+  RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}
   # CTST end2end tests
   NOTIF_DEST_NAME: "destination1"
   NOTIF_DEST_TOPIC: "destination-topic-1"
@@ -320,9 +320,6 @@ jobs:
         run: |-
           cd tests/zenko_tests
           envsubst < 'e2e-config.yaml.template' > 'e2e-config.yaml'
-          if [[ "${ENABLE_RING_TESTS}" == "false" ]]; then
-            yq -i 'del(.locations[] | select(.locationType == "location-scality-ring-s3-v1"))' e2e-config.yaml
-          fi
           cat e2e-config.yaml
           echo 'Generated e2e-config.yaml file'
       - name: Build and push CI image
@@ -556,8 +553,9 @@ jobs:
     needs: [build-kafka, build-test-image]
     runs-on:
       - ubuntu-22.04-8core
-      # Enable this for Ring-based tests
-      # - scality-cloud
+    env:
+      ENABLE_RING_TESTS: "true"
+      GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -573,8 +571,6 @@ jobs:
           registry: ghcr.io
       - name: Deploy Zenko
         uses: ./.github/actions/deploy
-        env:
-          GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
       - name: Run backbeat end to end tests
         run: bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "backbeat" "default"
         working-directory: ./.github/scripts/end2end
diff --git a/tests/zenko_tests/e2e_config/locations.py b/tests/zenko_tests/e2e_config/locations.py
index e9f5d98bcc..578f4e5057 100644
--- a/tests/zenko_tests/e2e_config/locations.py
+++ b/tests/zenko_tests/e2e_config/locations.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
 import logging
+import os
 
 _log = logging.getLogger("end2end configuration")
 
@@ -11,6 +12,11 @@ def create_location(client, uuid, location):
     :param uuid: zenko instance uuid
     :param location: location details
     """
+
+    ENABLE_RING_TESTS = os.environ['ENABLE_RING_TESTS']
+    if ENABLE_RING_TESTS == "false" and location["locationType"] == "location-scality-ring-s3-v1":
+        return
+
     try:
         Location_V1 = client.get_model('location-v1')
         if "bootstrapList" not in location["details"]: