#!/bin/bash
# --------------------------------------------
# Export vars for helper scripts to use
# --------------------------------------------
# Name of the app-sre "application" folder this component lives in; needs to match for Quay
export COMPONENT="hac-dev"
export IMAGE="quay.io/cloudservices/hac-dev-frontend"
export APP_ROOT=$(pwd)
export WORKSPACE=${WORKSPACE:-$APP_ROOT} # if running in jenkins, use the build's workspace
export NODE_BUILD_VERSION=16
export ROUTE_PATH=/api/plugins/hac-dev
COMMON_BUILDER=https://raw.githubusercontent.com/RedHatInsights/insights-frontend-builder-common/master
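# These exports are read by the helper scripts sourced further down
# (frontend-build.sh and the cicd bootstrap); several of them (COMPONENT,
# IMAGE, WORKSPACE) are also reused directly later in this script.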
# --------------------------------------------
# Options that must be configured by app owner
# --------------------------------------------
IQE_PLUGINS="hac_dev"
IQE_MARKER_EXPRESSION="smoke"
IQE_FILTER_EXPRESSION=""
# If the integration-tests Dockerfile changed against the target branch, rebuild the test runner image
TEST_IMAGE="quay.io/hacdev/hac-tests:next"
if ! git diff --exit-code --quiet origin/$ghprbTargetBranch HEAD -- integration-tests/Dockerfile; then
    echo "Dockerfile changes detected, rebuilding test image"
    TEST_IMAGE="hac-dev:pr-${ghprbPullId}"
    cd integration-tests
    podman build -t "$TEST_IMAGE" . -f Dockerfile
    cd ..
fi
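# The ghprb* variables used here and below (ghprbTargetBranch, ghprbPullId,
# ghprbPullTitle, ghprbCommentBody, ghprbPullLink) are assumed to be injected
# by the Jenkins GitHub Pull Request Builder plugin.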
# Build and push to quay
set -exv
# source is preferred to '| bash -s' in this case to avoid running the helper in a subshell
source <(curl -sSL $COMMON_BUILDER/src/frontend-build.sh)
BUILD_RESULTS=$?
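# frontend-build.sh is expected to build the frontend container and push it to
# the Quay repo named in IMAGE; BUILD_RESULTS records its exit status.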
# Get bonfire helper scripts and python venv. Set GIT_COMMIT and IMAGE_TAG
BOOTSTRAP_SCRIPT_URL="https://raw.githubusercontent.com/RedHatInsights/cicd-tools/main/bootstrap.sh"
curl -s "$BOOTSTRAP_SCRIPT_URL" > .cicd_bootstrap.sh && source .cicd_bootstrap.sh
# Note: this PoC setup will be cleaned up with upcoming Bonfire changes
# Reserve a namespace in the ephemeral cluster and set vars accordingly
NAMESPACE=$(bonfire namespace reserve)
ENV_NAME=env-${NAMESPACE}
oc project ${NAMESPACE}
HOSTNAME=$(oc get feenv ${ENV_NAME} -o json | jq -r '.spec.hostname')
# Temp: set up the proxy and patch SSO for devsandbox
oc patch feenv ${ENV_NAME} --type merge -p '{"spec":{"sso": "'$HAC_KC_SSO_URL'" }}'
oc process -f tmp/hac-proxy.yaml -n ${NAMESPACE} -p NAMESPACE=${NAMESPACE} -p ENV_NAME=${ENV_NAME} -p HOSTNAME=${HOSTNAME} | oc create -f -
# Only deploy necessary frontend dependencies
export BONFIRE_FRONTEND_DEPENDENCIES=chrome-service,insights-chrome
# Deploy hac-dev with PR git ref and mainline hac-core ref
bonfire deploy \
    hac \
    --frontends true \
    --source=appsre \
    --clowd-env ${ENV_NAME} \
    --set-template-ref ${COMPONENT}=${GIT_COMMIT} \
    --set-image-tag ${IMAGE}=${IMAGE_TAG} \
    --set-image-tag quay.io/redhat-services-prod/hcc-platex-services/chrome-service=latest \
    --namespace ${NAMESPACE}
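# After this deploy the namespace should contain the hac frontends, with
# hac-dev pinned to this PR's commit/image tag and hac-core on its mainline ref.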
# Hacks for clowder and keycloak integration
oc get clowdenvironment $ENV_NAME -o json | jq '.spec.disabled=true' | oc apply -f -
export KC_URL=$(echo $HAC_KC_SSO_URL | sed 's|/auth/||')
oc get deployment $ENV_NAME-mbop -o json | \
    jq --arg url $KC_URL --arg user $HAC_KC_USERNAME --arg pass $HAC_KC_PASSWORD \
    '(.spec.template.spec.containers[].env=[
        {"name": "KEYCLOAK_SERVER", "value": $url},
        {"name": "KEYCLOAK_USERNAME", "value": $user},
        {"name": "KEYCLOAK_PASSWORD", "value": $pass},
        {"name": "KEYCLOAK_VERSION", "value": "23.0.1"}])' | oc replace -f -
oc rollout status deployment $ENV_NAME-mbop
# Workaround for the BETA flag being used on the testing (ephemeral) env
oc get frontend hac-dev --output json | jq '.spec.frontend.paths += ["/beta/api/plugins/hac-dev"]' | oc apply -f -
oc get frontend hac-core --output json | jq '.spec.frontend.paths += ["/beta/apps/hac-core"]' | oc apply -f -
oc rollout status deployment hac-dev-frontend
oc rollout status deployment hac-core-frontend
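# Both frontends should now also be served under the /beta prefix, which the
# ephemeral environment appears to rely on (see the BETA flag note above).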
# Call the keycloak API and add a user
B64_USER=$(oc get secret ${ENV_NAME}-keycloak -o json | jq -r '.data.username')
B64_PASS=$(oc get secret ${ENV_NAME}-keycloak -o json | jq -r '.data.password')
CYPRESS_USERNAME="e2e-hac-$(echo ${B64_USER} | base64 -d)"
ENCODED_CYPRESS_USERNAME=$(echo -n ${CYPRESS_USERNAME} | base64 -w 0)
# These ENVs are populated in the Jenkins job by Vault secrets
python tmp/keycloak.py $HAC_KC_SSO_URL $HAC_KC_USERNAME $HAC_KC_PASSWORD $ENCODED_CYPRESS_USERNAME $B64_PASS $HAC_KC_REGISTRATION
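# keycloak.py (expected under tmp/) presumably registers the Cypress user and
# password prepared above so the test run below can log in as that user.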
mkdir -p $WORKSPACE/artifacts
PR_TITLE=$(echo ${ghprbPullTitle} | sed -r 's/\s/_/g')
GH_COMMENTBODY=$(echo ${ghprbCommentBody} | sed -r 's/\s/_/g')
COMMON_SETUP="-v $WORKSPACE/artifacts:/tmp/artifacts:Z,U \
    -v $PWD/integration-tests:/e2e:Z,U \
    -e CYPRESS_PR_CHECK=true \
    -e CYPRESS_GH_PR_LINK=${ghprbPullLink} \
    -e CYPRESS_HAC_BASE_URL=https://${HOSTNAME}/application-pipeline \
    -e CYPRESS_USERNAME=${CYPRESS_USERNAME} \
    -e CYPRESS_PASSWORD=$(echo ${B64_PASS} | base64 -d) \
    -e CYPRESS_GH_PR_TITLE=${PR_TITLE} \
    -e CYPRESS_SSO_URL=${HAC_KC_SSO_URL} \
    -e GH_COMMENTBODY=${GH_COMMENTBODY}"
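# COMMON_SETUP is deliberately expanded unquoted below so the shell word-splits
# it back into individual podman arguments.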
set +e
TEST_RUN=0
podman run --userns=keep-id ${COMMON_SETUP} \
    -e CYPRESS_GH_TOKEN=${CYPRESS_GH_TOKEN} \
    -e CYPRESS_GH_PASSWORD=${CYPRESS_GH_PASSWORD} \
    -e CYPRESS_GH_SETUP_KEY=${CYPRESS_GH_SETUP_KEY} \
    -e CYPRESS_QUAY_TOKEN=${CYPRESS_QUAY_TOKEN} \
    -e CYPRESS_RP_TOKEN=${CYPRESS_RP_HAC} \
    -e CYPRESS_VC_KUBECONFIG=${CYPRESS_VC_KUBECONFIG} \
    -e CYPRESS_SNYK_TOKEN=${CYPRESS_SNYK_TOKEN} \
    -e CYPRESS_ATLAS_USERNAME=${CYPRESS_ATLAS_USERNAME} \
    -e CYPRESS_ATLAS_PASSWORD=${CYPRESS_ATLAS_PASSWORD} \
    ${TEST_IMAGE} || TEST_RUN=1
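# With set +e in effect a failing Cypress run only flips TEST_RUN to 1, so the
# cleanup below still runs and the namespace is released before the script exits.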
# Remove the test image if we built a PR-specific one above
if [[ $TEST_IMAGE =~ "hac-dev:pr" ]]; then
    podman rmi -f $TEST_IMAGE
fi
bonfire namespace release -f ${NAMESPACE}
# teardown_docker
exit $TEST_RUN