#!/bin/bash
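#
# process_acs_export.sh
# Annotate a CSV vulnerability export from Red Hat Advanced Cluster Security
# (ACS) with Red Hat CVSS scores and a per-image disposition for each CVE.
#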
export HYDRA_API="https://access.redhat.com/hydra/rest/securitydata"
export CATALOG_API="https://catalog.redhat.com/api/containers/v1/repositories/registry/registry.access.redhat.com/repository"
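# Illustrative lookups against the two endpoints above (the CVE ID and repo
# are examples; the script issues the same calls further down):
#   curl -s "${HYDRA_API}/cve/CVE-2021-44228.json" | jq '.cvss3.cvss3_base_score'
#   curl -s "${CATALOG_API}/openshift-logging/elasticsearch6-rhel8/images?page_size=500&page=0"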
export SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export METADATA_DIR="${SCRIPT_DIR}/metadata"
CLEAN="${CLEAN:-false}"
if [[ "${CLEAN}" == "true" ]]
then
echo "Cleaning metadata directory"
rm -rf "${METADATA_DIR}"
mkdir "${METADATA_DIR}"
else
echo "Skip cleaning metadata directory"
fi
# Ensure the metadata directory exists
# Used to store fetched files to reduce runtimes
mkdir "${METADATA_DIR}" || true
function digest_to_tag() {
    local my_image_name="${1}"
    if [[ ! "${my_image_name}" =~ "@" ]]; then
        echo "Image does not appear to contain a digest"
        return 1
    fi
    # Strip the registry host to get the repo path, and keep the sha256 digest
    local my_image_repo=$(echo "${my_image_name}" | awk -F\@ '{print $1}' | awk '{sub(/\//," ");$1=$1;print $2}')
    local my_image_digest=$(echo "${my_image_name}" | awk -F\@ '{print $NF}')
    local my_image_metadata_file="${METADATA_DIR}/$(echo "${my_image_repo}" | sed 's|/|_|g')_images.json"
    # Pull all past images if we don't have the file cached already
    if [ ! -e "${my_image_metadata_file}" ]; then
        curl -s "${CATALOG_API}/${my_image_repo}/images?page_size=500&page=0" > "${my_image_metadata_file}"
    fi
    # Find the amd64 image whose manifest list digest matches and print its first tag
    jq -r -c ".data[] | select((.repositories[0].manifest_list_digest == \"${my_image_digest}\") and .parsed_data.architecture == \"amd64\") | .repositories[].tags[0].name" "${my_image_metadata_file}"
}
# Example of how to call the function above
#image_name="registry.redhat.io/openshift-logging/elasticsearch6-rhel8@sha256:fd46c47dca6d84f0fd403e481b28cafd614e2e9ed7c63f3965e66485bb26d20c"
#tag=$(digest_to_tag ${image_name})
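#echo "${tag}"   # prints the matching tag, e.g. "6.8.1-12" (illustrative value)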
function process_rhsa() {
    local my_image_repo="${1}"
    local rhsa="${2}"
    local my_image_metadata_file="${METADATA_DIR}/$(echo "${my_image_repo}" | sed 's|/|_|g')_images.json"
    # Map the image to an operator bundle to determine the latest image digest
    local latest_digest=$(grep "${my_image_repo}" operator_images/*.txt | awk -F\= '{print $1}' | awk -F\@ '{print $NF}')
    # Pull all past images if we don't have the file cached already
    if [ ! -e "${my_image_metadata_file}" ]; then
        curl -s "${CATALOG_API}/${my_image_repo}/images?page_size=500&page=0" > "${my_image_metadata_file}"
    fi
    # Find the image that matches the latest digest in the operator bundle
    # and collect the advisory IDs it still carries
    local vulns=$(jq -c -r ".data[] | select((.repositories[0].manifest_list_digest == \"${latest_digest}\") and .parsed_data.architecture == \"amd64\") | .repositories[0].content_advisory_ids" "${my_image_metadata_file}")
    if [[ "${vulns}" =~ "${rhsa}" ]]; then
        echo "NOT resolved in latest release of ${my_image_repo}"
    else
        echo "Resolved in latest release of ${my_image_repo}"
    fi
}
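# Example of how to call the function above (repo and advisory are illustrative)
#disposition=$(process_rhsa "openshift-logging/elasticsearch6-rhel8" "RHSA-2023:1234")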
# Contains CVE/image pairings from the ACS export
TMP_CVES="/tmp/cves.txt"
# Contains cve-analyser results
CVE_ANALYSER_RESULTS="/tmp/cve_analyser.txt"
if [ $# -eq 0 ]
then
    echo "Usage: $0 <CSV from ACS>"
    exit 1
fi
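# Expected ACS export column order, inferred from the field parsing below:
# cluster, cluster id, namespace, namespace id, deployment, image, cve, cvss,
# severity, component, version, fixedBy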
INPUT_FILE="${1}"
OUTPUT_FILE="${INPUT_FILE%.csv}_annotated.csv"
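# e.g. ./process_acs_export.sh acs_export.csv writes acs_export_annotated.csv (illustrative name)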
# Script requires the cve-analyser binary from https://github.com/p-rog/cve-analyser.git
# Build the Go binary and ensure it is in your PATH
if ! command -v cve-analyser > /dev/null 2>&1
then
    echo "cve-analyser binary missing"
    exit 1
fi
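# A typical build looks like this (exact steps depend on the upstream repo):
#   git clone https://github.com/p-rog/cve-analyser.git
#   cd cve-analyser && go build && cp cve-analyser ~/bin/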
# Remove old temp files
rm -f "${TMP_CVES}" "${CVE_ANALYSER_RESULTS}" "${OUTPUT_FILE}"
echo "Creating annotated CSV file with results"
# Write out a new CSV file with the added information
COLS=$(head -1 "${INPUT_FILE}")
echo "${COLS}, \"RedHat CVSS Score\", \"RedHat Disposition\"" > "${OUTPUT_FILE}"
echo "Parsing ACS input CSV"
# Skip the first line of the ACS CSV export, which holds the column names
while read -r line
do
    clusterName=$(echo "${line}" | awk -F\, '{print $1}')
    clusterId=$(echo "${line}" | awk -F\, '{print $2}')
    namespace=$(echo "${line}" | awk -F\, '{print $3}')
    namespaceId=$(echo "${line}" | awk -F\, '{print $4}')
    deployment_name=$(echo "${line}" | awk -F\, '{print $5}')
    image_name=$(echo "${line}" | awk -F\, '{print $6}' | tr -d '"')
    cve=$(echo "${line}" | awk -F\, '{print $7}' | tr -d '"')
    cvss=$(echo "${line}" | awk -F\, '{print $8}')
    # Strip the registry host (and digest or tag) to get the repo path
    if [[ "${image_name}" =~ "@" ]]; then
        image_repo=$(echo "${image_name}" | awk -F\@ '{print $1}' | awk '{sub(/\//," ");$1=$1;print $2}')
    else
        image_repo=$(echo "${image_name}" | awk -F\: '{print $1}' | awk '{sub(/\//," ");$1=$1;print $2}')
    fi
    image=$(echo "${image_repo}" | awk -F\/ '{print $NF}')
    # Script doesn't currently use these fields
    #severity=$(echo "${line}" | awk -F\, '{print $9}')
    #component=$(echo "${line}" | awk -F\, '{print $10}')
    #version=$(echo "${line}" | awk -F\, '{print $11}')
    #fixedBy=$(echo "${line}" | awk -F\, '{print $12}')
    echo "Processing cve: ${cve}"
    if [[ "${cve}" =~ "RHSA" ]]
    then
        RST=$(process_rhsa "${image_repo}" "${cve}")
    else
        # Convert the image digest to a tag and process with cve-analyser
        # TODO: Upgrade cve-analyser to accept a CLI pair instead of a file only; faster
        image_tag=$(digest_to_tag "${image_name}")
        image_with_tag="${image_repo}:${image_tag}"
        # Each TMP_CVES line is "<CVE>,<repo>:<tag>", e.g. (illustrative)
        # CVE-2021-44228,openshift-logging/elasticsearch6-rhel8:6.8.1-12
        echo "${cve},${image_with_tag}" > "${TMP_CVES}"
        RST=$(cve-analyser "${TMP_CVES}" | awk -F\, '{print $NF}')
    fi
    # The CVSS column is filled in later; "todo" is a placeholder
    echo "${line}, todo, ${RST}" >> "${OUTPUT_FILE}"
done < <(tail -n +2 "${INPUT_FILE}")
exit 0
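# NOTE: everything below this exit is unreachable. It is an earlier batch-mode
# pass, kept for reference, that de-duplicates CVE/image pairs and runs
# cve-analyser once over the whole set.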
# No need to send duplicate entries to cve-analyser
T=$(mktemp)
sort -u "${TMP_CVES}" > "${T}"
mv "${T}" "${TMP_CVES}"
# cve-analyser is multi-threaded, so output is not in the same order as input
echo "Generating results for CVE and image pairs"
cve-analyser "${TMP_CVES}" > "${CVE_ANALYSER_RESULTS}"
echo "Creating annotated CSV file with results"
# Write out a new CSV file with the added information
rm -f "${OUTPUT_FILE}"
COLS=$(head -1 "${INPUT_FILE}")
echo "${COLS}, \"RedHat CVSS Score\", \"RedHat Disposition\"" > "${OUTPUT_FILE}"
while read -r line
do
    cve=$(echo "${line}" | awk -F\, '{print $7}' | tr -d '"')
    cvss_acs=$(echo "${line}" | awk -F\, '{print $8}')
    # Parse the image repo first; both branches below depend on it
    image_name=$(echo "${line}" | awk -F\, '{print $6}' | tr -d '"')
    if [[ "${image_name}" =~ "@" ]]; then
        image_repo=$(echo "${image_name}" | awk -F\@ '{print $1}' | awk '{sub(/\//," ");$1=$1;print $2}')
    else
        image_repo=$(echo "${image_name}" | awk -F\: '{print $1}' | awk '{sub(/\//," ");$1=$1;print $2}')
    fi
    if [[ "${cve}" =~ "RHSA" ]]
    then
        image_metadata_file="${METADATA_DIR}/$(echo "${image_repo}" | sed 's|/|_|g')_images.json"
        # Map the image to an operator bundle to determine the latest image digest
        LATEST_DIGEST=$(grep "${image_repo}" operator_images/*.txt | awk -F\= '{print $1}' | awk -F\@ '{print $NF}')
        # Pull all past images if we don't have the file cached already
        if [ ! -e "${image_metadata_file}" ]; then
            curl -s "${CATALOG_API}/${image_repo}/images?page_size=500&page=0" > "${image_metadata_file}"
        fi
        # Find the one that matches the latest image digest in the operator bundle
        VULNS=$(jq -c -r ".data[] | select((.repositories[0].manifest_list_digest == \"${LATEST_DIGEST}\") and .parsed_data.architecture == \"amd64\") | .repositories[0].content_advisory_ids" "${image_metadata_file}")
        if [[ "${VULNS}" =~ "${cve}" ]]; then
            rst="NOT resolved in latest release of ${image_repo}"
        else
            rst="Resolved in latest release of ${image_repo}"
        fi
        cvss=""
    else
        rst=$(grep "${cve}" "${CVE_ANALYSER_RESULTS}" | grep "${image_repo}" | awk -F\, '{print $NF}')
        # Look up the Red Hat CVSS score for the CVE; the response carries it
        # at .cvss3.cvss3_base_score (jq prints "null" when it is absent)
        cvss=$(curl -s "${HYDRA_API}/cve/${cve}.json" | jq -r -c '.cvss3.cvss3_base_score')
        if [[ -z "${cvss}" || "${cvss}" == "null" ]]
        then
            # The lookup failed or the CVE has no CVSSv3 score; leave the field empty
            cvss=""
        elif (( $(echo "${cvss_acs} == ${cvss}" | bc -l) ))
        then
            # The score matches what ACS already reported; leave the field empty
            cvss=""
        fi
    fi
    echo "${line},${cvss},\"${rst}\"" >> "${OUTPUT_FILE}"
done < <(tail -n +2 "${INPUT_FILE}")
exit 0