Commit

Hardcode all paths
ianayl committed Feb 3, 2025
1 parent 572ff7e commit 412449e
Showing 9 changed files with 91 additions and 283 deletions.
11 changes: 5 additions & 6 deletions devops/actions/benchmarking/aggregate/action.yml
@@ -53,12 +53,11 @@ runs:
$(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
echo "SANITIZED_PERF_RES_GIT_REPO=$SANITIZED_PERF_RES_GIT_REPO" >> $GITHUB_ENV
echo "SANITIZED_PERF_RES_GIT_BRANCH=$SANITIZED_PERF_RES_GIT_BRANCH" >> $GITHUB_ENV
echo "SANITIZED_PERF_RES_PATH=$SANITIZED_PERF_RES_PATH" >> $GITHUB_ENV
- name: Checkout historical performance results repository
shell: bash
run: |
if [ ! -d "$SANITIZED_PERF_RES_PATH" ]; then
git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" "$SANITIZED_PERF_RES_PATH"
if [ ! -d ./llvm-ci-perf-results ]; then
git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" ./llvm-ci-perf-results
fi
- name: Run aggregator on historical results
shell: bash
@@ -69,7 +68,7 @@ runs:
#
# Thus, a min/max depth of 3 is used to enumerate all test cases in the
# repository. Test name is also derived from here.
find "$SANITIZED_PERF_RES_PATH" -mindepth 3 -maxdepth 3 -type d ! -path '*.git*' |
find ./llvm-ci-perf-results -mindepth 3 -maxdepth 3 -type d ! -path '*.git*' |
while read -r dir; do
test_name="$(basename "$dir")"
python ./devops/scripts/benchmarking/aggregate.py ./devops "$test_name" "$dir" "$SANITIZED_TIMESTAMP"
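For reference, a minimal sketch of the repository layout the depth-3 find above assumes; the device selector, runner, and test case names are hypothetical:

    # Results are laid out as <device-selector>/<runner>/<test case>/, e.g.:
    #   ./llvm-ci-perf-results/level_zero-gpu/pvc-runner-01/api_overhead_benchmark_sycl/
    #       api_overhead_benchmark_sycl-20250203_142530.csv
    #
    # so the depth-3 find prints one directory per test case:
    find ./llvm-ci-perf-results -mindepth 3 -maxdepth 3 -type d ! -path '*.git*'
    # -> ./llvm-ci-perf-results/level_zero-gpu/pvc-runner-01/api_overhead_benchmark_sycl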
@@ -78,7 +77,7 @@
shell: bash
run: |
# TODO -- waiting on security clearance
cd "$SANITIZED_PERF_RES_PATH"
cd ./llvm-ci-perf-results
git config user.name "SYCL Benchmarking Bot"
git config user.email "[email protected]"
git add .
@@ -89,4 +88,4 @@ runs:
uses: actions/upload-artifact@v4
with:
name: llvm-ci-perf-results new medians
path: ${{ env.SANITIZED_PERF_RES_PATH }}/**/*-median.csv
path: ./llvm-ci-perf-results/**/*-median.csv
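The load_config.py invocations in these steps rely on a small shell trick: the script presumably prints NAME=value pairs, and an unquoted command substitution that expands to nothing but assignments applies them to the current shell, after which they can be appended to GITHUB_ENV. A minimal sketch of the pattern, with a hypothetical emitter standing in for load_config.py:

    # Hypothetical stand-in for load_config.py: print one assignment per line.
    emit_config() {
      echo 'SANITIZED_PERF_RES_GIT_REPO=ianayl/llvm-ci-perf-results'
      echo 'SANITIZED_PERF_RES_GIT_BRANCH=test-compute-bench'
    }

    # A simple command consisting only of assignments sets them in the current
    # shell, so this populates the SANITIZED_* variables (values must not
    # contain whitespace, since the output is word-split):
    $(emit_config)

    # GITHUB_ENV is provided by GitHub Actions; appending NAME=value lines
    # exports the variables to later steps.
    echo "SANITIZED_PERF_RES_GIT_REPO=$SANITIZED_PERF_RES_GIT_REPO" >> "$GITHUB_ENV"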
2 changes: 1 addition & 1 deletion devops/actions/run-tests/benchmark/action.yml
@@ -48,7 +48,7 @@ runs:
# Load configuration values
$(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
cd "$SANITIZED_PERF_RES_PATH"
cd "./llvm-ci-perf-results"
git config user.name "SYCL Benchmarking Bot"
git config user.email "[email protected]"
git add .
75 changes: 0 additions & 75 deletions devops/benchmarking/benchmark-ci.conf

This file was deleted.

45 changes: 24 additions & 21 deletions devops/benchmarking/constants.ini
@@ -11,33 +11,36 @@
[compute_bench]
git_repo = ianayl/compute-benchmarks
git_branch = update-sycl
path = ./compute-benchmarks
; path = ./compute-benchmarks

; Constants for artifacts
[artifact]
; Path to root folder storing benchmark CI artifact
path = ./artifact
; Path (relative to artifact.path) to cache compute-benchmark results
;
; If a test result does not get moved out of this catch-all cache path, it is
; considered to have failed
output_cache = ./artifact/failed_tests
; Path (relative to artifact.path) to cache passing compute-benchmark results
passing_cache = ./artifact/passing_tests
; It was decided that paths should be hardcoded throughout this workflow for
; security reasons and ease of readability. Do not use paths as constants.

; ; Constants for artifacts
; [artifact]
; ; Path to root folder storing benchmark CI artifact
; path = ./artifact
; ; Path (relative to artifact.path) to cache compute-benchmark results
; ;
; ; If a test result does not get moved out of this catch-all cache path, it is
; ; considered to have failed
; output_cache = ./artifact/failed_tests
; ; Path (relative to artifact.path) to cache passing compute-benchmark results
; passing_cache = ./artifact/passing_tests

; Constants for git repo storing benchmark performance results
[perf_res]
git_repo = ianayl/llvm-ci-perf-results
git_branch = test-compute-bench
; Path to clone performance result repo
path = ./llvm-ci-perf-results
; path = ./llvm-ci-perf-results

[timestamp]
; Timestamp format used for
format = %%Y%%m%%d_%%H%%M%%S
; [timestamp]
; ; Timestamp format used for result CSV file names
; format = %%Y%%m%%d_%%H%%M%%S

[benchmark_log]
; Log file for test cases that perform over the allowed variance
slow = ./artifact/benchmarks_failed.log
; Log file for test cases that errored / failed to build
error = ./artifact/benchmarks_errored.log
; [benchmark_log]
; ; Log file for test cases that perform over the allowed variance
; slow = ./artifact/benchmarks_failed.log
; ; Log file for test cases that errored / failed to build
; error = ./artifact/benchmarks_errored.log
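One note on the commented-out timestamp constant: '%' is the interpolation character in Python's configparser, which is why the INI value doubles it as %%; with the constant dropped, benchmark.sh (below) hardcodes the equivalent date format directly. A minimal sketch, with illustrative output:

    # The INI value %%Y%%m%%d_%%H%%M%%S unescapes to %Y%m%d_%H%M%S:
    TIMESTAMP="$(date +%Y%m%d_%H%M%S)"
    echo "$TIMESTAMP"   # e.g. 20250203_142530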
79 changes: 37 additions & 42 deletions devops/scripts/benchmarking/benchmark.sh
@@ -7,11 +7,6 @@
usage () {
>&2 echo "Usage: $0 -n <runner name> [-c] [-C] [-s]
-n Github runner name -- Required
-B Path to clone and build compute-benchmarks on
-p Path to compute-benchmarks (or directory to build compute-benchmarks in)
-r Github repo to use for compute-benchmarks origin, in format <org>/<name>
-b Git branch to use within compute-benchmarks
-f Compile flags passed into building compute-benchmarks
-c Clean up working directory
-C Clean up working directory and exit
-s Cache results
@@ -21,24 +16,22 @@ This script builds and runs benchmarks from compute-benchmarks."
}

clone_perf_res() {
echo "### Cloning llvm-ci-perf-res ($SANITIZED_PERF_RES_GIT_REPO:$SANITIZED_PERF_RES_GIT_BRANCH) ###"
mkdir -p "$(dirname "$SANITIZED_PERF_RES_PATH")"
git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" "$SANITIZED_PERF_RES_PATH"
echo "### Cloning llvm-ci-perf-results ($SANITIZED_PERF_RES_GIT_REPO:$SANITIZED_PERF_RES_GIT_BRANCH) ###"
git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" ./llvm-ci-perf-results
[ "$?" -ne 0 ] && exit $?
}

clone_compute_bench() {
echo "### Cloning compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###"
mkdir -p "$(dirname "$SANITIZED_COMPUTE_BENCH_PATH")"
git clone -b "$SANITIZED_COMPUTE_BENCH_GIT_BRANCH" \
--recurse-submodules "https://github.com/$SANITIZED_COMPUTE_BENCH_GIT_REPO" \
"$SANITIZED_COMPUTE_BENCH_PATH"
./compute-benchmarks
[ "$?" -ne 0 ] && exit "$?"
}

build_compute_bench() {
echo "### Building compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###"
mkdir "$SANITIZED_COMPUTE_BENCH_PATH/build" && cd "$SANITIZED_COMPUTE_BENCH_PATH/build" &&
mkdir ./compute-benchmarks/build && cd ./compute-benchmarks/build &&
# No reason to turn on ccache if this Docker image will be discarded later on
cmake .. -DBUILD_SYCL=ON -DBUILD_L0=OFF -DBUILD_OCL=OFF -DCCACHE_ALLOWED=FALSE
# TODO enable mechanism for opting into L0 and OCL -- the concept is to
@@ -66,8 +59,8 @@ build_compute_bench() {
#
# Usage: <relative path of directory containing test case results>
samples_under_threshold () {
[ ! -d "$SANITIZED_PERF_RES_PATH/$1" ] && return 1 # Directory doesn't exist
file_count="$(find "$SANITIZED_PERF_RES_PATH/$1" -maxdepth 1 -type f | wc -l )"
[ ! -d "./llvm-ci-perf-results/$1" ] && return 1 # Directory doesn't exist
file_count="$(find "./llvm-ci-perf-results/$1" -maxdepth 1 -type f | wc -l )"
[ "$file_count" -lt "$SANITIZED_AVERAGE_MIN_THRESHOLD" ]
}
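A brief usage sketch of samples_under_threshold: the intent (as suggested by the surrounding code) is to skip regression checks when a test case has too little history; the relative path and calling convention below are hypothetical:

    # <device-selector>/<runner>/<test case>, as used elsewhere in this script:
    test_dir_relpath="level_zero-gpu/pvc-runner-01/api_overhead_benchmark_sycl"

    if samples_under_threshold "$test_dir_relpath"; then
      # Fewer than SANITIZED_AVERAGE_MIN_THRESHOLD historical samples (or no
      # history at all): not enough data to judge a regression.
      echo "Not enough samples for $test_dir_relpath; caching result without a regression check"
    fi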

@@ -92,9 +85,9 @@ check_regression() {
#
# Usage: cache <relative path of output csv>
cache() {
mkdir -p "$(dirname "$SANITIZED_ARTIFACT_PASSING_CACHE/$1")" "$(dirname "$SANITIZED_PERF_RES_PATH/$1")"
cp "$SANITIZED_ARTIFACT_OUTPUT_CACHE/$1" "$SANITIZED_ARTIFACT_PASSING_CACHE/$1"
mv "$SANITIZED_ARTIFACT_OUTPUT_CACHE/$1" "$SANITIZED_PERF_RES_PATH/$1"
mkdir -p "$(dirname "./artifact/passing_tests/$1")" "$(dirname "./llvm-ci-perf-results/$1")"
cp "./artifact/failed_tests/$1" "./artifact/passing_tests/$1"
mv "./artifact/failed_tests/$1" "./llvm-ci-perf-results/$1"
}

# Check for a regression + cache if no regression found
@@ -114,15 +107,13 @@ check_and_cache() {

# Run and process the results of each enabled benchmark in enabled_tests.conf
process_benchmarks() {
mkdir -p "$SANITIZED_PERF_RES_PATH"

echo "### Running and processing selected benchmarks ###"
if [ -z "$TESTS_CONFIG" ]; then
echo "Setting tests to run via cli is not currently supported."
exit 1
else
rm "$SANITIZED_BENCHMARK_LOG_ERROR" "$SANITIZED_BENCHMARK_LOG_SLOW" 2> /dev/null
mkdir -p "$(dirname "$SANITIZED_BENCHMARK_LOG_ERROR")" "$(dirname "$SANITIZED_BENCHMARK_LOG_SLOW")"
rm ./artifact/benchmarks_errored.log ./artifact/benchmarks_failed.log 2> /dev/null
mkdir -p ./artifact
# Loop through each line of enabled_tests.conf, but ignore lines in the
# test config starting with #'s:
grep "^[^#]" "$TESTS_CONFIG" | while read -r testcase; do
@@ -145,11 +136,13 @@ process_benchmarks() {
# Figure out the relative path of our testcase result:
test_dir_relpath="$DEVICE_SELECTOR_DIRNAME/$RUNNER/$testcase"
output_csv_relpath="$test_dir_relpath/$testcase-$TIMESTAMP.csv"
mkdir -p "$SANITIZED_ARTIFACT_OUTPUT_CACHE/$test_dir_relpath" # Ensure directory exists
# TODO generate runner config txt if not exist
mkdir -p "./artifact/failed_tests/$test_dir_relpath" # Ensure directory exists

output_csv="$SANITIZED_ARTIFACT_OUTPUT_CACHE/$output_csv_relpath"
$SANITIZED_COMPUTE_BENCH_PATH/build/bin/$testcase --csv \
# Tests are first placed in ./artifact/failed_tests, and are only
# moved to passing_tests or the performance results repo if the
# benchmark results are passing
output_csv="./artifact/failed_tests/$output_csv_relpath"
"./compute-benchmarks/build/bin/$testcase" --csv \
--iterations="$SANITIZED_COMPUTE_BENCH_ITERATIONS" \
| tail +8 > "$output_csv"
# The tail +8 filters out header lines not in csv format
@@ -158,9 +151,8 @@
if [ "$exit_status" -eq 0 ] && [ -s "$output_csv" ]; then
check_and_cache $output_csv_relpath
else
# TODO consider capturing stderr for logging
echo "[ERROR] $testcase returned exit status $exit_status"
echo "-- $testcase: error $exit_status" >> "$SANITIZED_BENCHMARK_LOG_ERROR"
echo "-- $testcase: error $exit_status" >> ./artifact/benchmarks_errored.log
fi
done
fi
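Tying check_and_cache and the cache helper above together, the life cycle of a single result file looks roughly like this (the relative path below is hypothetical):

    # The benchmark writes its CSV into the catch-all cache first:
    output_csv_relpath="level_zero-gpu/pvc-runner-01/api_overhead_benchmark_sycl/api_overhead_benchmark_sycl-20250203_142530.csv"
    #   ./artifact/failed_tests/$output_csv_relpath
    # If check_and_cache finds no regression, cache() keeps a copy for the CI
    # artifact and promotes the result into the history repo:
    cp "./artifact/failed_tests/$output_csv_relpath" "./artifact/passing_tests/$output_csv_relpath"
    mv "./artifact/failed_tests/$output_csv_relpath" "./llvm-ci-perf-results/$output_csv_relpath"
    # Anything still sitting under ./artifact/failed_tests/ at the end of the
    # run is treated as a failed test.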
@@ -169,15 +161,15 @@
# Handle failures + produce a report on what failed
process_results() {
fail=0
if [ -s "$SANITIZED_BENCHMARK_LOG_SLOW" ]; then
if [ -s ./artifact/benchmarks_failed.log ]; then
printf "\n### Tests performing over acceptable range of average: ###\n"
cat "$SANITIZED_BENCHMARK_LOG_SLOW"
cat ./artifact/benchmarks_failed.log
echo ""
fail=2
fi
if [ -s "$SANITIZED_BENCHMARK_LOG_ERROR" ]; then
if [ -s ./artifact/benchmarks_errored.log ]; then
printf "\n### Tests that failed to run: ###\n"
cat "$SANITIZED_BENCHMARK_LOG_ERROR"
cat ./artifact/benchmarks_errored.log
echo ""
fail=1
fi
@@ -186,8 +178,8 @@ process_results() {

cleanup() {
echo "### Cleaning up compute-benchmark builds from prior runs ###"
rm -rf "$SANITIZED_COMPUTE_BENCH_PATH"
rm -rf "$SANITIZED_PERF_RES_PATH"
rm -rf ./compute-benchmarks
rm -rf ./llvm-ci-perf-results
[ ! -z "$_exit_after_cleanup" ] && exit
}

@@ -229,16 +221,19 @@ load_configs

COMPUTE_BENCH_COMPILE_FLAGS=""
CACHE_RESULTS="0"
TIMESTAMP="$(date +"$SANITIZED_TIMESTAMP_FORMAT")"
# Timestamp format is YYYYMMDD_HHMMSS
TIMESTAMP="$(date +%Y%m%d_%H%M%S)"

# CLI flags + overrides to configuration options:
while getopts "p:b:r:f:n:cCs" opt; do
while getopts "n:cCs" opt; do
case "$opt" in
p) COMPUTE_BENCH_PATH="$OPTARG" ;;
r) COMPUTE_BENCH_GIT_REPO="$OPTARG" ;;
b) COMPUTE_BENCH_BRANCH="$OPTARG" ;;
f) COMPUTE_BENCH_COMPILE_FLAGS="$OPTARG" ;;
n) RUNNER="$OPTARG" ;;
n)
if [ -n "$(printf "%s" "$OPTARG" | sed "s/[a-zA-Z0-9_-]*//g")" ]; then
echo "Illegal characters in runner name."
exit 1
fi
RUNNER="$OPTARG"
;;
# Cleanup status is saved in a var to ensure all arguments are processed before
# performing cleanup
c) _cleanup=1 ;;
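The -n handler above validates the runner name with an allowlist: sed deletes every permitted character, and if anything is left over the name is rejected. A minimal sketch of that check against two hypothetical inputs:

    for name in "pvc-runner_01" 'bad;name$(touch pwned)'; do
      # Delete [a-zA-Z0-9_-]; any character that survives is illegal.
      if [ -n "$(printf "%s" "$name" | sed "s/[a-zA-Z0-9_-]*//g")" ]; then
        echo "rejected: $name"
      else
        echo "accepted: $name"
      fi
    done
    # -> accepted: pvc-runner_01
    # -> rejected: bad;name$(touch pwned)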
Expand Down Expand Up @@ -279,9 +274,9 @@ DEVICE_SELECTOR_DIRNAME="$(echo "$ONEAPI_DEVICE_SELECTOR" | sed 's/:/-/')"
# Clean up and delete all cached files if specified:
[ ! -z "$_cleanup" ] && cleanup
# Clone and build only if they aren't already cached/deleted:
[ ! -d "$SANITIZED_PERF_RES_PATH" ] && clone_perf_res
[ ! -d "$SANITIZED_COMPUTE_BENCH_PATH" ] && clone_compute_bench
[ ! -d "$SANITIZED_COMPUTE_BENCH_PATH/build" ] && build_compute_bench
[ ! -d ./llvm-ci-perf-results ] && clone_perf_res
[ ! -d ./compute-benchmarks ] && clone_compute_bench
[ ! -d ./compute-benchmarks/build ] && build_compute_bench
# Process benchmarks:
process_benchmarks
process_results
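With the path and repository flags removed, the script's only inputs are the runner name and the cleanup/caching switches; everything else is hardcoded or read from the environment. A hypothetical local invocation (the runner name and device selector are made up):

    # ONEAPI_DEVICE_SELECTOR determines the first level of the results layout.
    export ONEAPI_DEVICE_SELECTOR="level_zero:gpu"
    # -n is required; -s asks the script to cache results (see the usage text above).
    ./devops/scripts/benchmarking/benchmark.sh -n pvc-runner_01 -s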