Skip to content

Commit

Permalink
Fix small formatting issues and add comments here and there
Browse files Browse the repository at this point in the history
  • Loading branch information
ianayl committed Feb 4, 2025
1 parent 5c25a95 commit 3e01431
Show file tree
Hide file tree
Showing 8 changed files with 28 additions and 14 deletions.
8 changes: 4 additions & 4 deletions devops/actions/benchmarking/aggregate/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@ name: 'Aggregate compute-benchmark results and produce historical averages'

# The benchmarking workflow in sycl-linux-run-tests.yml passes or fails based on
# how the benchmark results compare to a historical average: This historical
# average is calculated in this workflow, which aggregates historical data and
# produces measures of central tendency (median in this case) used for this
# purpose.
# average is calculated in this composite workflow, which aggregates historical
# data and produces measures of central tendency (median in this case) used for
# this purpose.
#
# This action assumes that /devops has been checked out in ./devops. This action
# also assumes that GITHUB_TOKEN was properly set in env, because according to
Expand Down Expand Up @@ -74,4 +74,4 @@ runs:
uses: actions/upload-artifact@v4
with:
name: llvm-ci-perf-results new medians
path: ./llvm-ci-perf-results/**/*-median.csv
path: ./llvm-ci-perf-results/**/*-median.csv
11 changes: 9 additions & 2 deletions devops/actions/run-tests/benchmark/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,15 @@ runs:
git add .
git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
git push "https://[email protected]/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH"
- shell: bash
run: echo \n\#\n\# Artifact link for benchmark results here\n\#\n
- name: Find workflow artifact below
if: always()
shell: bash
run: |
cat << EOF
#
# Artifact link for benchmark results below:
#
EOF
- name: Archive compute-benchmark results
if: always()
uses: actions/upload-artifact@v4
Expand Down
2 changes: 1 addition & 1 deletion devops/benchmarking/config.ini
Original file line number Diff line number Diff line change
Expand Up @@ -40,4 +40,4 @@ enabled_backends = level_zero,opencl,cuda,hip

; Devices to allow in device_selector
enabled_devices = cpu,gpu
; fpga is disabled
; fpga is disabled
2 changes: 1 addition & 1 deletion devops/benchmarking/constants.ini
Original file line number Diff line number Diff line change
Expand Up @@ -43,4 +43,4 @@ git_branch = test-compute-bench
; ; Log file for test cases that perform over the allowed variance
; slow = ./artifact/benchmarks_failed.log
; ; Log file for test cases that errored / failed to build
; error = ./artifact/benchmarks_errored.log
; error = ./artifact/benchmarks_errored.log
2 changes: 1 addition & 1 deletion devops/benchmarking/enabled_tests.conf
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@ miscellaneous_benchmark_sycl
ulls_benchmark_sycl

# As of January 2025, these are all of the compute-benchmark tests with a SYCL
# implementation.
# implementation.
14 changes: 10 additions & 4 deletions devops/scripts/benchmarking/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,22 +10,27 @@ class Validate:

@staticmethod
def filepath(path: str) -> bool:
    """
    Return True if path is clean, i.e. consists only of legal characters
    (alphanumerics, '/', '.', '_', '-'), otherwise False.
    """
    filepath_re = re.compile(r"[a-zA-Z0-9/._\-]+")
    # fullmatch (not match) is required here: match() only anchors at the
    # start of the string, so a path with a clean prefix followed by
    # illegal characters (e.g. "ok; rm -rf /") would incorrectly pass.
    return filepath_re.fullmatch(path) is not None

@staticmethod
# TODO use config
def timestamp(t: str) -> bool:
    """
    Return True if t is a valid timestamp of the form YYYYMMDD_HHMMSS,
    otherwise False.
    """
    # TODO use config
    timestamp_re = re.compile(
        # YYYYMMDD_HHMMSS: month 01-12, day 01-31, hour 00-23, min/sec 00-59.
        # Day alternation starts at 01 so that day "00" is rejected.
        r"^\d{4}(0[1-9]|1[0-2])(0[1-9]|[12][0-9]|3[01])_([01][0-9]|2[0-3])[0-5][0-9][0-5][0-9]$"
    )
    return timestamp_re.match(t) is not None

@staticmethod
def sanitize_stat(stat: str) -> float:
"""
Sanitize statistics found in compute-benchmark output csv files. Returns float if sanitized, None if not sanitizable
Sanitize statistics found in compute-benchmark output csv files. Returns
float if sanitized, None if not sanitizable.
"""
# Get rid of %
if stat[-1] == "%":
Expand Down Expand Up @@ -172,8 +177,9 @@ def export_python_globals(self):
# self.__sanitize(all_opts["benchmark_log"]["error"],
# "benchmark_log.error")


# Fields that are supposed to be python objects need to be changed to
# python objects:
# python objects manually:

# metrics.recorded
m_rec_str = self.__sanitize(all_opts["metrics"]["recorded"], "metrics.recorded")
Expand Down
1 change: 1 addition & 0 deletions devops/scripts/benchmarking/compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ def to_hist_avg(benchmark_name: str, hist_avg_path: str, test_csv_path: str):
exit(1)

if sample_value > max_tolerated:
# Log failure if fail, otherwise proceed as usual
print(f"\n-- FAILED {benchmark_name}::{test}")
print(
f" {metric}: {sample_value} -- Historic avg. {test_hist_avg[metric]} (max tolerance {threshold*100}%: {max_tolerated})\n"
Expand Down
2 changes: 1 addition & 1 deletion devops/scripts/benchmarking/load_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,4 +27,4 @@ def usage_and_exit():
elif sys.argv[2] == "constants":
print(config.export_shell_constants())
else:
usage_and_exit()
usage_and_exit()

0 comments on commit 3e01431

Please sign in to comment.