Skip to content

Commit

Permalink
Merge pull request #941 from roboflow/gpu_speedups
Browse files Browse the repository at this point in the history
Add an option to use pytorch for GPU-based image preprocessing
  • Loading branch information
grzegorz-roboflow authored Feb 17, 2025
2 parents 653a810 + 46ef39e commit b9f473a
Show file tree
Hide file tree
Showing 46 changed files with 848 additions and 627 deletions.
17 changes: 14 additions & 3 deletions .github/workflows/test.jetson_4.5.0.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,21 @@ jobs:
docker pull roboflow/roboflow-inference-server-jetson-4.5.0:test
docker build -t roboflow/roboflow-inference-server-jetson-4.5.0:test -f docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 .
docker push roboflow/roboflow-inference-server-jetson-4.5.0:test
- name: 🔋 Start Test Docker - Jetson 4.5.0
- name: 🔋 Start Test Docker without Torch Preprocessing - Jetson 4.5.0
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-4.5.0 make start_test_docker_jetson
- name: 🧪 Regression Tests - Jetson 4.5.0
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=False INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-4.5.0 make start_test_docker_jetson
- name: 🧪 Regression Tests without Torch Preprocessing - Jetson 4.5.0
run: |
SKIP_VISUALISATION_TESTS=true MAX_WAIT=300 SKIP_LMM_TEST=True SKIP_TROCR_TEST=True SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_YOLOV8_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 4.5.0
run: make stop_test_docker
if: success() || failure()

- name: 🔋 Start Test Docker with Torch Preprocessing - Jetson 4.5.0
run: |
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=True INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-4.5.0 make start_test_docker_jetson
- name: 🧪 Regression Tests with Torch Preprocessing - Jetson 4.5.0
run: |
SKIP_VISUALISATION_TESTS=true MAX_WAIT=300 SKIP_LMM_TEST=True SKIP_TROCR_TEST=True SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_YOLOV8_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 4.5.0
Expand Down
17 changes: 14 additions & 3 deletions .github/workflows/test.jetson_4.6.1.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,21 @@ jobs:
docker pull roboflow/roboflow-inference-server-jetson-4.6.1:test
docker build -t roboflow/roboflow-inference-server-jetson-4.6.1:test -f docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 .
docker push roboflow/roboflow-inference-server-jetson-4.6.1:test
- name: 🔋 Start Test Docker - Jetson 4.6.1
- name: 🔋 Start Test Docker without Torch Preprocessing - Jetson 4.6.1
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-4.6.1 make start_test_docker_jetson
- name: 🧪 Regression Tests - Jetson 4.6.1
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=False INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-4.6.1 make start_test_docker_jetson
- name: 🧪 Regression Tests without Torch Preprocessing - Jetson 4.6.1
run: |
SKIP_YOLO_WORLD_TEST=true SKIP_SPEED_TEST=true SKIP_LMM_TEST=True SKIP_TROCR_TEST=True SKIP_DOCTR_TEST=true SKIP_CLIP_TEST=true SKIP_VISUALISATION_TESTS=true MAX_WAIT=300 SKIP_SAM_TEST=true SKIP_GROUNDING_DINO_TEST=true SKIP_YOLOV8_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 4.6.1
run: make stop_test_docker
if: success() || failure()

- name: 🔋 Start Test Docker with Torch Preprocessing - Jetson 4.6.1
run: |
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=True INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-4.6.1 make start_test_docker_jetson
- name: 🧪 Regression Tests with Torch Preprocessing - Jetson 4.6.1
run: |
SKIP_YOLO_WORLD_TEST=true SKIP_SPEED_TEST=true SKIP_LMM_TEST=True SKIP_TROCR_TEST=True SKIP_DOCTR_TEST=true SKIP_CLIP_TEST=true SKIP_VISUALISATION_TESTS=true MAX_WAIT=300 SKIP_SAM_TEST=true SKIP_GROUNDING_DINO_TEST=true SKIP_YOLOV8_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 4.6.1
Expand Down
17 changes: 14 additions & 3 deletions .github/workflows/test.jetson_5.1.1.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,21 @@ jobs:
docker pull roboflow/roboflow-inference-server-jetson-5.1.1:test
docker build -t roboflow/roboflow-inference-server-jetson-5.1.1:test -f docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 .
docker push roboflow/roboflow-inference-server-jetson-5.1.1:test
- name: 🔋 Start Test Docker - Jetson 5.1.1
- name: 🔋 Start Test Docker without Torch Preprocessing - Jetson 5.1.1
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-5.1.1 make start_test_docker_jetson
- name: 🧪 Regression Tests - Jetson 5.1.1
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=False INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-5.1.1 make start_test_docker_jetson
- name: 🧪 Regression Tests without Torch Preprocessing - Jetson 5.1.1
run: |
SKIP_VISUALISATION_TESTS=true SKIP_LMM_TEST=True MAX_WAIT=300 SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 5.1.1
run: make stop_test_docker
if: success() || failure()

- name: 🔋 Start Test Docker with Torch Preprocessing - Jetson 5.1.1
run: |
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=True INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-5.1.1 make start_test_docker_jetson
- name: 🧪 Regression Tests with Torch Preprocessing - Jetson 5.1.1
run: |
SKIP_VISUALISATION_TESTS=true SKIP_LMM_TEST=True MAX_WAIT=300 SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 5.1.1
Expand Down
17 changes: 14 additions & 3 deletions .github/workflows/test.jetson_6.0.0.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,21 @@ jobs:
docker pull roboflow/roboflow-inference-server-jetson-6.0.0:test
docker build -t roboflow/roboflow-inference-server-jetson-6.0.0:test -f docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 .
docker push roboflow/roboflow-inference-server-jetson-6.0.0:test
- name: 🔋 Start Test Docker - Jetson 6.0.0
- name: 🔋 Start Test Docker without Torch Preprocessing - Jetson 6.0.0
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-6.0.0 make start_test_docker_jetson
- name: 🧪 Regression Tests - Jetson 6.0.0
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=False INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-6.0.0 make start_test_docker_jetson
- name: 🧪 Regression Tests without Torch Preprocessing - Jetson 6.0.0
run: |
SKIP_VISUALISATION_TESTS=true SKIP_LMM_TEST=True MAX_WAIT=300 SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 6.0.0
run: make stop_test_docker
if: success() || failure()

- name: 🔋 Start Test Docker with Torch Preprocessing - Jetson 6.0.0
run: |
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=True INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-6.0.0 make start_test_docker_jetson
- name: 🧪 Regression Tests with Torch Preprocessing - Jetson 6.0.0
run: |
SKIP_VISUALISATION_TESTS=true SKIP_LMM_TEST=True MAX_WAIT=300 SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - Jetson 6.0.0
Expand Down
18 changes: 15 additions & 3 deletions .github/workflows/test.nvidia_t4.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,15 +27,27 @@ jobs:
- name: 🔨 Build and Push Test Docker - GPU
run: |
docker build -t roboflow/roboflow-inference-server-gpu:test -f docker/dockerfiles/Dockerfile.onnx.gpu .
- name: 🔋 Start Test Docker - GPU
- name: 🔋 Start Test Docker without Torch Preprocessing - GPU
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
- name: 🧪 Regression Tests - GPU
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=False INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
- name: 🧪 Regression Tests without Torch Preprocessing - GPU
id: regression_tests
run: |
MINIMUM_FPS=25 FUNCTIONAL=true PORT=9101 SKIP_LMM_TEST=True API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - GPU
run: make stop_test_docker

- name: 🔋 Start Test Docker with Torch Preprocessing - GPU
run: |
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=True INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
- name: 🧪 Regression Tests with Torch Preprocessing - GPU
id: regression_tests_torch_preprocessing
run: |
MINIMUM_FPS=25 FUNCTIONAL=true PORT=9101 SKIP_LMM_TEST=True API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
- name: 🧹 Cleanup Test Docker - GPU
run: make stop_test_docker

- name: 🔋 Start Test Docker - GPU
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
Expand Down
21 changes: 18 additions & 3 deletions .github/workflows/test.nvidia_t4_parallel_server.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,25 @@ jobs:
- name: 🔨 Build and Push Test Docker - Parallel GPU
run: |
docker build -t roboflow/roboflow-inference-server-gpu-parallel:test -f docker/dockerfiles/Dockerfile.onnx.gpu.parallel .
- name: 🔋 Start Test Docker - Parallel GPU
- name: 🔋 Start Test Docker without Torch Preprocessing - Parallel GPU
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu-parallel make start_test_docker_gpu
- name: 🧪 Regression Tests - Parallel GPU
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=False INFERENCE_SERVER_REPO=roboflow-inference-server-gpu-parallel make start_test_docker_gpu
- name: 🧪 Regression Tests without Torch Preprocessing - Parallel GPU
id: regression_tests
run: |
IS_PARALLEL_SERVER=true SKIP_VISUALISATION_TESTS=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/regression_test.py tests/inference/integration_tests/batch_regression_test.py
- name: 🚨 Show server logs on error
run: docker logs inference-test
if: failure()
- name: 🧹 Cleanup Test Docker - Parallel GPU
run: make stop_test_docker
if: success() || failure()

- name: 🔋 Start Test Docker with Torch Preprocessing - Parallel GPU
run: |
PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=True INFERENCE_SERVER_REPO=roboflow-inference-server-gpu-parallel make start_test_docker_gpu
- name: 🧪 Regression Tests with Torch Preprocessing - Parallel GPU
id: regression_tests_torch_preprocessing
run: |
IS_PARALLEL_SERVER=true SKIP_VISUALISATION_TESTS=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/regression_test.py tests/inference/integration_tests/batch_regression_test.py
Expand Down
5 changes: 5 additions & 0 deletions inference/core/env.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,11 @@
# ID of host device, default is None
DEVICE_ID = os.getenv("DEVICE_ID", None)

# Whether or not to use PyTorch for preprocessing, default is False
USE_PYTORCH_FOR_PREPROCESSING = str2bool(
os.getenv("USE_PYTORCH_FOR_PREPROCESSING", False)
)

# Flag to disable inference cache, default is False
DISABLE_INFERENCE_CACHE = str2bool(os.getenv("DISABLE_INFERENCE_CACHE", False))

Expand Down
Loading

0 comments on commit b9f473a

Please sign in to comment.