diff --git a/.github/workflows/hailo_publish.yaml b/.github/workflows/hailo_publish.yaml index f62c48e..2dacdde 100644 --- a/.github/workflows/hailo_publish.yaml +++ b/.github/workflows/hailo_publish.yaml @@ -10,8 +10,15 @@ on: jobs: hailo-publish: + + strategy: + fail-fast: false + matrix: + version: ["2024.04", "2024.07"] + uses: ./.github/workflows/publish.yaml secrets: inherit with: package: hailo + version: ${{ matrix.version }} diff --git a/.github/workflows/hailo_test.yaml b/.github/workflows/hailo_test.yaml index 047de74..4ddb609 100644 --- a/.github/workflows/hailo_test.yaml +++ b/.github/workflows/hailo_test.yaml @@ -16,8 +16,15 @@ on: jobs: hailo-test: + + strategy: + fail-fast: false + matrix: + version: ["2024.04", "2024.07"] + uses: ./.github/workflows/modelconverter_test.yaml secrets: inherit with: package: hailo + version: ${{ matrix.version }} diff --git a/.github/workflows/modelconverter_test.yaml b/.github/workflows/modelconverter_test.yaml index f7e7aeb..50f9c73 100644 --- a/.github/workflows/modelconverter_test.yaml +++ b/.github/workflows/modelconverter_test.yaml @@ -8,6 +8,12 @@ on: type: string description: Which package to test + version: + required: true + type: string + description: > + Which version of underlying conversion tools to use + permissions: contents: read packages: read @@ -17,6 +23,7 @@ env: AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_S3_ENDPOINT_URL: ${{ secrets.AWS_S3_ENDPOINT_URL }} PACKAGE: ${{ inputs.package }} + VERSION: ${{ inputs.version }} jobs: tests: @@ -53,19 +60,24 @@ jobs: credentials_json: ${{ secrets.GCP_CREDENTIALS }} token_format: access_token - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1 - - - name: Download file from GCS + - name: Download build files from GCS + if: ${{ inputs.package != 'hailo' }} + env: + GS_BUILD_ARTIFACTS: gs://luxonis-test-bucket/modelconverter/build-artifacts run: | + mkdir -p docker/extra_packages cd docker/extra_packages - if [ 
"$PACKAGE" = "rvc4" ]; then - gsutil cp gs://luxonis-test-bucket/modelconverter/build-artifacts/snpe.zip . - elif [ "$PACKAGE" = "rvc2" ] || [ "$PACKAGE" = "rvc3" ]; then - gsutil cp gs://luxonis-test-bucket/modelconverter/build-artifacts/openvino_2022_3_vpux_drop_patched.tar.gz . + if [ "${PACKAGE}" = "rvc4" ]; then + gcloud storage cp \ + "${GS_BUILD_ARTIFACTS}/snpe-${VERSION}.zip" \ + "snpe-${VERSION}.zip" + elif [ "${PACKAGE}" = "rvc2" ] || [ "${PACKAGE}" = "rvc3" ]; then + gcloud storage cp \ + "${GS_BUILD_ARTIFACTS}/openvino-${VERSION}.tar.gz" \ + "openvino-${VERSION}.tar.gz" fi - name: Run Tests run: | - pytest -s --verbose "tests/test_packages/test_$PACKAGE.py" + pytest -s --verbose "tests/test_packages/test_${PACKAGE}.py" --tool-version "${VERSION}" diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index fc17b1a..07c980c 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -8,21 +8,31 @@ on: type: string description: Which package to publish + version: + required: true + type: string + description: > + Which version of underlying conversion tools to use + permissions: contents: read packages: write env: - PROJECT_ID: easyml-394818 GAR_LOCATION: us-central1 DOCKERFILE: docker/${{ inputs.package }}/Dockerfile PACKAGE: ${{ inputs.package }} NAME: luxonis/modelconverter-${{ inputs.package }} STEM: modelconverter-${{ inputs.package }} + VERSION: ${{ inputs.version }} + GS_BUILD_ARTIFACTS: gs://luxonis-test-bucket/modelconverter/build-artifacts + GAR_STEM: us-central1-docker.pkg.dev/easyml-394818 jobs: ghcr-publish: runs-on: ubuntu-latest + env: + LOCAL_NAME: ${NAME}:${VERSION}-latest steps: - name: Checkout code @@ -35,58 +45,6 @@ jobs: credentials_json: ${{ secrets.GCP_CREDENTIALS }} token_format: access_token - - name: Docker login to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Get modelconverter 
version - id: commit - run: echo "sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1 - - - name: Download file from GCS - run: | - cd docker/extra_packages - if [ "$PACKAGE" = "rvc4" ]; then - gsutil cp gs://luxonis-test-bucket/modelconverter/build-artifacts/snpe.zip snpe.zip - elif [ "$PACKAGE" = "rvc2" ] || [ "$PACKAGE" = "rvc3" ]; then - gsutil cp gs://luxonis-test-bucket/modelconverter/build-artifacts/openvino_2022_3_vpux_drop_patched.tar.gz openvino_2022_3_vpux_drop_patched.tar.gz - fi - name: Publish latest - run: | - docker build -f $DOCKERFILE -t $NAME:latest . - docker tag $NAME:latest ghcr.io/$NAME:latest - docker push ghcr.io/$NAME:latest - - - name: Publish tagged - run: | - VERSION=${{ steps.commit.outputs.sha }} - docker tag $NAME:latest ghcr.io/$NAME:$VERSION - docker push ghcr.io/$NAME:$VERSION - - gar-publish: - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Get modelconverter version - id: commit - run: echo "sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - - - name: Authenticate to Google Cloud - id: google-auth - uses: google-github-actions/auth@v2 - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - token_format: access_token - - name: Docker login to GAR uses: docker/login-action@v3 with: @@ -104,30 +62,56 @@ - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v1 - - name: Download file from GCS + - name: Download build files from GCS + if: ${{ inputs.package != 'hailo' }} run: | cd docker/extra_packages if [ "$PACKAGE" = "rvc4" ]; then - gsutil cp gs://luxonis-test-bucket/modelconverter/build-artifacts/snpe.zip . - elif [ "$PACKAGE" = "rvc2" ] || [ "$PACKAGE" = "rvc3" ]; then - gsutil cp gs://luxonis-test-bucket/modelconverter/build-artifacts/openvino_2022_3_vpux_drop_patched.tar.gz 
+ gcloud storage cp \ + "${GS_BUILD_ARTIFACTS}/snpe-${VERSION}.zip" \ + "snpe-${VERSION}.zip" + elif [ "${PACKAGE}" = "rvc2" ] || [ "${PACKAGE}" = "rvc3" ]; then + gcloud storage cp \ + "${GS_BUILD_ARTIFACTS}/openvino-${VERSION}.tar.gz" \ + "openvino-${VERSION}.tar.gz" fi - - name: Publish + - name: Build image + run: | + docker build -f "${DOCKERFILE}" -t "${LOCAL_NAME}" . + + - name: GHCR publish latest run: | - docker build -f $DOCKERFILE -t $NAME:latest . + GHCR_NAME="ghcr.io/${LOCAL_NAME}" + docker tag "${LOCAL_NAME}" "${GHCR_NAME}" + docker push "${GHCR_NAME}" - VERSION=${{ steps.commit.outputs.sha }} - GAR_NAME="$GAR_LOCATION-docker.pkg.dev/$PROJECT_ID/internal/$STEM" + - name: GHCR publish SHA + run: | + SHA=$(git rev-parse --short HEAD) + GHCR_NAME="ghcr.io/${NAME}:${VERSION}-${SHA}" + docker tag "${LOCAL_NAME}" "${GHCR_NAME}" + docker push "${GHCR_NAME}" - for tag in latest $VERSION; do - docker tag "$NAME:latest" "$GAR_NAME:$tag" - docker push "$GAR_NAME:$tag" - done + - name: GAR publish latest + run: | + GAR_NAME="${GAR_STEM}/internal/${STEM}:${VERSION}-latest" + docker tag "${LOCAL_NAME}" "${GAR_NAME}" + docker push "${GAR_NAME}" + + - name: GAR publish SHA + run: | + SHA=$(git rev-parse --short HEAD) + GAR_NAME="${GAR_STEM}/internal/${STEM}:${VERSION}-${SHA}" + docker tag "${LOCAL_NAME}" "${GAR_NAME}" + docker push "${GAR_NAME}" + + - name: GAR publish clients + run: | + read -r -a REPO_ARRAY <<< "${{ vars.EXTERNAL_CLIENTS }}" - IFS=' ' read -r -a REPO_ARRAY <<< "${{ vars.EXTERNAL_CLIENTS }}" for REPO in "${REPO_ARRAY[@]}"; do - GAR_CLIENT_TAG="$GAR_LOCATION-docker.pkg.dev/$PROJECT_ID/$REPO/$STEM:$VERSION" - docker tag "$NAME:latest" "$GAR_CLIENT_TAG" - docker push "$GAR_CLIENT_TAG" + GAR_CLIENT_NAME="${GAR_STEM}/${REPO}/${STEM}:${VERSION}-$(git rev-parse --short HEAD)" + docker tag "${LOCAL_NAME}" "${GAR_CLIENT_NAME}" + docker push "${GAR_CLIENT_NAME}" done diff --git a/.github/workflows/rvc2_publish.yaml b/.github/workflows/rvc2_publish.yaml index a8808ae..150c3f1 100644 
--- a/.github/workflows/rvc2_publish.yaml +++ b/.github/workflows/rvc2_publish.yaml @@ -10,7 +10,14 @@ on: jobs: rvc2-publish: + + strategy: + fail-fast: false + matrix: + version: ["2021.4.0", "2022.3.0"] + uses: ./.github/workflows/publish.yaml secrets: inherit with: package: rvc2 + version: ${{ matrix.version }} diff --git a/.github/workflows/rvc2_test.yaml b/.github/workflows/rvc2_test.yaml index 4b3c312..0957110 100644 --- a/.github/workflows/rvc2_test.yaml +++ b/.github/workflows/rvc2_test.yaml @@ -16,7 +16,14 @@ on: jobs: rvc2-test: + + strategy: + fail-fast: false + matrix: + version: ["2021.4.0", "2022.3.0"] + uses: ./.github/workflows/modelconverter_test.yaml secrets: inherit with: package: rvc2 + version: ${{ matrix.version }} diff --git a/.github/workflows/rvc3_publish.yaml b/.github/workflows/rvc3_publish.yaml index 7d1f297..037c590 100644 --- a/.github/workflows/rvc3_publish.yaml +++ b/.github/workflows/rvc3_publish.yaml @@ -14,3 +14,4 @@ jobs: secrets: inherit with: package: rvc3 + version: "2022.3.0" diff --git a/.github/workflows/rvc3_test.yaml b/.github/workflows/rvc3_test.yaml index 1b5c263..282f6cc 100644 --- a/.github/workflows/rvc3_test.yaml +++ b/.github/workflows/rvc3_test.yaml @@ -20,3 +20,4 @@ jobs: secrets: inherit with: package: rvc3 + version: "2022.3.0" diff --git a/.github/workflows/rvc4_publish.yaml b/.github/workflows/rvc4_publish.yaml index 61aa5a1..604cd40 100644 --- a/.github/workflows/rvc4_publish.yaml +++ b/.github/workflows/rvc4_publish.yaml @@ -10,7 +10,19 @@ on: jobs: rvc4-publish: + + strategy: + fail-fast: false + matrix: + version: + - "2.23.0" + - "2.24.0" + - "2.25.0" + - "2.26.2" + - "2.27.0" + uses: ./.github/workflows/publish.yaml secrets: inherit with: package: rvc4 + version: ${{ matrix.version }} diff --git a/.github/workflows/rvc4_test.yaml b/.github/workflows/rvc4_test.yaml index 9045a4e..468b524 100644 --- a/.github/workflows/rvc4_test.yaml +++ b/.github/workflows/rvc4_test.yaml @@ -16,7 +16,19 @@ on: jobs: 
rvc4-test: + + strategy: + fail-fast: false + matrix: + version: + - "2.23.0" + - "2.24.0" + - "2.25.0" + - "2.26.2" + - "2.27.0" + uses: ./.github/workflows/modelconverter_test.yaml secrets: inherit with: package: rvc4 + version: ${{ matrix.version }} diff --git a/README.md b/README.md index 8fe5230..315ce36 100644 --- a/README.md +++ b/README.md @@ -51,20 +51,33 @@ Otherwise follow the installation instructions for your OS from the [official we `ModelConverter` is in an experimental public beta stage. Some parts might change in the future. -To build the images, you need to download additional packages depending on the selected target. +To build the images, you need to download additional packages depending on the selected target and the desired version of the underlying conversion tools. -**RVC2 and RVC3** +**RVC2** -Requires `openvino_2022_3_vpux_drop_patched.tar.gz` to be present in `docker/extra_packages`. -You can download the archive [here](https://drive.google.com/file/d/1IXtYi1Mwpsg3pr5cDXlEHdSUZlwJRTVP/view?usp=share_link). +Requires `openvino-<version>.tar.gz` to be present in `docker/extra_packages/`. + +- Version `2022.3.0` archive can be downloaded from [here](https://drive.google.com/file/d/1IXtYi1Mwpsg3pr5cDXlEHdSUZlwJRTVP/view?usp=share_link). + +- Version `2021.4.0` archive can be downloaded from [here](https://storage.openvinotoolkit.org/repositories/openvino/packages/2021.4/l_openvino_toolkit_dev_ubuntu20_p_2021.4.582.tgz) + +You only need to rename the archive to either `openvino-2022.3.0.tar.gz` or `openvino-2021.4.0.tar.gz` and place it in the `docker/extra_packages` directory. + +**RVC3** + +Only the version `2022.3.0` of `OpenVino` is supported for `RVC3`. Follow the instructions for `RVC2` to use the correct archive. **RVC4** -Requires `snpe.zip` archive to be present in `docker/extra_packages`. 
You can download an archive with the current version [here](https://softwarecenter.qualcomm.com/api/download/software/qualcomm_neural_processing_sdk/v2.23.0.24.06.24.zip). You only need to rename it to `snpe.zip` and place it in the `docker/extra_packages` directory. +Requires `snpe-<version>.zip` archive to be present in `docker/extra_packages`. You can download version `2.23.0` from [here](https://softwarecenter.qualcomm.com/api/download/software/qualcomm_neural_processing_sdk/v2.23.0.24.06.24.zip). You only need to rename it to `snpe-2.23.0.zip` and place it in the `docker/extra_packages` directory. **HAILO** -Requires `hailo_ai_sw_suite_2024-04:1` docker image to be present on the system. You can download the image from the [Hailo website](https://developer.hailo.ai/developer-zone/sw-downloads/). Furthermore, you need to use the `docker/hailo/Dockerfile.public` file to build the image. The `docker/hailo/Dockerfile` is for internal use only. +Requires `hailo_ai_sw_suite_<version>:1` docker image to be present on the system. You can obtain the image by following the instructions on [Hailo website](https://developer.hailo.ai/developer-zone/sw-downloads/). + +After you obtain the image, you need to rename it to `hailo_ai_sw_suite_<version>:1` using `docker tag <downloaded_image> hailo_ai_sw_suite_<version>:1`. + +Furthermore, you need to use the `docker/hailo/Dockerfile.public` file to build the image. The `docker/hailo/Dockerfile` is for internal use only. 
### Instructions diff --git a/docker/bases/Dockerfile.base-hailo b/docker/bases/Dockerfile.base-hailo deleted file mode 100644 index 8d027da..0000000 --- a/docker/bases/Dockerfile.base-hailo +++ /dev/null @@ -1,30 +0,0 @@ -FROM hailo_ai_sw_suite_2024-07:1 -USER root - -COPY requirements.txt requirements.txt - -RUN pip install --upgrade pip && \ - pip install -r requirements.txt && \ - pip install --extra-index-url \ - https://developer.download.nvidia.com/compute/redist \ - nvidia-dali-cuda110 && \ - pip install --extra-index-url \ - https://developer.download.nvidia.com/compute/redist \ - nvidia-dali-tf-plugin-cuda110 - -RUN rm -rf \ - /local/workspace/tappas \ - /opt/google \ - /local/workspace/doc \ - /local/workspace/hailort_examples \ - /usr/share \ - /usr/bin/docker* \ - /usr/bin/containerd* \ - /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_tutorials - -RUN pip install pip-autoremove && \ - pip-autoremove -y torch jupyter plotly matplotlib ipython \ - tensorboard pip-autoremove && \ - pip install psutil && \ - pip cache purge && \ - rm -rf ~/.cache diff --git a/docker/hailo/Dockerfile b/docker/hailo/Dockerfile index 75b6ca5..944848b 100644 --- a/docker/hailo/Dockerfile +++ b/docker/hailo/Dockerfile @@ -2,18 +2,40 @@ # If you want to build the Model Converter Docker image, # please use the Dockerfile.public file instead. -FROM ghcr.io/luxonis/modelconverter-base-hailo:latest +ARG VERSION=2.23.0 + +FROM ghcr.io/luxonis/modelconverter-hailo:${VERSION}-base + +ENV IN_DOCKER= +ENV VERSION=${VERSION} + +COPY requirements.txt requirements.txt + +RUN < /etc/profile.d/certifi.sh + set -e + + echo "export SSL_CERT_FILE=$(python -m certifi)" \ + > /etc/profile.d/certifi.sh + + pip install -e . 
--no-deps --no-cache-dir + chmod +x /app/entrypoint.sh + +EOF -ENV IN_DOCKER= ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/docker/hailo/Dockerfile.base b/docker/hailo/Dockerfile.base new file mode 100644 index 0000000..9bfa3cd --- /dev/null +++ b/docker/hailo/Dockerfile.base @@ -0,0 +1,32 @@ +ARG VERSION=2024.07 + +FROM hailo_ai_sw_suite_${VERSION/./-}:1 as BASE +USER root + +RUN rm -rf \ + /local/workspace/tappas \ + /opt/google \ + /local/workspace/doc \ + /local/workspace/hailort_examples \ + /usr/share \ + /usr/bin/docker* \ + /usr/bin/containerd* \ + /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_tutorials + + +RUN pip install --extra-index-url \ + https://developer.download.nvidia.com/compute/redist \ + nvidia-dali-cuda110 + +RUN pip install --extra-index-url \ + https://developer.download.nvidia.com/compute/redist \ + nvidia-dali-tf-plugin-cuda110 + + +RUN pip install pip-autoremove +RUN pip-autoremove -y torch jupyter plotly matplotlib \ + ipython tensorboard pip-autoremove + +RUN pip install psutil +RUN pip cache purge +RUN rm -rf ~/.cache diff --git a/docker/hailo/Dockerfile.public b/docker/hailo/Dockerfile.public index 9171c31..20ceb03 100644 --- a/docker/hailo/Dockerfile.public +++ b/docker/hailo/Dockerfile.public @@ -1,17 +1,6 @@ FROM hailo_ai_sw_suite_2024-07:1 USER root -COPY requirements.txt . 
- -RUN pip install --upgrade pip && \ - pip install -r requirements.txt && \ - pip install --extra-index-url \ - https://developer.download.nvidia.com/compute/redist \ - nvidia-dali-cuda110 && \ - pip install --extra-index-url \ - https://developer.download.nvidia.com/compute/redist \ - nvidia-dali-tf-plugin-cuda110 - RUN rm -rf \ /local/workspace/tappas \ /opt/google \ @@ -22,22 +11,58 @@ RUN rm -rf \ /usr/bin/containerd* \ /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_tutorials -RUN pip install pip-autoremove && \ - pip-autoremove -y torch jupyter plotly matplotlib ipython \ - tensorboard pip-autoremove && \ - pip install psutil && \ - pip cache purge && \ +COPY requirements.txt . + +RUN < /etc/profile.d/certifi.sh + chmod +x /app/entrypoint.sh -COPY modelconverter pyproject.toml requirements.txt /app/modelconverter/ -RUN cd modelconverter -RUN cd modelconverter && pip install -e . --no-deps +EOF -RUN echo "export SSL_CERT_FILE=$(python -m certifi)" > /etc/profile.d/certifi.sh ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/docker/patches/mo-2021.4.patch b/docker/patches/mo-2021.4.patch new file mode 100644 index 0000000..86fcc83 --- /dev/null +++ b/docker/patches/mo-2021.4.patch @@ -0,0 +1,26 @@ +--- /opt/intel/deployment_tools/model_optimizer/mo/main.py 2021-06-22 20:30:31.000000000 +0000 ++++ shared_with_container/main.py 2024-10-10 12:47:05.718612915 +0000 +@@ -29,7 +29,7 @@ + from mo.utils.cli_parser import get_placeholder_shapes, get_tuple_values, get_model_name, \ + get_common_cli_options, get_caffe_cli_options, get_tf_cli_options, get_mxnet_cli_options, get_kaldi_cli_options, \ + get_onnx_cli_options, get_mean_scale_dictionary, parse_tuple_pairs, get_freeze_placeholder_values, get_meta_info, \ +- parse_transform, check_available_transforms ++ parse_transform, check_available_transforms, parse_input_value + from mo.utils.error import Error, FrameworkError + from mo.utils.find_ie_version import find_ie_version + from 
mo.utils.get_ov_update_message import get_ov_update_message +@@ -206,7 +206,13 @@ + + mean_values = parse_tuple_pairs(argv.mean_values) + scale_values = parse_tuple_pairs(argv.scale_values) +- mean_scale = get_mean_scale_dictionary(mean_values, scale_values, argv.input) ++ ++ __input_names = [] ++ for input_value in argv.input.split(','): ++ __node_name = parse_input_value(input_value)[0] ++ __input_names.append(__node_name) ++ __input_names = ",".join(__input_names) ++ mean_scale = get_mean_scale_dictionary(mean_values, scale_values, __input_names) + argv.mean_scale_values = mean_scale + + if not os.path.exists(argv.output_dir): diff --git a/docker/extra_packages/mo_patch.diff b/docker/patches/mo-2022.3.patch similarity index 99% rename from docker/extra_packages/mo_patch.diff rename to docker/patches/mo-2022.3.patch index f9aaf7d..79e23b7 100644 --- a/docker/extra_packages/mo_patch.diff +++ b/docker/patches/mo-2022.3.patch @@ -1,11 +1,11 @@ --- convert_impl.py 2023-08-09 14:48:35.300720667 +0000 +++ /usr/local/lib/python3.8/site-packages/openvino/tools/mo/convert_impl.py 2023-08-09 14:50:48.555078595 +0000 @@ -275,7 +275,7 @@ - + mean_values = parse_tuple_pairs(argv.mean_values) scale_values = parse_tuple_pairs(argv.scale_values) - mean_scale = get_mean_scale_dictionary(mean_values, scale_values, argv.input) + mean_scale = get_mean_scale_dictionary(mean_values, scale_values, ','.join(argv.inputs_list)) argv.mean_scale_values = mean_scale argv.layout_values = get_layout_values(argv.layout, argv.source_layout, argv.target_layout) - + diff --git a/docker/rvc2/Dockerfile b/docker/rvc2/Dockerfile index 5b1234a..2a26b2e 100644 --- a/docker/rvc2/Dockerfile +++ b/docker/rvc2/Dockerfile @@ -1,37 +1,99 @@ -FROM python:3.8-slim as BASE +FROM python:3.8-slim AS base -RUN apt-get update && \ - apt-get install -y \ - cmake unzip perl libatomic1 libc++-dev ffmpeg libcurl4 libncurses5 llvm-14-runtime patch git +ARG VERSION=2022.3.0 +ENV VERSION=${VERSION} -COPY --link 
docker/scripts /scripts -RUN bash /scripts/install_openssl.sh -COPY --link docker/extra_packages/openvino_2022_3_vpux_drop_patched.tar.gz . -RUN mkdir /opt/intel -RUN tar xvf openvino_2022_3_vpux_drop_patched.tar.gz \ - -C /opt/intel/ --strip-components 1 -RUN sed -i 's/libtbb2/libtbbmalloc2/g' \ - /opt/intel/install_dependencies/install_openvino_dependencies.sh && \ - bash /opt/intel/install_dependencies/install_openvino_dependencies.sh -y +RUN <> ~/.bashrc && \ - chmod +x /app/entrypoint.sh && \ - mkdir /app/modelconverter +RUN <> ~/.bashrc + chmod +x /app/entrypoint.sh + +EOF -COPY --link modelconverter pyproject.toml /app/modelconverter/ -RUN cd modelconverter && pip install -e . --no-deps --no-cache-dir -ENV IN_DOCKER= ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/docker/rvc2/entrypoint.sh b/docker/rvc2/entrypoint.sh index 09fd290..2be490d 100644 --- a/docker/rvc2/entrypoint.sh +++ b/docker/rvc2/entrypoint.sh @@ -7,7 +7,16 @@ for arg in "${args[@]}"; do done set -- -source /opt/intel/setupvars.sh + +if [ ${VERSION} = "2021.4.0" ]; then + source /opt/intel/bin/setupvars.sh +else + source /opt/intel/setupvars.sh +fi + +if [[ $PYTHONPATH != *: ]]; then + export PYTHONPATH=$PYTHONPATH: +fi export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/python3.8/site-packages/openvino/libs/ diff --git a/docker/rvc2/requirements.txt b/docker/rvc2/requirements.txt index 081a415..fb1ac24 100644 --- a/docker/rvc2/requirements.txt +++ b/docker/rvc2/requirements.txt @@ -1,2 +1,4 @@ tflite2onnx bsdiff4 +openvino==${VERSION} +openvino-dev==${VERSION} diff --git a/docker/rvc3/Dockerfile b/docker/rvc3/Dockerfile index 7b3c34c..9622c0d 100644 --- a/docker/rvc3/Dockerfile +++ b/docker/rvc3/Dockerfile @@ -1,36 +1,65 @@ -FROM python:3.8-slim as BASE +FROM python:3.8-slim AS base -RUN apt-get update && \ - apt-get install -y \ - cmake unzip perl libatomic1 libc++-dev ffmpeg libcurl4 libncurses5 llvm-14-runtime patch git +RUN <> ~/.bashrc && \ - chmod +x /app/entrypoint.sh && \ - mkdir 
/app/modelconverter +RUN <> ~/.bashrc + chmod +x /app/entrypoint.sh + +EOF -COPY --link modelconverter pyproject.toml /app/modelconverter/ -RUN cd modelconverter && pip install -e . --no-deps --no-cache-dir -ENV IN_DOCKER= ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/docker/rvc4/Dockerfile b/docker/rvc4/Dockerfile index 0227236..5d3ad4d 100644 --- a/docker/rvc4/Dockerfile +++ b/docker/rvc4/Dockerfile @@ -1,40 +1,76 @@ -FROM python:3.10-slim as BASE - -RUN apt-get update && \ - apt-get install -y \ - cmake unzip perl libatomic1 libc++-dev ffmpeg \ - libcurl4 libncurses5 llvm-14-runtime git - -COPY --link docker/extra_packages/snpe.zip . -RUN unzip snpe.zip -d /opt - -RUN ls /opt/quairt && mv /opt/qairt/* /opt/snpe || true - -RUN rm -rf /opt/snpe/Uninstall && \ - rm -rf /opt/snpe/docs && \ - rm -rf /opt/snpe/examples && \ - rm -rf /opt/snpe/lib/hexagon* && \ - rm -rf /opt/snpe/lib/*android* && \ - rm -rf /opt/snpe/lib/*ubuntu* && \ - rm -rf /opt/snpe/lib/*windows* && \ - rm -rf /opt/snpe/bin/*windows* && \ - rm -rf /opt/snpe/bin/*android* && \ +FROM python:3.10-slim AS base + +ARG VERSION=2.23.0 +ENV VERSION=${VERSION} + +RUN < /etc/ld.so.conf.d/openssl-1.1.1b.conf -cd /opt/openssl/openssl-1.1.1b -./config --prefix=/opt/openssl --openssldir=/opt/openssl/ssl -make -make install diff --git a/modelconverter/__init__.py b/modelconverter/__init__.py index ca853c8..a8861ab 100644 --- a/modelconverter/__init__.py +++ b/modelconverter/__init__.py @@ -1,7 +1,7 @@ import pkg_resources from luxonis_ml.utils import PUT_FILE_REGISTRY -__version__ = "0.2.0" +__version__ = "0.3.0" def load_put_file_plugins() -> None: diff --git a/modelconverter/__main__.py b/modelconverter/__main__.py index a596240..869bc80 100644 --- a/modelconverter/__main__.py +++ b/modelconverter/__main__.py @@ -59,6 +59,35 @@ class Format(str, Enum): help="One of the supported formats.", ), ] +VersionOption: TypeAlias = Annotated[ + Optional[str], + typer.Option( + "-v", + "--version", + help="""Version of 
the underlying conversion tools to use. + Only takes effect when --dev is used. + Available options differ based on the target platform: + + - `RVC2`: + - `2021.4.0` + - `2022.3.0` (default) + + - `RVC3`: + - `2022.3.0` (default) + + - `RVC4`: + - `2.23.0` (default) + - `2.24.0` + - `2.25.0` + - `2.26.2` + - `2.27.0` + + - `HAILO`: + - `2024.04` (default), + - `2024.07` (default)""", + show_default=False, + ), +] PathOption: TypeAlias = Annotated[ Optional[str], typer.Option( @@ -234,6 +263,7 @@ def infer( ), ] = None, dev: DevOption = False, + version: VersionOption = None, gpu: Annotated[ bool, typer.Option(help="Use GPU for conversion. Only relevant for HAILO."), @@ -243,8 +273,6 @@ def infer( """Runs inference on the specified target platform.""" tag = "dev" if dev else "latest" - if dev: - docker_build(target.value, tag=tag) if in_docker(): setup_logging(file="modelconverter.log", use_rich=True) @@ -262,6 +290,9 @@ def infer( logger.exception("Encountered an unexpected error!") exit(2) else: + image = None + if dev: + image = docker_build(target.value, tag=tag, version=version) args = [ "infer", target.value, @@ -276,17 +307,23 @@ def infer( args.extend(["--output-dir", output_dir]) if opts is not None: args.extend(opts) - docker_exec(target.value, *args, tag=tag, use_gpu=gpu) + docker_exec(target.value, *args, tag=tag, use_gpu=gpu, image=image) @app.command() def shell( - target: TargetArgument, dev: DevOption = False, gpu: GPUOption = True + target: TargetArgument, + dev: DevOption = False, + version: VersionOption = None, + gpu: GPUOption = True, ): """Boots up a shell inside a docker container for the specified target platform.""" + image = None if dev: - docker_build(target.value, tag="dev") - docker_exec(target.value, tag="dev" if dev else "latest", use_gpu=gpu) + image = docker_build(target.value, tag="dev", version=version) + docker_exec( + target.value, tag="dev" if dev else "latest", use_gpu=gpu, image=image + ) @app.command( @@ -360,6 +397,7 @@ def 
convert( dev: DevOption = False, to: FormatOption = Format.NATIVE, gpu: GPUOption = True, + version: VersionOption = None, main_stage: Annotated[ Optional[str], typer.Option( @@ -385,8 +423,6 @@ def convert( """Exports the model for the specified target platform.""" tag = "dev" if dev else "latest" - if dev: - docker_build(target.value.lower(), tag=tag) if archive_preprocess and to != Format.NN_ARCHIVE: raise ValueError( @@ -489,6 +525,10 @@ def convert( logger.exception("Encountered an unexpected error!") exit(2) else: + image = None + if dev: + image = docker_build(target.value, tag=tag, version=version) + args = [ "convert", target.value, @@ -506,7 +546,7 @@ def convert( args.extend(["--path", path]) if opts is not None: args.extend(opts) - docker_exec(target.value, *args, tag=tag, use_gpu=gpu) + docker_exec(target.value, *args, tag=tag, use_gpu=gpu, image=image) @app.command() @@ -570,7 +610,7 @@ def archive( def version_callback(value: bool): if value: - typer.echo(f"ModelConverter Version: {version(__package__)}") + typer.echo(f"ModelConverter Version: {version('modelconv')}") raise typer.Exit() @@ -579,6 +619,7 @@ def common( _: Annotated[ bool, typer.Option( + "-v", "--version", callback=version_callback, help="Show version and exit.", diff --git a/modelconverter/packages/rvc2/exporter.py b/modelconverter/packages/rvc2/exporter.py index f49e20e..7a0817f 100644 --- a/modelconverter/packages/rvc2/exporter.py +++ b/modelconverter/packages/rvc2/exporter.py @@ -7,7 +7,7 @@ from os import environ as env from os import path from pathlib import Path -from typing import Any, Dict, Final +from typing import Any, Dict, Final, Iterable import tflite2onnx from rich.progress import track @@ -17,15 +17,28 @@ subprocess_run, ) from modelconverter.utils.config import SingleStageConfig -from modelconverter.utils.types import Encoding, InputFileType, Target +from modelconverter.utils.types import ( + DataType, + Encoding, + InputFileType, + Target, +) from ..base_exporter 
import Exporter logger = getLogger(__name__) -COMPILE_TOOL: Final[ - str -] = f'{env["INTEL_OPENVINO_DIR"]}/tools/compile_tool/compile_tool' +OV_VERSION: Final[str] = version("openvino") +COMPILE_TOOL: str + +OV_2021: Final[bool] = OV_VERSION.startswith("2021") + +if OV_2021: + COMPILE_TOOL = f'{env["INTEL_OPENVINO_DIR"]}/deployment_tools/tools/compile_tool/compile_tool' +else: + COMPILE_TOOL = ( + f'{env["INTEL_OPENVINO_DIR"]}/tools/compile_tool/compile_tool' + ) DEFAULT_SUPER_SHAVES: Final[int] = 8 @@ -54,11 +67,12 @@ def __init__(self, config: SingleStageConfig, output_dir: Path): def _export_openvino_ir(self) -> Path: args = self.mo_args self._add_args(args, ["--output_dir", self.intermediate_outputs_dir]) - self._add_args( - args, ["--output", ",".join(name for name in self.outputs)] - ) + self._add_args(args, ["--output", ",".join(self.outputs)]) if self.compress_to_fp16: - self._add_args(args, ["--compress_to_fp16"]) + if OV_2021: + self._add_args(args, ["--data_type", "FP16"]) + else: + self._add_args(args, ["--compress_to_fp16"]) if "--input" not in args: inp_str = "" @@ -67,14 +81,18 @@ def _export_openvino_ir(self) -> Path: inp_str += "," inp_str += name if inp.shape is not None: - inp_str += f'[{",".join(map(str, inp.shape))}]' + inp_str += f"{_lst_join(inp.shape, sep=' ')}" if inp.data_type is not None: - inp_str += f"{{{inp.data_type.as_openvino_dtype()}}}" + if OV_2021 and self.compress_to_fp16: + data_type = DataType("float16") + else: + data_type = inp.data_type + inp_str += f"{{{data_type.as_openvino_dtype()}}}" if inp.frozen_value is not None: if len(inp.frozen_value) == 1: value = inp.frozen_value[0] else: - value = f'[{",".join(map(str, inp.frozen_value))}]' + value = f"{_lst_join(inp.frozen_value)}" inp_str += f"->{value}" args.extend(["--input", inp_str]) @@ -100,11 +118,13 @@ def _export_openvino_ir(self) -> Path: for name, inp in self.inputs.items(): if inp.mean_values is not None: self._add_args( - args, ["--mean_values", 
f"{name}{inp.mean_values}"] + args, + ["--mean_values", f"{name}{_lst_join(inp.mean_values)}"], ) if inp.scale_values is not None: self._add_args( - args, ["--scale_values", f"{name}{inp.scale_values}"] + args, + ["--scale_values", f"{name}{_lst_join(inp.scale_values)}"], ) if inp.reverse_input_channels: self._add_args(args, ["--reverse_input_channels"]) @@ -123,7 +143,8 @@ def _check_reverse_channels(self): @staticmethod def _write_config(shaves: int, slices: int) -> str: with tempfile.NamedTemporaryFile(suffix=".conf", delete=False) as conf: - conf.write(b"MYRIAD_ENABLE_MX_BOOT NO\n") + if not OV_2021: + conf.write(b"MYRIAD_ENABLE_MX_BOOT NO\n") conf.write(f"MYRIAD_NUMBER_OF_SHAVES {shaves}\n".encode()) conf.write(f"MYRIAD_NUMBER_OF_CMX_SLICES {slices}\n".encode()) conf.write(b"MYRIAD_THROUGHPUT_STREAMS 1\n") @@ -152,16 +173,18 @@ def _transform_tflite_to_onnx(self) -> None: if lt[-1] == "C": if len(lt) == 4 and lt[0] == "N": - self._add_args( - self.mo_args, ["--layout", f"{name}(nchw->nhwc)"] - ) + if not OV_2021: + self._add_args( + self.mo_args, ["--layout", f"{name}(nchw->nhwc)"] + ) inp.shape = [sh[0], sh[3], sh[1], sh[2]] inp.layout = f"{lt[0]}{lt[3]}{lt[1]}{lt[2]}" elif len(inp.layout) == 3: - self._add_args( - self.mo_args, ["--layout", f"{name}(chw->hwc)"] - ) + if not OV_VERSION.startswith("2021.4"): + self._add_args( + self.mo_args, ["--layout", f"{name}(chw->hwc)"] + ) inp.shape = [sh[2], sh[0], sh[1]] inp.layout = f"{lt[2]}{lt[0]}{lt[1]}" @@ -329,3 +352,7 @@ def exporter_buildinfo(self) -> Dict[str, Any]: "target_devices": [self.device], **self._device_specific_buildinfo, } + + +def _lst_join(args: Iterable[Any], sep: str = ",") -> str: + return f"[{sep.join(map(str, args))}]" diff --git a/modelconverter/packages/rvc2/inferer.py b/modelconverter/packages/rvc2/inferer.py index 1adaa21..18113cf 100644 --- a/modelconverter/packages/rvc2/inferer.py +++ b/modelconverter/packages/rvc2/inferer.py @@ -2,7 +2,6 @@ from typing import Dict import numpy as np 
-from openvino.inference_engine.ie_api import IECore from modelconverter.utils import read_image @@ -11,6 +10,8 @@ class RVC2Inferer(Inferer): def setup(self): + from openvino.inference_engine.ie_api import IECore + self.xml_path = self.model_path self.bin_path = self.model_path.with_suffix(".bin") ie = IECore() diff --git a/modelconverter/utils/config.py b/modelconverter/utils/config.py index 8be54c4..aab6582 100644 --- a/modelconverter/utils/config.py +++ b/modelconverter/utils/config.py @@ -534,7 +534,6 @@ def _validate_single_stage_name(self) -> Self: return self - def _extract_bin_xml_from_ir(ir_path: Any) -> Tuple[Path, Path]: """Extracts the corresponding second path from a single IR path. diff --git a/modelconverter/utils/docker_utils.py b/modelconverter/utils/docker_utils.py index 072c048..cfdc349 100644 --- a/modelconverter/utils/docker_utils.py +++ b/modelconverter/utils/docker_utils.py @@ -4,7 +4,7 @@ import subprocess import tempfile from pathlib import Path -from typing import cast +from typing import Literal, Optional, cast import yaml from luxonis_ml.utils import environ @@ -16,6 +16,17 @@ logger = logging.getLogger(__name__) +def get_default_target_version( + target: Literal["rvc2", "rvc3", "rvc4", "hailo"], +) -> str: + return { + "rvc2": "2022.3.0", + "rvc3": "2022.3.0", + "rvc4": "2.23.0", + "hailo": "2024.04", + }[target] + + def generate_compose_config(image: str, gpu: bool = False) -> str: config = { "services": { @@ -65,20 +76,38 @@ def check_docker() -> None: # NOTE: docker SDK is not used here because it's too slow -def docker_build(target: str, tag: str) -> str: +def docker_build( + target: Literal["rvc2", "rvc3", "rvc4", "hailo"], + tag: str, + version: Optional[str] = None, +) -> str: check_docker() + if version is None: + version = get_default_target_version(target) + + tag = f"{version}-{tag}" repository = f"luxonis/modelconverter-{target}:{tag}" - result = subprocess.run( - f"docker build -f docker/{target}/Dockerfile " - f"-t 
{repository} .".split(), - ) + args = [ + "docker", + "build", + "-f", + f"docker/{target}/Dockerfile", + "-t", + repository, + ".", + ] + if version is not None: + args += ["--build-arg", f"VERSION={version}"] + result = subprocess.run(args) if result.returncode != 0: raise RuntimeError("Failed to build the docker image") return repository -def get_docker_image(target: str, tag: str) -> str: +def get_docker_image( + target: Literal["rvc2", "rvc3", "rvc4", "hailo"], tag: str +) -> str: check_docker() client = docker.from_env() @@ -102,8 +131,14 @@ def get_docker_image(target: str, tag: str) -> str: return image_name -def docker_exec(target: str, *args: str, tag: str, use_gpu: bool) -> None: - image = get_docker_image(target, tag) +def docker_exec( + target: Literal["rvc2", "rvc3", "rvc4", "hailo"], + *args: str, + tag: str, + use_gpu: bool, + image: Optional[str] = None, +) -> None: + image = image or get_docker_image(target, tag) with tempfile.NamedTemporaryFile(delete=False) as f: f.write( diff --git a/modelconverter/utils/metadata.py b/modelconverter/utils/metadata.py index 685ac6b..bdfe802 100644 --- a/modelconverter/utils/metadata.py +++ b/modelconverter/utils/metadata.py @@ -1,5 +1,6 @@ import io from dataclasses import dataclass +from importlib.metadata import version from pathlib import Path from typing import Dict, List @@ -76,6 +77,57 @@ def _get_metadata_dlc(model_path: Path) -> Metadata: def _get_metadata_ir(bin_path: Path, xml_path: Path) -> Metadata: + if version("openvino") == "2021.4.0": + return _get_metadata_ir_ie(bin_path, xml_path) + return _get_metadata_ir_runtime(bin_path, xml_path) + + +def _get_metadata_ir_ie(bin_path: Path, xml_path: Path) -> Metadata: + """Extracts metadata from an OpenVINO IR model using the Inference Engine API. + + Args: + bin_path (Path): Path to the model's .bin file. + xml_path (Path): Path to the model's .xml file. + + Returns: + Metadata: An object containing input/output shapes and data types. 
+ """ + from openvino.inference_engine import IECore + + ie = IECore() + try: + network = ie.read_network(model=str(xml_path), weights=str(bin_path)) + except Exception as e: + raise ValueError( + f"Failed to load IR model: `{bin_path}` and `{xml_path}`" + ) from e + + input_shapes = {} + input_dtypes = {} + output_shapes = {} + output_dtypes = {} + + for input_name, input_info in network.input_info.items(): + input_shapes[input_name] = list(input_info.input_data.shape) + + ie_precision = input_info.input_data.precision + input_dtypes[input_name] = DataType.from_ir_ie_dtype(ie_precision) + + for output_name, output_data in network.outputs.items(): + output_shapes[output_name] = list(output_data.shape) + + ie_precision = output_data.precision + output_dtypes[output_name] = DataType.from_ir_ie_dtype(ie_precision) + + return Metadata( + input_shapes=input_shapes, + input_dtypes=input_dtypes, + output_shapes=output_shapes, + output_dtypes=output_dtypes, + ) + + +def _get_metadata_ir_runtime(bin_path: Path, xml_path: Path) -> Metadata: from openvino.runtime import Core ie = Core() @@ -94,13 +146,13 @@ def _get_metadata_ir(bin_path: Path, xml_path: Path) -> Metadata: for inp in model.inputs: name = list(inp.names)[0] input_shapes[name] = list(inp.shape) - input_dtypes[name] = DataType.from_ir_dtype( + input_dtypes[name] = DataType.from_ir_runtime_dtype( inp.element_type.get_type_name() ) for output in model.outputs: name = list(output.names)[0] output_shapes[name] = list(output.shape) - output_dtypes[name] = DataType.from_ir_dtype( + output_dtypes[name] = DataType.from_ir_runtime_dtype( output.element_type.get_type_name() ) diff --git a/modelconverter/utils/types.py b/modelconverter/utils/types.py index 26c77f7..fee1d89 100644 --- a/modelconverter/utils/types.py +++ b/modelconverter/utils/types.py @@ -1,7 +1,7 @@ from enum import Enum from pathlib import Path - from typing import Union + import numpy as np from onnx.onnx_pb import TensorProto @@ -131,7 +131,27 @@ def 
from_numpy_dtype(cls, dtype: np.dtype) -> "DataType": return cls(dtype_map[dtype]) @classmethod - def from_ir_dtype(cls, dtype: str) -> "DataType": + def from_ir_ie_dtype(cls, dtype: str) -> "DataType": + dtype_map = { + "FP16": "float16", + "FP32": "float32", + "FP64": "float64", + "I8": "int8", + "I16": "int16", + "I32": "int32", + "I64": "int64", + "U8": "uint8", + "U16": "uint16", + "U32": "uint32", + "U64": "uint64", + "BOOL": "boolean", + } + if dtype not in dtype_map: + raise ValueError(f"Unsupported IR data type: `{dtype}`") + return cls(dtype_map[dtype]) + + @classmethod + def from_ir_runtime_dtype(cls, dtype: str) -> "DataType": dtype_map = { "f16": "float16", "f32": "float32", @@ -147,7 +167,7 @@ def from_ir_dtype(cls, dtype: str) -> "DataType": "boolean": "boolean", } if dtype not in dtype_map: - raise ValueError(f"Unsupported IR data type: `{dtype}`") + raise ValueError(f"Unsupported IR runtime data type: `{dtype}`") return cls(dtype_map[dtype]) def as_numpy_dtype(self) -> np.dtype: diff --git a/tests/conftest.py b/tests/conftest.py index 0d1552b..4c675d5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -91,11 +91,26 @@ setup_logging(use_rich=True) +def pytest_addoption(parser): + parser.addoption( + "--tool-version", + action="store", + required=False, + default="0.0.0", + help="Version of the internal conversion tool", + ) + + +@pytest.fixture(scope="session") +def tool_version(request): + return request.config.getoption("--tool-version") + + def prepare_fixture( service, model_name, dataset_url, metric, input_names, extra_args ): @pytest.fixture(scope="session") - def _fixture(): + def _fixture(tool_version): return prepare( service=service, model_name=model_name, @@ -103,6 +118,7 @@ def _fixture(): metric=metric, input_names=input_names, extra_args=extra_args, + tool_version=tool_version, ) return _fixture @@ -130,6 +146,7 @@ def prepare( dataset_url: str, metric: Type[Metric], input_names: List[str], + tool_version: str, model_type: str = 
"onnx", extra_args: str = "", ) -> Tuple[ @@ -195,6 +212,7 @@ def prepare( f"--path {config_url} " f"--output-dir _{model_name}-test " "--dev " + f"--version {tool_version} " "--no-gpu " f"input_model {file_url} " "hailo.compression_level 0 " diff --git a/tests/test_packages/common.py b/tests/test_packages/common.py index 5692ef6..15b31e1 100644 --- a/tests/test_packages/common.py +++ b/tests/test_packages/common.py @@ -24,7 +24,7 @@ def check_convert(convert_env): assert result.returncode == 0 -def mnist_infer(mnist_env): +def mnist_infer(mnist_env, tool_version: str): ( config_url, converted_model_path, @@ -46,6 +46,7 @@ def mnist_infer(mnist_env): f"--input-path {input_files_dir.parent} " f"--path {config_url} " "--dev " + f"--version {tool_version} " "--no-gpu" ) assert result.returncode == 0, result.stderr + result.stdout @@ -60,7 +61,7 @@ def mnist_infer(mnist_env): compare_metrics(metric.get_result(), expected_metric) -def resnet18_infer(resnet18_env): +def resnet18_infer(resnet18_env, tool_version: str): ( config_url, converted_model_path, @@ -87,6 +88,7 @@ def resnet18_infer(resnet18_env): f"--input-path {input_files_dir.parent} " f"--path {config_url} " "--dev " + f"--version {tool_version} " "--no-gpu" ) assert result.returncode == 0, result.stderr + result.stdout @@ -101,7 +103,7 @@ def resnet18_infer(resnet18_env): compare_metrics(metric.get_result(), expected_metric) -def yolov6n_infer(yolov6n_env): +def yolov6n_infer(yolov6n_env, tool_version: str): output_names = [f"output{i}_yolov6r2" for i in range(1, 4)] ( config_url, @@ -132,6 +134,7 @@ def yolov6n_infer(yolov6n_env): f"--input-path {input_files_dir.parent} " f"--path {config_url} " "--dev " + f"--version {tool_version} " "--no-gpu" ) assert result.returncode == 0, result.stderr + result.stdout diff --git a/tests/test_packages/test_hailo.py b/tests/test_packages/test_hailo.py index 509da80..ecd0ccf 100644 --- a/tests/test_packages/test_hailo.py +++ b/tests/test_packages/test_hailo.py @@ -1,37 
+1,25 @@ -import pytest - -from .common import check_convert, mnist_infer, resnet18_infer, yolov6n_infer +from .common import check_convert, mnist_infer, resnet18_infer def test_mnist_convert(hailo_mnist_onnx_env): check_convert(hailo_mnist_onnx_env) -def test_mnist_infer(hailo_mnist_onnx_env): - mnist_infer(hailo_mnist_onnx_env) +def test_mnist_infer(hailo_mnist_onnx_env, tool_version): + mnist_infer(hailo_mnist_onnx_env, tool_version) def test_resnet18_convert(hailo_resnet18_onnx_env): check_convert(hailo_resnet18_onnx_env) -def test_resnet18_infer(hailo_resnet18_onnx_env): - resnet18_infer(hailo_resnet18_onnx_env) +def test_resnet18_infer(hailo_resnet18_onnx_env, tool_version): + resnet18_infer(hailo_resnet18_onnx_env, tool_version) def test_resnet18_archive_convert(hailo_resnet18_archive_env): check_convert(hailo_resnet18_archive_env) -def test_resnet18_archive_infer(hailo_resnet18_archive_env): - resnet18_infer(hailo_resnet18_archive_env) - - -@pytest.mark.skip(reason="Cannot be converted.") -def test_yolov6_convert(hailo_yolov6n_env): - check_convert(hailo_yolov6n_env) - - -@pytest.mark.skip(reason="Cannot be converted.") -def test_yolov6n_infer(hailo_yolov6n_onnx_env): - yolov6n_infer(hailo_yolov6n_onnx_env) +def test_resnet18_archive_infer(hailo_resnet18_archive_env, tool_version): + resnet18_infer(hailo_resnet18_archive_env, tool_version) diff --git a/tests/test_packages/test_rvc2.py b/tests/test_packages/test_rvc2.py index 3f3fe0b..d4724a0 100644 --- a/tests/test_packages/test_rvc2.py +++ b/tests/test_packages/test_rvc2.py @@ -7,8 +7,8 @@ def test_mnist_convert(rvc2_mnist_onnx_env): check_convert(rvc2_mnist_onnx_env) -def test_mnist_infer(rvc2_mnist_onnx_env): - mnist_infer(rvc2_mnist_onnx_env) +def test_mnist_infer(rvc2_mnist_onnx_env, tool_version): + mnist_infer(rvc2_mnist_onnx_env, tool_version) def test_resnet18_convert(rvc2_resnet18_onnx_env): @@ -23,20 +23,20 @@ def test_resnet18_archive_convert(rvc2_resnet18_archive_env): 
check_convert(rvc2_resnet18_archive_env) -def test_resnet18_infer(rvc2_resnet18_onnx_env): - resnet18_infer(rvc2_resnet18_onnx_env) +def test_resnet18_infer(rvc2_resnet18_onnx_env, tool_version): + resnet18_infer(rvc2_resnet18_onnx_env, tool_version) -def test_resnet18_archive_infer(rvc2_resnet18_archive_env): - resnet18_infer(rvc2_resnet18_archive_env) +def test_resnet18_archive_infer(rvc2_resnet18_archive_env, tool_version): + resnet18_infer(rvc2_resnet18_archive_env, tool_version) def test_yolov6_convert(rvc2_yolov6n_onnx_env): check_convert(rvc2_yolov6n_onnx_env) -def test_yolov6n_infer(rvc2_yolov6n_onnx_env): - yolov6n_infer(rvc2_yolov6n_onnx_env) +def test_yolov6n_infer(rvc2_yolov6n_onnx_env, tool_version): + yolov6n_infer(rvc2_yolov6n_onnx_env, tool_version) def test_resnet18_superblob_convert(rvc2_superblob_resnet18_onnx_env): diff --git a/tests/test_packages/test_rvc3.py b/tests/test_packages/test_rvc3.py index 1848fa8..35026f4 100644 --- a/tests/test_packages/test_rvc3.py +++ b/tests/test_packages/test_rvc3.py @@ -1,18 +1,16 @@ -import pytest - -from .common import check_convert, mnist_infer, resnet18_infer, yolov6n_infer +from .common import check_convert, mnist_infer, resnet18_infer def test_mnist_convert(rvc3_mnist_onnx_env): check_convert(rvc3_mnist_onnx_env) -def test_mnist_infer(rvc3_mnist_onnx_env): - mnist_infer(rvc3_mnist_onnx_env) +def test_mnist_infer(rvc3_mnist_onnx_env, tool_version): + mnist_infer(rvc3_mnist_onnx_env, tool_version) -def test_mnist_infer_quant(rvc3_quant_mnist_onnx_env): - mnist_infer(rvc3_quant_mnist_onnx_env) +def test_mnist_infer_quant(rvc3_quant_mnist_onnx_env, tool_version): + mnist_infer(rvc3_quant_mnist_onnx_env, tool_version) def test_resnet18_convert(rvc3_resnet18_onnx_env): @@ -31,29 +29,9 @@ def test_resnet18_archive_convert(rvc3_resnet18_archive_env): check_convert(rvc3_resnet18_archive_env) -def test_resnet18_infer(rvc3_resnet18_onnx_env): - resnet18_infer(rvc3_resnet18_onnx_env) - - -def 
test_resnet18_archive_infer(rvc3_resnet18_archive_env): - resnet18_infer(rvc3_resnet18_archive_env) - - -@pytest.mark.skip(reason="Cannot be converted for RVC3") -def test_yolov6_convert(rvc3_yolov6n_env): - check_convert(rvc3_yolov6n_env) - - -@pytest.mark.skip(reason="Cannot be converted for RVC3") -def test_yolov6n_infer(rvc3_yolov6n_onnx_env): - yolov6n_infer(rvc3_yolov6n_onnx_env) - - -@pytest.mark.xfail(reason="Too degraded accuracy") -def test_resnet18_infer_quant(rvc3_quant_resnet18_onnx_env): - resnet18_infer(rvc3_quant_resnet18_onnx_env) +def test_resnet18_infer(rvc3_resnet18_onnx_env, tool_version): + resnet18_infer(rvc3_resnet18_onnx_env, tool_version) -@pytest.mark.skip(reason="Cannot be converted for RVC3") -def test_yolov6n_infer_quant(rvc3_quant_yolov6n_onnx_env): - yolov6n_infer(rvc3_quant_yolov6n_onnx_env) +def test_resnet18_archive_infer(rvc3_resnet18_archive_env, tool_version): + resnet18_infer(rvc3_resnet18_archive_env, tool_version) diff --git a/tests/test_packages/test_rvc4.py b/tests/test_packages/test_rvc4.py index 1bd8274..54f1eb4 100644 --- a/tests/test_packages/test_rvc4.py +++ b/tests/test_packages/test_rvc4.py @@ -10,37 +10,39 @@ def test_mnist_convert(rvc4_mnist_onnx_env): check_convert(rvc4_mnist_onnx_env) -def test_mnist_infer(rvc4_mnist_onnx_env): - mnist_infer(rvc4_mnist_onnx_env) +def test_mnist_infer(rvc4_mnist_onnx_env, tool_version): + mnist_infer(rvc4_mnist_onnx_env, tool_version) def test_resnet18_convert(rvc4_resnet18_onnx_env): check_convert(rvc4_resnet18_onnx_env) -def test_resnet18_infer(rvc4_resnet18_onnx_env): - resnet18_infer(rvc4_resnet18_onnx_env) +def test_resnet18_infer(rvc4_resnet18_onnx_env, tool_version): + resnet18_infer(rvc4_resnet18_onnx_env, tool_version) def test_resnet18_non_quant_convert(rvc4_non_quant_resnet18_onnx_env): check_convert(rvc4_non_quant_resnet18_onnx_env) -def test_resnet18_non_quant_infer(rvc4_non_quant_resnet18_onnx_env): - resnet18_infer(rvc4_non_quant_resnet18_onnx_env) +def 
test_resnet18_non_quant_infer( + rvc4_non_quant_resnet18_onnx_env, tool_version +): + resnet18_infer(rvc4_non_quant_resnet18_onnx_env, tool_version) def test_resnet18_archive_convert(rvc4_resnet18_archive_env): check_convert(rvc4_resnet18_archive_env) -def test_resnet18_archive_infer(rvc4_resnet18_archive_env): - resnet18_infer(rvc4_resnet18_archive_env) +def test_resnet18_archive_infer(rvc4_resnet18_archive_env, tool_version): + resnet18_infer(rvc4_resnet18_archive_env, tool_version) def test_yolov6_convert(rvc4_yolov6n_onnx_env): check_convert(rvc4_yolov6n_onnx_env) -def test_yolov6n_infer(rvc4_yolov6n_onnx_env): - yolov6n_infer(rvc4_yolov6n_onnx_env) +def test_yolov6n_infer(rvc4_yolov6n_onnx_env, tool_version): + yolov6n_infer(rvc4_yolov6n_onnx_env, tool_version)