From d059b545fcde1c95614590a4cdf8b8e10eca0f79 Mon Sep 17 00:00:00 2001 From: Yury Date: Sat, 17 Aug 2024 11:46:42 +0300 Subject: [PATCH 1/5] Update CUDA python requirements with onnxruntime 1.19 update --- server/requirements-cuda.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/requirements-cuda.txt b/server/requirements-cuda.txt index bd95d39c2..2783f4cb1 100644 --- a/server/requirements-cuda.txt +++ b/server/requirements-cuda.txt @@ -3,9 +3,9 @@ # # wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh # # bash Anaconda3-2022.10-Linux-x86_64.sh -# PyPI onnxruntime-gpu is compiled with CUDA 11.x ---extra-index-url https://download.pytorch.org/whl/cu118 -# torch 2.4.0 has problems with Linux builds +# PyPI onnxruntime-gpu>1.19 is compiled with CUDA 12.x and cuDNN 8.x +--extra-index-url https://download.pytorch.org/whl/cu121 +# torch>2.4.0 is compiled with cuDNN 9.x torch==2.3.1 torchaudio faiss-cpu==1.8.0; sys_platform!='linux' From a74fc4463797f82d5a837a08fe98501570a3a8c7 Mon Sep 17 00:00:00 2001 From: Yury Date: Sat, 17 Aug 2024 13:20:01 +0300 Subject: [PATCH 2/5] Try torch 2.4.0 with latest onnxruntime --- server/requirements-cuda.txt | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/server/requirements-cuda.txt b/server/requirements-cuda.txt index 2783f4cb1..8e3081761 100644 --- a/server/requirements-cuda.txt +++ b/server/requirements-cuda.txt @@ -3,10 +3,9 @@ # # wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh # # bash Anaconda3-2022.10-Linux-x86_64.sh -# PyPI onnxruntime-gpu>1.19 is compiled with CUDA 12.x and cuDNN 8.x +# PyPI onnxruntime-gpu>=1.19 is compiled with CUDA 12.x and cuDNN 9.x --extra-index-url https://download.pytorch.org/whl/cu121 -# torch>2.4.0 is compiled with cuDNN 9.x -torch==2.3.1 +torch>=2.4.0 torchaudio faiss-cpu==1.8.0; sys_platform!='linux' faiss-gpu; sys_platform=='linux' From 543ff0b530e58185e4388e0ab992a4150e554eaa Mon Sep 17 00:00:00 
2001 From: Yury Date: Sun, 18 Aug 2024 13:18:49 +0300 Subject: [PATCH 3/5] Reorder pytorch and onnxruntime imports --- server/requirements-dml.txt | 2 +- .../RVC/pitchExtractor/CrepeOnnxPitchExtractor.py | 2 +- .../RVC/pitchExtractor/FcpeOnnxPitchExtractor.py | 2 +- .../RVC/pitchExtractor/RMVPEOnnxPitchExtractor.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/server/requirements-dml.txt b/server/requirements-dml.txt index c7a6cf7ea..ba2a907a5 100644 --- a/server/requirements-dml.txt +++ b/server/requirements-dml.txt @@ -3,7 +3,7 @@ # # wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh # # bash Anaconda3-2022.10-Linux-x86_64.sh -torch==2.3.1 # torch-directml-0.2.2.dev240614 supports up to to 2.3.1 +torch==2.3.1 # torch-directml-0.2.4.dev240815 supports torch up to 2.3.1 torchaudio torch-directml faiss-cpu==1.8.0 diff --git a/server/voice_changer/RVC/pitchExtractor/CrepeOnnxPitchExtractor.py b/server/voice_changer/RVC/pitchExtractor/CrepeOnnxPitchExtractor.py index 958c288e1..a101b9b6e 100644 --- a/server/voice_changer/RVC/pitchExtractor/CrepeOnnxPitchExtractor.py +++ b/server/voice_changer/RVC/pitchExtractor/CrepeOnnxPitchExtractor.py @@ -1,9 +1,9 @@ import numpy as np import torch +import onnxruntime from const import PitchExtractorType, F0_MIN, F0_MAX from voice_changer.common.deviceManager.DeviceManager import DeviceManager from voice_changer.RVC.pitchExtractor.PitchExtractor import PitchExtractor -import onnxruntime from voice_changer.RVC.pitchExtractor import onnxcrepe diff --git a/server/voice_changer/RVC/pitchExtractor/FcpeOnnxPitchExtractor.py b/server/voice_changer/RVC/pitchExtractor/FcpeOnnxPitchExtractor.py index 33e27c67c..6abf2d6c8 100644 --- a/server/voice_changer/RVC/pitchExtractor/FcpeOnnxPitchExtractor.py +++ b/server/voice_changer/RVC/pitchExtractor/FcpeOnnxPitchExtractor.py @@ -1,6 +1,6 @@ import numpy as np -import onnxruntime import torch +import onnxruntime from const import PitchExtractorType
from voice_changer.RVC.pitchExtractor.PitchExtractor import PitchExtractor from voice_changer.common.deviceManager.DeviceManager import DeviceManager diff --git a/server/voice_changer/RVC/pitchExtractor/RMVPEOnnxPitchExtractor.py b/server/voice_changer/RVC/pitchExtractor/RMVPEOnnxPitchExtractor.py index 700095202..47396c59c 100644 --- a/server/voice_changer/RVC/pitchExtractor/RMVPEOnnxPitchExtractor.py +++ b/server/voice_changer/RVC/pitchExtractor/RMVPEOnnxPitchExtractor.py @@ -1,11 +1,11 @@ import numpy as np +import torch +import onnxruntime from const import PitchExtractorType from voice_changer.common.OnnxLoader import load_onnx_model from voice_changer.RVC.pitchExtractor.PitchExtractor import PitchExtractor from voice_changer.common.deviceManager.DeviceManager import DeviceManager from voice_changer.common.MelExtractor import MelSpectrogram -import onnxruntime -import torch class RMVPEOnnxPitchExtractor(PitchExtractor): From b372636f5f5eb732ed23c3c459bdbbcd6d3a8a72 Mon Sep 17 00:00:00 2001 From: Yury Date: Fri, 23 Aug 2024 17:56:29 +0300 Subject: [PATCH 4/5] Reset CUDNN_PATH too --- server/app.py | 1 + 1 file changed, 1 insertion(+) diff --git a/server/app.py b/server/app.py index f3534e5a5..e03bfc283 100644 --- a/server/app.py +++ b/server/app.py @@ -9,6 +9,7 @@ # Reset CUDA_PATH since all libraries are already bundled. # Existing CUDA installations may be incompatible with PyTorch or ONNX runtime os.environ['CUDA_PATH'] = '' +os.environ['CUDNN_PATH'] = '' # Fix high CPU usage caused by faiss-cpu for AMD CPUs. 
# https://github.com/facebookresearch/faiss/issues/53#issuecomment-288351188 os.environ['OMP_WAIT_POLICY'] = 'PASSIVE' From 83d7b6f2f30ac18c0a1daec6a811aa0092f98270 Mon Sep 17 00:00:00 2001 From: Yury Date: Fri, 23 Aug 2024 17:56:46 +0300 Subject: [PATCH 5/5] Add symlinks to CUDA libraries in Linux build --- .github/workflows/build-executable.yml | 5 +++++ .github/workflows/make-release.yml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.github/workflows/build-executable.yml b/.github/workflows/build-executable.yml index 425f40127..a16a99168 100644 --- a/.github/workflows/build-executable.yml +++ b/.github/workflows/build-executable.yml @@ -83,6 +83,11 @@ jobs: run: cp ./server/{force_gpu_clocks.bat,reset_gpu_clocks.bat} ./server/dist/ shell: bash if: matrix.os == 'windows-latest' && matrix.backend == 'cuda' + - name: Add CUDA library symlinks + run: ln -svf nvidia/*/lib/*.so* . + shell: bash + if: matrix.os == 'ubuntu-20.04' && matrix.backend == 'cuda' + working-directory: ./server/dist/MMVCServerSIO/_internal - name: Pack artifact shell: bash run: | diff --git a/.github/workflows/make-release.yml b/.github/workflows/make-release.yml index abdc38437..b735cd88d 100644 --- a/.github/workflows/make-release.yml +++ b/.github/workflows/make-release.yml @@ -122,6 +122,11 @@ jobs: run: cp ./server/{force_gpu_clocks.bat,reset_gpu_clocks.bat} ./server/dist/ shell: bash if: matrix.os == 'windows-latest' && matrix.backend == 'cuda' + - name: Add CUDA library symlinks + run: ln -svf nvidia/*/lib/*.so* . + shell: bash + if: matrix.os == 'ubuntu-20.04' && matrix.backend == 'cuda' + working-directory: ./server/dist/MMVCServerSIO/_internal - name: Pack artifact shell: bash run: |